diff --git a/.gitignore b/.gitignore
index 46d3499e5..0d0dcf8b0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
*.autosave
*.pyc
*.user
+*~
.*.swp
.DS_Store
.sw[a-z]
diff --git a/.tgitconfig b/.tgitconfig
new file mode 100644
index 000000000..5fa522d23
--- /dev/null
+++ b/.tgitconfig
@@ -0,0 +1,2 @@
+[tgit]
+ icon = doc/opencv.ico
diff --git a/3rdparty/include/opencl/1.2/CL/cl.hpp b/3rdparty/include/opencl/1.2/CL/cl.hpp
index 0480e3116..2502d4c52 100644
--- a/3rdparty/include/opencl/1.2/CL/cl.hpp
+++ b/3rdparty/include/opencl/1.2/CL/cl.hpp
@@ -210,7 +210,7 @@
#include
#endif
-#if defined(linux) || defined(__APPLE__) || defined(__MACOSX)
+#if defined(__linux__) || defined(__APPLE__) || defined(__MACOSX)
#include
#include
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so
index 5b618a874..aac6634b4 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so
index 846fc88bd..d523f69de 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so
index 80bf459cc..e386bf4f9 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so
index e5cc7d296..028ab7d1e 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so
index d3cf3b124..48cbdd096 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so
index 6498151ba..7fe50875c 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so
index 58bef3455..15827d818 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so
index ce69b52ea..ec1edfb04 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so
index 3e65fb171..4d777edf8 100755
Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so and b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so b/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so
index 68805b589..1707a8850 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so and b/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so b/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so
index 88ac3f7e3..fb4b125fd 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so and b/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so b/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so
index fa41cb250..96b264d0e 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so and b/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so
index a305c2b00..179eef9a9 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so and b/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so b/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so
index 8c34357cc..165dc463c 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so and b/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so b/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so
index a01ee15e2..a9a5d7da7 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so and b/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so
index a8ff89465..9037c6860 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so and b/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so
index aa1cfd844..026f0b48b 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so and b/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so
index 264f6f217..6aebec923 100755
Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so and b/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.0.3.so b/3rdparty/lib/mips/libnative_camera_r4.0.3.so
index 14dfaf23b..6dee89780 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.0.3.so and b/3rdparty/lib/mips/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.1.1.so b/3rdparty/lib/mips/libnative_camera_r4.1.1.so
index a37474256..71a6354ac 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.1.1.so and b/3rdparty/lib/mips/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.2.0.so b/3rdparty/lib/mips/libnative_camera_r4.2.0.so
index 31cbb3a99..21bcffb4a 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.2.0.so and b/3rdparty/lib/mips/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.3.0.so b/3rdparty/lib/mips/libnative_camera_r4.3.0.so
index 379fc7003..653c2f1ca 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.3.0.so and b/3rdparty/lib/mips/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/mips/libnative_camera_r4.4.0.so b/3rdparty/lib/mips/libnative_camera_r4.4.0.so
index 0f6c83713..8d6fdf2bc 100755
Binary files a/3rdparty/lib/mips/libnative_camera_r4.4.0.so and b/3rdparty/lib/mips/libnative_camera_r4.4.0.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r2.3.3.so b/3rdparty/lib/x86/libnative_camera_r2.3.3.so
index 5c46b1607..a47b8b2ce 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r2.3.3.so and b/3rdparty/lib/x86/libnative_camera_r2.3.3.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r3.0.1.so b/3rdparty/lib/x86/libnative_camera_r3.0.1.so
index 77512e5de..faa13461f 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r3.0.1.so and b/3rdparty/lib/x86/libnative_camera_r3.0.1.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.0.3.so b/3rdparty/lib/x86/libnative_camera_r4.0.3.so
index b5de08299..2d2fb8eb1 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.0.3.so and b/3rdparty/lib/x86/libnative_camera_r4.0.3.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.1.1.so b/3rdparty/lib/x86/libnative_camera_r4.1.1.so
index 867137410..f40da0d9d 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.1.1.so and b/3rdparty/lib/x86/libnative_camera_r4.1.1.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.2.0.so b/3rdparty/lib/x86/libnative_camera_r4.2.0.so
index 52e9a5792..0d4ac03b5 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.2.0.so and b/3rdparty/lib/x86/libnative_camera_r4.2.0.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.3.0.so b/3rdparty/lib/x86/libnative_camera_r4.3.0.so
index af898ccad..7e1c5803a 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.3.0.so and b/3rdparty/lib/x86/libnative_camera_r4.3.0.so differ
diff --git a/3rdparty/lib/x86/libnative_camera_r4.4.0.so b/3rdparty/lib/x86/libnative_camera_r4.4.0.so
index 108862f56..37ab6d080 100755
Binary files a/3rdparty/lib/x86/libnative_camera_r4.4.0.so and b/3rdparty/lib/x86/libnative_camera_r4.4.0.so differ
diff --git a/3rdparty/libjasper/CMakeLists.txt b/3rdparty/libjasper/CMakeLists.txt
index dda9cd255..7b3dcb08a 100644
--- a/3rdparty/libjasper/CMakeLists.txt
+++ b/3rdparty/libjasper/CMakeLists.txt
@@ -46,5 +46,5 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
+ ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
diff --git a/3rdparty/libjpeg/CMakeLists.txt b/3rdparty/libjpeg/CMakeLists.txt
index 8d622f24f..02d71ade2 100644
--- a/3rdparty/libjpeg/CMakeLists.txt
+++ b/3rdparty/libjpeg/CMakeLists.txt
@@ -39,5 +39,5 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
+ ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt
index 42b6263e5..8d3d5f497 100644
--- a/3rdparty/libpng/CMakeLists.txt
+++ b/3rdparty/libpng/CMakeLists.txt
@@ -55,5 +55,5 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
+ ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
diff --git a/3rdparty/libtiff/CMakeLists.txt b/3rdparty/libtiff/CMakeLists.txt
index 6c34fb5e3..addbb5551 100644
--- a/3rdparty/libtiff/CMakeLists.txt
+++ b/3rdparty/libtiff/CMakeLists.txt
@@ -115,5 +115,5 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
+ ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
diff --git a/3rdparty/libtiff/tif_config.h.cmakein b/3rdparty/libtiff/tif_config.h.cmakein
index 182f2833d..24f58119b 100644
--- a/3rdparty/libtiff/tif_config.h.cmakein
+++ b/3rdparty/libtiff/tif_config.h.cmakein
@@ -54,7 +54,7 @@
/* Native cpu byte order: 1 if big-endian (Motorola) or 0 if little-endian
(Intel) */
-#define HOST_BIGENDIAN 0
+#define HOST_BIGENDIAN @WORDS_BIGENDIAN@
/* Set the native cpu bit order (FILLORDER_LSB2MSB or FILLORDER_MSB2LSB) */
#define HOST_FILLORDER FILLORDER_LSB2MSB
@@ -156,15 +156,7 @@
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
-#if defined AC_APPLE_UNIVERSAL_BUILD
-# if defined __BIG_ENDIAN__
-# define WORDS_BIGENDIAN 1
-# endif
-#else
-# ifndef WORDS_BIGENDIAN
-/* # undef WORDS_BIGENDIAN */
-# endif
-#endif
+#cmakedefine WORDS_BIGENDIAN 1
/* Support Deflate compression */
#define ZIP_SUPPORT 1
diff --git a/3rdparty/openexr/CMakeLists.txt b/3rdparty/openexr/CMakeLists.txt
index 2b11436e1..c4facad2f 100644
--- a/3rdparty/openexr/CMakeLists.txt
+++ b/3rdparty/openexr/CMakeLists.txt
@@ -62,7 +62,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
+ ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
set(OPENEXR_INCLUDE_PATHS ${OPENEXR_INCLUDE_PATHS} PARENT_SCOPE)
diff --git a/3rdparty/tbb/CMakeLists.txt b/3rdparty/tbb/CMakeLists.txt
index 272c195b3..e16f6cd38 100644
--- a/3rdparty/tbb/CMakeLists.txt
+++ b/3rdparty/tbb/CMakeLists.txt
@@ -248,9 +248,9 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
ocv_install_target(tbb EXPORT OpenCVModules
- RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT main
- LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main
- ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main
+ RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs
+ LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs
+ ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev
)
# get TBB version
diff --git a/3rdparty/zlib/CMakeLists.txt b/3rdparty/zlib/CMakeLists.txt
index f1b28fd39..410f2420b 100644
--- a/3rdparty/zlib/CMakeLists.txt
+++ b/3rdparty/zlib/CMakeLists.txt
@@ -95,5 +95,5 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(${ZLIB_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT main)
+ ocv_install_target(${ZLIB_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2973f4ca0..b610ecf97 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -128,6 +128,7 @@ OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O" ON IF IOS)
OCV_OPTION(WITH_CARBON "Use Carbon for UI instead of Cocoa" OFF IF APPLE )
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT IOS) )
+OCV_OPTION(WITH_VTK "Include VTK library support (and build opencv_viz module eiher)" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT IOS) )
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT IOS) )
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OFF IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT ANDROID AND NOT IOS AND NOT APPLE) )
@@ -141,7 +142,7 @@ OCV_OPTION(WITH_IPP "Include Intel IPP support" OFF
OCV_OPTION(WITH_JASPER "Include JPEG2K support" ON IF (NOT IOS) )
OCV_OPTION(WITH_JPEG "Include JPEG support" ON)
OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ON IF (NOT IOS) )
-OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID AND NOT APPLE) )
+OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID) )
OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_PNG "Include PNG support" ON)
OCV_OPTION(WITH_PVAPI "Include Prosilica GigE support" ON IF (NOT ANDROID AND NOT IOS) )
@@ -197,7 +198,7 @@ OCV_OPTION(INSTALL_C_EXAMPLES "Install C examples" OFF )
OCV_OPTION(INSTALL_PYTHON_EXAMPLES "Install Python examples" OFF )
OCV_OPTION(INSTALL_ANDROID_EXAMPLES "Install Android examples" OFF IF ANDROID )
OCV_OPTION(INSTALL_TO_MANGLED_PATHS "Enables mangled install paths, that help with side by side installs." OFF IF (UNIX AND NOT ANDROID AND NOT IOS AND BUILD_SHARED_LIBS) )
-
+OCV_OPTION(INSTALL_TESTS "Install accuracy and performance test binaries and test data" OFF)
# OpenCV build options
# ===================================================
@@ -205,6 +206,7 @@ OCV_OPTION(ENABLE_DYNAMIC_CUDA "Enabled dynamic CUDA linkage"
OCV_OPTION(ENABLE_PRECOMPILED_HEADERS "Use precompiled headers" ON IF (NOT IOS) )
OCV_OPTION(ENABLE_SOLUTION_FOLDERS "Solution folder in Visual Studio or in other IDEs" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) IF (CMAKE_VERSION VERSION_GREATER "2.8.0") )
OCV_OPTION(ENABLE_PROFILING "Enable profiling in the GCC compiler (Add flags: -g -pg)" OFF IF CMAKE_COMPILER_IS_GNUCXX )
+OCV_OPTION(ENABLE_COVERAGE "Enable coverage collection with GCov" OFF IF CMAKE_COMPILER_IS_GNUCXX )
OCV_OPTION(ENABLE_OMIT_FRAME_POINTER "Enable -fomit-frame-pointer for GCC" ON IF CMAKE_COMPILER_IS_GNUCXX AND NOT (APPLE AND CMAKE_COMPILER_IS_CLANGCXX) )
OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" ON IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES powerpc.*) )
OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
@@ -220,6 +222,7 @@ OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions"
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
+OCV_OPTION(ENABLE_WINRT_MODE_NATIVE "Build with Windows Runtime native C++ support" OFF IF WIN32 )
# uncategorized options
# ===================================================
@@ -267,13 +270,27 @@ if(WIN32)
message(STATUS "Can't detect runtime and/or arch")
set(OpenCV_INSTALL_BINARIES_PREFIX "")
endif()
+elseif(ANDROID)
+ set(OpenCV_INSTALL_BINARIES_PREFIX "sdk/native/")
else()
set(OpenCV_INSTALL_BINARIES_PREFIX "")
endif()
-set(OPENCV_SAMPLES_BIN_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}samples")
+if(ANDROID)
+ set(OPENCV_SAMPLES_BIN_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}samples/${ANDROID_NDK_ABI_NAME}")
+else()
+ set(OPENCV_SAMPLES_BIN_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}samples")
+endif()
-set(OPENCV_BIN_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}bin")
+if(ANDROID)
+ set(OPENCV_BIN_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}bin/${ANDROID_NDK_ABI_NAME}")
+else()
+ set(OPENCV_BIN_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}bin")
+endif()
+
+if(NOT OPENCV_TEST_INSTALL_PATH)
+ set(OPENCV_TEST_INSTALL_PATH "${OPENCV_BIN_INSTALL_PATH}")
+endif()
if(ANDROID)
set(LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/lib/${ANDROID_NDK_ABI_NAME}")
@@ -282,6 +299,7 @@ if(ANDROID)
set(OPENCV_3P_LIB_INSTALL_PATH sdk/native/3rdparty/libs/${ANDROID_NDK_ABI_NAME})
set(OPENCV_CONFIG_INSTALL_PATH sdk/native/jni)
set(OPENCV_INCLUDE_INSTALL_PATH sdk/native/jni/include)
+ set(OPENCV_SAMPLES_SRC_INSTALL_PATH samples/native)
else()
set(LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/lib")
set(3P_LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/3rdparty/lib${LIB_SUFFIX}")
@@ -292,9 +310,11 @@ else()
set(OPENCV_LIB_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}lib${LIB_SUFFIX}")
endif()
set(OPENCV_3P_LIB_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib${LIB_SUFFIX}")
+ set(OPENCV_SAMPLES_SRC_INSTALL_PATH samples/native)
else()
set(OPENCV_LIB_INSTALL_PATH lib${LIB_SUFFIX})
set(OPENCV_3P_LIB_INSTALL_PATH share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH})
+ set(OPENCV_SAMPLES_SRC_INSTALL_PATH share/OpenCV/samples)
endif()
set(OPENCV_INCLUDE_INSTALL_PATH "include")
@@ -425,6 +445,19 @@ endif()
include(cmake/OpenCVPCHSupport.cmake)
include(cmake/OpenCVModule.cmake)
+# ----------------------------------------------------------------------------
+# Detect endianness of build platform
+# ----------------------------------------------------------------------------
+
+if(CMAKE_SYSTEM_NAME STREQUAL iOS)
+ # test_big_endian needs try_compile, which doesn't work for iOS
+ # http://public.kitware.com/Bug/view.php?id=12288
+ set(WORDS_BIGENDIAN 0)
+else()
+ include(TestBigEndian)
+ test_big_endian(WORDS_BIGENDIAN)
+endif()
+
# ----------------------------------------------------------------------------
# Detect 3rd-party libraries
# ----------------------------------------------------------------------------
@@ -434,7 +467,6 @@ include(cmake/OpenCVFindLibsGUI.cmake)
include(cmake/OpenCVFindLibsVideo.cmake)
include(cmake/OpenCVFindLibsPerf.cmake)
-
# ----------------------------------------------------------------------------
# Detect other 3rd-party libraries/tools
# ----------------------------------------------------------------------------
@@ -470,6 +502,9 @@ if(WITH_OPENCL)
include(cmake/OpenCVDetectOpenCL.cmake)
endif()
+# --- VTK support ---
+include(cmake/OpenCVDetectVTK.cmake)
+
# ----------------------------------------------------------------------------
# Add CUDA libraries (needed for apps/tools, samples)
# ----------------------------------------------------------------------------
@@ -557,6 +592,49 @@ include(cmake/OpenCVGenConfig.cmake)
# Generate Info.plist for the IOS framework
include(cmake/OpenCVGenInfoPlist.cmake)
+# Generate environment setup file
+if(INSTALL_TESTS AND OPENCV_TEST_DATA_PATH AND UNIX)
+ if(ANDROID)
+ get_filename_component(TEST_PATH ${OPENCV_TEST_INSTALL_PATH} DIRECTORY)
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_run_all_tests_android.sh.in"
+ "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh" @ONLY)
+ install(PROGRAMS "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh"
+ DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT tests)
+ else()
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_testing.sh.in"
+ "${CMAKE_BINARY_DIR}/unix-install/opencv_testing.sh" @ONLY)
+ install(FILES "${CMAKE_BINARY_DIR}/unix-install/opencv_testing.sh"
+ DESTINATION /etc/profile.d/ COMPONENT tests)
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_run_all_tests_unix.sh.in"
+ "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh" @ONLY)
+ install(PROGRAMS "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh"
+ DESTINATION ${OPENCV_TEST_INSTALL_PATH} COMPONENT tests)
+
+ endif()
+endif()
+
+if(NOT OPENCV_README_FILE)
+ if(ANDROID)
+ set(OPENCV_README_FILE ${CMAKE_CURRENT_SOURCE_DIR}/platforms/android/README.android)
+ endif()
+endif()
+
+if(NOT OPENCV_LICENSE_FILE)
+ set(OPENCV_LICENSE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)
+endif()
+
+# for UNIX it does not make sense as LICENSE and readme will be part of the package automatically
+if(ANDROID OR NOT UNIX)
+ install(FILES ${OPENCV_LICENSE_FILE}
+ PERMISSIONS OWNER_READ GROUP_READ WORLD_READ
+ DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT libs)
+ if(OPENCV_README_FILE)
+ install(FILES ${OPENCV_README_FILE}
+ PERMISSIONS OWNER_READ GROUP_READ WORLD_READ
+ DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT libs)
+ endif()
+endif()
+
# ----------------------------------------------------------------------------
# Summary:
# ----------------------------------------------------------------------------
@@ -666,7 +744,7 @@ endif()
if(WIN32)
status("")
status(" Windows RT support:" HAVE_WINRT THEN YES ELSE NO)
- if (ENABLE_WINRT_MODE)
+ if (ENABLE_WINRT_MODE OR ENABLE_WINRT_MODE_NATIVE)
status(" Windows SDK v8.0:" ${WINDOWS_SDK_PATH})
status(" Visual Studio 2012:" ${VISUAL_STUDIO_PATH})
endif()
@@ -704,6 +782,7 @@ else()
endif()
status(" OpenGL support:" HAVE_OPENGL THEN "YES (${OPENGL_LIBRARIES})" ELSE NO)
+status(" VTK support:" HAVE_VTK THEN "YES (ver ${VTK_VERSION})" ELSE NO)
# ========================== MEDIA IO ==========================
status("")
@@ -974,3 +1053,9 @@ ocv_finalize_status()
if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
message(WARNING "The source directory is the same as binary directory. \"make clean\" may damage the source tree")
endif()
+
+# ----------------------------------------------------------------------------
+# CPack stuff
+# ----------------------------------------------------------------------------
+
+include(cmake/OpenCVPackaging.cmake)
diff --git a/doc/license.txt b/LICENSE
similarity index 65%
rename from doc/license.txt
rename to LICENSE
index 8824228d0..5e32d88b4 100644
--- a/doc/license.txt
+++ b/LICENSE
@@ -1,16 +1,11 @@
-IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-
- By downloading, copying, installing or using the software you agree to this license.
- If you do not agree to this license, do not download, install,
- copy or use the software.
+By downloading, copying, installing or using the software you agree to this license.
+If you do not agree to this license, do not download, install,
+copy or use the software.
License Agreement
For Open Source Computer Vision Library
-
-Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
-Third party copyrights are property of their respective owners.
+ (3-clause BSD License)
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
@@ -22,13 +17,14 @@ are permitted provided that the following conditions are met:
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- * The name of the copyright holders may not be used to endorse or promote products
- derived from this software without specific prior written permission.
+ * Neither the names of the copyright holders nor the names of the contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
-In no event shall the Intel Corporation or contributors be liable for any direct,
+In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
diff --git a/README.md b/README.md
index 403f118ee..3a26ad855 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
### OpenCV: Open Source Computer Vision Library
+[![Gittip](http://img.shields.io/gittip/OpenCV.png)](https://www.gittip.com/OpenCV/)
+
#### Resources
* Homepage:
@@ -18,6 +20,3 @@ Summary of guidelines:
* Include tests and documentation;
* Clean up "oops" commits before submitting;
* Follow the coding style guide.
-
-[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/gittip1.png)](https://www.gittip.com/OpenCV/)
-[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/paypal-donate-button.png)](https://www.paypal.com/cgi-bin/webscr?item_name=Donation+to+OpenCV&cmd=_donations&business=accountant%40opencv.org)
\ No newline at end of file
diff --git a/apps/haartraining/CMakeLists.txt b/apps/haartraining/CMakeLists.txt
index cdc280556..d8a3c55c8 100644
--- a/apps/haartraining/CMakeLists.txt
+++ b/apps/haartraining/CMakeLists.txt
@@ -71,14 +71,14 @@ set_target_properties(opencv_performance PROPERTIES
if(INSTALL_CREATE_DISTRIB)
if(BUILD_SHARED_LIBS)
- install(TARGETS opencv_haartraining RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT main)
- install(TARGETS opencv_createsamples RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT main)
- install(TARGETS opencv_performance RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT main)
+ install(TARGETS opencv_haartraining RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
+ install(TARGETS opencv_createsamples RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
+ install(TARGETS opencv_performance RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
endif()
else()
- install(TARGETS opencv_haartraining RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT main)
- install(TARGETS opencv_createsamples RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT main)
- install(TARGETS opencv_performance RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT main)
+ install(TARGETS opencv_haartraining RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
+ install(TARGETS opencv_createsamples RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
+ install(TARGETS opencv_performance RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()
if(ENABLE_SOLUTION_FOLDERS)
diff --git a/apps/haartraining/cvclassifier.h b/apps/haartraining/cvclassifier.h
index df644ed17..3faec500a 100644
--- a/apps/haartraining/cvclassifier.h
+++ b/apps/haartraining/cvclassifier.h
@@ -338,7 +338,7 @@ typedef enum CvBoostType
CV_LKCLASS = 5, /* classification (K class problem) */
CV_LSREG = 6, /* least squares regression */
CV_LADREG = 7, /* least absolute deviation regression */
- CV_MREG = 8, /* M-regression (Huber loss) */
+ CV_MREG = 8 /* M-regression (Huber loss) */
} CvBoostType;
/****************************************************************************************\
diff --git a/apps/traincascade/CMakeLists.txt b/apps/traincascade/CMakeLists.txt
index 8f6fbe034..941c0ec71 100644
--- a/apps/traincascade/CMakeLists.txt
+++ b/apps/traincascade/CMakeLists.txt
@@ -35,8 +35,8 @@ endif()
if(INSTALL_CREATE_DISTRIB)
if(BUILD_SHARED_LIBS)
- install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT main)
+ install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
endif()
else()
- install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT main)
+ install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()
diff --git a/cmake/OpenCVCRTLinkage.cmake b/cmake/OpenCVCRTLinkage.cmake
index 8a297c685..5265e3e8a 100644
--- a/cmake/OpenCVCRTLinkage.cmake
+++ b/cmake/OpenCVCRTLinkage.cmake
@@ -9,7 +9,7 @@ set(HAVE_WINRT FALSE)
# search Windows Platform SDK
message(STATUS "Checking for Windows Platform SDK")
GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE)
-if (WINDOWS_SDK_PATH STREQUAL "")
+if(WINDOWS_SDK_PATH STREQUAL "")
set(HAVE_MSPDK FALSE)
message(STATUS "Windows Platform SDK 8.0 was not found")
else()
@@ -19,7 +19,7 @@ endif()
#search for Visual Studio 11.0 install directory
message(STATUS "Checking for Visual Studio 2012")
GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE)
-if (VISUAL_STUDIO_PATH STREQUAL "")
+if(VISUAL_STUDIO_PATH STREQUAL "")
set(HAVE_MSVC2012 FALSE)
message(STATUS "Visual Studio 2012 was not found")
else()
@@ -30,11 +30,15 @@ try_compile(HAVE_WINRT_SDK
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp")
-if (ENABLE_WINRT_MODE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
+if(ENABLE_WINRT_MODE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
set(HAVE_WINRT TRUE)
+ set(HAVE_WINRT_CX TRUE)
+elseif(ENABLE_WINRT_MODE_NATIVE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
+ set(HAVE_WINRT TRUE)
+ set(HAVE_WINRT_CX FALSE)
endif()
-if (HAVE_WINRT)
+if(HAVE_WINRT)
add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /appcontainer")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /appcontainer")
diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake
index a4b039280..d525609d1 100644
--- a/cmake/OpenCVCompilerOptions.cmake
+++ b/cmake/OpenCVCompilerOptions.cmake
@@ -187,6 +187,11 @@ if(CMAKE_COMPILER_IS_GNUCXX)
add_extra_compiler_option(-ffunction-sections)
endif()
+ if(ENABLE_COVERAGE)
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} --coverage")
+ set(OPENCV_EXTRA_CXX_FLAGS "${OPENCV_EXTRA_CXX_FLAGS} --coverage")
+ endif()
+
set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} -DNDEBUG")
set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -O0 -DDEBUG -D_DEBUG")
endif()
diff --git a/cmake/OpenCVDetectAndroidSDK.cmake b/cmake/OpenCVDetectAndroidSDK.cmake
index 393dbb62d..273758967 100644
--- a/cmake/OpenCVDetectAndroidSDK.cmake
+++ b/cmake/OpenCVDetectAndroidSDK.cmake
@@ -180,7 +180,7 @@ unset(__android_project_chain CACHE)
# add_android_project(target_name ${path} NATIVE_DEPS opencv_core LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11)
macro(add_android_project target path)
# parse arguments
- set(android_proj_arglist NATIVE_DEPS LIBRARY_DEPS SDK_TARGET IGNORE_JAVA IGNORE_MANIFEST)
+ set(android_proj_arglist NATIVE_DEPS LIBRARY_DEPS SDK_TARGET IGNORE_JAVA IGNORE_MANIFEST EMBED_CUDA FORCE_EMBED_OPENCV)
set(__varname "android_proj_")
foreach(v ${android_proj_arglist})
set(${__varname}${v} "")
@@ -303,6 +303,46 @@ macro(add_android_project target path)
add_custom_command(TARGET ${JNI_LIB_NAME} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "${android_proj_jni_location}")
endif()
endif()
+
+ # copy opencv_java, tbb if it is shared and dynamicuda if present if FORCE_EMBED_OPENCV flag is set
+ if(android_proj_FORCE_EMBED_OPENCV)
+ set(native_deps ${android_proj_NATIVE_DEPS})
+ # filter out gpu module as it is always static library on Android
+ list(REMOVE_ITEM native_deps "opencv_gpu")
+ if(ENABLE_DYNAMIC_CUDA)
+ list(APPEND native_deps "opencv_dynamicuda")
+ endif()
+ foreach(lib ${native_deps})
+ get_property(f TARGET ${lib} PROPERTY LOCATION)
+ get_filename_component(f_name ${f} NAME)
+ add_custom_command(
+ OUTPUT "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}/${f_name}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${f}" "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}/${f_name}"
+ DEPENDS "${lib}" VERBATIM
+ COMMENT "Embedding ${f}")
+ list(APPEND android_proj_file_deps "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}/${f_name}")
+ endforeach()
+ endif()
+
+ # copy all needed CUDA libs to project if EMBED_CUDA flag is present
+ if(android_proj_EMBED_CUDA)
+ set(android_proj_culibs ${CUDA_npp_LIBRARY_ABS} ${CUDA_LIBRARIES_ABS})
+ if(HAVE_CUFFT)
+ list(INSERT android_proj_culibs 0 ${CUDA_cufft_LIBRARY_ABS})
+ endif()
+ if(HAVE_CUBLAS)
+ list(INSERT android_proj_culibs 0 ${CUDA_cublas_LIBRARY_ABS})
+ endif()
+ foreach(lib ${android_proj_culibs})
+ get_filename_component(f "${lib}" NAME)
+ add_custom_command(
+ OUTPUT "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}/${f}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${lib}" "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}/${f}"
+ DEPENDS "${lib}" VERBATIM
+ COMMENT "Embedding ${f}")
+ list(APPEND android_proj_file_deps "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}/${f}")
+ endforeach()
+ endif()
endif()
# build java part
@@ -344,20 +384,20 @@ macro(add_android_project target path)
add_custom_command(TARGET ${target} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${android_proj_bin_dir}/bin/${target}-debug.apk" "${OpenCV_BINARY_DIR}/bin/${target}.apk")
if(INSTALL_ANDROID_EXAMPLES AND "${target}" MATCHES "^example-")
#apk
- install(FILES "${OpenCV_BINARY_DIR}/bin/${target}.apk" DESTINATION "samples" COMPONENT main)
+ install(FILES "${OpenCV_BINARY_DIR}/bin/${target}.apk" DESTINATION "samples" COMPONENT samples)
get_filename_component(sample_dir "${path}" NAME)
#java part
list(REMOVE_ITEM android_proj_files ${ANDROID_MANIFEST_FILE})
foreach(f ${android_proj_files} ${ANDROID_MANIFEST_FILE})
get_filename_component(install_subdir "${f}" PATH)
- install(FILES "${android_proj_bin_dir}/${f}" DESTINATION "samples/${sample_dir}/${install_subdir}" COMPONENT main)
+ install(FILES "${android_proj_bin_dir}/${f}" DESTINATION "samples/${sample_dir}/${install_subdir}" COMPONENT samples)
endforeach()
#jni part + eclipse files
file(GLOB_RECURSE jni_files RELATIVE "${path}" "${path}/jni/*" "${path}/.cproject")
ocv_list_filterout(jni_files "\\\\.svn")
foreach(f ${jni_files} ".classpath" ".project" ".settings/org.eclipse.jdt.core.prefs")
get_filename_component(install_subdir "${f}" PATH)
- install(FILES "${path}/${f}" DESTINATION "samples/${sample_dir}/${install_subdir}" COMPONENT main)
+ install(FILES "${path}/${f}" DESTINATION "samples/${sample_dir}/${install_subdir}" COMPONENT samples)
endforeach()
#update proj
if(android_proj_lib_deps_commands)
@@ -365,9 +405,9 @@ macro(add_android_project target path)
endif()
install(CODE "EXECUTE_PROCESS(COMMAND ${ANDROID_EXECUTABLE} --silent update project --path . --target \"${android_proj_sdk_target}\" --name \"${target}\" ${inst_lib_opt}
WORKING_DIRECTORY \"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/samples/${sample_dir}\"
- )" COMPONENT main)
+ )" COMPONENT samples)
#empty 'gen'
- install(CODE "MAKE_DIRECTORY(\"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/samples/${sample_dir}/gen\")" COMPONENT main)
+ install(CODE "MAKE_DIRECTORY(\"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/samples/${sample_dir}/gen\")" COMPONENT samples)
endif()
endif()
endmacro()
diff --git a/cmake/OpenCVDetectCUDA.cmake b/cmake/OpenCVDetectCUDA.cmake
index b35a7977c..24fbb03ce 100644
--- a/cmake/OpenCVDetectCUDA.cmake
+++ b/cmake/OpenCVDetectCUDA.cmake
@@ -180,6 +180,9 @@ if(CUDA_FOUND)
# we remove -Wsign-promo as it generates warnings under linux
string(REPLACE "-Wsign-promo" "" ${var} "${${var}}")
+ # we remove -Wno-sign-promo as it generates warnings under linux
+ string(REPLACE "-Wno-sign-promo" "" ${var} "${${var}}")
+
# we remove -Wno-delete-non-virtual-dtor because it's used for C++ compiler
# but NVCC uses C compiler by default
string(REPLACE "-Wno-delete-non-virtual-dtor" "" ${var} "${${var}}")
@@ -216,3 +219,42 @@ else()
unset(CUDA_ARCH_BIN CACHE)
unset(CUDA_ARCH_PTX CACHE)
endif()
+
+if(HAVE_CUDA)
+ set(CUDA_LIBS_PATH "")
+ foreach(p ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
+ get_filename_component(_tmp ${p} PATH)
+ list(APPEND CUDA_LIBS_PATH ${_tmp})
+ endforeach()
+
+ if(HAVE_CUBLAS)
+ foreach(p ${CUDA_cublas_LIBRARY})
+ get_filename_component(_tmp ${p} PATH)
+ list(APPEND CUDA_LIBS_PATH ${_tmp})
+ endforeach()
+ endif()
+
+ if(HAVE_CUFFT)
+ foreach(p ${CUDA_cufft_LIBRARY})
+ get_filename_component(_tmp ${p} PATH)
+ list(APPEND CUDA_LIBS_PATH ${_tmp})
+ endforeach()
+ endif()
+
+ list(REMOVE_DUPLICATES CUDA_LIBS_PATH)
+ link_directories(${CUDA_LIBS_PATH})
+
+ set(CUDA_LIBRARIES_ABS ${CUDA_LIBRARIES})
+ ocv_convert_to_lib_name(CUDA_LIBRARIES ${CUDA_LIBRARIES})
+ set(CUDA_npp_LIBRARY_ABS ${CUDA_npp_LIBRARY})
+ ocv_convert_to_lib_name(CUDA_npp_LIBRARY ${CUDA_npp_LIBRARY})
+ if(HAVE_CUBLAS)
+ set(CUDA_cublas_LIBRARY_ABS ${CUDA_cublas_LIBRARY})
+ ocv_convert_to_lib_name(CUDA_cublas_LIBRARY ${CUDA_cublas_LIBRARY})
+ endif()
+
+ if(HAVE_CUFFT)
+ set(CUDA_cufft_LIBRARY_ABS ${CUDA_cufft_LIBRARY})
+ ocv_convert_to_lib_name(CUDA_cufft_LIBRARY ${CUDA_cufft_LIBRARY})
+ endif()
+endif()
\ No newline at end of file
diff --git a/cmake/OpenCVDetectPython.cmake b/cmake/OpenCVDetectPython.cmake
index 3326bcd98..d02b7596a 100644
--- a/cmake/OpenCVDetectPython.cmake
+++ b/cmake/OpenCVDetectPython.cmake
@@ -81,24 +81,39 @@ if(PYTHON_EXECUTABLE)
SET(PYTHON_PACKAGES_PATH "${_PYTHON_PACKAGES_PATH}" CACHE PATH "Where to install the python packages.")
if(NOT PYTHON_NUMPY_INCLUDE_DIR)
- # Attempt to discover the NumPy include directory. If this succeeds, then build python API with NumPy
- execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import os; os.environ['DISTUTILS_USE_SDK']='1'; import numpy.distutils; print numpy.distutils.misc_util.get_numpy_include_dirs()[0]"
- RESULT_VARIABLE PYTHON_NUMPY_PROCESS
- OUTPUT_VARIABLE PYTHON_NUMPY_INCLUDE_DIR
- OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(CMAKE_CROSSCOMPILING)
+ message(STATUS "Cannot probe for Python/Numpy support (because we are cross-compiling OpenCV)")
+ message(STATUS "If you want to enable Python/Numpy support, set the following variables:")
+ message(STATUS " PYTHON_INCLUDE_PATH")
+ message(STATUS " PYTHON_LIBRARIES")
+ message(STATUS " PYTHON_NUMPY_INCLUDE_DIR")
+ else()
+ # Attempt to discover the NumPy include directory. If this succeeds, then build python API with NumPy
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import os; os.environ['DISTUTILS_USE_SDK']='1'; import numpy.distutils; print numpy.distutils.misc_util.get_numpy_include_dirs()[0]"
+ RESULT_VARIABLE PYTHON_NUMPY_PROCESS
+ OUTPUT_VARIABLE PYTHON_NUMPY_INCLUDE_DIR
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
- if(PYTHON_NUMPY_PROCESS EQUAL 0)
- file(TO_CMAKE_PATH "${PYTHON_NUMPY_INCLUDE_DIR}" _PYTHON_NUMPY_INCLUDE_DIR)
- set(PYTHON_NUMPY_INCLUDE_DIR ${_PYTHON_NUMPY_INCLUDE_DIR} CACHE PATH "Path to numpy headers")
+ if(NOT PYTHON_NUMPY_PROCESS EQUAL 0)
+ unset(PYTHON_NUMPY_INCLUDE_DIR)
+ endif()
endif()
endif()
if(PYTHON_NUMPY_INCLUDE_DIR)
+ file(TO_CMAKE_PATH "${PYTHON_NUMPY_INCLUDE_DIR}" _PYTHON_NUMPY_INCLUDE_DIR)
+ set(PYTHON_NUMPY_INCLUDE_DIR ${_PYTHON_NUMPY_INCLUDE_DIR} CACHE PATH "Path to numpy headers")
set(PYTHON_USE_NUMPY TRUE)
- execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print numpy.version.version"
+ if(CMAKE_CROSSCOMPILING)
+ if(NOT PYTHON_NUMPY_VERSION)
+ set(PYTHON_NUMPY_VERSION "undefined - cannot be probed because of the cross-compilation")
+ endif()
+ else()
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print numpy.version.version"
RESULT_VARIABLE PYTHON_NUMPY_PROCESS
OUTPUT_VARIABLE PYTHON_NUMPY_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE)
+ endif()
endif()
endif(NOT ANDROID AND NOT IOS)
diff --git a/cmake/OpenCVDetectVTK.cmake b/cmake/OpenCVDetectVTK.cmake
new file mode 100644
index 000000000..2b55a9c3f
--- /dev/null
+++ b/cmake/OpenCVDetectVTK.cmake
@@ -0,0 +1,53 @@
+if(NOT WITH_VTK OR ANDROID OR IOS)
+ return()
+endif()
+
+# VTK 6.x components
+find_package(VTK QUIET COMPONENTS vtkRenderingOpenGL vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport NO_MODULE)
+
+# VTK 5.x components
+if(NOT VTK_FOUND)
+ find_package(VTK QUIET COMPONENTS vtkCommon NO_MODULE)
+endif()
+
+if(NOT VTK_FOUND)
+ set(HAVE_VTK OFF)
+ message(STATUS "VTK is not found. Please set -DVTK_DIR in CMake to VTK build directory, or to VTK install subdirectory with VTKConfig.cmake file")
+ return()
+endif()
+
+# Don't support earlier VTKs
+if(${VTK_VERSION} VERSION_LESS "5.8.0")
+ message(STATUS "VTK support is disabled. VTK ver. 5.8.0 is minimum required, but found VTK ver. ${VTK_VERSION}")
+ return()
+endif()
+
+# Different Qt versions can't be linked together
+if(HAVE_QT5 AND ${VTK_VERSION} VERSION_LESS "6.0.0")
+ if(VTK_USE_QT)
+ message(STATUS "VTK support is disabled. Incompatible combination: OpenCV + Qt5 and VTK ver.${VTK_VERSION} + Qt4")
+ endif()
+endif()
+
+# Different Qt versions can't be linked together. VTK 6.0.0 doesn't provide a way to get Qt version it was linked with
+if(HAVE_QT5 AND ${VTK_VERSION} VERSION_EQUAL "6.0.0" AND NOT DEFINED FORCE_VTK)
+ message(STATUS "VTK support is disabled. Possible incompatible combination: OpenCV+Qt5, and VTK ver.${VTK_VERSION} with Qt4")
+ message(STATUS "If it is known that VTK was compiled without Qt4, please define '-DFORCE_VTK=TRUE' flag in CMake")
+ return()
+endif()
+
+# Different Qt versions can't be linked together
+if(HAVE_QT AND ${VTK_VERSION} VERSION_GREATER "6.0.0" AND NOT ${VTK_QT_VERSION} STREQUAL "")
+ if(HAVE_QT5 AND ${VTK_QT_VERSION} EQUAL "4")
+ message(STATUS "VTK support is disabled. Incompatible combination: OpenCV + Qt5 and VTK ver.${VTK_VERSION} + Qt4")
+ return()
+ endif()
+
+ if(NOT HAVE_QT5 AND ${VTK_QT_VERSION} EQUAL "5")
+ message(STATUS "VTK support is disabled. Incompatible combination: OpenCV + Qt4 and VTK ver.${VTK_VERSION} + Qt5")
+ return()
+ endif()
+endif()
+
+set(HAVE_VTK ON)
+message(STATUS "Found VTK ver. ${VTK_VERSION} (usefile: ${VTK_USE_FILE})")
diff --git a/cmake/OpenCVExtraTargets.cmake b/cmake/OpenCVExtraTargets.cmake
index b4d339155..ecb2a3b36 100644
--- a/cmake/OpenCVExtraTargets.cmake
+++ b/cmake/OpenCVExtraTargets.cmake
@@ -4,7 +4,7 @@
CONFIGURE_FILE(
"${OpenCV_SOURCE_DIR}/cmake/templates/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
- IMMEDIATE @ONLY)
+ @ONLY)
ADD_CUSTOM_TARGET(uninstall "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake")
if(ENABLE_SOLUTION_FOLDERS)
diff --git a/cmake/OpenCVFindIPP.cmake b/cmake/OpenCVFindIPP.cmake
index 772cae886..db02e6acb 100644
--- a/cmake/OpenCVFindIPP.cmake
+++ b/cmake/OpenCVFindIPP.cmake
@@ -163,9 +163,16 @@ function(set_ipp_new_libraries _LATEST_VERSION)
${IPP_LIB_PREFIX}${IPP_PREFIX}${IPPCV}${IPP_SUFFIX}${IPP_LIB_SUFFIX}
${IPP_LIB_PREFIX}${IPP_PREFIX}${IPPI}${IPP_SUFFIX}${IPP_LIB_SUFFIX}
${IPP_LIB_PREFIX}${IPP_PREFIX}${IPPS}${IPP_SUFFIX}${IPP_LIB_SUFFIX}
- ${IPP_LIB_PREFIX}${IPP_PREFIX}${IPPCORE}${IPP_SUFFIX}${IPP_LIB_SUFFIX}
- PARENT_SCOPE)
+ ${IPP_LIB_PREFIX}${IPP_PREFIX}${IPPCORE}${IPP_SUFFIX}${IPP_LIB_SUFFIX})
+ if (UNIX)
+ set(IPP_LIBRARIES
+ ${IPP_LIBRARIES}
+ ${IPP_LIB_PREFIX}irc${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${IPP_LIB_PREFIX}imf${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${IPP_LIB_PREFIX}svml${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+ set(IPP_LIBRARIES ${IPP_LIBRARIES} PARENT_SCOPE)
return()
endfunction()
@@ -208,19 +215,39 @@ function(set_ipp_variables _LATEST_VERSION)
set(IPP_INCLUDE_DIRS ${IPP_ROOT_DIR}/include PARENT_SCOPE)
if (APPLE)
- set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib PARENT_SCOPE)
+ set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib)
elseif (IPP_X64)
if(NOT EXISTS ${IPP_ROOT_DIR}/lib/intel64)
message(SEND_ERROR "IPP EM64T libraries not found")
endif()
- set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/intel64 PARENT_SCOPE)
+ set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/intel64)
else()
if(NOT EXISTS ${IPP_ROOT_DIR}/lib/ia32)
message(SEND_ERROR "IPP IA32 libraries not found")
endif()
- set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/ia32 PARENT_SCOPE)
+ set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/ia32)
endif()
+ if (UNIX)
+ get_filename_component(INTEL_COMPILER_LIBRARY_DIR ${IPP_ROOT_DIR}/../lib REALPATH)
+ if (IPP_X64)
+ if(NOT EXISTS ${INTEL_COMPILER_LIBRARY_DIR}/intel64)
+ message(SEND_ERROR "Intel compiler EM64T libraries not found")
+ endif()
+ set(IPP_LIBRARY_DIRS
+ ${IPP_LIBRARY_DIRS}
+ ${INTEL_COMPILER_LIBRARY_DIR}/intel64)
+ else()
+ if(NOT EXISTS ${INTEL_COMPILER_LIBRARY_DIR}/ia32)
+ message(SEND_ERROR "Intel compiler IA32 libraries not found")
+ endif()
+ set(IPP_LIBRARY_DIRS
+ ${IPP_LIBRARY_DIRS}
+ ${INTEL_COMPILER_LIBRARY_DIR}/ia32)
+ endif()
+ endif()
+ set(IPP_LIBRARY_DIRS ${IPP_LIBRARY_DIRS} PARENT_SCOPE)
+
# set IPP_LIBRARIES variable (7.x or 8.x lib names)
set_ipp_new_libraries(${_LATEST_VERSION})
set(IPP_LIBRARIES ${IPP_LIBRARIES} PARENT_SCOPE)
diff --git a/cmake/OpenCVGenAndroidMK.cmake b/cmake/OpenCVGenAndroidMK.cmake
index 8792d1b48..2622d2aae 100644
--- a/cmake/OpenCVGenAndroidMK.cmake
+++ b/cmake/OpenCVGenAndroidMK.cmake
@@ -56,8 +56,29 @@ if(ANDROID)
# remove CUDA runtime and NPP from regular deps
# it can be added separately if needed.
- ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "libcu")
- ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "libnpp")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cusparse")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cufft")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cublas")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "npp")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cudart")
+
+ if(HAVE_CUDA)
+ # CUDA runtime libraries are always required
+ set(culibs ${CUDA_LIBRARIES})
+
+ # right now NPP is always required too
+ list(INSERT culibs 0 ${CUDA_npp_LIBRARY})
+
+ if(HAVE_CUFFT)
+ list(INSERT culibs 0 ${CUDA_cufft_LIBRARY})
+ endif()
+
+ if(HAVE_CUBLAS)
+ list(INSERT culibs 0 ${CUDA_cublas_LIBRARY})
+ endif()
+ endif()
+
+ ocv_convert_to_lib_name(CUDA_RUNTIME_LIBS_CONFIGMAKE ${culibs})
# split 3rdparty libs and modules
foreach(mod ${OPENCV_MODULES_CONFIGMAKE})
@@ -69,8 +90,14 @@ if(ANDROID)
list(REMOVE_ITEM OPENCV_MODULES_CONFIGMAKE ${OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE})
endif()
+ if(ENABLE_DYNAMIC_CUDA)
+ set(OPENCV_DYNAMICUDA_MODULE_CONFIGMAKE "dynamicuda")
+ endif()
+
# GPU module enabled separately
- list(REMOVE_ITEM OPENCV_MODULES_CONFIGMAKE "gpu")
+ list(REMOVE_ITEM OPENCV_MODULES_CONFIGMAKE "opencv_gpu")
+ list(REMOVE_ITEM OPENCV_MODULES_CONFIGMAKE "opencv_dynamicuda")
+
if(HAVE_opencv_gpu)
set(OPENCV_HAVE_GPU_MODULE_CONFIGMAKE "on")
endif()
@@ -82,6 +109,7 @@ if(ANDROID)
string(REPLACE ";" " " ${lst} "${${lst}}")
endforeach()
string(REPLACE "opencv_" "" OPENCV_MODULES_CONFIGMAKE "${OPENCV_MODULES_CONFIGMAKE}")
+ string(REPLACE ";" " " CUDA_RUNTIME_LIBS_CONFIGMAKE "${CUDA_RUNTIME_LIBS_CONFIGMAKE}")
# prepare 3rd-party component list without TBB for armeabi and mips platforms. TBB is useless there.
set(OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB ${OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE})
@@ -103,7 +131,7 @@ if(ANDROID)
set(OPENCV_LIBS_DIR_CONFIGCMAKE "\$(OPENCV_THIS_DIR)/lib/\$(OPENCV_TARGET_ARCH_ABI)")
set(OPENCV_3RDPARTY_LIBS_DIR_CONFIGCMAKE "\$(OPENCV_THIS_DIR)/3rdparty/lib/\$(OPENCV_TARGET_ARCH_ABI)")
- configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV.mk.in" "${CMAKE_BINARY_DIR}/OpenCV.mk" IMMEDIATE @ONLY)
+ configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV.mk.in" "${CMAKE_BINARY_DIR}/OpenCV.mk" @ONLY)
# -------------------------------------------------------------------------------------------
# Part 2/2: ${BIN_DIR}/unix-install/OpenCV.mk -> For use with "make install"
@@ -113,6 +141,6 @@ if(ANDROID)
set(OPENCV_LIBS_DIR_CONFIGCMAKE "\$(OPENCV_THIS_DIR)/../libs/\$(OPENCV_TARGET_ARCH_ABI)")
set(OPENCV_3RDPARTY_LIBS_DIR_CONFIGCMAKE "\$(OPENCV_THIS_DIR)/../3rdparty/libs/\$(OPENCV_TARGET_ARCH_ABI)")
- configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV.mk.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCV.mk" IMMEDIATE @ONLY)
- install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCV.mk DESTINATION ${OPENCV_CONFIG_INSTALL_PATH})
+ configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV.mk.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCV.mk" @ONLY)
+ install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCV.mk DESTINATION ${OPENCV_CONFIG_INSTALL_PATH} COMPONENT dev)
endif(ANDROID)
diff --git a/cmake/OpenCVGenConfig.cmake b/cmake/OpenCVGenConfig.cmake
index 411d22582..cdf418ec8 100644
--- a/cmake/OpenCVGenConfig.cmake
+++ b/cmake/OpenCVGenConfig.cmake
@@ -83,9 +83,9 @@ endif()
export(TARGETS ${OpenCVModules_TARGETS} FILE "${CMAKE_BINARY_DIR}/OpenCVModules${modules_file_suffix}.cmake")
-configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/OpenCVConfig.cmake" IMMEDIATE @ONLY)
+configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/OpenCVConfig.cmake" @ONLY)
#support for version checking when finding opencv. find_package(OpenCV 2.3.1 EXACT) should now work.
-configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/OpenCVConfig-version.cmake" IMMEDIATE @ONLY)
+configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/OpenCVConfig-version.cmake" @ONLY)
# --------------------------------------------------------------------------------------------
# Part 2/3: ${BIN_DIR}/unix-install/OpenCVConfig.cmake -> For use *with* "make install"
@@ -98,8 +98,8 @@ if(INSTALL_TO_MANGLED_PATHS)
set(OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE "\"\${OpenCV_INSTALL_PATH}/${OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE}\"")
endif()
-configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" IMMEDIATE @ONLY)
-configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake" IMMEDIATE @ONLY)
+configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" @ONLY)
+configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake" @ONLY)
if(UNIX) # ANDROID configuration is created here also
#http://www.vtk.org/Wiki/CMake/Tutorials/Packaging reference
@@ -109,18 +109,18 @@ if(UNIX) # ANDROID configuration is created here also
# /(share|lib)/*/ (U)
# /(share|lib)/*/(cmake|CMake)/ (U)
if(INSTALL_TO_MANGLED_PATHS)
- install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/)
- install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/)
- install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ FILE OpenCVModules${modules_file_suffix}.cmake)
+ install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ COMPONENT dev)
+ install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ COMPONENT dev)
+ install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
else()
- install(FILES "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/)
- install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/)
- install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ FILE OpenCVModules${modules_file_suffix}.cmake)
+ install(FILES "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
+ install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
+ install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
endif()
endif()
if(ANDROID)
- install(FILES "${OpenCV_SOURCE_DIR}/platforms/android/android.toolchain.cmake" DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/)
+ install(FILES "${OpenCV_SOURCE_DIR}/platforms/android/android.toolchain.cmake" DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
endif()
# --------------------------------------------------------------------------------------------
@@ -131,15 +131,15 @@ if(WIN32)
set(OpenCV2_INCLUDE_DIRS_CONFIGCMAKE "\"\"")
exec_program(mkdir ARGS "-p \"${CMAKE_BINARY_DIR}/win-install/\"" OUTPUT_VARIABLE RET_VAL)
- configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" IMMEDIATE @ONLY)
- configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" IMMEDIATE @ONLY)
+ configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" @ONLY)
+ configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" @ONLY)
if(BUILD_SHARED_LIBS)
- install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib")
- install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" FILE OpenCVModules${modules_file_suffix}.cmake)
+ install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" COMPONENT dev)
+ install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
else()
- install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib")
- install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" FILE OpenCVModules${modules_file_suffix}.cmake)
+ install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" COMPONENT dev)
+ install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
endif()
- install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}")
- install(FILES "${OpenCV_SOURCE_DIR}/cmake/OpenCVConfig.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}/")
+ install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}" COMPONENT dev)
+ install(FILES "${OpenCV_SOURCE_DIR}/cmake/OpenCVConfig.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}/" COMPONENT dev)
endif()
diff --git a/cmake/OpenCVGenHeaders.cmake b/cmake/OpenCVGenHeaders.cmake
index 35da0fb4b..c892a929c 100644
--- a/cmake/OpenCVGenHeaders.cmake
+++ b/cmake/OpenCVGenHeaders.cmake
@@ -23,4 +23,4 @@ set(OPENCV_MODULE_DEFINITIONS_CONFIGMAKE "${OPENCV_MODULE_DEFINITIONS_CONFIGMAKE
#endforeach()
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/opencv_modules.hpp.in" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp")
-install(FILES "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp" DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2 COMPONENT main)
+install(FILES "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp" DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2 COMPONENT dev)
diff --git a/cmake/OpenCVGenPkgconfig.cmake b/cmake/OpenCVGenPkgconfig.cmake
index cd54f11bf..fa57db9d3 100644
--- a/cmake/OpenCVGenPkgconfig.cmake
+++ b/cmake/OpenCVGenPkgconfig.cmake
@@ -78,8 +78,8 @@ else()
endif()
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/opencv-XXX.pc.in"
"${CMAKE_BINARY_DIR}/unix-install/${OPENCV_PC_FILE_NAME}"
- @ONLY IMMEDIATE)
+ @ONLY)
if(UNIX AND NOT ANDROID)
- install(FILES ${CMAKE_BINARY_DIR}/unix-install/${OPENCV_PC_FILE_NAME} DESTINATION ${OPENCV_LIB_INSTALL_PATH}/pkgconfig)
+ install(FILES ${CMAKE_BINARY_DIR}/unix-install/${OPENCV_PC_FILE_NAME} DESTINATION ${OPENCV_LIB_INSTALL_PATH}/pkgconfig COMPONENT dev)
endif()
diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake
index 3dd749b05..79e508609 100644
--- a/cmake/OpenCVModule.cmake
+++ b/cmake/OpenCVModule.cmake
@@ -27,7 +27,8 @@
# The verbose template for OpenCV module:
#
# ocv_add_module(modname )
-# ocv_glob_module_sources() or glob them manually and ocv_set_module_sources(...)
+# ocv_glob_module_sources(([EXCLUDE_CUDA] )
+# or glob them manually and ocv_set_module_sources(...)
# ocv_module_include_directories()
# ocv_create_module()
#
@@ -135,13 +136,13 @@ macro(ocv_add_module _name)
# parse list of dependencies
if("${ARGV1}" STREQUAL "INTERNAL" OR "${ARGV1}" STREQUAL "BINDINGS")
- set(OPENCV_MODULE_${the_module}_CLASS "${ARGV1}" CACHE INTERNAL "The cathegory of the module")
+ set(OPENCV_MODULE_${the_module}_CLASS "${ARGV1}" CACHE INTERNAL "The category of the module")
set(__ocv_argn__ ${ARGN})
list(REMOVE_AT __ocv_argn__ 0)
ocv_add_dependencies(${the_module} ${__ocv_argn__})
unset(__ocv_argn__)
else()
- set(OPENCV_MODULE_${the_module}_CLASS "PUBLIC" CACHE INTERNAL "The cathegory of the module")
+ set(OPENCV_MODULE_${the_module}_CLASS "PUBLIC" CACHE INTERNAL "The category of the module")
ocv_add_dependencies(${the_module} ${ARGN})
if(BUILD_${the_module})
set(OPENCV_MODULES_PUBLIC ${OPENCV_MODULES_PUBLIC} "${the_module}" CACHE INTERNAL "List of OpenCV modules marked for export")
@@ -478,22 +479,38 @@ endmacro()
# finds and sets headers and sources for the standard OpenCV module
# Usage:
-# ocv_glob_module_sources()
+# ocv_glob_module_sources([EXCLUDE_CUDA] )
macro(ocv_glob_module_sources)
+ set(_argn ${ARGN})
+ list(FIND _argn "EXCLUDE_CUDA" exclude_cuda)
+ if(NOT exclude_cuda EQUAL -1)
+ list(REMOVE_AT _argn ${exclude_cuda})
+ endif()
+
file(GLOB_RECURSE lib_srcs "src/*.cpp")
file(GLOB_RECURSE lib_int_hdrs "src/*.hpp" "src/*.h")
file(GLOB lib_hdrs "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
file(GLOB lib_hdrs_detail "include/opencv2/${name}/detail/*.hpp" "include/opencv2/${name}/detail/*.h")
+ file(GLOB_RECURSE lib_srcs_apple "src/*.mm")
+ if (APPLE)
+ list(APPEND lib_srcs ${lib_srcs_apple})
+ endif()
- file(GLOB lib_cuda_srcs "src/cuda/*.cu")
- set(cuda_objs "")
- set(lib_cuda_hdrs "")
- if(HAVE_CUDA)
- ocv_include_directories(${CUDA_INCLUDE_DIRS})
- file(GLOB lib_cuda_hdrs "src/cuda/*.hpp")
+ if (exclude_cuda EQUAL -1)
+ file(GLOB lib_cuda_srcs "src/cuda/*.cu")
+ set(cuda_objs "")
+ set(lib_cuda_hdrs "")
+ if(HAVE_CUDA)
+ ocv_include_directories(${CUDA_INCLUDE_DIRS})
+ file(GLOB lib_cuda_hdrs "src/cuda/*.hpp")
- ocv_cuda_compile(cuda_objs ${lib_cuda_srcs} ${lib_cuda_hdrs})
- source_group("Src\\Cuda" FILES ${lib_cuda_srcs} ${lib_cuda_hdrs})
+ ocv_cuda_compile(cuda_objs ${lib_cuda_srcs} ${lib_cuda_hdrs})
+ source_group("Src\\Cuda" FILES ${lib_cuda_srcs} ${lib_cuda_hdrs})
+ endif()
+ else()
+ set(cuda_objs "")
+ set(lib_cuda_srcs "")
+ set(lib_cuda_hdrs "")
endif()
source_group("Src" FILES ${lib_srcs} ${lib_int_hdrs})
@@ -512,8 +529,8 @@ macro(ocv_glob_module_sources)
source_group("Include" FILES ${lib_hdrs})
source_group("Include\\detail" FILES ${lib_hdrs_detail})
- ocv_set_module_sources(${ARGN} HEADERS ${lib_hdrs} ${lib_hdrs_detail}
- SOURCES ${lib_srcs} ${lib_int_hdrs} ${cuda_objs} ${lib_cuda_srcs} ${lib_cuda_hdrs})
+ ocv_set_module_sources(${_argn} HEADERS ${lib_hdrs} ${lib_hdrs_detail}
+ SOURCES ${lib_srcs} ${lib_int_hdrs} ${cuda_objs} ${lib_cuda_srcs} ${lib_cuda_hdrs})
endmacro()
# creates OpenCV module in current folder
@@ -577,9 +594,9 @@ macro(ocv_create_module)
endif()
ocv_install_target(${the_module} EXPORT OpenCVModules
- RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT main
- LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main
- ARCHIVE DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main
+ RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs
+ LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs
+ ARCHIVE DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT dev
)
# only "public" headers need to be installed
@@ -587,7 +604,7 @@ macro(ocv_create_module)
foreach(hdr ${OPENCV_MODULE_${the_module}_HEADERS})
string(REGEX REPLACE "^.*opencv2/" "opencv2/" hdr2 "${hdr}")
if(hdr2 MATCHES "^(opencv2/.*)/[^/]+.h(..)?$")
- install(FILES ${hdr} DESTINATION "${OPENCV_INCLUDE_INSTALL_PATH}/${CMAKE_MATCH_1}" COMPONENT main)
+ install(FILES ${hdr} DESTINATION "${OPENCV_INCLUDE_INSTALL_PATH}/${CMAKE_MATCH_1}" COMPONENT dev)
endif()
endforeach()
endif()
@@ -612,11 +629,20 @@ endmacro()
# short command for adding simple OpenCV module
# see ocv_add_module for argument details
# Usage:
-# ocv_define_module(module_name [INTERNAL] [REQUIRED] [] [OPTIONAL ])
+# ocv_define_module(module_name [INTERNAL] [EXCLUDE_CUDA] [REQUIRED] [] [OPTIONAL ])
macro(ocv_define_module module_name)
- ocv_add_module(${module_name} ${ARGN})
+ set(_argn ${ARGN})
+ set(exclude_cuda "")
+ foreach(arg ${_argn})
+ if("${arg}" STREQUAL "EXCLUDE_CUDA")
+ set(exclude_cuda "${arg}")
+ list(REMOVE_ITEM _argn ${arg})
+ endif()
+ endforeach()
+
+ ocv_add_module(${module_name} ${_argn})
ocv_module_include_directories()
- ocv_glob_module_sources()
+ ocv_glob_module_sources(${exclude_cuda})
ocv_create_module()
ocv_add_precompiled_headers(${the_module})
@@ -711,6 +737,9 @@ function(ocv_add_perf_tests)
else(OCV_DEPENDENCIES_FOUND)
# TODO: warn about unsatisfied dependencies
endif(OCV_DEPENDENCIES_FOUND)
+ if(INSTALL_TESTS)
+ install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_TEST_INSTALL_PATH} COMPONENT tests)
+ endif()
endif()
endfunction()
@@ -741,8 +770,8 @@ function(ocv_add_accuracy_tests)
endif()
get_native_precompiled_header(${the_target} test_precomp.hpp)
-
add_executable(${the_target} ${OPENCV_TEST_${the_module}_SOURCES} ${${the_target}_pch})
+
target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${test_deps} ${OPENCV_LINKER_LIBS})
add_dependencies(opencv_tests ${the_target})
@@ -764,6 +793,10 @@ function(ocv_add_accuracy_tests)
else(OCV_DEPENDENCIES_FOUND)
# TODO: warn about unsatisfied dependencies
endif(OCV_DEPENDENCIES_FOUND)
+
+ if(INSTALL_TESTS)
+ install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_TEST_INSTALL_PATH} COMPONENT tests)
+ endif()
endif()
endfunction()
@@ -795,7 +828,7 @@ function(ocv_add_samples)
endif()
if(WIN32)
- install(TARGETS ${the_target} RUNTIME DESTINATION "samples/${module_id}" COMPONENT main)
+ install(TARGETS ${the_target} RUNTIME DESTINATION "samples/${module_id}" COMPONENT samples)
endif()
endforeach()
endif()
@@ -804,8 +837,8 @@ function(ocv_add_samples)
if(INSTALL_C_EXAMPLES AND NOT WIN32 AND EXISTS "${samples_path}")
file(GLOB sample_files "${samples_path}/*")
install(FILES ${sample_files}
- DESTINATION share/OpenCV/samples/${module_id}
- PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
+ DESTINATION ${OPENCV_SAMPLES_SRC_INSTALL_PATH}/${module_id}
+ PERMISSIONS OWNER_READ GROUP_READ WORLD_READ COMPONENT samples)
endif()
endfunction()
diff --git a/cmake/OpenCVPackaging.cmake b/cmake/OpenCVPackaging.cmake
new file mode 100644
index 000000000..91f594096
--- /dev/null
+++ b/cmake/OpenCVPackaging.cmake
@@ -0,0 +1,110 @@
+if(EXISTS "${CMAKE_ROOT}/Modules/CPack.cmake")
+set(CPACK_set_DESTDIR "on")
+
+if(NOT OPENCV_CUSTOM_PACKAGE_INFO)
+ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Open Computer Vision Library")
+ set(CPACK_PACKAGE_DESCRIPTION
+"OpenCV (Open Source Computer Vision Library) is an open source computer vision
+and machine learning software library. OpenCV was built to provide a common
+infrastructure for computer vision applications and to accelerate the use of
+machine perception in the commercial products. Being a BSD-licensed product,
+OpenCV makes it easy for businesses to utilize and modify the code.")
+ set(CPACK_PACKAGE_VENDOR "OpenCV Foundation")
+ set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
+ set(CPACK_PACKAGE_CONTACT "admin@opencv.org")
+ set(CPACK_PACKAGE_VERSION_MAJOR "${OPENCV_VERSION_MAJOR}")
+ set(CPACK_PACKAGE_VERSION_MINOR "${OPENCV_VERSION_MINOR}")
+ set(CPACK_PACKAGE_VERSION_PATCH "${OPENCV_VERSION_PATCH}")
+ set(CPACK_PACKAGE_VERSION "${OPENCV_VCSVERSION}")
+endif(NOT OPENCV_CUSTOM_PACKAGE_INFO)
+
+#arch
+if(X86)
+ set(CPACK_DEBIAN_ARCHITECTURE "i386")
+ set(CPACK_RPM_PACKAGE_ARCHITECTURE "i686")
+elseif(X86_64)
+ set(CPACK_DEBIAN_ARCHITECTURE "amd64")
+ set(CPACK_RPM_PACKAGE_ARCHITECTURE "x86_64")
+elseif(ARM)
+ set(CPACK_DEBIAN_ARCHITECTURE "armhf")
+ set(CPACK_RPM_PACKAGE_ARCHITECTURE "armhf")
+else()
+ set(CPACK_DEBIAN_ARCHITECTURE ${CMAKE_SYSTEM_PROCESSOR})
+ set(CPACK_RPM_PACKAGE_ARCHITECTURE ${CMAKE_SYSTEM_PROCESSOR})
+endif()
+
+if(CPACK_GENERATOR STREQUAL "DEB")
+ set(OPENCV_PACKAGE_ARCH_SUFFIX ${CPACK_DEBIAN_ARCHITECTURE})
+elseif(CPACK_GENERATOR STREQUAL "RPM")
+ set(OPENCV_PACKAGE_ARCH_SUFFIX ${CPACK_RPM_PACKAGE_ARCHITECTURE})
+else()
+ set(OPENCV_PACKAGE_ARCH_SUFFIX ${CMAKE_SYSTEM_PROCESSOR})
+endif()
+
+set(CPACK_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${OPENCV_VCSVERSION}-${OPENCV_PACKAGE_ARCH_SUFFIX}")
+set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${OPENCV_VCSVERSION}-${OPENCV_PACKAGE_ARCH_SUFFIX}")
+
+#rpm options
+set(CPACK_RPM_COMPONENT_INSTALL TRUE)
+set(CPACK_RPM_PACKAGE_SUMMARY ${CPACK_PACKAGE_DESCRIPTION_SUMMARY})
+set(CPACK_RPM_PACKAGE_DESCRIPTION ${CPACK_PACKAGE_DESCRIPTION})
+set(CPACK_RPM_PACKAGE_URL "http://opencv.org")
+set(CPACK_RPM_PACKAGE_LICENSE "BSD")
+
+#deb options
+set(CPACK_DEB_COMPONENT_INSTALL TRUE)
+set(CPACK_DEBIAN_PACKAGE_PRIORITY "optional")
+set(CPACK_DEBIAN_PACKAGE_SECTION "libs")
+set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "http://opencv.org")
+
+#depencencies
+set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS TRUE)
+set(CPACK_COMPONENT_samples_DEPENDS libs)
+set(CPACK_COMPONENT_dev_DEPENDS libs)
+set(CPACK_COMPONENT_docs_DEPENDS libs)
+set(CPACK_COMPONENT_java_DEPENDS libs)
+set(CPACK_COMPONENT_python_DEPENDS libs)
+set(CPACK_COMPONENT_tests_DEPENDS libs)
+
+if(HAVE_CUDA)
+ string(REPLACE "." "-" cuda_version_suffix ${CUDA_VERSION})
+ set(CPACK_DEB_libs_PACKAGE_DEPENDS "cuda-core-libs-${cuda_version_suffix}, cuda-extra-libs-${cuda_version_suffix}")
+ set(CPACK_COMPONENT_dev_DEPENDS libs)
+ set(CPACK_DEB_dev_PACKAGE_DEPENDS "cuda-headers-${cuda_version_suffix}")
+endif()
+
+if(NOT OPENCV_CUSTOM_PACKAGE_INFO)
+ set(CPACK_COMPONENT_libs_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}")
+ set(CPACK_COMPONENT_libs_DESCRIPTION "Open Computer Vision Library")
+
+ set(CPACK_COMPONENT_python_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}-python")
+ set(CPACK_COMPONENT_python_DESCRIPTION "Python bindings for Open Source Computer Vision Library")
+
+ set(CPACK_COMPONENT_java_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}-java")
+ set(CPACK_COMPONENT_java_DESCRIPTION "Java bindings for Open Source Computer Vision Library")
+
+ set(CPACK_COMPONENT_dev_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}-dev")
+ set(CPACK_COMPONENT_dev_DESCRIPTION "Development files for Open Source Computer Vision Library")
+
+ set(CPACK_COMPONENT_docs_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}-docs")
+ set(CPACK_COMPONENT_docs_DESCRIPTION "Documentation for Open Source Computer Vision Library")
+
+ set(CPACK_COMPONENT_samples_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}-samples")
+ set(CPACK_COMPONENT_samples_DESCRIPTION "Samples for Open Source Computer Vision Library")
+
+ set(CPACK_COMPONENT_tests_DISPLAY_NAME "lib${CMAKE_PROJECT_NAME}-tests")
+ set(CPACK_COMPONENT_tests_DESCRIPTION "Accuracy and performance tests for Open Source Computer Vision Library")
+endif(NOT OPENCV_CUSTOM_PACKAGE_INFO)
+
+if(NOT OPENCV_CUSTOM_PACKAGE_LAYOUT)
+ set(CPACK_libs_COMPONENT_INSTALL TRUE)
+ set(CPACK_dev_COMPONENT_INSTALL TRUE)
+ set(CPACK_docs_COMPONENT_INSTALL TRUE)
+ set(CPACK_python_COMPONENT_INSTALL TRUE)
+ set(CPACK_java_COMPONENT_INSTALL TRUE)
+ set(CPACK_samples_COMPONENT_INSTALL TRUE)
+endif(NOT OPENCV_CUSTOM_PACKAGE_LAYOUT)
+
+include(CPack)
+
+ENDif(EXISTS "${CMAKE_ROOT}/Modules/CPack.cmake")
\ No newline at end of file
diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake
index 13461e82c..9fa94bb8b 100644
--- a/cmake/OpenCVUtils.cmake
+++ b/cmake/OpenCVUtils.cmake
@@ -448,6 +448,20 @@ macro(ocv_convert_to_full_paths VAR)
endmacro()
+# convert list of paths to libraries names without lib prefix
+macro(ocv_convert_to_lib_name var)
+ set(__tmp "")
+ foreach(path ${ARGN})
+ get_filename_component(__tmp_name "${path}" NAME_WE)
+ string(REGEX REPLACE "^lib" "" __tmp_name ${__tmp_name})
+ list(APPEND __tmp "${__tmp_name}")
+ endforeach()
+ set(${var} ${__tmp})
+ unset(__tmp)
+ unset(__tmp_name)
+endmacro()
+
+
# add install command
function(ocv_install_target)
install(TARGETS ${ARGN})
diff --git a/cmake/templates/OpenCV.mk.in b/cmake/templates/OpenCV.mk.in
index 0fd7b9e05..77229ecf5 100644
--- a/cmake/templates/OpenCV.mk.in
+++ b/cmake/templates/OpenCV.mk.in
@@ -2,6 +2,13 @@
# you might need to define NDK_USE_CYGPATH=1 before calling the ndk-build
USER_LOCAL_PATH:=$(LOCAL_PATH)
+
+USER_LOCAL_C_INCLUDES:=$(LOCAL_C_INCLUDES)
+USER_LOCAL_CFLAGS:=$(LOCAL_CFLAGS)
+USER_LOCAL_STATIC_LIBRARIES:=$(LOCAL_STATIC_LIBRARIES)
+USER_LOCAL_SHARED_LIBRARIES:=$(LOCAL_SHARED_LIBRARIES)
+USER_LOCAL_LDLIBS:=$(LOCAL_LDLIBS)
+
LOCAL_PATH:=$(subst ?,,$(firstword ?$(subst \, ,$(subst /, ,$(call my-dir)))))
OPENCV_TARGET_ARCH_ABI:=$(TARGET_ARCH_ABI)
@@ -13,7 +20,7 @@ OPENCV_BASEDIR:=@OPENCV_BASE_INCLUDE_DIR_CONFIGCMAKE@
OPENCV_LOCAL_C_INCLUDES:=@OPENCV_INCLUDE_DIRS_CONFIGCMAKE@
OPENCV_MODULES:=@OPENCV_MODULES_CONFIGMAKE@
-OPENCV_HAVE_GPU_MODULE=@OPENCV_HAVE_GPU_MODULE_CONFIGMAKE@
+OPENCV_HAVE_GPU_MODULE:=@OPENCV_HAVE_GPU_MODULE_CONFIGMAKE@
OPENCV_USE_GPU_MODULE:=
ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
@@ -22,9 +29,12 @@ ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
OPENCV_USE_GPU_MODULE:=on
endif
endif
+ OPENCV_DYNAMICUDA_MODULE:=@OPENCV_DYNAMICUDA_MODULE_CONFIGMAKE@
+else
+ OPENCV_DYNAMICUDA_MODULE:=
endif
-CUDA_RUNTIME_LIBS:=cufft npps nppi nppc cudart
+CUDA_RUNTIME_LIBS:=@CUDA_RUNTIME_LIBS_CONFIGMAKE@
ifeq ($(OPENCV_LIB_TYPE),)
OPENCV_LIB_TYPE:=@OPENCV_LIBTYPE_CONFIGMAKE@
@@ -60,7 +70,7 @@ else
endif
endif
-ifeq (${OPENCV_CAMERA_MODULES},on)
+ifeq ($(OPENCV_CAMERA_MODULES),on)
ifeq ($(TARGET_ARCH_ABI),armeabi)
OPENCV_CAMERA_MODULES:=@OPENCV_CAMERA_LIBS_ARMEABI_CONFIGCMAKE@
endif
@@ -91,6 +101,13 @@ define add_opencv_module
include $(PREBUILT_$(OPENCV_LIB_TYPE)_LIBRARY)
endef
+define add_cuda_module
+ include $(CLEAR_VARS)
+ LOCAL_MODULE:=$1
+ LOCAL_SRC_FILES:=$(CUDA_TOOLKIT_DIR)/targets/armv7-linux-androideabi/lib/lib$1.so
+ include $(PREBUILT_SHARED_LIBRARY)
+endef
+
define add_opencv_3rdparty_component
include $(CLEAR_VARS)
LOCAL_MODULE:=$1
@@ -108,6 +125,17 @@ endef
ifeq ($(OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_ALREADY_INCLUDED),)
ifeq ($(OPENCV_INSTALL_MODULES),on)
$(foreach module,$(OPENCV_LIBS),$(eval $(call add_opencv_module,$(module))))
+ ifneq ($(OPENCV_DYNAMICUDA_MODULE),)
+ ifeq ($(OPENCV_LIB_TYPE),SHARED)
+ $(eval $(call add_opencv_module,$(OPENCV_DYNAMICUDA_MODULE)))
+ endif
+ endif
+ endif
+
+ ifeq ($(OPENCV_USE_GPU_MODULE),on)
+ ifeq ($(INSTALL_CUDA_LIBRARIES),on)
+ $(foreach module,$(CUDA_RUNTIME_LIBS),$(eval $(call add_cuda_module,$(module))))
+ endif
endif
$(foreach module,$(OPENCV_3RDPARTY_COMPONENTS),$(eval $(call add_opencv_3rdparty_component,$(module))))
@@ -124,11 +152,14 @@ ifeq ($(OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_ALREADY_INCLUDED),)
OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_ALREADY_INCLUDED:=on
endif
-ifeq ($(OPENCV_USE_GPU_MODULE),on)
- include $(CLEAR_VARS)
- LOCAL_MODULE:=opencv_gpu
- LOCAL_SRC_FILES:=$(OPENCV_LIBS_DIR)/libopencv_gpu.a
- include $(PREBUILT_STATIC_LIBRARY)
+ifeq ($(OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_GPU_ALREADY_INCLUDED),)
+ ifeq ($(OPENCV_USE_GPU_MODULE),on)
+ include $(CLEAR_VARS)
+ LOCAL_MODULE:=opencv_gpu
+ LOCAL_SRC_FILES:=$(OPENCV_LIBS_DIR)/libopencv_gpu.a
+ include $(PREBUILT_STATIC_LIBRARY)
+ endif
+ OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_GPU_ALREADY_INCLUDED:=on
endif
ifeq ($(OPENCV_LOCAL_CFLAGS),)
@@ -136,6 +167,13 @@ ifeq ($(OPENCV_LOCAL_CFLAGS),)
endif
include $(CLEAR_VARS)
+
+LOCAL_C_INCLUDES:=$(USER_LOCAL_C_INCLUDES)
+LOCAL_CFLAGS:=$(USER_LOCAL_CFLAGS)
+LOCAL_STATIC_LIBRARIES:=$(USER_LOCAL_STATIC_LIBRARIES)
+LOCAL_SHARED_LIBRARIES:=$(USER_LOCAL_SHARED_LIBRARIES)
+LOCAL_LDLIBS:=$(USER_LOCAL_LDLIBS)
+
LOCAL_C_INCLUDES += $(OPENCV_LOCAL_C_INCLUDES)
LOCAL_CFLAGS += $(OPENCV_LOCAL_CFLAGS)
@@ -145,6 +183,11 @@ endif
ifeq ($(OPENCV_INSTALL_MODULES),on)
LOCAL_$(OPENCV_LIB_TYPE)_LIBRARIES += $(foreach mod, $(OPENCV_LIBS), opencv_$(mod))
+ ifeq ($(OPENCV_LIB_TYPE),SHARED)
+ ifneq ($(OPENCV_DYNAMICUDA_MODULE),)
+ LOCAL_$(OPENCV_LIB_TYPE)_LIBRARIES += $(OPENCV_DYNAMICUDA_MODULE)
+ endif
+ endif
else
LOCAL_LDLIBS += -L$(call host-path,$(LOCAL_PATH)/$(OPENCV_LIBS_DIR)) $(foreach lib, $(OPENCV_LIBS), -lopencv_$(lib))
endif
@@ -156,8 +199,12 @@ endif
LOCAL_LDLIBS += $(foreach lib,$(OPENCV_EXTRA_COMPONENTS), -l$(lib))
ifeq ($(OPENCV_USE_GPU_MODULE),on)
+ ifeq ($(INSTALL_CUDA_LIBRARIES),on)
+ LOCAL_SHARED_LIBRARIES += $(foreach mod, $(CUDA_RUNTIME_LIBS), $(mod))
+ else
+ LOCAL_LDLIBS += -L$(CUDA_TOOLKIT_DIR)/targets/armv7-linux-androideabi/lib $(foreach lib, $(CUDA_RUNTIME_LIBS), -l$(lib))
+ endif
LOCAL_STATIC_LIBRARIES+=libopencv_gpu
- LOCAL_LDLIBS += -L$(CUDA_TOOLKIT_DIR)/lib $(foreach lib, $(CUDA_RUNTIME_LIBS), -l$(lib))
endif
#restore the LOCAL_PATH
diff --git a/cmake/templates/OpenCVConfig.cmake.in b/cmake/templates/OpenCVConfig.cmake.in
index 6db61d211..6d1c1a990 100644
--- a/cmake/templates/OpenCVConfig.cmake.in
+++ b/cmake/templates/OpenCVConfig.cmake.in
@@ -18,8 +18,8 @@
# This file will define the following variables:
# - OpenCV_LIBS : The list of all imported targets for OpenCV modules.
# - OpenCV_INCLUDE_DIRS : The OpenCV include directories.
-# - OpenCV_COMPUTE_CAPABILITIES : The version of compute capability
-# - OpenCV_ANDROID_NATIVE_API_LEVEL : Minimum required level of Android API
+# - OpenCV_COMPUTE_CAPABILITIES : The version of compute capability.
+# - OpenCV_ANDROID_NATIVE_API_LEVEL : Minimum required level of Android API.
# - OpenCV_VERSION : The version of this OpenCV build: "@OPENCV_VERSION@"
# - OpenCV_VERSION_MAJOR : Major version part of OpenCV_VERSION: "@OPENCV_VERSION_MAJOR@"
# - OpenCV_VERSION_MINOR : Minor version part of OpenCV_VERSION: "@OPENCV_VERSION_MINOR@"
@@ -27,22 +27,26 @@
# - OpenCV_VERSION_TWEAK : Tweak version part of OpenCV_VERSION: "@OPENCV_VERSION_TWEAK@"
#
# Advanced variables:
-# - OpenCV_SHARED
-# - OpenCV_CONFIG_PATH
-# - OpenCV_INSTALL_PATH (not set on Windows)
-# - OpenCV_LIB_COMPONENTS
-# - OpenCV_USE_MANGLED_PATHS
-# - OpenCV_HAVE_ANDROID_CAMERA
+# - OpenCV_SHARED : Use OpenCV as shared library
+# - OpenCV_CONFIG_PATH : Path to this OpenCVConfig.cmake
+# - OpenCV_INSTALL_PATH : OpenCV location (not set on Windows)
+# - OpenCV_LIB_COMPONENTS : Present OpenCV modules list
+# - OpenCV_USE_MANGLED_PATHS : Mangled OpenCV path flag
+# - OpenCV_MODULES_SUFFIX : The suffix for OpenCVModules-XXX.cmake file
+# - OpenCV_HAVE_ANDROID_CAMERA : Presence of Android native camera wrappers
#
# ===================================================================================
-set(modules_file_suffix "")
-if(ANDROID)
- string(REPLACE - _ modules_file_suffix "_${ANDROID_NDK_ABI_NAME}")
+if(NOT DEFINED OpenCV_MODULES_SUFFIX)
+ if(ANDROID)
+ string(REPLACE - _ OpenCV_MODULES_SUFFIX "_${ANDROID_NDK_ABI_NAME}")
+ else()
+ set(OpenCV_MODULES_SUFFIX "")
+ endif()
endif()
if(NOT TARGET opencv_core)
- include(${CMAKE_CURRENT_LIST_DIR}/OpenCVModules${modules_file_suffix}.cmake)
+ include(${CMAKE_CURRENT_LIST_DIR}/OpenCVModules${OpenCV_MODULES_SUFFIX}.cmake)
endif()
# TODO All things below should be reviewed. What is about of moving this code into related modules (special vars/hooks/files)
@@ -56,7 +60,11 @@ set(OpenCV_USE_CUFFT @HAVE_CUFFT@)
set(OpenCV_USE_NVCUVID @HAVE_NVCUVID@)
# Android API level from which OpenCV has been compiled is remembered
-set(OpenCV_ANDROID_NATIVE_API_LEVEL @OpenCV_ANDROID_NATIVE_API_LEVEL_CONFIGCMAKE@)
+if(ANDROID)
+ set(OpenCV_ANDROID_NATIVE_API_LEVEL @OpenCV_ANDROID_NATIVE_API_LEVEL_CONFIGCMAKE@)
+else()
+ set(OpenCV_ANDROID_NATIVE_API_LEVEL 0)
+endif()
# Some additional settings are required if OpenCV is built as static libs
set(OpenCV_SHARED @BUILD_SHARED_LIBS@)
@@ -67,8 +75,8 @@ set(OpenCV_USE_MANGLED_PATHS @OpenCV_USE_MANGLED_PATHS_CONFIGCMAKE@)
# Extract the directory where *this* file has been installed (determined at cmake run-time)
get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH CACHE)
-if(NOT WIN32 OR OpenCV_ANDROID_NATIVE_API_LEVEL GREATER 0)
- if(OpenCV_ANDROID_NATIVE_API_LEVEL GREATER 0)
+if(NOT WIN32 OR ANDROID)
+ if(ANDROID)
set(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../../..")
else()
set(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../..")
@@ -206,7 +214,7 @@ foreach(__opttype OPT DBG)
SET(OpenCV_EXTRA_LIBS_${__opttype} "")
# CUDA
- if(OpenCV_CUDA_VERSION AND (CMAKE_CROSSCOMPILING OR (WIN32 AND NOT OpenCV_SHARED)))
+ if(OpenCV_CUDA_VERSION)
if(NOT CUDA_FOUND)
find_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
else()
@@ -215,32 +223,41 @@ foreach(__opttype OPT DBG)
endif()
endif()
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_LIBRARIES})
+ set(OpenCV_CUDA_LIBS_ABSPATH ${CUDA_LIBRARIES})
if(${CUDA_VERSION} VERSION_LESS "5.5")
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_npp_LIBRARY})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_npp_LIBRARY})
else()
find_cuda_helper_libs(nppc)
find_cuda_helper_libs(nppi)
find_cuda_helper_libs(npps)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
endif()
if(OpenCV_USE_CUBLAS)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUBLAS_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUBLAS_LIBRARIES})
endif()
if(OpenCV_USE_CUFFT)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUFFT_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUFFT_LIBRARIES})
endif()
if(OpenCV_USE_NVCUVID)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvid_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvid_LIBRARIES})
endif()
if(WIN32)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvenc_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvenc_LIBRARIES})
endif()
+
+ set(OpenCV_CUDA_LIBS_RELPATH "")
+ foreach(l ${OpenCV_CUDA_LIBS_ABSPATH})
+ get_filename_component(_tmp ${l} PATH)
+ list(APPEND OpenCV_CUDA_LIBS_RELPATH ${_tmp})
+ endforeach()
+
+ list(REMOVE_DUPLICATES OpenCV_CUDA_LIBS_RELPATH)
+ link_directories(${OpenCV_CUDA_LIBS_RELPATH})
endif()
endforeach()
diff --git a/cmake/templates/cvconfig.h.in b/cmake/templates/cvconfig.h.in
index a6cee6368..d1c9e65d3 100644
--- a/cmake/templates/cvconfig.h.in
+++ b/cmake/templates/cvconfig.h.in
@@ -161,6 +161,6 @@
/* Xine video library */
#cmakedefine HAVE_XINE
-/* Define to 1 if your processor stores words with the most significant byte
+/* Define if your processor stores words with the most significant byte
first (like Motorola and SPARC, unlike Intel and VAX). */
#cmakedefine WORDS_BIGENDIAN
diff --git a/cmake/templates/opencv_run_all_tests_android.sh.in b/cmake/templates/opencv_run_all_tests_android.sh.in
new file mode 100644
index 000000000..93373fa96
--- /dev/null
+++ b/cmake/templates/opencv_run_all_tests_android.sh.in
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+BASE_DIR=`dirname $0`
+OPENCV_TEST_PATH=$BASE_DIR/@TEST_PATH@
+OPENCV_TEST_DATA_PATH=$BASE_DIR/sdk/etc/testdata/
+
+if [ $# -ne 1 ]; then
+ echo "Device architecture is not preset in command line"
+ echo "Tests are available for architectures: `ls -m ${OPENCV_TEST_PATH}`"
+ echo "Usage: $0 "
+ return 1
+else
+ TARGET_ARCH=$1
+fi
+
+if [ -z `which adb` ]; then
+ echo "adb command was not found in PATH"
+ return 1
+fi
+
+adb push $OPENCV_TEST_DATA_PATH /sdcard/opencv_testdata
+
+adb shell "mkdir -p /data/local/tmp/opencv_test"
+SUMMARY_STATUS=0
+for t in "$OPENCV_TEST_PATH/$TARGET_ARCH/"opencv_test_* "$OPENCV_TEST_PATH/$TARGET_ARCH/"opencv_perf_*;
+do
+ test_name=`basename "$t"`
+ report="$test_name-`date --rfc-3339=date`.xml"
+ adb push $t /data/local/tmp/opencv_test/
+ adb shell "export OPENCV_TEST_DATA_PATH=/sdcard/opencv_testdata && /data/local/tmp/opencv_test/$test_name --perf_min_samples=1 --perf_force_samples=1 --gtest_output=xml:/data/local/tmp/opencv_test/$report"
+ adb pull "/data/local/tmp/opencv_test/$report" $report
+ TEST_STATUS=0
+ if [ -e $report ]; then
+ if [ `grep -c "- weighttrimming <weight_trimming>
Specifies
-wheter and how much weight trimming should be used. A decent choice is 0.90.
+whether and how much weight trimming should be used. A decent choice is 0.90.
- eqw
diff --git a/doc/tutorials/core/adding_images/adding_images.rst b/doc/tutorials/core/adding_images/adding_images.rst
index e3135693d..601dbc07e 100644
--- a/doc/tutorials/core/adding_images/adding_images.rst
+++ b/doc/tutorials/core/adding_images/adding_images.rst
@@ -6,12 +6,12 @@ Adding (blending) two images using OpenCV
Goal
=====
-In this tutorial you will learn how to:
+In this tutorial you will learn:
.. container:: enumeratevisibleitemswithsquare
- * What is *linear blending* and why it is useful.
- * Add two images using :add_weighted:`addWeighted <>`
+ * what is *linear blending* and why it is useful;
+ * how to add two images using :add_weighted:`addWeighted <>`
Theory
=======
diff --git a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst
index 613f4e100..a641435f8 100644
--- a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst
+++ b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst
@@ -192,7 +192,7 @@ Explanation
image.convertTo(new_image, -1, alpha, beta);
- where :convert_to:`convertTo <>` would effectively perform *new_image = a*image + beta*. However, we wanted to show you how to access each pixel. In any case, both methods give the same result.
+ where :convert_to:`convertTo <>` would effectively perform *new_image = a*image + beta*. However, we wanted to show you how to access each pixel. In any case, both methods give the same result but convertTo is more optimized and works a lot faster.
Result
=======
diff --git a/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst b/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst
index 42f6a6091..7e1674efa 100644
--- a/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst
+++ b/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst
@@ -31,15 +31,15 @@ Here's a sample code of how to achieve all the stuff enumerated at the goal list
Explanation
===========
-Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may have only one of these extensions and the structure coming from this. They are two kinds of data structures you may serialize: *mappings* (like the STL map) and *element sequence* (like the STL vector>. The difference between these is that in a map every element has a unique name through what you may access it. For sequences you need to go through them to query a specific item.
+Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may have only one of these extensions and the structure coming from this. They are two kinds of data structures you may serialize: *mappings* (like the STL map) and *element sequence* (like the STL vector). The difference between these is that in a map every element has a unique name through what you may access it. For sequences you need to go through them to query a specific item.
-1. **XML\\YAML File Open and Close.** Before you write any content to such file you need to open it and at the end to close it. The XML\YAML data structure in OpenCV is :xmlymlpers:`FileStorage `. To specify that this structure to which file binds on your hard drive you can use either its constructor or the *open()* function of this:
+1. **XML/YAML File Open and Close.** Before you write any content to such file you need to open it and at the end to close it. The XML/YAML data structure in OpenCV is :xmlymlpers:`FileStorage `. To specify that this structure to which file binds on your hard drive you can use either its constructor or the *open()* function of this:
.. code-block:: cpp
string filename = "I.xml";
FileStorage fs(filename, FileStorage::WRITE);
- \\...
+ //...
fs.open(filename, FileStorage::READ);
Either one of this you use the second argument is a constant specifying the type of operations you'll be able to on them: WRITE, READ or APPEND. The extension specified in the file name also determinates the output format that will be used. The output may be even compressed if you specify an extension such as *.xml.gz*.
@@ -64,7 +64,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv
fs["iterationNr"] >> itNr;
itNr = (int) fs["iterationNr"];
-#. **Input\\Output of OpenCV Data structures.** Well these behave exactly just as the basic C++ types:
+#. **Input/Output of OpenCV Data structures.** Well these behave exactly just as the basic C++ types:
.. code-block:: cpp
@@ -77,7 +77,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv
fs["R"] >> R; // Read cv::Mat
fs["T"] >> T;
-#. **Input\\Output of vectors (arrays) and associative maps.** As I mentioned beforehand we can output maps and sequences (array, vector) too. Again we first print the name of the variable and then we have to specify if our output is either a sequence or map.
+#. **Input/Output of vectors (arrays) and associative maps.** As I mentioned beforehand, we can output maps and sequences (array, vector) too. Again we first print the name of the variable and then we have to specify if our output is either a sequence or map.
For sequence before the first element print the "[" character and after the last one the "]" character:
diff --git a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst
index ef0f8640c..b6a18fee8 100644
--- a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst
+++ b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst
@@ -18,7 +18,7 @@ We'll seek answers for the following questions:
Our test case
=============
-Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storing a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result.
+Let us consider a simple color reduction method. By using the unsigned char C and C++ type for matrix item storing, a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result.
In this cases it's common that we make a *color space reduction*. This means that we divide the color space current value with a new input value to end up with fewer colors. For instance every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten and so on.
diff --git a/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst b/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst
index 0549a9c12..f28920e77 100644
--- a/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst
+++ b/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst
@@ -32,14 +32,14 @@ Here's a function that will do this:
.. code-block:: cpp
- void Sharpen(const Mat& myImage,Mat& Result)
+ void Sharpen(const Mat& myImage, Mat& Result)
{
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
- Result.create(myImage.size(),myImage.type());
+ Result.create(myImage.size(), myImage.type());
const int nChannels = myImage.channels();
- for(int j = 1 ; j < myImage.rows-1; ++j)
+ for(int j = 1; j < myImage.rows - 1; ++j)
{
const uchar* previous = myImage.ptr(j - 1);
const uchar* current = myImage.ptr(j );
@@ -47,17 +47,17 @@ Here's a function that will do this:
uchar* output = Result.ptr(j);
- for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+ for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
{
- *output++ = saturate_cast(5*current[i]
- -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+ *output++ = saturate_cast(5 * current[i]
+ -current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
}
}
Result.row(0).setTo(Scalar(0));
- Result.row(Result.rows-1).setTo(Scalar(0));
+ Result.row(Result.rows - 1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
- Result.col(Result.cols-1).setTo(Scalar(0));
+ Result.col(Result.cols - 1).setTo(Scalar(0));
}
At first we make sure that the input images data is in unsigned char format. For this we use the :utilitysystemfunctions:`CV_Assert ` function that throws an error when the expression inside it is false.
@@ -70,14 +70,14 @@ We create an output image with the same size and the same type as our input. As
.. code-block:: cpp
- Result.create(myImage.size(),myImage.type());
+ Result.create(myImage.size(), myImage.type());
const int nChannels = myImage.channels();
We'll use the plain C [] operator to access pixels. Because we need to access multiple rows at the same time we'll acquire the pointers for each of them (a previous, a current and a next line). We need another pointer to where we're going to save the calculation. Then simply access the right items with the [] operator. For moving the output pointer ahead we simply increase this (with one byte) after each operation:
.. code-block:: cpp
- for(int j = 1 ; j < myImage.rows-1; ++j)
+ for(int j = 1; j < myImage.rows - 1; ++j)
{
const uchar* previous = myImage.ptr(j - 1);
const uchar* current = myImage.ptr(j );
@@ -85,21 +85,21 @@ We'll use the plain C [] operator to access pixels. Because we need to access mu
uchar* output = Result.ptr(j);
- for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+ for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
{
- *output++ = saturate_cast(5*current[i]
- -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+ *output++ = saturate_cast(5 * current[i]
+ -current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
}
}
-On the borders of the image the upper notation results inexistent pixel locations (like minus one - minus one). In these points our formula is undefined. A simple solution is to not apply the mask in these points and, for example, set the pixels on the borders to zeros:
+On the borders of the image the upper notation results inexistent pixel locations (like minus one - minus one). In these points our formula is undefined. A simple solution is to not apply the kernel in these points and, for example, set the pixels on the borders to zeros:
.. code-block:: cpp
- Result.row(0).setTo(Scalar(0)); // The top row
- Result.row(Result.rows-1).setTo(Scalar(0)); // The bottom row
- Result.col(0).setTo(Scalar(0)); // The left column
- Result.col(Result.cols-1).setTo(Scalar(0)); // The right column
+ Result.row(0).setTo(Scalar(0)); // The top row
+ Result.row(Result.rows - 1).setTo(Scalar(0)); // The bottom row
+ Result.col(0).setTo(Scalar(0)); // The left column
+ Result.col(Result.cols - 1).setTo(Scalar(0)); // The right column
The filter2D function
=====================
@@ -116,7 +116,7 @@ Then call the :filtering:`filter2D ` function specifying the input, th
.. code-block:: cpp
- filter2D(I, K, I.depth(), kern );
+ filter2D(I, K, I.depth(), kern);
The function even has a fifth optional argument to specify the center of the kernel, and a sixth one for determining what to do in the regions where the operation is undefined (borders). Using this function has the advantage that it's shorter, less verbose and because there are some optimization techniques implemented it is usually faster than the *hand-coded method*. For example in my test while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some difference.
diff --git a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst
index 171d2e683..736aceb02 100644
--- a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst
+++ b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst
@@ -45,7 +45,7 @@ All the above objects, in the end, point to the same single data matrix. Their h
:linenos:
Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
- Mat E = A(Range:all(), Range(1,3)); // using row and column boundaries
+ Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
Now you may ask if the matrix itself may belong to multiple *Mat* objects who takes responsibility for cleaning it up when it's no longer needed. The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix too is freed. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() ` and :basicstructures:`copyTo() ` functions.
@@ -86,7 +86,7 @@ Each of the building components has their own valid domains. This leads to the d
Creating a *Mat* object explicitly
==================================
-In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
+In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readwriteimagevideo:`imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways:
@@ -113,7 +113,7 @@ Although *Mat* works really well as an image container, it is also a general mat
For instance, *CV_8UC3* means we use unsigned char types that are 8 bit long and each pixel has three of these to form the three channels. This are predefined for up to four channel numbers. The :basicstructures:`Scalar ` is four element short vector. Specify this and you can initialize all matrix points with a custom value. If you need more you can create the type with the upper macro, setting the channel number in parenthesis as you can see below.
- + Use C\\C++ arrays and initialize via constructor
+ + Use C/C++ arrays and initialize via constructor
.. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
:language: cpp
diff --git a/doc/tutorials/images/viz.jpg b/doc/tutorials/images/viz.jpg
new file mode 100644
index 000000000..7ac8f3ed8
Binary files /dev/null and b/doc/tutorials/images/viz.jpg differ
diff --git a/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.rst b/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.rst
index 1a5c59de0..f5f636d08 100644
--- a/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.rst
+++ b/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.rst
@@ -84,88 +84,10 @@ Code
* **Code at glance:**
-.. code-block:: cpp
+.. literalinclude:: ../../../../../samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
+ :language: cpp
+ :tab-width: 4
- #include "opencv2/highgui/highgui.hpp"
- #include "opencv2/imgproc/imgproc.hpp"
- #include
- #include
-
- using namespace std;
- using namespace cv;
-
- /** @function main */
- int main( int argc, char** argv )
- {
- Mat src_base, hsv_base;
- Mat src_test1, hsv_test1;
- Mat src_test2, hsv_test2;
- Mat hsv_half_down;
-
- /// Load three images with different environment settings
- if( argc < 4 )
- { printf("** Error. Usage: ./compareHist_Demo \n");
- return -1;
- }
-
- src_base = imread( argv[1], 1 );
- src_test1 = imread( argv[2], 1 );
- src_test2 = imread( argv[3], 1 );
-
- /// Convert to HSV
- cvtColor( src_base, hsv_base, CV_BGR2HSV );
- cvtColor( src_test1, hsv_test1, CV_BGR2HSV );
- cvtColor( src_test2, hsv_test2, CV_BGR2HSV );
-
- hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
-
- /// Using 30 bins for hue and 32 for saturation
- int h_bins = 50; int s_bins = 60;
- int histSize[] = { h_bins, s_bins };
-
- // hue varies from 0 to 256, saturation from 0 to 180
- float h_ranges[] = { 0, 256 };
- float s_ranges[] = { 0, 180 };
-
- const float* ranges[] = { h_ranges, s_ranges };
-
- // Use the o-th and 1-st channels
- int channels[] = { 0, 1 };
-
- /// Histograms
- MatND hist_base;
- MatND hist_half_down;
- MatND hist_test1;
- MatND hist_test2;
-
- /// Calculate the histograms for the HSV images
- calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
- normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
-
- calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
- normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );
-
- calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
- normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
-
- calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
- normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
-
- /// Apply the histogram comparison methods
- for( int i = 0; i < 4; i++ )
- { int compare_method = i;
- double base_base = compareHist( hist_base, hist_base, compare_method );
- double base_half = compareHist( hist_base, hist_half_down, compare_method );
- double base_test1 = compareHist( hist_base, hist_test1, compare_method );
- double base_test2 = compareHist( hist_base, hist_test2, compare_method );
-
- printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
- }
-
- printf( "Done \n" );
-
- return 0;
- }
Explanation
@@ -211,11 +133,11 @@ Explanation
.. code-block:: cpp
- int h_bins = 50; int s_bins = 32;
+ int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
- float h_ranges[] = { 0, 256 };
- float s_ranges[] = { 0, 180 };
+ float h_ranges[] = { 0, 180 };
+ float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
diff --git a/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst b/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst
index cb7ece86f..db6838a12 100644
--- a/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst
+++ b/doc/tutorials/imgproc/histograms/template_matching/template_matching.rst
@@ -85,7 +85,7 @@ d. **method=CV\_TM\_CCORR\_NORMED**
.. math::
- R(x,y)= \frac{\sum_{x',y'} (T(x',y') \cdot I'(x+x',y+y'))}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}
+ R(x,y)= \frac{\sum_{x',y'} (T(x',y') \cdot I(x+x',y+y'))}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}
e. **method=CV\_TM\_CCOEFF**
diff --git a/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst b/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst
index 9a683ea49..a70a98d52 100644
--- a/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst
+++ b/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst
@@ -48,10 +48,10 @@ The structure of package contents looks as follows:
::
- OpenCV-2.4.8-android-sdk
+ OpenCV-2.4.9-android-sdk
|_ apk
- | |_ OpenCV_2.4.8_binary_pack_armv7a.apk
- | |_ OpenCV_2.4.8_Manager_2.16_XXX.apk
+ | |_ OpenCV_2.4.9_binary_pack_armv7a.apk
+ | |_ OpenCV_2.4.9_Manager_2.18_XXX.apk
|
|_ doc
|_ samples
@@ -66,7 +66,7 @@ The structure of package contents looks as follows:
| |_ armeabi-v7a
| |_ x86
|
- |_ license.txt
+ |_ LICENSE
|_ README.android
* :file:`sdk` folder contains OpenCV API and libraries for Android:
@@ -157,10 +157,10 @@ Get the OpenCV4Android SDK
.. code-block:: bash
- unzip ~/Downloads/OpenCV-2.4.8-android-sdk.zip
+ unzip ~/Downloads/OpenCV-2.4.9-android-sdk.zip
-.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.8-android-sdk.zip`
-.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.8/OpenCV-2.4.8-android-sdk.zip/download
+.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.9-android-sdk.zip`
+.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.9/OpenCV-2.4.9-android-sdk.zip/download
.. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack|
.. |seven_zip| replace:: 7-Zip
.. _seven_zip: http://www.7-zip.org/
@@ -295,7 +295,7 @@ Well, running samples from Eclipse is very simple:
.. code-block:: sh
:linenos:
- /platform-tools/adb install /apk/OpenCV_2.4.8_Manager_2.16_armv7a-neon.apk
+ /platform-tools/adb install /apk/OpenCV_2.4.9_Manager_2.18_armv7a-neon.apk
.. note:: ``armeabi``, ``armv7a-neon``, ``arm7a-neon-android8``, ``mips`` and ``x86`` stand for
platform targets:
diff --git a/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst b/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst
index 3d7268c80..6fe8f6b94 100644
--- a/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst
+++ b/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst
@@ -55,14 +55,14 @@ Manager to access OpenCV libraries externally installed in the target system.
:guilabel:`File -> Import -> Existing project in your workspace`.
Press :guilabel:`Browse` button and locate OpenCV4Android SDK
- (:file:`OpenCV-2.4.8-android-sdk/sdk`).
+ (:file:`OpenCV-2.4.9-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In application project add a reference to the OpenCV Java SDK in
- :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.8``.
+ :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.9``.
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
@@ -128,27 +128,27 @@ described above.
#. Add the OpenCV library project to your workspace the same way as for the async initialization
above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
press :guilabel:`Browse` button and select OpenCV SDK path
- (:file:`OpenCV-2.4.8-android-sdk/sdk`).
+ (:file:`OpenCV-2.4.9-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In the application project add a reference to the OpenCV4Android SDK in
- :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.8``;
+ :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.9``;
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
:align: center
#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV
- native libs from :file:`/sdk/native/libs/` to your
+ native libs from :file:`/sdk/native/libs/` to your
project directory to folder :file:`libs/`.
In case of the application project **with a JNI part**, instead of manual libraries copying you
need to modify your ``Android.mk`` file:
add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before
- ``"include path_to_OpenCV-2.4.8-android-sdk/sdk/native/jni/OpenCV.mk"``
+ ``"include path_to_OpenCV-2.4.9-android-sdk/sdk/native/jni/OpenCV.mk"``
.. code-block:: make
:linenos:
@@ -221,7 +221,7 @@ taken:
.. code-block:: make
- include C:\Work\OpenCV4Android\OpenCV-2.4.8-android-sdk\sdk\native\jni\OpenCV.mk
+ include C:\Work\OpenCV4Android\OpenCV-2.4.9-android-sdk\sdk\native\jni\OpenCV.mk
Should be inserted into the :file:`jni/Android.mk` file **after** this line:
@@ -382,7 +382,7 @@ result.
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_6, this, mLoaderCallback);
}
-#. Defines that your activity implements ``CvViewFrameListener2`` interface and fix activity related
+#. Defines that your activity implements ``CvCameraViewListener2`` interface and fix activity related
errors by defining missed methods. For this activity define ``onCreate``, ``onDestroy`` and
``onPause`` and implement them according code snippet bellow. Fix errors by adding requited
imports.
@@ -432,7 +432,7 @@ result.
Lets discuss some most important steps. Every Android application with UI must implement Activity
and View. By the first steps we create blank activity and default view layout. The simplest
OpenCV-centric application must implement OpenCV initialization, create its own view to show
-preview from camera and implements ``CvViewFrameListener2`` interface to get frames from camera and
+preview from camera and implements ``CvCameraViewListener2`` interface to get frames from camera and
process it.
First of all we create our application view using xml layout. Our layout consists of the only
diff --git a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst
index f582d3208..9aa1f6289 100644
--- a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst
+++ b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst
@@ -25,29 +25,34 @@ Let's use a simple program such as DisplayImage.cpp shown below.
.. code-block:: cpp
- #include
- #include
+ #include
+ #include
- using namespace cv;
+ using namespace cv;
- int main( int argc, char** argv )
- {
- Mat image;
- image = imread( argv[1], 1 );
+ int main(int argc, char** argv )
+ {
+ if ( argc != 2 )
+ {
+ printf("usage: DisplayImage.out \n");
+ return -1;
+ }
- if( argc != 2 || !image.data )
- {
- printf( "No image data \n" );
- return -1;
- }
+ Mat image;
+ image = imread( argv[1], 1 );
- namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
- imshow( "Display Image", image );
+ if ( !image.data )
+ {
+ printf("No image data \n");
+ return -1;
+ }
+ namedWindow("Display Image", CV_WINDOW_AUTOSIZE );
+ imshow("Display Image", image);
- waitKey(0);
+ waitKey(0);
- return 0;
- }
+ return 0;
+ }
Create a CMake file
---------------------
diff --git a/doc/tutorials/introduction/linux_install/linux_install.rst b/doc/tutorials/introduction/linux_install/linux_install.rst
index 1e02b64c9..b0dcf6236 100644
--- a/doc/tutorials/introduction/linux_install/linux_install.rst
+++ b/doc/tutorials/introduction/linux_install/linux_install.rst
@@ -7,22 +7,24 @@ These steps have been tested for Ubuntu 10.04 but should work with other distros
Required Packages
=================
- * GCC 4.4.x or later. This can be installed with:
+ * GCC 4.4.x or later
+ * CMake 2.6 or higher
+ * Git
+ * GTK+2.x or higher, including headers (libgtk2.0-dev)
+ * pkg-config
+ * Python 2.6 or later and Numpy 1.5 or later with developer packages (python-dev, python-numpy)
+ * ffmpeg or libav development packages: libavcodec-dev, libavformat-dev, libswscale-dev
+ * [optional] libtbb2 libtbb-dev
+ * [optional] libdc1394 2.x
+ * [optional] libjpeg-dev, libpng-dev, libtiff-dev, libjasper-dev, libdc1394-22-dev
+
+The packages can be installed using a terminal and the following commands or by using Synaptic Manager:
.. code-block:: bash
- sudo apt-get install build-essential
-
- * CMake 2.6 or higher;
- * Git;
- * GTK+2.x or higher, including headers (libgtk2.0-dev);
- * pkgconfig;
- * Python 2.6 or later and Numpy 1.5 or later with developer packages (python-dev, python-numpy);
- * ffmpeg or libav development packages: libavcodec-dev, libavformat-dev, libswscale-dev;
- * [optional] libdc1394 2.x;
- * [optional] libjpeg-dev, libpng-dev, libtiff-dev, libjasper-dev.
-
-All the libraries above can be installed via Terminal or by using Synaptic Manager.
+ [compiler] sudo apt-get install build-essential
+ [required] sudo apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
+ [optional] sudo apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
Getting OpenCV Source Code
==========================
diff --git a/doc/tutorials/introduction/load_save_image/load_save_image.rst b/doc/tutorials/introduction/load_save_image/load_save_image.rst
index 50fb9ea37..6f8150a00 100644
--- a/doc/tutorials/introduction/load_save_image/load_save_image.rst
+++ b/doc/tutorials/introduction/load_save_image/load_save_image.rst
@@ -5,7 +5,7 @@ Load, Modify, and Save an Image
.. note::
- We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise.
+ We assume that by now you know how to load an image using :readwriteimagevideo:`imread ` and to display it in a window (using :user_interface:`imshow `). Read the :ref:`Display_Image` tutorial otherwise.
Goals
======
@@ -14,9 +14,9 @@ In this tutorial you will learn how to:
.. container:: enumeratevisibleitemswithsquare
- * Load an image using :imread:`imread <>`
- * Transform an image from BGR to Grayscale format by using :cvt_color:`cvtColor <>`
- * Save your transformed image in a file on disk (using :imwrite:`imwrite <>`)
+ * Load an image using :readwriteimagevideo:`imread `
+ * Transform an image from BGR to Grayscale format by using :miscellaneous_transformations:`cvtColor `
+ * Save your transformed image in a file on disk (using :readwriteimagevideo:`imwrite `)
Code
======
@@ -63,10 +63,7 @@ Here it is:
Explanation
============
-#. We begin by:
-
- * Creating a Mat object to store the image information
- * Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image.
+#. We begin by loading an image using :readwriteimagevideo:`imread `, located in the path given by *imageName*. For this example, assume you are loading a RGB image.
#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations:
@@ -74,15 +71,15 @@ Explanation
cvtColor( image, gray_image, CV_BGR2GRAY );
- As you can see, :cvt_color:`cvtColor <>` takes as arguments:
+ As you can see, :miscellaneous_transformations:`cvtColor ` takes as arguments:
.. container:: enumeratevisibleitemswithsquare
* a source image (*image*)
* a destination image (*gray_image*), in which we will save the converted image.
- * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images).
+ * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :readwriteimagevideo:`imread ` has BGR default channel order in case of color images).
-#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :imread:`imread <>`: :imwrite:`imwrite <>`
+#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analogous to :readwriteimagevideo:`imread `: :readwriteimagevideo:`imwrite `
.. code-block:: cpp
@@ -100,7 +97,7 @@ Explanation
imshow( imageName, image );
imshow( "Gray image", gray_image );
-#. Add add the *waitKey(0)* function call for the program to wait forever for an user key press.
+#. Add the *waitKey(0)* function call for the program to wait forever for a user key press.
Result
diff --git a/doc/tutorials/introduction/windows_install/windows_install.rst b/doc/tutorials/introduction/windows_install/windows_install.rst
index c29c13aed..abb39cd13 100644
--- a/doc/tutorials/introduction/windows_install/windows_install.rst
+++ b/doc/tutorials/introduction/windows_install/windows_install.rst
@@ -55,7 +55,7 @@ Building the OpenCV library from scratch requires a couple of tools installed be
.. |TortoiseGit| replace:: TortoiseGit
.. _TortoiseGit: http://code.google.com/p/tortoisegit/wiki/Download
.. |Python_Libraries| replace:: Python libraries
-.. _Python_Libraries: http://www.python.org/getit/
+.. _Python_Libraries: http://www.python.org/downloads/
.. |Numpy| replace:: Numpy
.. _Numpy: http://numpy.scipy.org/
.. |IntelTBB| replace:: Intel |copy| Threading Building Blocks (*TBB*)
@@ -81,7 +81,7 @@ Building the OpenCV library from scratch requires a couple of tools installed be
+ An IDE of choice (preferably), or just a C\C++ compiler that will actually make the binary files. Here we will use the `Microsoft Visual Studio `_. However, you can use any other IDE that has a valid C\C++ compiler.
- + |CMake|_, which is a neat tool to make the project files (for your choosen IDE) from the OpenCV source files. It will also allow an easy configuration of the OpenCV build files, in order to make binary files that fits exactly to your needs.
+ + |CMake|_, which is a neat tool to make the project files (for your chosen IDE) from the OpenCV source files. It will also allow an easy configuration of the OpenCV build files, in order to make binary files that fits exactly to your needs.
+ Git to acquire the OpenCV source files. A good tool for this is |TortoiseGit|_. Alternatively, you can just download an archived version of the source files from our `page on Sourceforge `_
diff --git a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst
index 3fd734f35..2ec616fbc 100644
--- a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst
+++ b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst
@@ -90,17 +90,25 @@ A full list, for the latest version would contain:
.. code-block:: bash
- opencv_core231d.lib
- opencv_imgproc231d.lib
- opencv_highgui231d.lib
- opencv_ml231d.lib
- opencv_video231d.lib
- opencv_features2d231d.lib
- opencv_calib3d231d.lib
- opencv_objdetect231d.lib
- opencv_contrib231d.lib
- opencv_legacy231d.lib
- opencv_flann231d.lib
+ opencv_calib3d249d.lib
+ opencv_contrib249d.lib
+ opencv_core249d.lib
+ opencv_features2d249d.lib
+ opencv_flann249d.lib
+ opencv_gpu249d.lib
+ opencv_highgui249d.lib
+ opencv_imgproc249d.lib
+ opencv_legacy249d.lib
+ opencv_ml249d.lib
+ opencv_nonfree249d.lib
+ opencv_objdetect249d.lib
+ opencv_ocl249d.lib
+ opencv_photo249d.lib
+ opencv_stitching249d.lib
+ opencv_superres249d.lib
+ opencv_ts249d.lib
+ opencv_video249d.lib
+ opencv_videostab249d.lib
The letter *d* at the end just indicates that these are the libraries required for the debug. Now click ok to save and do the same with a new property inside the Release rule section. Make sure to omit the *d* letters from the library names and to save the property sheets with the save icon above them.
diff --git a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst
index 1337ff3a1..f7d7a1506 100644
--- a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst
+++ b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst
@@ -78,6 +78,8 @@ Make sure your active solution configuration (:menuselection:`Build --> Configur
Build your solution (:menuselection:`Build --> Build Solution`, or press *F7*).
+Before continuing, do not forget to add the command line argument of your input image to your project (:menuselection:`Right click on project --> Properties --> Configuration Properties --> Debugging` and then set the field ``Command Arguments`` with the location of the image).
+
Now set a breakpoint on the source line that says
.. code-block:: c++
diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst
index 50f734803..574071de7 100644
--- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst
+++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst
@@ -105,8 +105,8 @@ Explanation
.. code-block:: cpp
- Mat trainingDataMat(3, 2, CV_32FC1, trainingData);
- Mat labelsMat (3, 1, CV_32FC1, labels);
+ Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
+ Mat labelsMat (4, 1, CV_32FC1, labels);
2. **Set up SVM's parameters**
diff --git a/doc/tutorials/tutorials.rst b/doc/tutorials/tutorials.rst
index cbc51c195..3c4e9213e 100644
--- a/doc/tutorials/tutorials.rst
+++ b/doc/tutorials/tutorials.rst
@@ -102,7 +102,7 @@ As always, we would be happy to hear your comments and receive your contribution
.. cssclass:: toctableopencv
=========== =======================================================
- |Video| Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions.
+ |Video| Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions.
=========== =======================================================
@@ -186,6 +186,21 @@ As always, we would be happy to hear your comments and receive your contribution
:width: 80pt
:alt: gpu icon
+* :ref:`Table-Of-Content-Viz`
+
+ .. tabularcolumns:: m{100pt} m{300pt}
+ .. cssclass:: toctableopencv
+
+ =========== =======================================================
+ |Viz| These tutorials show how to use Viz module effectively.
+
+ =========== =======================================================
+
+ .. |Viz| image:: images/viz.jpg
+ :height: 80pt
+ :width: 80pt
+ :alt: viz icon
+
* :ref:`Table-Of-Content-General`
.. tabularcolumns:: m{100pt} m{300pt}
@@ -221,4 +236,5 @@ As always, we would be happy to hear your comments and receive your contribution
gpu/table_of_content_gpu/table_of_content_gpu
contrib/table_of_content_contrib/table_of_content_contrib
ios/table_of_content_ios/table_of_content_ios
+ viz/table_of_content_viz/table_of_content_viz
general/table_of_content_general/table_of_content_general
diff --git a/doc/tutorials/video/table_of_content_video/table_of_content_video.rst b/doc/tutorials/video/table_of_content_video/table_of_content_video.rst
index a2521d695..ddcad0c70 100644
--- a/doc/tutorials/video/table_of_content_video/table_of_content_video.rst
+++ b/doc/tutorials/video/table_of_content_video/table_of_content_video.rst
@@ -3,7 +3,7 @@
*video* module. Video analysis
-----------------------------------------------------------
-Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions.
+Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions.
.. include:: ../../definitions/noContent.rst
diff --git a/doc/tutorials/viz/creating_widgets/creating_widgets.rst b/doc/tutorials/viz/creating_widgets/creating_widgets.rst
new file mode 100644
index 000000000..8858035c3
--- /dev/null
+++ b/doc/tutorials/viz/creating_widgets/creating_widgets.rst
@@ -0,0 +1,159 @@
+.. _creating_widgets:
+
+Creating Widgets
+****************
+
+Goal
+====
+
+In this tutorial you will learn how to
+
+.. container:: enumeratevisibleitemswithsquare
+
+ * Create your own widgets using WidgetAccessor and VTK.
+ * Show your widget in the visualization window.
+
+Code
+====
+
+You can download the code from :download:`here <../../../../samples/cpp/tutorial_code/viz/creating_widgets.cpp>`.
+
+.. code-block:: cpp
+
+ #include
+ #include
+ #include
+
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ using namespace cv;
+ using namespace std;
+
+ /**
+ * @class WTriangle
+ * @brief Defining our own 3D Triangle widget
+ */
+ class WTriangle : public viz::Widget3D
+ {
+ public:
+ WTriangle(const Point3f &pt1, const Point3f &pt2, const Point3f &pt3, const viz::Color & color = viz::Color::white());
+ };
+
+ /**
+ * @function WTriangle::WTriangle
+ */
+ WTriangle::WTriangle(const Point3f &pt1, const Point3f &pt2, const Point3f &pt3, const viz::Color & color)
+ {
+ // Create a triangle
+ vtkSmartPointer points = vtkSmartPointer::New();
+ points->InsertNextPoint(pt1.x, pt1.y, pt1.z);
+ points->InsertNextPoint(pt2.x, pt2.y, pt2.z);
+ points->InsertNextPoint(pt3.x, pt3.y, pt3.z);
+
+ vtkSmartPointer triangle = vtkSmartPointer::New();
+ triangle->GetPointIds()->SetId(0,0);
+ triangle->GetPointIds()->SetId(1,1);
+ triangle->GetPointIds()->SetId(2,2);
+
+ vtkSmartPointer cells = vtkSmartPointer::New();
+ cells->InsertNextCell(triangle);
+
+ // Create a polydata object
+ vtkSmartPointer polyData = vtkSmartPointer::New();
+
+ // Add the geometry and topology to the polydata
+ polyData->SetPoints(points);
+ polyData->SetPolys(cells);
+
+ // Create mapper and actor
+ vtkSmartPointer mapper = vtkSmartPointer::New();
+ #if VTK_MAJOR_VERSION <= 5
+ mapper->SetInput(polyData);
+ #else
+ mapper->SetInputData(polyData);
+ #endif
+
+ vtkSmartPointer actor = vtkSmartPointer::New();
+ actor->SetMapper(mapper);
+
+ // Store this actor in the widget in order that visualizer can access it
+ viz::WidgetAccessor::setProp(*this, actor);
+
+ // Set the color of the widget. This has to be called after WidgetAccessor.
+ setColor(color);
+ }
+
+ /**
+ * @function main
+ */
+ int main()
+ {
+ /// Create a window
+ viz::Viz3d myWindow("Creating Widgets");
+
+ /// Create a triangle widget
+ WTriangle tw(Point3f(0.0,0.0,0.0), Point3f(1.0,1.0,1.0), Point3f(0.0,1.0,0.0), viz::Color::red());
+
+ /// Show widget in the visualizer window
+ myWindow.showWidget("TRIANGLE", tw);
+
+ /// Start event loop
+ myWindow.spin();
+
+ return 0;
+ }
+
+Explanation
+===========
+
+Here is the general structure of the program:
+
+* Extend Widget3D class to create a new 3D widget.
+
+.. code-block:: cpp
+
+ class WTriangle : public viz::Widget3D
+ {
+ public:
+ WTriangle(const Point3f &pt1, const Point3f &pt2, const Point3f &pt3, const viz::Color & color = viz::Color::white());
+ };
+
+* Assign a VTK actor to the widget.
+
+.. code-block:: cpp
+
+ // Store this actor in the widget in order that visualizer can access it
+ viz::WidgetAccessor::setProp(*this, actor);
+
+* Set color of the widget.
+
+.. code-block:: cpp
+
+ // Set the color of the widget. This has to be called after WidgetAccessor.
+ setColor(color);
+
+* Construct a triangle widget and display it in the window.
+
+.. code-block:: cpp
+
+ /// Create a triangle widget
+ WTriangle tw(Point3f(0.0,0.0,0.0), Point3f(1.0,1.0,1.0), Point3f(0.0,1.0,0.0), viz::Color::red());
+
+ /// Show widget in the visualizer window
+ myWindow.showWidget("TRIANGLE", tw);
+
+Results
+=======
+
+Here is the result of the program.
+
+.. image:: images/red_triangle.png
+ :alt: Creating Widgets
+ :align: center
diff --git a/doc/tutorials/viz/creating_widgets/images/red_triangle.png b/doc/tutorials/viz/creating_widgets/images/red_triangle.png
new file mode 100644
index 000000000..7da6ad060
Binary files /dev/null and b/doc/tutorials/viz/creating_widgets/images/red_triangle.png differ
diff --git a/doc/tutorials/viz/launching_viz/images/window_demo.png b/doc/tutorials/viz/launching_viz/images/window_demo.png
new file mode 100644
index 000000000..b853fe29d
Binary files /dev/null and b/doc/tutorials/viz/launching_viz/images/window_demo.png differ
diff --git a/doc/tutorials/viz/launching_viz/launching_viz.rst b/doc/tutorials/viz/launching_viz/launching_viz.rst
new file mode 100644
index 000000000..a3dd5d93c
--- /dev/null
+++ b/doc/tutorials/viz/launching_viz/launching_viz.rst
@@ -0,0 +1,118 @@
+.. _launching_viz:
+
+Launching Viz
+*************
+
+Goal
+====
+
+In this tutorial you will learn how to
+
+.. container:: enumeratevisibleitemswithsquare
+
+ * Open a visualization window.
+ * Access a window by its name.
+ * Start event loop.
+ * Start event loop for a given amount of time.
+
+Code
+====
+
+You can download the code from :download:`here <../../../../samples/cpp/tutorial_code/viz/launching_viz.cpp>`.
+
+.. code-block:: cpp
+
+ #include <opencv2/viz.hpp>
+ #include <iostream>
+
+ using namespace cv;
+ using namespace std;
+
+ /**
+ * @function main
+ */
+ int main()
+ {
+ /// Create a window
+ viz::Viz3d myWindow("Viz Demo");
+
+ /// Start event loop
+ myWindow.spin();
+
+ /// Event loop is over when pressed q, Q, e, E
+ cout << "First event loop is over" << endl;
+
+ /// Access window via its name
+ viz::Viz3d sameWindow = viz::getWindowByName("Viz Demo");
+
+ /// Start event loop
+ sameWindow.spin();
+
+ /// Event loop is over when pressed q, Q, e, E
+ cout << "Second event loop is over" << endl;
+
+ /// Event loop is over when pressed q, Q, e, E
+ /// Start event loop once for 1 millisecond
+ sameWindow.spinOnce(1, true);
+ while(!sameWindow.wasStopped())
+ {
+ /// Interact with window
+
+ /// Event loop for 1 millisecond
+ sameWindow.spinOnce(1, true);
+ }
+
+ /// Once more event loop is stopped
+ cout << "Last event loop is over" << endl;
+ return 0;
+ }
+
+Explanation
+===========
+
+Here is the general structure of the program:
+
+* Create a window.
+
+.. code-block:: cpp
+
+ /// Create a window
+ viz::Viz3d myWindow("Viz Demo");
+
+* Start event loop. This event loop will run until user terminates it by pressing **e**, **E**, **q**, **Q**.
+
+.. code-block:: cpp
+
+ /// Start event loop
+ myWindow.spin();
+
+* Access same window via its name. Since windows are implicitly shared, **sameWindow** is exactly the same as **myWindow**. If the name does not exist, a new window is created.
+
+.. code-block:: cpp
+
+ /// Access window via its name
+ viz::Viz3d sameWindow = viz::getWindowByName("Viz Demo");
+
+* Start a controlled event loop. Once it starts, **wasStopped** is set to false. Inside the while loop, in each iteration, **spinOnce** is called to prevent event loop from completely stopping. Inside the while loop, user can execute other statements including those which interact with the window.
+
+.. code-block:: cpp
+
+ /// Event loop is over when pressed q, Q, e, E
+ /// Start event loop once for 1 millisecond
+ sameWindow.spinOnce(1, true);
+ while(!sameWindow.wasStopped())
+ {
+ /// Interact with window
+
+ /// Event loop for 1 millisecond
+ sameWindow.spinOnce(1, true);
+ }
+
+Results
+=======
+
+Here is the result of the program.
+
+.. image:: images/window_demo.png
+ :alt: Launching Viz
+ :align: center
diff --git a/doc/tutorials/viz/table_of_content_viz/images/facedetect.jpg b/doc/tutorials/viz/table_of_content_viz/images/facedetect.jpg
new file mode 100644
index 000000000..788b7d826
Binary files /dev/null and b/doc/tutorials/viz/table_of_content_viz/images/facedetect.jpg differ
diff --git a/doc/tutorials/viz/table_of_content_viz/images/image_effects.png b/doc/tutorials/viz/table_of_content_viz/images/image_effects.png
new file mode 100644
index 000000000..1a675ce74
Binary files /dev/null and b/doc/tutorials/viz/table_of_content_viz/images/image_effects.png differ
diff --git a/doc/tutorials/viz/table_of_content_viz/images/intro.png b/doc/tutorials/viz/table_of_content_viz/images/intro.png
new file mode 100644
index 000000000..4f54cfb91
Binary files /dev/null and b/doc/tutorials/viz/table_of_content_viz/images/intro.png differ
diff --git a/doc/tutorials/viz/table_of_content_viz/table_of_content_viz.rst b/doc/tutorials/viz/table_of_content_viz/table_of_content_viz.rst
new file mode 100644
index 000000000..c3d08fe17
--- /dev/null
+++ b/doc/tutorials/viz/table_of_content_viz/table_of_content_viz.rst
@@ -0,0 +1,94 @@
+.. _Table-Of-Content-Viz:
+
+**OpenCV Viz**
+-----------------------------------------------------------
+
+.. include:: ../../definitions/tocDefinitions.rst
+
++
+ .. tabularcolumns:: m{100pt} m{300pt}
+ .. cssclass:: toctableopencv
+
+ ================== ===============================================================================
+ |VizLaunchingViz| **Title:** :ref:`launching_viz`
+
+ *Compatibility:* > OpenCV 3.0.0
+
+ *Author:* Ozan Tonkal
+
+ You will learn how to launch a viz window.
+
+ ================== ===============================================================================
+
+ .. |VizLaunchingViz| image:: ../launching_viz/images/window_demo.png
+ :height: 120pt
+ :width: 90pt
+
++
+ .. tabularcolumns:: m{100pt} m{300pt}
+ .. cssclass:: toctableopencv
+
+ ================ ============================================================================
+ |WidgetPose| **Title:** :ref:`widget_pose`
+
+ *Compatibility:* > OpenCV 3.0.0
+
+ *Author:* Ozan Tonkal
+
+ You will learn how to change pose of a widget.
+
+ ================ ============================================================================
+
+ .. |WidgetPose| image:: ../widget_pose/images/widgetpose.png
+ :height: 90pt
+ :width: 90pt
+
++
+ .. tabularcolumns:: m{100pt} m{300pt}
+ .. cssclass:: toctableopencv
+
+ ================== ============================================================================
+ |Transformations| **Title:** :ref:`transformations`
+
+ *Compatibility:* > OpenCV 3.0.0
+
+ *Author:* Ozan Tonkal
+
+ You will learn how to transform between global and camera frames.
+
+ ================== ============================================================================
+
+ .. |Transformations| image:: ../transformations/images/global_view_point.png
+ :height: 120pt
+ :width: 90pt
+
++
+ .. tabularcolumns:: m{100pt} m{300pt}
+ .. cssclass:: toctableopencv
+
+ ================== ============================================================================
+ |CreatingWidgets| **Title:** :ref:`creating_widgets`
+
+ *Compatibility:* > OpenCV 3.0.0
+
+ *Author:* Ozan Tonkal
+
+ You will learn how to create your own widgets.
+
+ ================== ============================================================================
+
+ .. |CreatingWidgets| image:: ../creating_widgets/images/red_triangle.png
+ :height: 120pt
+ :width: 90pt
+
+.. raw:: latex
+
+ \pagebreak
+
+.. toctree::
+ :hidden:
+
+ ../launching_viz/launching_viz
+ ../widget_pose/widget_pose
+ ../transformations/transformations
+ ../creating_widgets/creating_widgets
diff --git a/doc/tutorials/viz/transformations/images/camera_view_point.png b/doc/tutorials/viz/transformations/images/camera_view_point.png
new file mode 100644
index 000000000..e2ac5b0f0
Binary files /dev/null and b/doc/tutorials/viz/transformations/images/camera_view_point.png differ
diff --git a/doc/tutorials/viz/transformations/images/global_view_point.png b/doc/tutorials/viz/transformations/images/global_view_point.png
new file mode 100644
index 000000000..fc6de2c1a
Binary files /dev/null and b/doc/tutorials/viz/transformations/images/global_view_point.png differ
diff --git a/doc/tutorials/viz/transformations/transformations.rst b/doc/tutorials/viz/transformations/transformations.rst
new file mode 100644
index 000000000..d1f2d0c2e
--- /dev/null
+++ b/doc/tutorials/viz/transformations/transformations.rst
@@ -0,0 +1,202 @@
+.. _transformations:
+
+Transformations
+***************
+
+Goal
+====
+
+In this tutorial you will learn how to
+
+.. container:: enumeratevisibleitemswithsquare
+
+ * Use makeTransformToGlobal to compute pose
+ * Use makeCameraPose and Viz3d::setViewerPose
+ * Visualize camera position by axes and by viewing frustum
+
+Code
+====
+
+You can download the code from :download:`here <../../../../samples/cpp/tutorial_code/viz/transformations.cpp>`.
+
+.. code-block:: cpp
+
+ #include <opencv2/viz.hpp>
+ #include <iostream>
+ #include <fstream>
+
+ using namespace cv;
+ using namespace std;
+
+ /**
+ * @function cvcloud_load
+ * @brief load bunny.ply
+ */
+ Mat cvcloud_load()
+ {
+ Mat cloud(1, 1889, CV_32FC3);
+ ifstream ifs("bunny.ply");
+
+ string str;
+ for(size_t i = 0; i < 12; ++i)
+ getline(ifs, str);
+
+ Point3f* data = cloud.ptr<cv::Point3f>();
+ float dummy1, dummy2;
+ for(size_t i = 0; i < 1889; ++i)
+ ifs >> data[i].x >> data[i].y >> data[i].z >> dummy1 >> dummy2;
+
+ cloud *= 5.0f;
+ return cloud;
+ }
+
+ /**
+ * @function main
+ */
+ int main(int argn, char **argv)
+ {
+ if (argn < 2)
+ {
+ cout << "Usage: " << endl << "./transformations [ G | C ]" << endl;
+ return 1;
+ }
+
+ bool camera_pov = (argv[1][0] == 'C');
+
+ /// Create a window
+ viz::Viz3d myWindow("Coordinate Frame");
+
+ /// Add coordinate axes
+ myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());
+
+ /// Let's assume camera has the following properties
+ Point3f cam_pos(3.0f,3.0f,3.0f), cam_focal_point(3.0f,3.0f,2.0f), cam_y_dir(-1.0f,0.0f,0.0f);
+
+ /// We can get the pose of the cam using makeCameraPose
+ Affine3f cam_pose = viz::makeCameraPose(cam_pos, cam_focal_point, cam_y_dir);
+
+ /// We can get the transformation matrix from camera coordinate system to global using
+ /// - makeTransformToGlobal. We need the axes of the camera
+ Affine3f transform = viz::makeTransformToGlobal(Vec3f(0.0f,-1.0f,0.0f), Vec3f(-1.0f,0.0f,0.0f), Vec3f(0.0f,0.0f,-1.0f), cam_pos);
+
+ /// Create a cloud widget.
+ Mat bunny_cloud = cvcloud_load();
+ viz::WCloud cloud_widget(bunny_cloud, viz::Color::green());
+
+ /// Pose of the widget in camera frame
+ Affine3f cloud_pose = Affine3f().translate(Vec3f(0.0f,0.0f,3.0f));
+ /// Pose of the widget in global frame
+ Affine3f cloud_pose_global = transform * cloud_pose;
+
+ /// Visualize camera frame
+ if (!camera_pov)
+ {
+ viz::WCameraPosition cpw(0.5); // Coordinate axes
+ viz::WCameraPosition cpw_frustum(Vec2f(0.889484, 0.523599)); // Camera frustum
+ myWindow.showWidget("CPW", cpw, cam_pose);
+ myWindow.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose);
+ }
+
+ /// Visualize widget
+ myWindow.showWidget("bunny", cloud_widget, cloud_pose_global);
+
+ /// Set the viewer pose to that of camera
+ if (camera_pov)
+ myWindow.setViewerPose(cam_pose);
+
+ /// Start event loop.
+ myWindow.spin();
+
+ return 0;
+ }
+
+
+Explanation
+===========
+
+Here is the general structure of the program:
+
+* Create a visualization window.
+
+.. code-block:: cpp
+
+ /// Create a window
+ viz::Viz3d myWindow("Coordinate Frame");
+
+* Get camera pose from camera position, camera focal point and y direction.
+
+.. code-block:: cpp
+
+ /// Let's assume camera has the following properties
+ Point3f cam_pos(3.0f,3.0f,3.0f), cam_focal_point(3.0f,3.0f,2.0f), cam_y_dir(-1.0f,0.0f,0.0f);
+
+ /// We can get the pose of the cam using makeCameraPose
+ Affine3f cam_pose = viz::makeCameraPose(cam_pos, cam_focal_point, cam_y_dir);
+
+* Obtain transform matrix knowing the axes of camera coordinate system.
+
+.. code-block:: cpp
+
+ /// We can get the transformation matrix from camera coordinate system to global using
+ /// - makeTransformToGlobal. We need the axes of the camera
+ Affine3f transform = viz::makeTransformToGlobal(Vec3f(0.0f,-1.0f,0.0f), Vec3f(-1.0f,0.0f,0.0f), Vec3f(0.0f,0.0f,-1.0f), cam_pos);
+
+* Create a cloud widget from bunny.ply file
+
+.. code-block:: cpp
+
+ /// Create a cloud widget.
+ Mat bunny_cloud = cvcloud_load();
+ viz::WCloud cloud_widget(bunny_cloud, viz::Color::green());
+
+* Given the pose in camera coordinate system, estimate the global pose.
+
+.. code-block:: cpp
+
+ /// Pose of the widget in camera frame
+ Affine3f cloud_pose = Affine3f().translate(Vec3f(0.0f,0.0f,3.0f));
+ /// Pose of the widget in global frame
+ Affine3f cloud_pose_global = transform * cloud_pose;
+
+* If the view point is set to be global, visualize camera coordinate frame and viewing frustum.
+
+.. code-block:: cpp
+
+ /// Visualize camera frame
+ if (!camera_pov)
+ {
+ viz::WCameraPosition cpw(0.5); // Coordinate axes
+ viz::WCameraPosition cpw_frustum(Vec2f(0.889484, 0.523599)); // Camera frustum
+ myWindow.showWidget("CPW", cpw, cam_pose);
+ myWindow.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose);
+ }
+
+* Visualize the cloud widget with the estimated global pose
+
+.. code-block:: cpp
+
+ /// Visualize widget
+ myWindow.showWidget("bunny", cloud_widget, cloud_pose_global);
+
+* If the view point is set to be camera's, set viewer pose to **cam_pose**.
+
+.. code-block:: cpp
+
+ /// Set the viewer pose to that of camera
+ if (camera_pov)
+ myWindow.setViewerPose(cam_pose);
+
+Results
+=======
+
+#. Here is the result from the camera point of view.
+
+ .. image:: images/camera_view_point.png
+ :alt: Camera Viewpoint
+ :align: center
+
+#. Here is the result from global point of view.
+
+ .. image:: images/global_view_point.png
+ :alt: Global Viewpoint
+ :align: center
diff --git a/doc/tutorials/viz/widget_pose/images/widgetpose.png b/doc/tutorials/viz/widget_pose/images/widgetpose.png
new file mode 100644
index 000000000..ef8a5937f
Binary files /dev/null and b/doc/tutorials/viz/widget_pose/images/widgetpose.png differ
diff --git a/doc/tutorials/viz/widget_pose/widget_pose.rst b/doc/tutorials/viz/widget_pose/widget_pose.rst
new file mode 100644
index 000000000..a4466bded
--- /dev/null
+++ b/doc/tutorials/viz/widget_pose/widget_pose.rst
@@ -0,0 +1,162 @@
+.. _widget_pose:
+
+Pose of a widget
+****************
+
+Goal
+====
+
+In this tutorial you will learn how to
+
+.. container:: enumeratevisibleitemswithsquare
+
+ * Add widgets to the visualization window
+ * Use Affine3 to set pose of a widget
+ * Rotating and translating a widget along an axis
+
+Code
+====
+
+You can download the code from :download:`here <../../../../samples/cpp/tutorial_code/viz/widget_pose.cpp>`.
+
+.. code-block:: cpp
+
+ #include <opencv2/viz.hpp>
+ #include <opencv2/calib3d.hpp>
+ #include <iostream>
+
+ using namespace cv;
+ using namespace std;
+
+ /**
+ * @function main
+ */
+ int main()
+ {
+ /// Create a window
+ viz::Viz3d myWindow("Coordinate Frame");
+
+ /// Add coordinate axes
+ myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());
+
+ /// Add line to represent (1,1,1) axis
+ viz::WLine axis(Point3f(-1.0f,-1.0f,-1.0f), Point3f(1.0f,1.0f,1.0f));
+ axis.setRenderingProperty(viz::LINE_WIDTH, 4.0);
+ myWindow.showWidget("Line Widget", axis);
+
+ /// Construct a cube widget
+ viz::WCube cube_widget(Point3f(0.5,0.5,0.0), Point3f(0.0,0.0,-0.5), true, viz::Color::blue());
+ cube_widget.setRenderingProperty(viz::LINE_WIDTH, 4.0);
+
+ /// Display widget (update if already displayed)
+ myWindow.showWidget("Cube Widget", cube_widget);
+
+ /// Rodrigues vector
+ Mat rot_vec = Mat::zeros(1,3,CV_32F);
+ float translation_phase = 0.0, translation = 0.0;
+ while(!myWindow.wasStopped())
+ {
+ /* Rotation using rodrigues */
+ /// Rotate around (1,1,1)
+ rot_vec.at<float>(0,0) += CV_PI * 0.01f;
+ rot_vec.at<float>(0,1) += CV_PI * 0.01f;
+ rot_vec.at<float>(0,2) += CV_PI * 0.01f;
+
+ /// Shift on (1,1,1)
+ translation_phase += CV_PI * 0.01f;
+ translation = sin(translation_phase);
+
+ Mat rot_mat;
+ Rodrigues(rot_vec, rot_mat);
+
+ /// Construct pose
+ Affine3f pose(rot_mat, Vec3f(translation, translation, translation));
+
+ myWindow.setWidgetPose("Cube Widget", pose);
+
+ myWindow.spinOnce(1, true);
+ }
+
+ return 0;
+ }
+
+Explanation
+===========
+
+Here is the general structure of the program:
+
+* Create a visualization window.
+
+.. code-block:: cpp
+
+ /// Create a window
+ viz::Viz3d myWindow("Coordinate Frame");
+
+* Show coordinate axes in the window using CoordinateSystemWidget.
+
+.. code-block:: cpp
+
+ /// Add coordinate axes
+ myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());
+
+* Display a line representing the axis (1,1,1).
+
+.. code-block:: cpp
+
+ /// Add line to represent (1,1,1) axis
+ viz::WLine axis(Point3f(-1.0f,-1.0f,-1.0f), Point3f(1.0f,1.0f,1.0f));
+ axis.setRenderingProperty(viz::LINE_WIDTH, 4.0);
+ myWindow.showWidget("Line Widget", axis);
+
+* Construct a cube.
+
+.. code-block:: cpp
+
+ /// Construct a cube widget
+ viz::WCube cube_widget(Point3f(0.5,0.5,0.0), Point3f(0.0,0.0,-0.5), true, viz::Color::blue());
+ cube_widget.setRenderingProperty(viz::LINE_WIDTH, 4.0);
+ myWindow.showWidget("Cube Widget", cube_widget);
+
+* Create rotation matrix from rodrigues vector
+
+.. code-block:: cpp
+
+ /// Rotate around (1,1,1)
+ rot_vec.at<float>(0,0) += CV_PI * 0.01f;
+ rot_vec.at<float>(0,1) += CV_PI * 0.01f;
+ rot_vec.at<float>(0,2) += CV_PI * 0.01f;
+
+ ...
+
+ Mat rot_mat;
+ Rodrigues(rot_vec, rot_mat);
+
+* Use Affine3f to set pose of the cube.
+
+.. code-block:: cpp
+
+ /// Construct pose
+ Affine3f pose(rot_mat, Vec3f(translation, translation, translation));
+ myWindow.setWidgetPose("Cube Widget", pose);
+
+* Animate the rotation using wasStopped and spinOnce
+
+.. code-block:: cpp
+
+ while(!myWindow.wasStopped())
+ {
+ ...
+
+ myWindow.spinOnce(1, true);
+ }
+
+Results
+=======
+
+Here is the result of the program.
+
+.. raw:: html
+
+
+
+
diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt
index ed3b85a8f..b4e48e6fa 100644
--- a/include/CMakeLists.txt
+++ b/include/CMakeLists.txt
@@ -1,7 +1,7 @@
file(GLOB old_hdrs "opencv/*.h*")
install(FILES ${old_hdrs}
DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv
- COMPONENT main)
+ COMPONENT dev)
install(FILES "opencv2/opencv.hpp"
DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2
- COMPONENT main)
+ COMPONENT dev)
diff --git a/modules/androidcamera/CMakeLists.txt b/modules/androidcamera/CMakeLists.txt
index 8ac8ced88..3858ba9f6 100644
--- a/modules/androidcamera/CMakeLists.txt
+++ b/modules/androidcamera/CMakeLists.txt
@@ -40,6 +40,6 @@ else()
get_filename_component(wrapper_name "${wrapper}" NAME)
install(FILES "${LIBRARY_OUTPUT_PATH}/${wrapper_name}"
DESTINATION ${OPENCV_LIB_INSTALL_PATH}
- COMPONENT main)
+ COMPONENT libs)
endforeach()
endif()
diff --git a/modules/androidcamera/camera_wrapper/CMakeLists.txt b/modules/androidcamera/camera_wrapper/CMakeLists.txt
index 21b9ee1ad..d08e2c469 100644
--- a/modules/androidcamera/camera_wrapper/CMakeLists.txt
+++ b/modules/androidcamera/camera_wrapper/CMakeLists.txt
@@ -58,9 +58,9 @@ SET_TARGET_PROPERTIES(${the_target} PROPERTIES
RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
)
-if (NOT (CMAKE_BUILD_TYPE MATCHES "debug"))
+if (NOT (CMAKE_BUILD_TYPE MATCHES "Debug"))
ADD_CUSTOM_COMMAND( TARGET ${the_target} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "${LIBRARY_OUTPUT_PATH}/lib${the_target}.so" )
endif()
-install(TARGETS ${the_target} LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
+install(TARGETS ${the_target} LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs)
diff --git a/modules/androidcamera/camera_wrapper/camera_wrapper.cpp b/modules/androidcamera/camera_wrapper/camera_wrapper.cpp
index 5ca1778a5..0ed301323 100644
--- a/modules/androidcamera/camera_wrapper/camera_wrapper.cpp
+++ b/modules/androidcamera/camera_wrapper/camera_wrapper.cpp
@@ -61,6 +61,12 @@
using namespace android;
+// non-public camera related classes are not binary compatible
+// objects of these classes have different sizeof on different platforms
+// additional memory tail to all system objects to overcome sizeof issue
+#define MAGIC_TAIL 4096
+
+
void debugShowFPS();
#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
@@ -90,6 +96,7 @@ public:
};
#endif
+
std::string getProcessName()
{
std::string result;
@@ -142,12 +149,22 @@ class CameraHandler: public CameraListener
protected:
int cameraId;
sp<Camera> camera;
- CameraParameters params;
+#if defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
+ sp<SurfaceTexture> surface;
+#endif
+#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
+ sp<BufferQueue> queue;
+ sp<ConsumerListenerStub> listener;
+#endif
+ CameraParameters* params;
CameraCallback cameraCallback;
void* userData;
int emptyCameraCallbackReported;
+ int width;
+ int height;
+
static const char* flashModesNames[ANDROID_CAMERA_FLASH_MODES_NUM];
static const char* focusModesNames[ANDROID_CAMERA_FOCUS_MODES_NUM];
static const char* whiteBalanceModesNames[ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM];
@@ -258,7 +275,7 @@ protected:
int is_supported(const char* supp_modes_key, const char* mode)
{
- const char* supported_modes = params.get(supp_modes_key);
+ const char* supported_modes = params->get(supp_modes_key);
return (supported_modes && mode && (strstr(supported_modes, mode) > 0));
}
@@ -268,7 +285,7 @@ protected:
if (focus_distance_type >= 0 && focus_distance_type < 3)
{
float focus_distances[3];
- const char* output = params.get(CameraParameters::KEY_FOCUS_DISTANCES);
+ const char* output = params->get(CameraParameters::KEY_FOCUS_DISTANCES);
int val_num = CameraHandler::split_float(output, focus_distances, ',', 3);
if(val_num == 3)
{
@@ -300,10 +317,15 @@ public:
emptyCameraCallbackReported(0)
{
LOGD("Instantiated new CameraHandler (%p, %p)", callback, _userData);
+ void* params_buffer = operator new(sizeof(CameraParameters) + MAGIC_TAIL);
+ params = new(params_buffer) CameraParameters();
}
virtual ~CameraHandler()
{
+ if (params)
+ params->~CameraParameters();
+ operator delete(params);
LOGD("CameraHandler destructor is called");
}
@@ -371,10 +393,18 @@ const char* CameraHandler::focusModesNames[ANDROID_CAMERA_FOCUS_MODES_NUM] =
CameraParameters::FOCUS_MODE_AUTO,
#if !defined(ANDROID_r2_2_0)
CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO,
+#else
+ CameraParameters::FOCUS_MODE_AUTO,
#endif
CameraParameters::FOCUS_MODE_EDOF,
CameraParameters::FOCUS_MODE_FIXED,
- CameraParameters::FOCUS_MODE_INFINITY
+ CameraParameters::FOCUS_MODE_INFINITY,
+ CameraParameters::FOCUS_MODE_MACRO,
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+ CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE
+#else
+ CameraParameters::FOCUS_MODE_AUTO
+#endif
};
const char* CameraHandler::whiteBalanceModesNames[ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM] =
@@ -534,39 +564,39 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
{
LOGI("initCameraConnect: Setting paramers from previous camera handler");
camera->setParameters(prevCameraParameters->flatten());
- handler->params.unflatten(prevCameraParameters->flatten());
+ handler->params->unflatten(prevCameraParameters->flatten());
}
else
{
android::String8 params_str = camera->getParameters();
LOGI("initCameraConnect: [%s]", params_str.string());
- handler->params.unflatten(params_str);
+ handler->params->unflatten(params_str);
- LOGD("Supported Cameras: %s", handler->params.get("camera-indexes"));
- LOGD("Supported Picture Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES));
- LOGD("Supported Picture Formats: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
- LOGD("Supported Preview Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES));
- LOGD("Supported Preview Formats: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS));
- LOGD("Supported Preview Frame Rates: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
- LOGD("Supported Thumbnail Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES));
- LOGD("Supported Whitebalance Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE));
- LOGD("Supported Effects: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_EFFECTS));
- LOGD("Supported Scene Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_SCENE_MODES));
- LOGD("Supported Focus Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
- LOGD("Supported Antibanding Options: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_ANTIBANDING));
- LOGD("Supported Flash Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES));
+ LOGD("Supported Cameras: %s", handler->params->get("camera-indexes"));
+ LOGD("Supported Picture Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES));
+ LOGD("Supported Picture Formats: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
+ LOGD("Supported Preview Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES));
+ LOGD("Supported Preview Formats: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS));
+ LOGD("Supported Preview Frame Rates: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
+ LOGD("Supported Thumbnail Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES));
+ LOGD("Supported Whitebalance Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE));
+ LOGD("Supported Effects: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_EFFECTS));
+ LOGD("Supported Scene Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_SCENE_MODES));
+ LOGD("Supported Focus Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+ LOGD("Supported Antibanding Options: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_ANTIBANDING));
+ LOGD("Supported Flash Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_FLASH_MODES));
#if !defined(ANDROID_r2_2_0)
// Set focus mode to continuous-video if supported
- const char* available_focus_modes = handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
+ const char* available_focus_modes = handler->params->get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
if (available_focus_modes != 0)
{
if (strstr(available_focus_modes, "continuous-video") != NULL)
{
- handler->params.set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO);
+ handler->params->set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO);
- status_t resParams = handler->camera->setParameters(handler->params.flatten());
+ status_t resParams = handler->camera->setParameters(handler->params->flatten());
if (resParams != 0)
{
@@ -581,7 +611,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
#endif
//check if yuv420sp format available. Set this format as preview format.
- const char* available_formats = handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
+ const char* available_formats = handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
if (available_formats != 0)
{
const char* format_to_set = 0;
@@ -607,9 +637,9 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
if (0 != format_to_set)
{
- handler->params.setPreviewFormat(format_to_set);
+ handler->params->setPreviewFormat(format_to_set);
- status_t resParams = handler->camera->setParameters(handler->params.flatten());
+ status_t resParams = handler->camera->setParameters(handler->params->flatten());
if (resParams != 0)
LOGE("initCameraConnect: failed to set preview format to %s", format_to_set);
@@ -617,6 +647,13 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
LOGD("initCameraConnect: preview format is set to %s", format_to_set);
}
}
+
+ handler->params->setPreviewSize(640, 480);
+ status_t resParams = handler->camera->setParameters(handler->params->flatten());
+ if (resParams != 0)
+ LOGE("initCameraConnect: failed to set preview resolution to 640x480");
+ else
+ LOGD("initCameraConnect: preview format is set to 640x480");
}
status_t bufferStatus;
@@ -627,22 +664,27 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
#elif defined(ANDROID_r2_3_3)
/* Do nothing in case of 2.3 for now */
#elif defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
- sp<SurfaceTexture> surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
- bufferStatus = camera->setPreviewTexture(surfaceTexture);
+ void* surface_texture_obj = operator new(sizeof(SurfaceTexture) + MAGIC_TAIL);
+ handler->surface = new(surface_texture_obj) SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
+ bufferStatus = camera->setPreviewTexture(handler->surface);
if (bufferStatus != 0)
LOGE("initCameraConnect: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus);
#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
- sp<BufferQueue> bufferQueue = new BufferQueue();
- sp<ConsumerListenerStub> queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener);
- bufferStatus = camera->setPreviewTexture(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
+ handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
+ handler->queue->consumerConnect(handler->listener);
+ bufferStatus = camera->setPreviewTexture(handler->queue);
if (bufferStatus != 0)
LOGE("initCameraConnect: failed setPreviewTexture call; camera might not work correctly");
# elif defined(ANDROID_r4_4_0)
- sp<BufferQueue> bufferQueue = new BufferQueue();
- sp<ConsumerListenerStub> queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener, true);
- bufferStatus = handler->camera->setPreviewTarget(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
+ handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
+ handler->queue->consumerConnect(handler->listener, true);
+ bufferStatus = handler->camera->setPreviewTarget(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# endif
@@ -723,18 +765,18 @@ double CameraHandler::getProperty(int propIdx)
case ANDROID_CAMERA_PROPERTY_FRAMEWIDTH:
{
int w,h;
- params.getPreviewSize(&w, &h);
+ params->getPreviewSize(&w, &h);
return w;
}
case ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT:
{
int w,h;
- params.getPreviewSize(&w, &h);
+ params->getPreviewSize(&w, &h);
return h;
}
case ANDROID_CAMERA_PROPERTY_SUPPORTED_PREVIEW_SIZES_STRING:
{
- cameraPropertySupportedPreviewSizesString = params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
+ cameraPropertySupportedPreviewSizesString = params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
union {const char* str;double res;} u;
memset(&u.res, 0, sizeof(u.res));
u.str = cameraPropertySupportedPreviewSizesString.c_str();
@@ -742,7 +784,7 @@ double CameraHandler::getProperty(int propIdx)
}
case ANDROID_CAMERA_PROPERTY_PREVIEW_FORMAT_STRING:
{
- const char* fmt = params.get(CameraParameters::KEY_PREVIEW_FORMAT);
+ const char* fmt = params->get(CameraParameters::KEY_PREVIEW_FORMAT);
if (fmt == CameraParameters::PIXEL_FORMAT_YUV422SP)
fmt = "yuv422sp";
else if (fmt == CameraParameters::PIXEL_FORMAT_YUV420SP)
@@ -762,44 +804,44 @@ double CameraHandler::getProperty(int propIdx)
}
case ANDROID_CAMERA_PROPERTY_EXPOSURE:
{
- int exposure = params.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
+ int exposure = params->getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
return exposure;
}
case ANDROID_CAMERA_PROPERTY_FPS:
{
- return params.getPreviewFrameRate();
+ return params->getPreviewFrameRate();
}
case ANDROID_CAMERA_PROPERTY_FLASH_MODE:
{
int flash_mode = getModeNum(CameraHandler::flashModesNames,
ANDROID_CAMERA_FLASH_MODES_NUM,
- params.get(CameraParameters::KEY_FLASH_MODE));
+ params->get(CameraParameters::KEY_FLASH_MODE));
return flash_mode;
}
case ANDROID_CAMERA_PROPERTY_FOCUS_MODE:
{
int focus_mode = getModeNum(CameraHandler::focusModesNames,
ANDROID_CAMERA_FOCUS_MODES_NUM,
- params.get(CameraParameters::KEY_FOCUS_MODE));
+ params->get(CameraParameters::KEY_FOCUS_MODE));
return focus_mode;
}
case ANDROID_CAMERA_PROPERTY_WHITE_BALANCE:
{
int white_balance = getModeNum(CameraHandler::whiteBalanceModesNames,
ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM,
- params.get(CameraParameters::KEY_WHITE_BALANCE));
+ params->get(CameraParameters::KEY_WHITE_BALANCE));
return white_balance;
}
case ANDROID_CAMERA_PROPERTY_ANTIBANDING:
{
int antibanding = getModeNum(CameraHandler::antibandingModesNames,
ANDROID_CAMERA_ANTIBANDING_MODES_NUM,
- params.get(CameraParameters::KEY_ANTIBANDING));
+ params->get(CameraParameters::KEY_ANTIBANDING));
return antibanding;
}
case ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH:
{
- float focal_length = params.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
+ float focal_length = params->getFloat(CameraParameters::KEY_FOCAL_LENGTH);
return focal_length;
}
case ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR:
@@ -814,6 +856,24 @@ double CameraHandler::getProperty(int propIdx)
{
return getFocusDistance(ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX);
}
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+ case ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK:
+ {
+ const char* status = params->get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK);
+ if (status == CameraParameters::TRUE)
+ return 1.;
+ else
+ return 0.;
+ }
+ case ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK:
+ {
+ const char* status = params->get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK);
+ if (status == CameraParameters::TRUE)
+ return 1.;
+ else
+ return 0.;
+ }
+#endif
default:
LOGW("CameraHandler::getProperty - Unsupported property.");
};
@@ -824,99 +884,151 @@ void CameraHandler::setProperty(int propIdx, double value)
{
LOGD("CameraHandler::setProperty(%d, %f)", propIdx, value);
+ android::String8 params_str;
+ params_str = camera->getParameters();
+ LOGI("Params before set: [%s]", params_str.string());
+
switch (propIdx)
{
case ANDROID_CAMERA_PROPERTY_FRAMEWIDTH:
{
int w,h;
- params.getPreviewSize(&w, &h);
- w = (int)value;
- params.setPreviewSize(w, h);
+ params->getPreviewSize(&w, &h);
+ width = (int)value;
}
break;
case ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT:
{
int w,h;
- params.getPreviewSize(&w, &h);
- h = (int)value;
- params.setPreviewSize(w, h);
+ params->getPreviewSize(&w, &h);
+ height = (int)value;
}
break;
case ANDROID_CAMERA_PROPERTY_EXPOSURE:
{
- int max_exposure = params.getInt("max-exposure-compensation");
- int min_exposure = params.getInt("min-exposure-compensation");
- if(max_exposure && min_exposure){
+ int max_exposure = params->getInt("max-exposure-compensation");
+ int min_exposure = params->getInt("min-exposure-compensation");
+ if(max_exposure && min_exposure)
+ {
int exposure = (int)value;
- if(exposure >= min_exposure && exposure <= max_exposure){
- params.set("exposure-compensation", exposure);
- } else {
+ if(exposure >= min_exposure && exposure <= max_exposure)
+ params->set("exposure-compensation", exposure);
+ else
LOGE("Exposure compensation not in valid range (%i,%i).", min_exposure, max_exposure);
- }
- } else {
+ } else
LOGE("Exposure compensation adjust is not supported.");
- }
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_FLASH_MODE:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_FLASH_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_FLASH_MODES_NUM)
+ {
const char* mode_name = flashModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_FLASH_MODES, mode_name))
- params.set(CameraParameters::KEY_FLASH_MODE, mode_name);
+ params->set(CameraParameters::KEY_FLASH_MODE, mode_name);
else
LOGE("Flash mode %s is not supported.", mode_name);
- } else {
- LOGE("Flash mode value not in valid range.");
}
+ else
+ LOGE("Flash mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_FOCUS_MODE:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_FOCUS_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_FOCUS_MODES_NUM)
+ {
const char* mode_name = focusModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_FOCUS_MODES, mode_name))
- params.set(CameraParameters::KEY_FOCUS_MODE, mode_name);
+ params->set(CameraParameters::KEY_FOCUS_MODE, mode_name);
else
LOGE("Focus mode %s is not supported.", mode_name);
- } else {
- LOGE("Focus mode value not in valid range.");
}
+ else
+ LOGE("Focus mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_WHITE_BALANCE:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM)
+ {
const char* mode_name = whiteBalanceModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE, mode_name))
- params.set(CameraParameters::KEY_WHITE_BALANCE, mode_name);
+ params->set(CameraParameters::KEY_WHITE_BALANCE, mode_name);
else
LOGE("White balance mode %s is not supported.", mode_name);
- } else {
- LOGE("White balance mode value not in valid range.");
}
+ else
+ LOGE("White balance mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_ANTIBANDING:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_ANTIBANDING_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_ANTIBANDING_MODES_NUM)
+ {
const char* mode_name = antibandingModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_ANTIBANDING, mode_name))
- params.set(CameraParameters::KEY_ANTIBANDING, mode_name);
+ params->set(CameraParameters::KEY_ANTIBANDING, mode_name);
else
LOGE("Antibanding mode %s is not supported.", mode_name);
- } else {
- LOGE("Antibanding mode value not in valid range.");
}
+ else
+ LOGE("Antibanding mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+ case ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK:
+ {
+ if (is_supported(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED, "true"))
+ {
+ if (value != 0)
+ params->set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, CameraParameters::TRUE);
+ else
+ params->set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, CameraParameters::FALSE);
+ LOGE("Expose lock is set");
+ }
+ else
+ LOGE("Expose lock is not supported");
+
+ camera->setParameters(params->flatten());
+ }
+ break;
+ case ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK:
+ {
+ if (is_supported(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED, "true"))
+ {
+ if (value != 0)
+ params->set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, CameraParameters::TRUE);
+ else
+ params->set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, CameraParameters::FALSE);
+ LOGE("White balance lock is set");
+ }
+ else
+ LOGE("White balance lock is not supported");
+
+ camera->setParameters(params->flatten());
+ }
+ break;
+#endif
default:
LOGW("CameraHandler::setProperty - Unsupported property.");
};
+
+ params_str = camera->getParameters();
+ LOGI("Params after set: [%s]", params_str.string());
}
void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
@@ -935,7 +1047,10 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}
- CameraParameters curCameraParameters((*ppcameraHandler)->params.flatten());
+ // delayed resolution setup to exclude errors during other parameters setup on the fly
+ // without camera restart
+ if (((*ppcameraHandler)->width != 0) && ((*ppcameraHandler)->height != 0))
+ (*ppcameraHandler)->params->setPreviewSize((*ppcameraHandler)->width, (*ppcameraHandler)->height);
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) \
|| defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
@@ -951,27 +1066,27 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}
- handler->camera->setParameters(curCameraParameters.flatten());
- handler->params.unflatten(curCameraParameters.flatten());
+ handler->camera->setParameters((*ppcameraHandler)->params->flatten());
status_t bufferStatus;
# if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
- sp surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
- bufferStatus = handler->camera->setPreviewTexture(surfaceTexture);
+ void* surface_texture_obj = operator new(sizeof(SurfaceTexture) + MAGIC_TAIL);
+ handler->surface = new(surface_texture_obj) SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
+ bufferStatus = handler->camera->setPreviewTexture(handler->surface);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus);
# elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
- sp bufferQueue = new BufferQueue();
- sp queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener);
- bufferStatus = handler->camera->setPreviewTexture(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ handler->queue->consumerConnect(handler->listener);
+ bufferStatus = handler->camera->setPreviewTexture(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# elif defined(ANDROID_r4_4_0)
- sp bufferQueue = new BufferQueue();
- sp queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener, true);
- bufferStatus = handler->camera->setPreviewTarget(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ handler->queue->consumerConnect(handler->listener, true);
+ bufferStatus = handler->camera->setPreviewTarget(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# endif
@@ -1002,7 +1117,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
LOGD("CameraHandler::applyProperties(): after previousCameraHandler->closeCameraConnect");
LOGD("CameraHandler::applyProperties(): before initCameraConnect");
- CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, &curCameraParameters);
+ CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, (*ppcameraHandler)->params);
LOGD("CameraHandler::applyProperties(): after initCameraConnect, handler=0x%x", (int)handler);
if (handler == NULL) {
LOGE("ERROR in applyProperties --- cannot reinit camera");
diff --git a/modules/androidcamera/include/camera_properties.h b/modules/androidcamera/include/camera_properties.h
index 2fec745fa..65499be2d 100644
--- a/modules/androidcamera/include/camera_properties.h
+++ b/modules/androidcamera/include/camera_properties.h
@@ -15,7 +15,9 @@ enum {
ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH = 105,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR = 106,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_OPTIMAL = 107,
- ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR = 108
+ ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR = 108,
+ ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK = 109,
+ ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK = 110
};
@@ -30,12 +32,12 @@ enum {
enum {
ANDROID_CAMERA_FOCUS_MODE_AUTO = 0,
- ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_PICTURE,
ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_VIDEO,
ANDROID_CAMERA_FOCUS_MODE_EDOF,
ANDROID_CAMERA_FOCUS_MODE_FIXED,
ANDROID_CAMERA_FOCUS_MODE_INFINITY,
ANDROID_CAMERA_FOCUS_MODE_MACRO,
+ ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_PICTURE,
ANDROID_CAMERA_FOCUS_MODES_NUM
};
diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
index 37159b016..e6edfbeb2 100644
--- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
+++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
@@ -217,9 +217,9 @@ Computes useful camera characteristics from the camera matrix.
:param imageSize: Input image size in pixels.
- :param apertureWidth: Physical width of the sensor.
+ :param apertureWidth: Physical width in mm of the sensor.
- :param apertureHeight: Physical height of the sensor.
+ :param apertureHeight: Physical height in mm of the sensor.
:param fovx: Output field of view in degrees along the horizontal sensor axis.
@@ -227,13 +227,15 @@ Computes useful camera characteristics from the camera matrix.
:param focalLength: Focal length of the lens in mm.
- :param principalPoint: Principal point in pixels.
+ :param principalPoint: Principal point in mm.
:param aspectRatio: :math:`f_y/f_x`
The function computes various useful camera characteristics from the previously estimated camera matrix.
+.. note::
+ Do keep in mind that the unit 'mm' stands for whatever unit of measure one chooses for the chessboard pitch (it can thus be any value).
composeRT
-------------
@@ -1483,6 +1485,10 @@ Reconstructs points by triangulation.
The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from :ocv:func:`stereoRectify`.
+.. note::
+
+ Keep in mind that all input data should be of float type in order for this function to work.
+
.. seealso::
:ocv:func:`reprojectImageTo3D`
diff --git a/modules/calib3d/perf/perf_pnp.cpp b/modules/calib3d/perf/perf_pnp.cpp
index e0ffd70cf..557387d9a 100644
--- a/modules/calib3d/perf/perf_pnp.cpp
+++ b/modules/calib3d/perf/perf_pnp.cpp
@@ -126,7 +126,7 @@ PERF_TEST_P(PointsNum, DISABLED_SolvePnPRansac, testing::Values(4, 3*9, 7*13))
Mat tvec;
#ifdef HAVE_TBB
- // limit concurrency to get determenistic result
+ // limit concurrency to get deterministic result
cv::Ptr one_thread = new tbb::task_scheduler_init(1);
#endif
diff --git a/modules/calib3d/test/test_cameracalibration.cpp b/modules/calib3d/test/test_cameracalibration.cpp
index 0b9d794a9..bf978bfb0 100644
--- a/modules/calib3d/test/test_cameracalibration.cpp
+++ b/modules/calib3d/test/test_cameracalibration.cpp
@@ -478,7 +478,7 @@ void CV_CameraCalibrationTest::run( int start_from )
values_read = fscanf(file,"%lf",goodDistortion+2); CV_Assert(values_read == 1);
values_read = fscanf(file,"%lf",goodDistortion+3); CV_Assert(values_read == 1);
- /* Read good Rot matrixes */
+ /* Read good Rot matrices */
for( currImage = 0; currImage < numImages; currImage++ )
{
for( i = 0; i < 3; i++ )
@@ -1605,7 +1605,7 @@ void CV_StereoCalibrationTest::run( int )
Mat _M1, _M2, _D1, _D2;
vector _R1, _R2, _T1, _T2;
calibrateCamera( objpt, imgpt1, imgsize, _M1, _D1, _R1, _T1, 0 );
- calibrateCamera( objpt, imgpt2, imgsize, _M2, _D2, _R2, _T1, 0 );
+ calibrateCamera( objpt, imgpt2, imgsize, _M2, _D2, _R2, _T2, 0 );
undistortPoints( _imgpt1, _imgpt1, _M1, _D1, Mat(), _M1 );
undistortPoints( _imgpt2, _imgpt2, _M2, _D2, Mat(), _M2 );
diff --git a/modules/calib3d/test/test_chessboardgenerator.hpp b/modules/calib3d/test/test_chessboardgenerator.hpp
index 48b3f3a24..6b669d2f5 100644
--- a/modules/calib3d/test/test_chessboardgenerator.hpp
+++ b/modules/calib3d/test/test_chessboardgenerator.hpp
@@ -34,7 +34,7 @@ private:
Mat rvec, tvec;
};
-};
+}
#endif
diff --git a/modules/calib3d/test/test_solvepnp_ransac.cpp b/modules/calib3d/test/test_solvepnp_ransac.cpp
index 6c924a580..4c8d56a96 100644
--- a/modules/calib3d/test/test_solvepnp_ransac.cpp
+++ b/modules/calib3d/test/test_solvepnp_ransac.cpp
@@ -271,7 +271,7 @@ TEST(DISABLED_Calib3d_SolvePnPRansac, concurrency)
Mat tvec1, tvec2;
{
- // limit concurrency to get determenistic result
+ // limit concurrency to get deterministic result
cv::theRNG().state = 20121010;
cv::Ptr one_thread = new tbb::task_scheduler_init(1);
solvePnPRansac(object, image, camera_mat, dist_coef, rvec1, tvec1);
diff --git a/modules/contrib/doc/facerec/facerec_api.rst b/modules/contrib/doc/facerec/facerec_api.rst
index 3100cfd8f..2aa3dcfa0 100644
--- a/modules/contrib/doc/facerec/facerec_api.rst
+++ b/modules/contrib/doc/facerec/facerec_api.rst
@@ -75,7 +75,7 @@ Moreover every :ocv:class:`FaceRecognizer` supports the:
Setting the Thresholds
+++++++++++++++++++++++
-Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common scenario in face recognition is to tell, wether a face belongs to the training dataset or if it is unknown. You might wonder, why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: It's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer` and since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!
+Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common scenario in face recognition is to tell, whether a face belongs to the training dataset or if it is unknown. You might wonder, why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: It's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer` and since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!
Here is an example of setting a threshold for the Eigenfaces method, when creating the model:
diff --git a/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst b/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst
index 770083170..95c821298 100644
--- a/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst
+++ b/modules/contrib/doc/facerec/tutorial/facerec_gender_classification.rst
@@ -71,7 +71,7 @@ You really don't want to create the CSV file by hand. And you really don't want
Fisherfaces for Gender Classification
--------------------------------------
-If you want to decide wether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on the Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task. Please see the Face Recognition tutorial for insights into the algorithms. The Fisherfaces instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification `_ shows the recognition rate of the Fisherfaces method for gender classification.
+If you want to decide whether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on the Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task. Please see the Face Recognition tutorial for insights into the algorithms. The Fisherfaces instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification `_ shows the recognition rate of the Fisherfaces method for gender classification.
The Fisherfaces method achieves a 98% recognition rate in a subject-independent cross-validation. A subject-independent cross-validation means *images of the person under test are never used for learning the model*. And could you believe it: you can simply use the facerec_fisherfaces demo, that's inlcuded in OpenCV.
diff --git a/modules/contrib/src/adaptiveskindetector.cpp b/modules/contrib/src/adaptiveskindetector.cpp
index 0865ad70b..22f729d91 100644
--- a/modules/contrib/src/adaptiveskindetector.cpp
+++ b/modules/contrib/src/adaptiveskindetector.cpp
@@ -53,7 +53,7 @@ void CvAdaptiveSkinDetector::initData(IplImage *src, int widthDivider, int heigh
imgGrayFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
imgLastGrayFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
imgHSVFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
-};
+}
CvAdaptiveSkinDetector::CvAdaptiveSkinDetector(int samplingDivider, int morphingMethod)
{
@@ -78,7 +78,7 @@ CvAdaptiveSkinDetector::CvAdaptiveSkinDetector(int samplingDivider, int morphing
imgLastGrayFrame = NULL;
imgSaturationFrame = NULL;
imgHSVFrame = NULL;
-};
+}
CvAdaptiveSkinDetector::~CvAdaptiveSkinDetector()
{
@@ -91,7 +91,7 @@ CvAdaptiveSkinDetector::~CvAdaptiveSkinDetector()
cvReleaseImage(&imgGrayFrame);
cvReleaseImage(&imgLastGrayFrame);
cvReleaseImage(&imgHSVFrame);
-};
+}
void CvAdaptiveSkinDetector::process(IplImage *inputBGRImage, IplImage *outputHueMask)
{
@@ -188,7 +188,7 @@ void CvAdaptiveSkinDetector::process(IplImage *inputBGRImage, IplImage *outputHu
if (outputHueMask != NULL)
cvCopy(imgFilteredFrame, outputHueMask);
-};
+}
//------------------------- Histogram for Adaptive Skin Detector -------------------------//
@@ -200,12 +200,12 @@ CvAdaptiveSkinDetector::Histogram::Histogram()
float *ranges[] = { range };
fHistogram = cvCreateHist(1, histogramSize, CV_HIST_ARRAY, ranges, 1);
cvClearHist(fHistogram);
-};
+}
CvAdaptiveSkinDetector::Histogram::~Histogram()
{
cvReleaseHist(&fHistogram);
-};
+}
int CvAdaptiveSkinDetector::Histogram::findCoverageIndex(double surfaceToCover, int defaultValue)
{
@@ -219,7 +219,7 @@ int CvAdaptiveSkinDetector::Histogram::findCoverageIndex(double surfaceToCover,
}
}
return defaultValue;
-};
+}
void CvAdaptiveSkinDetector::Histogram::findCurveThresholds(int &x1, int &x2, double percent)
{
@@ -242,7 +242,7 @@ void CvAdaptiveSkinDetector::Histogram::findCurveThresholds(int &x1, int &x2, do
x2 = GSD_HUE_UT;
else
x2 += GSD_HUE_LT;
-};
+}
void CvAdaptiveSkinDetector::Histogram::mergeWith(CvAdaptiveSkinDetector::Histogram *source, double weight)
{
@@ -283,4 +283,4 @@ void CvAdaptiveSkinDetector::Histogram::mergeWith(CvAdaptiveSkinDetector::Histog
}
}
}
-};
+}
diff --git a/modules/contrib/src/ba.cpp b/modules/contrib/src/ba.cpp
index 0e2afd95b..ec90cb79a 100644
--- a/modules/contrib/src/ba.cpp
+++ b/modules/contrib/src/ba.cpp
@@ -938,7 +938,7 @@ static void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, C
#endif
-};
+}
static void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* estim, void* /*data*/) {
//just do projections
CvMat _Mi;
@@ -977,17 +977,17 @@ static void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, C
cvTranspose( _mp2, estim );
cvReleaseMat( &_mp );
cvReleaseMat( &_mp2 );
-};
+}
static void fjac_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data) {
CvMat _point_params = point_params, _cam_params = cam_params, _Al = A, _Bl = B;
fjac(i,j, &_point_params, &_cam_params, &_Al, &_Bl, data);
-};
+}
static void func_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data) {
CvMat _point_params = point_params, _cam_params = cam_params, _estim = estim;
func(i,j,&_point_params,&_cam_params,&_estim,data);
-};
+}
void LevMarqSparse::bundleAdjust( vector& points, //positions of points in global coordinate system (input and output)
const vector >& imagePoints, //projections of 3d points for every camera
diff --git a/modules/contrib/src/basicretinafilter.hpp b/modules/contrib/src/basicretinafilter.hpp
index 8bd136d68..f0b0de4aa 100644
--- a/modules/contrib/src/basicretinafilter.hpp
+++ b/modules/contrib/src/basicretinafilter.hpp
@@ -439,8 +439,8 @@ namespace cv
#ifdef MAKE_PARALLEL
/******************************************************
** IF some parallelizing thread methods are available, then, main loops are parallelized using these functors
- ** ==> main idea paralellise main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelised methods as necessary
- ** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelised
+ ** ==> main idea parallelize main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelized methods as necessary
+ ** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelized
** ==> functors constructors can differ from the parameters used with their related serial functions
*/
diff --git a/modules/contrib/src/facerec.cpp b/modules/contrib/src/facerec.cpp
index bd202d206..a3d695ad1 100644
--- a/modules/contrib/src/facerec.cpp
+++ b/modules/contrib/src/facerec.cpp
@@ -873,7 +873,7 @@ CV_INIT_ALGORITHM(Eigenfaces, "FaceRecognizer.Eigenfaces",
obj.info()->addParam(obj, "labels", obj._labels, true);
obj.info()->addParam(obj, "eigenvectors", obj._eigenvectors, true);
obj.info()->addParam(obj, "eigenvalues", obj._eigenvalues, true);
- obj.info()->addParam(obj, "mean", obj._mean, true));
+ obj.info()->addParam(obj, "mean", obj._mean, true))
CV_INIT_ALGORITHM(Fisherfaces, "FaceRecognizer.Fisherfaces",
obj.info()->addParam(obj, "ncomponents", obj._num_components);
@@ -882,7 +882,7 @@ CV_INIT_ALGORITHM(Fisherfaces, "FaceRecognizer.Fisherfaces",
obj.info()->addParam(obj, "labels", obj._labels, true);
obj.info()->addParam(obj, "eigenvectors", obj._eigenvectors, true);
obj.info()->addParam(obj, "eigenvalues", obj._eigenvalues, true);
- obj.info()->addParam(obj, "mean", obj._mean, true));
+ obj.info()->addParam(obj, "mean", obj._mean, true))
CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
obj.info()->addParam(obj, "radius", obj._radius);
@@ -891,7 +891,7 @@ CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
obj.info()->addParam(obj, "grid_y", obj._grid_y);
obj.info()->addParam(obj, "threshold", obj._threshold);
obj.info()->addParam(obj, "histograms", obj._histograms, true);
- obj.info()->addParam(obj, "labels", obj._labels, true));
+ obj.info()->addParam(obj, "labels", obj._labels, true))
bool initModule_contrib()
{
diff --git a/modules/contrib/src/fuzzymeanshifttracker.cpp b/modules/contrib/src/fuzzymeanshifttracker.cpp
index 0ac6d2443..1932ad595 100644
--- a/modules/contrib/src/fuzzymeanshifttracker.cpp
+++ b/modules/contrib/src/fuzzymeanshifttracker.cpp
@@ -40,7 +40,7 @@ CvFuzzyPoint::CvFuzzyPoint(double _x, double _y)
{
x = _x;
y = _y;
-};
+}
bool CvFuzzyCurve::between(double x, double x1, double x2)
{
@@ -50,37 +50,37 @@ bool CvFuzzyCurve::between(double x, double x1, double x2)
return true;
return false;
-};
+}
CvFuzzyCurve::CvFuzzyCurve()
{
value = 0;
-};
+}
CvFuzzyCurve::~CvFuzzyCurve()
{
// nothing to do
-};
+}
void CvFuzzyCurve::setCentre(double _centre)
{
centre = _centre;
-};
+}
double CvFuzzyCurve::getCentre()
{
return centre;
-};
+}
void CvFuzzyCurve::clear()
{
points.clear();
-};
+}
void CvFuzzyCurve::addPoint(double x, double y)
{
points.push_back(CvFuzzyPoint(x, y));
-};
+}
double CvFuzzyCurve::calcValue(double param)
{
@@ -101,41 +101,41 @@ double CvFuzzyCurve::calcValue(double param)
}
}
return 0;
-};
+}
double CvFuzzyCurve::getValue()
{
return value;
-};
+}
void CvFuzzyCurve::setValue(double _value)
{
value = _value;
-};
+}
CvFuzzyFunction::CvFuzzyFunction()
{
// nothing to do
-};
+}
CvFuzzyFunction::~CvFuzzyFunction()
{
curves.clear();
-};
+}
void CvFuzzyFunction::addCurve(CvFuzzyCurve *curve, double value)
{
curves.push_back(*curve);
curve->setValue(value);
-};
+}
void CvFuzzyFunction::resetValues()
{
int numCurves = (int)curves.size();
for (int i = 0; i < numCurves; i++)
curves[i].setValue(0);
-};
+}
double CvFuzzyFunction::calcValue()
{
@@ -152,7 +152,7 @@ double CvFuzzyFunction::calcValue()
return s1/s2;
else
return 0;
-};
+}
CvFuzzyCurve *CvFuzzyFunction::newCurve()
{
@@ -160,14 +160,14 @@ CvFuzzyCurve *CvFuzzyFunction::newCurve()
c = new CvFuzzyCurve();
addCurve(c);
return c;
-};
+}
CvFuzzyRule::CvFuzzyRule()
{
fuzzyInput1 = NULL;
fuzzyInput2 = NULL;
fuzzyOutput = NULL;
-};
+}
CvFuzzyRule::~CvFuzzyRule()
{
@@ -179,14 +179,14 @@ CvFuzzyRule::~CvFuzzyRule()
if (fuzzyOutput != NULL)
delete fuzzyOutput;
-};
+}
void CvFuzzyRule::setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1)
{
fuzzyInput1 = c1;
fuzzyInput2 = c2;
fuzzyOutput = o1;
-};
+}
double CvFuzzyRule::calcValue(double param1, double param2)
{
@@ -202,31 +202,31 @@ double CvFuzzyRule::calcValue(double param1, double param2)
}
else
return v1;
-};
+}
CvFuzzyCurve *CvFuzzyRule::getOutputCurve()
{
return fuzzyOutput;
-};
+}
CvFuzzyController::CvFuzzyController()
{
// nothing to do
-};
+}
CvFuzzyController::~CvFuzzyController()
{
int size = (int)rules.size();
for(int i = 0; i < size; i++)
delete rules[i];
-};
+}
void CvFuzzyController::addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1)
{
CvFuzzyRule *f = new CvFuzzyRule();
rules.push_back(f);
f->setRule(c1, c2, o1);
-};
+}
double CvFuzzyController::calcOutput(double param1, double param2)
{
@@ -242,7 +242,7 @@ double CvFuzzyController::calcOutput(double param1, double param2)
}
v = list.calcValue();
return v;
-};
+}
CvFuzzyMeanShiftTracker::FuzzyResizer::FuzzyResizer()
{
@@ -298,12 +298,12 @@ CvFuzzyMeanShiftTracker::FuzzyResizer::FuzzyResizer()
fuzzyController.addRule(i1L, NULL, oS);
fuzzyController.addRule(i1M, NULL, oZE);
fuzzyController.addRule(i1H, NULL, oE);
-};
+}
int CvFuzzyMeanShiftTracker::FuzzyResizer::calcOutput(double edgeDensity, double density)
{
return (int)fuzzyController.calcOutput(edgeDensity, density);
-};
+}
CvFuzzyMeanShiftTracker::SearchWindow::SearchWindow()
{
@@ -328,7 +328,7 @@ CvFuzzyMeanShiftTracker::SearchWindow::SearchWindow()
depthLow = 0;
depthHigh = 0;
fuzzyResizer = NULL;
-};
+}
CvFuzzyMeanShiftTracker::SearchWindow::~SearchWindow()
{
@@ -354,7 +354,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::setSize(int _x, int _y, int _width,
if (y + height > maxHeight)
height = maxHeight - y;
-};
+}
void CvFuzzyMeanShiftTracker::SearchWindow::initDepthValues(IplImage *maskImage, IplImage *depthMap)
{
@@ -408,7 +408,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::initDepthValues(IplImage *maskImage,
depthHigh = 32000;
depthLow = 0;
}
-};
+}
bool CvFuzzyMeanShiftTracker::SearchWindow::shift()
{
@@ -421,7 +421,7 @@ bool CvFuzzyMeanShiftTracker::SearchWindow::shift()
{
return false;
}
-};
+}
void CvFuzzyMeanShiftTracker::SearchWindow::extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth)
{
@@ -527,7 +527,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::extractInfo(IplImage *maskImage, Ipl
ellipseAngle = 0;
density = 0;
}
-};
+}
void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh) {
int x1 = horizontalEdgeTop;
@@ -571,7 +571,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsEdgeDensityLinear(in
} else {
resizeDw = - resizeDx;
}
-};
+}
void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh)
{
@@ -587,7 +587,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsInnerDensity(int &re
resizeDy = (int)(py*dy);
resizeDw = (int)((1-px)*dx);
resizeDh = (int)((1-py)*dy);
-};
+}
void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh)
{
@@ -626,7 +626,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsEdgeDensityFuzzy(int
resizeDy = int(-dy1);
resizeDh = int(dy1+dy2);
}
-};
+}
bool CvFuzzyMeanShiftTracker::SearchWindow::meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth)
{
@@ -639,7 +639,7 @@ bool CvFuzzyMeanShiftTracker::SearchWindow::meanShift(IplImage *maskImage, IplIm
} while (++numShifts < maxIteration);
return false;
-};
+}
void CvFuzzyMeanShiftTracker::findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth)
{
@@ -679,17 +679,17 @@ void CvFuzzyMeanShiftTracker::findOptimumSearchWindow(SearchWindow &searchWindow
searchWindow.setSize(searchWindow.x + resizeDx, searchWindow.y + resizeDy, searchWindow.width + resizeDw, searchWindow.height + resizeDh);
}
-};
+}
CvFuzzyMeanShiftTracker::CvFuzzyMeanShiftTracker()
{
searchMode = tsSetWindow;
-};
+}
CvFuzzyMeanShiftTracker::~CvFuzzyMeanShiftTracker()
{
// nothing to do
-};
+}
void CvFuzzyMeanShiftTracker::track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass)
{
@@ -717,4 +717,4 @@ void CvFuzzyMeanShiftTracker::track(IplImage *maskImage, IplImage *depthMap, int
else
searchMode = tsTracking;
}
-};
+}
diff --git a/modules/contrib/src/imagelogpolprojection.cpp b/modules/contrib/src/imagelogpolprojection.cpp
index ed821efa9..2824949c7 100644
--- a/modules/contrib/src/imagelogpolprojection.cpp
+++ b/modules/contrib/src/imagelogpolprojection.cpp
@@ -362,14 +362,14 @@ bool ImageLogPolProjection::_initLogPolarCortexSampling(const double reductionFa
//std::cout<<"ImageLogPolProjection::Starting cortex projection"< main idea paralellise main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelised methods as necessary
- ** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelised
+ ** ==> main idea parallelize main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelized methods as necessary
+ ** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelized
** ==> functors constructors can differ from the parameters used with their related serial functions
*/
class Parallel_amacrineCellsComputing: public cv::ParallelLoopBody
diff --git a/modules/contrib/src/parvoretinafilter.hpp b/modules/contrib/src/parvoretinafilter.hpp
index 55d61d120..58e1303ec 100644
--- a/modules/contrib/src/parvoretinafilter.hpp
+++ b/modules/contrib/src/parvoretinafilter.hpp
@@ -219,8 +219,8 @@ private:
#ifdef MAKE_PARALLEL
/******************************************************
** IF some parallelizing thread methods are available, then, main loops are parallelized using these functors
-** ==> main idea paralellise main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelised methods as necessary
-** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelised
+** ==> main idea parallelize main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelized methods as necessary
+** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelized
** ==> functors constructors can differ from the parameters used with their related serial functions
*/
class Parallel_OPL_OnOffWaysComputing: public cv::ParallelLoopBody
diff --git a/modules/contrib/src/retina.cpp b/modules/contrib/src/retina.cpp
index 6f08337cc..4a02bbcb4 100644
--- a/modules/contrib/src/retina.cpp
+++ b/modules/contrib/src/retina.cpp
@@ -85,7 +85,7 @@ Retina::Retina(const cv::Size inputSz, const bool colorMode, RETINA_COLORSAMPLIN
{
_retinaFilter = 0;
_init(inputSz, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
-};
+}
Retina::~Retina()
{
diff --git a/modules/contrib/src/retinacolor.hpp b/modules/contrib/src/retinacolor.hpp
index 7b7294442..275309681 100644
--- a/modules/contrib/src/retinacolor.hpp
+++ b/modules/contrib/src/retinacolor.hpp
@@ -259,8 +259,8 @@ namespace cv
#ifdef MAKE_PARALLEL
/******************************************************
** IF some parallelizing thread methods are available, then, main loops are parallelized using these functors
- ** ==> main idea paralellise main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelised methods as necessary
- ** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelised
+ ** ==> main idea parallelize main filters loops, then, only the most used methods are parallelized... TODO : increase the number of parallelized methods as necessary
+ ** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelized
** ==> functors constructors can differ from the parameters used with their related serial functions
*/
diff --git a/modules/contrib/src/rgbdodometry.cpp b/modules/contrib/src/rgbdodometry.cpp
index 4e9d8c4df..0b2b9518c 100644
--- a/modules/contrib/src/rgbdodometry.cpp
+++ b/modules/contrib/src/rgbdodometry.cpp
@@ -115,7 +115,7 @@ void computeProjectiveMatrix( const Mat& ksi, Mat& Rt )
{
CV_Assert( ksi.size() == Size(1,6) && ksi.type() == CV_64FC1 );
-#if defined(HAVE_EIGEN) && EIGEN_WORLD_VERSION == 3
+#if defined(HAVE_EIGEN) && EIGEN_WORLD_VERSION == 3 && (!defined _MSC_VER || !defined _M_X64 || _MSC_VER > 1500)
const double* ksi_ptr = reinterpret_cast(ksi.ptr(0));
Eigen::Matrix twist, g;
twist << 0., -ksi_ptr[2], ksi_ptr[1], ksi_ptr[3],
diff --git a/modules/contrib/src/spinimages.cpp b/modules/contrib/src/spinimages.cpp
index b01090057..747bf3e12 100644
--- a/modules/contrib/src/spinimages.cpp
+++ b/modules/contrib/src/spinimages.cpp
@@ -718,7 +718,7 @@ void cv::SpinImageModel::defaultParams()
T_GeometriccConsistency = 0.25f;
T_GroupingCorespondances = 0.25f;
-};
+}
Mat cv::SpinImageModel::packRandomScaledSpins(bool separateScale, size_t xCount, size_t yCount) const
{
diff --git a/modules/contrib/src/templatebuffer.hpp b/modules/contrib/src/templatebuffer.hpp
index e50cd442c..8a85b5494 100644
--- a/modules/contrib/src/templatebuffer.hpp
+++ b/modules/contrib/src/templatebuffer.hpp
@@ -357,27 +357,27 @@ namespace cv
for (unsigned int i=0;isize();++i)
{
- double curentValue=(double)*(bufferPTR++);
+ double currentValue=(double)*(bufferPTR++);
// updating "closest to the high threshold" pixel value
- double highValueTest=maxThreshold-curentValue;
+ double highValueTest=maxThreshold-currentValue;
if (highValueTest>0)
{
if (deltaH>highValueTest)
{
deltaH=highValueTest;
- updatedHighValue=curentValue;
+ updatedHighValue=currentValue;
}
}
// updating "closest to the low threshold" pixel value
- double lowValueTest=curentValue-minThreshold;
+ double lowValueTest=currentValue-minThreshold;
if (lowValueTest>0)
{
if (deltaL>lowValueTest)
{
deltaL=lowValueTest;
- updatedLowValue=curentValue;
+ updatedLowValue=currentValue;
}
}
}
diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt
index a1e71bf4f..d9de52da2 100644
--- a/modules/core/CMakeLists.txt
+++ b/modules/core/CMakeLists.txt
@@ -8,8 +8,11 @@ endif()
ocv_module_include_directories("${OpenCV_SOURCE_DIR}/modules/dynamicuda/include/" ${ZLIB_INCLUDE_DIR})
+if(HAVE_WINRT_CX)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW")
+endif()
if(HAVE_WINRT)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW /GS /Gm- /AI\"${WINDOWS_SDK_PATH}/References/CommonConfiguration/Neutral\" /AI\"${VISUAL_STUDIO_PATH}/vcpackages\"")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GS /Gm- /AI\"${WINDOWS_SDK_PATH}/References/CommonConfiguration/Neutral\" /AI\"${VISUAL_STUDIO_PATH}/vcpackages\"")
endif()
if(ENABLE_DYNAMIC_CUDA)
@@ -22,7 +25,7 @@ endif()
if(HAVE_CUDA)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include")
- ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
+ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wshadow)
endif()
file(GLOB lib_cuda_hdrs "include/opencv2/${name}/cuda/*.hpp" "include/opencv2/${name}/cuda/*.h")
diff --git a/modules/core/doc/basic_structures.rst b/modules/core/doc/basic_structures.rst
index 6f9c10e1d..95d1b37f1 100644
--- a/modules/core/doc/basic_structures.rst
+++ b/modules/core/doc/basic_structures.rst
@@ -1350,12 +1350,12 @@ Copies the matrix to another one.
The method copies the matrix data to another matrix. Before copying the data, the method invokes ::
- m.create(this->size(), this->type);
+ m.create(this->size(), this->type());
so that the destination matrix is reallocated if needed. While ``m.copyTo(m);`` works flawlessly, the function does not handle the case of a partial overlap between the source and the destination matrices.
-When the operation mask is specified, and the ``Mat::create`` call shown above reallocated the matrix, the newly allocated matrix is initialized with all zeros before copying the data.
+When the operation mask is specified, if the ``Mat::create`` call shown above reallocates the matrix, the newly allocated matrix is initialized with all zeros before copying the data.
.. _Mat::convertTo:
@@ -1445,7 +1445,7 @@ Transposes a matrix.
The method performs matrix transposition by means of matrix expressions. It does not perform the actual transposition but returns a temporary matrix transposition object that can be further used as a part of more complex matrix expressions or can be assigned to a matrix: ::
- Mat A1 = A + Mat::eye(A.size(), A.type)*lambda;
+ Mat A1 = A + Mat::eye(A.size(), A.type())*lambda;
Mat C = A1.t()*A1; // compute (A + lambda*I)^t * (A + lamda*I)
diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst
index 7a6bd173a..f06d0fa60 100644
--- a/modules/core/doc/drawing_functions.rst
+++ b/modules/core/doc/drawing_functions.rst
@@ -514,7 +514,7 @@ Draws a text string.
:param font: ``CvFont`` structure initialized using :ocv:cfunc:`InitFont`.
:param fontFace: Font type. One of ``FONT_HERSHEY_SIMPLEX``, ``FONT_HERSHEY_PLAIN``, ``FONT_HERSHEY_DUPLEX``, ``FONT_HERSHEY_COMPLEX``, ``FONT_HERSHEY_TRIPLEX``, ``FONT_HERSHEY_COMPLEX_SMALL``, ``FONT_HERSHEY_SCRIPT_SIMPLEX``, or ``FONT_HERSHEY_SCRIPT_COMPLEX``,
- where each of the font ID's can be combined with ``FONT_HERSHEY_ITALIC`` to get the slanted letters.
+ where each of the font ID's can be combined with ``FONT_ITALIC`` to get the slanted letters.
:param fontScale: Font scale factor that is multiplied by the font-specific base size.
diff --git a/modules/core/doc/old_basic_structures.rst b/modules/core/doc/old_basic_structures.rst
index c8de17ada..1d5880a62 100644
--- a/modules/core/doc/old_basic_structures.rst
+++ b/modules/core/doc/old_basic_structures.rst
@@ -1436,7 +1436,7 @@ description rewritten using
IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);
IplImage gray_img_hdr, *gray_img;
- gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
+ gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0);
...
@@ -1444,6 +1444,18 @@ description rewritten using
int size[] = { 2, 2, 2 };
CvMatND* mat = cvCreateMatND(3, size, CV_32F);
CvMat row_header, *row;
+ row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0);
+
+..
+
+In C, the header file for this function includes a convenient macro ``cvReshapeND`` that does away with the ``sizeof_header`` parameter. So, the lines containing the call to ``cvReshapeMatND`` in the examples may be replaced as follow:
+
+::
+
+ gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
+
+ ...
+
row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0);
..
diff --git a/modules/core/doc/operations_on_arrays.rst b/modules/core/doc/operations_on_arrays.rst
index 8c01a1010..be91944e7 100644
--- a/modules/core/doc/operations_on_arrays.rst
+++ b/modules/core/doc/operations_on_arrays.rst
@@ -1252,11 +1252,12 @@ gemm
----
Performs generalized matrix multiplication.
-.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double gamma, OutputArray dst, int flags=0 )
+.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double beta, OutputArray dst, int flags=0 )
-.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, gamma[, dst[, flags]]) -> dst
+.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, beta[, dst[, flags]]) -> dst
.. ocv:cfunction:: void cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, const CvArr* src3, double beta, CvArr* dst, int tABC=0)
+
.. ocv:pyoldfunction:: cv.GEMM(src1, src2, alpha, src3, beta, dst, tABC=0)-> None
:param src1: first multiplied input matrix that should have ``CV_32FC1``, ``CV_64FC1``, ``CV_32FC2``, or ``CV_64FC2`` type.
diff --git a/modules/core/include/opencv2/core/affine.hpp b/modules/core/include/opencv2/core/affine.hpp
new file mode 100644
index 000000000..827d044b8
--- /dev/null
+++ b/modules/core/include/opencv2/core/affine.hpp
@@ -0,0 +1,509 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_AFFINE3_HPP__
+#define __OPENCV_CORE_AFFINE3_HPP__
+
+#ifdef __cplusplus
+
+#include <opencv2/core/core.hpp>
+
+namespace cv
+{
+    template<typename T>
+    class Affine3
+    {
+    public:
+        typedef T float_type;
+        typedef Matx<float_type, 3, 3> Mat3;
+        typedef Matx<float_type, 4, 4> Mat4;
+        typedef Vec<float_type, 3> Vec3;
+
+ Affine3();
+
+ //Augmented affine matrix
+ Affine3(const Mat4& affine);
+
+ //Rotation matrix
+ Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
+
+ //Rodrigues vector
+ Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
+
+ //Combines all contructors above. Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix
+ explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
+
+ //From 16th element array
+ explicit Affine3(const float_type* vals);
+
+ static Affine3 Identity();
+
+ //Rotation matrix
+ void rotation(const Mat3& R);
+
+ //Rodrigues vector
+ void rotation(const Vec3& rvec);
+
+ //Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix;
+ void rotation(const Mat& data);
+
+ void linear(const Mat3& L);
+ void translation(const Vec3& t);
+
+ Mat3 rotation() const;
+ Mat3 linear() const;
+ Vec3 translation() const;
+
+ //Rodrigues vector
+ Vec3 rvec() const;
+
+ Affine3 inv(int method = cv::DECOMP_SVD) const;
+
+ // a.rotate(R) is equivalent to Affine(R, 0) * a;
+ Affine3 rotate(const Mat3& R) const;
+
+ // a.rotate(R) is equivalent to Affine(rvec, 0) * a;
+ Affine3 rotate(const Vec3& rvec) const;
+
+ // a.translate(t) is equivalent to Affine(E, t) * a;
+ Affine3 translate(const Vec3& t) const;
+
+ // a.concatenate(affine) is equivalent to affine * a;
+ Affine3 concatenate(const Affine3& affine) const;
+
+        template<typename Y> operator Affine3<Y>() const;
+
+        template<typename Y> Affine3<Y> cast() const;
+
+ Mat4 matrix;
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+        Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);
+        Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);
+        operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;
+        operator Eigen::Transform<T, 3, Eigen::Affine>() const;
+#endif
+ };
+
+    template<typename T> static
+    Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
+
+    template<typename T, typename V> static
+    V operator*(const Affine3<T>& affine, const V& vector);
+
+    typedef Affine3<float> Affine3f;
+    typedef Affine3<double> Affine3d;
+
+ static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);
+ static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);
+
+    template<typename _Tp> class DataType< Affine3<_Tp> >
+    {
+    public:
+        typedef Affine3<_Tp> value_type;
+        typedef Affine3<typename DataType<_Tp>::work_type> work_type;
+        typedef _Tp channel_type;
+
+        enum { generic_type = 0,
+               depth = DataType<channel_type>::depth,
+               channels = 16,
+               fmt = DataType<channel_type>::fmt + ((channels - 1) << 8),
+               type = CV_MAKETYPE(depth, channels)
+             };
+
+        typedef Vec<channel_type, channels> vec_type;
+    };
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////
+/// Implementaiton
+
+template inline
+cv::Affine3::Affine3()
+ : matrix(Mat4::eye())
+{}
+
+template inline
+cv::Affine3::Affine3(const Mat4& affine)
+ : matrix(affine)
+{}
+
+template inline
+cv::Affine3::Affine3(const Mat3& R, const Vec3& t)
+{
+ rotation(R);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template inline
+cv::Affine3::Affine3(const Vec3& _rvec, const Vec3& t)
+{
+ rotation(_rvec);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template inline
+cv::Affine3::Affine3(const cv::Mat& data, const Vec3& t)
+{
+ CV_Assert(data.type() == cv::DataType::type);
+
+ if (data.cols == 4 && data.rows == 4)
+ {
+ data.copyTo(matrix);
+ return;
+ }
+ else if (data.cols == 4 && data.rows == 3)
+ {
+ rotation(data(Rect(0, 0, 3, 3)));
+ translation(data(Rect(3, 0, 1, 3)));
+ return;
+ }
+
+ rotation(data);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template inline
+cv::Affine3::Affine3(const float_type* vals) : matrix(vals)
+{}
+
+template inline
+cv::Affine3 cv::Affine3::Identity()
+{
+ return Affine3(cv::Affine3::Mat4::eye());
+}
+
+template inline
+void cv::Affine3::rotation(const Mat3& R)
+{
+ linear(R);
+}
+
+template inline
+void cv::Affine3::rotation(const Vec3& _rvec)
+{
+ double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2];
+ double theta = std::sqrt(rx*rx + ry*ry + rz*rz);
+
+ if (theta < DBL_EPSILON)
+ rotation(Mat3::eye());
+ else
+ {
+ const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+
+ double c = std::cos(theta);
+ double s = std::sin(theta);
+ double c1 = 1. - c;
+ double itheta = theta ? 1./theta : 0.;
+
+ rx *= itheta; ry *= itheta; rz *= itheta;
+
+ double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };
+ double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };
+ Mat3 R;
+
+ // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
+ // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
+ for(int k = 0; k < 9; ++k)
+ R.val[k] = static_cast(c*I[k] + c1*rrt[k] + s*_r_x_[k]);
+
+ rotation(R);
+ }
+}
+
+//Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix;
+template inline
+void cv::Affine3::rotation(const cv::Mat& data)
+{
+ CV_Assert(data.type() == cv::DataType::type);
+
+ if (data.cols == 3 && data.rows == 3)
+ {
+ Mat3 R;
+ data.copyTo(R);
+ rotation(R);
+ }
+ else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))
+ {
+ Vec3 _rvec;
+ data.reshape(1, 3).copyTo(_rvec);
+ rotation(_rvec);
+ }
+ else
+ CV_Assert(!"Input marix can be 3x3, 1x3 or 3x1");
+}
+
+template inline
+void cv::Affine3::linear(const Mat3& L)
+{
+ matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2];
+ matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5];
+ matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8];
+}
+
+template inline
+void cv::Affine3::translation(const Vec3& t)
+{
+ matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];
+}
+
+template inline
+typename cv::Affine3::Mat3 cv::Affine3::rotation() const
+{
+ return linear();
+}
+
+template inline
+typename cv::Affine3::Mat3 cv::Affine3::linear() const
+{
+ typename cv::Affine3::Mat3 R;
+ R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2];
+ R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6];
+ R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10];
+ return R;
+}
+
+template inline
+typename cv::Affine3::Vec3 cv::Affine3::translation() const
+{
+ return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);
+}
+
+template inline
+typename cv::Affine3::Vec3 cv::Affine3::rvec() const
+{
+ cv::Vec3d w;
+ cv::Matx33d u, vt, R = rotation();
+ cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);
+ R = u * vt;
+
+ double rx = R.val[7] - R.val[5];
+ double ry = R.val[2] - R.val[6];
+ double rz = R.val[3] - R.val[1];
+
+ double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
+ double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
+ c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
+ double theta = acos(c);
+
+ if( s < 1e-5 )
+ {
+ if( c > 0 )
+ rx = ry = rz = 0;
+ else
+ {
+ double t;
+ t = (R.val[0] + 1) * 0.5;
+ rx = std::sqrt(std::max(t, 0.0));
+ t = (R.val[4] + 1) * 0.5;
+ ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);
+ t = (R.val[8] + 1) * 0.5;
+ rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? -1.0 : 1.0);
+
+ if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )
+ rz = -rz;
+ theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
+ rx *= theta;
+ ry *= theta;
+ rz *= theta;
+ }
+ }
+ else
+ {
+ double vth = 1/(2*s);
+ vth *= theta;
+ rx *= vth; ry *= vth; rz *= vth;
+ }
+
+ return cv::Vec3d(rx, ry, rz);
+}
+
+template inline
+cv::Affine3 cv::Affine3::inv(int method) const
+{
+ return matrix.inv(method);
+}
+
+template inline
+cv::Affine3 cv::Affine3::rotate(const Mat3& R) const
+{
+ Mat3 Lc = linear();
+ Vec3 tc = translation();
+ Mat4 result;
+ result.val[12] = result.val[13] = result.val[14] = 0;
+ result.val[15] = 1;
+
+ for(int j = 0; j < 3; ++j)
+ {
+ for(int i = 0; i < 3; ++i)
+ {
+ float_type value = 0;
+ for(int k = 0; k < 3; ++k)
+ value += R(j, k) * Lc(k, i);
+ result(j, i) = value;
+ }
+
+ result(j, 3) = R.row(j).dot(tc.t());
+ }
+ return result;
+}
+
+template inline
+cv::Affine3 cv::Affine3::rotate(const Vec3& _rvec) const
+{
+ return rotate(Affine3f(_rvec).rotation());
+}
+
+template inline
+cv::Affine3 cv::Affine3::translate(const Vec3& t) const
+{
+ Mat4 m = matrix;
+ m.val[ 3] += t[0];
+ m.val[ 7] += t[1];
+ m.val[11] += t[2];
+ return m;
+}
+
+template inline
+cv::Affine3 cv::Affine3::concatenate(const Affine3& affine) const
+{
+ return (*this).rotate(affine.rotation()).translate(affine.translation());
+}
+
+template template inline
+cv::Affine3::operator Affine3() const
+{
+ return Affine3(matrix);
+}
+
+template template inline
+cv::Affine3 cv::Affine3::cast() const
+{
+ return Affine3(matrix);
+}
+
+template inline
+cv::Affine3 cv::operator*(const cv::Affine3& affine1, const cv::Affine3& affine2)
+{
+ return affine2.concatenate(affine1);
+}
+
+template inline
+V cv::operator*(const cv::Affine3& affine, const V& v)
+{
+ const typename Affine3::Mat4& m = affine.matrix;
+
+ V r;
+ r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];
+ r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];
+ r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];
+ return r;
+}
+
+static inline
+cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)
+{
+ const cv::Matx44f& m = affine.matrix;
+ cv::Vec3f r;
+ r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+ r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+ r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+ return r;
+}
+
+static inline
+cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
+{
+ const cv::Matx44d& m = affine.matrix;
+ cv::Vec3d r;
+ r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+ r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+ r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+ return r;
+}
+
+
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+
+template inline
+cv::Affine3::Affine3(const Eigen::Transform& affine)
+{
+ cv::Mat(4, 4, cv::DataType::type, affine.matrix().data()).copyTo(matrix);
+}
+
+template inline
+cv::Affine3::Affine3(const Eigen::Transform& affine)
+{
+ Eigen::Transform a = affine;
+ cv::Mat(4, 4, cv::DataType::type, a.matrix().data()).copyTo(matrix);
+}
+
+template inline
+cv::Affine3::operator Eigen::Transform() const
+{
+ Eigen::Transform r;
+ cv::Mat hdr(4, 4, cv::DataType::type, r.matrix().data());
+ cv::Mat(matrix, false).copyTo(hdr);
+ return r;
+}
+
+template inline
+cv::Affine3::operator Eigen::Transform() const
+{
+ return this->operator Eigen::Transform();
+}
+
+#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */
+
+
+#endif /* __cplusplus */
+
+#endif /* __OPENCV_CORE_AFFINE3_HPP__ */
diff --git a/modules/core/include/opencv2/core/core.hpp b/modules/core/include/opencv2/core/core.hpp
index cafba0f8f..5667e9e50 100644
--- a/modules/core/include/opencv2/core/core.hpp
+++ b/modules/core/include/opencv2/core/core.hpp
@@ -892,6 +892,7 @@ public:
 typedef Point_<int> Point2i;
 typedef Point2i Point;
 typedef Size_<int> Size2i;
+typedef Size_<double> Size2d;
 typedef Size2i Size;
 typedef Rect_<int> Rect;
 typedef Point_<float> Point2f;
@@ -2318,7 +2319,7 @@ CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val=0);
//! implements generalized matrix product algorithm GEMM from BLAS
CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
- InputArray src3, double gamma, OutputArray dst, int flags=0);
+ InputArray src3, double beta, OutputArray dst, int flags=0);
//! multiplies matrix by its transposition from the left or from the right
CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,
InputArray delta=noArray(),
diff --git a/modules/core/include/opencv2/core/core_c.h b/modules/core/include/opencv2/core/core_c.h
index d4182d2f7..38abfc409 100644
--- a/modules/core/include/opencv2/core/core_c.h
+++ b/modules/core/include/opencv2/core/core_c.h
@@ -105,7 +105,7 @@ CVAPI(void) cvResetImageROI( IplImage* image );
/* Retrieves image ROI */
CVAPI(CvRect) cvGetImageROI( const IplImage* image );
-/* Allocates and initalizes CvMat header */
+/* Allocates and initializes CvMat header */
CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type );
#define CV_AUTOSTEP 0x7fffffff
diff --git a/modules/core/include/opencv2/core/internal.hpp b/modules/core/include/opencv2/core/internal.hpp
index 3cd2f90f6..6c9d3d2f1 100644
--- a/modules/core/include/opencv2/core/internal.hpp
+++ b/modules/core/include/opencv2/core/internal.hpp
@@ -97,6 +97,13 @@ CV_INLINE IppiSize ippiSize(int width, int height)
IppiSize size = { width, height };
return size;
}
+
+CV_INLINE IppiSize ippiSize(const cv::Size & _size)
+{
+ IppiSize size = { _size.width, _size.height };
+ return size;
+}
+
#endif
#ifndef IPPI_CALL
diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp
index 8ddc16eb1..45c25900c 100644
--- a/modules/core/include/opencv2/core/mat.hpp
+++ b/modules/core/include/opencv2/core/mat.hpp
@@ -2401,7 +2401,7 @@ template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m)
if( m.type() == DataType<_Tp>::type )
*this = (const SparseMat_<_Tp>&)m;
else
- m.convertTo(this, DataType<_Tp>::type);
+ m.convertTo(*this, DataType<_Tp>::type);
}
template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m)
diff --git a/modules/core/include/opencv2/core/types_c.h b/modules/core/include/opencv2/core/types_c.h
index 3e5d5b033..99ac0d257 100644
--- a/modules/core/include/opencv2/core/types_c.h
+++ b/modules/core/include/opencv2/core/types_c.h
@@ -245,7 +245,7 @@ enum {
CV_StsVecLengthErr= -28, /* incorrect vector length */
CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */
CV_StsKernelStructContentErr= -30, /* incorr. transform kernel content */
- CV_StsFilterOffsetErr= -31, /* incorrect filter ofset value */
+ CV_StsFilterOffsetErr= -31, /* incorrect filter offset value */
CV_StsBadSize= -201, /* the input/output structure size is incorrect */
CV_StsDivByZero= -202, /* division by zero */
CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */
diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp
index 25e5892b6..63c293528 100644
--- a/modules/core/include/opencv2/core/version.hpp
+++ b/modules/core/include/opencv2/core/version.hpp
@@ -49,7 +49,7 @@
#define CV_VERSION_EPOCH 2
#define CV_VERSION_MAJOR 4
-#define CV_VERSION_MINOR 8
+#define CV_VERSION_MINOR 9
#define CV_VERSION_REVISION 0
#define CVAUX_STR_EXP(__A) #__A
diff --git a/modules/core/src/algorithm.cpp b/modules/core/src/algorithm.cpp
index f96f243cb..5f16f95ed 100644
--- a/modules/core/src/algorithm.cpp
+++ b/modules/core/src/algorithm.cpp
@@ -647,7 +647,7 @@ void AlgorithmInfo::set(Algorithm* algo, const char* parameter, int argType, con
|| argType == Param::FLOAT || argType == Param::UNSIGNED_INT || argType == Param::UINT64 || argType == Param::UCHAR)
{
if ( !( p->type == Param::INT || p->type == Param::REAL || p->type == Param::BOOLEAN
- || p->type == Param::UNSIGNED_INT || p->type == Param::UINT64 || p->type == Param::FLOAT || argType == Param::UCHAR
+ || p->type == Param::UNSIGNED_INT || p->type == Param::UINT64 || p->type == Param::FLOAT || p->type == Param::UCHAR
|| (p->type == Param::SHORT && argType == Param::INT)) )
{
string message = getErrorMessageForWrongArgumentInSetter(algo->name(), parameter, p->type, argType);
diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp
index a11b64b44..fa74cfa4e 100644
--- a/modules/core/src/arithm.cpp
+++ b/modules/core/src/arithm.cpp
@@ -533,7 +533,7 @@ static void add8u( const uchar* src1, size_t step1,
uchar* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiAdd_8u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0),
+ ippiAdd_8u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0),
(vBinOp8, IF_SIMD(_VAdd8u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -549,7 +549,7 @@ static void add16u( const ushort* src1, size_t step1,
ushort* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiAdd_16u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0),
+ ippiAdd_16u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0),
(vBinOp16, IF_SIMD(_VAdd16u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -558,7 +558,7 @@ static void add16s( const short* src1, size_t step1,
short* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiAdd_16s_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0),
+ ippiAdd_16s_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz), 0),
(vBinOp16, IF_SIMD(_VAdd16s)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -574,7 +574,7 @@ static void add32f( const float* src1, size_t step1,
float* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiAdd_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
+ ippiAdd_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)),
(vBinOp32f, IF_SIMD(_VAdd32f)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -590,7 +590,7 @@ static void sub8u( const uchar* src1, size_t step1,
uchar* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiSub_8u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0),
+ ippiSub_8u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz), 0),
(vBinOp8, IF_SIMD(_VSub8u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -606,7 +606,7 @@ static void sub16u( const ushort* src1, size_t step1,
ushort* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiSub_16u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0),
+ ippiSub_16u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz), 0),
(vBinOp16, IF_SIMD(_VSub16u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -615,7 +615,7 @@ static void sub16s( const short* src1, size_t step1,
short* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiSub_16s_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0),
+ ippiSub_16s_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz), 0),
(vBinOp16, IF_SIMD(_VSub16s)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -631,7 +631,7 @@ static void sub32f( const float* src1, size_t step1,
float* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiSub_32f_C1R(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz),
+ ippiSub_32f_C1R(src2, (int)step2, src1, (int)step1, dst, (int)step, ippiSize(sz)),
(vBinOp32f, IF_SIMD(_VSub32f)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -668,7 +668,7 @@ static void max8u( const uchar* src1, size_t step1,
#endif
// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
-// ippiMaxEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
+// ippiMaxEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, ippiSize(sz)),
// (vBinOp8, IF_SIMD(_VMax8u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -702,7 +702,7 @@ static void max16u( const ushort* src1, size_t step1,
#endif
// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
-// ippiMaxEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
+// ippiMaxEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, ippiSize(sz)),
// (vBinOp16, IF_SIMD(_VMax16u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -742,7 +742,7 @@ static void max32f( const float* src1, size_t step1,
vBinOp32f, IF_SIMD(_VMax32f)>(src1, step1, src2, step2, dst, step, sz);
#endif
// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
-// ippiMaxEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
+// ippiMaxEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, ippiSize(sz)),
// (vBinOp32f, IF_SIMD(_VMax32f)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -776,7 +776,7 @@ static void min8u( const uchar* src1, size_t step1,
#endif
// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
-// ippiMinEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
+// ippiMinEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, ippiSize(sz)),
// (vBinOp8, IF_SIMD(_VMin8u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -810,7 +810,7 @@ static void min16u( const ushort* src1, size_t step1,
#endif
// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
-// ippiMinEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
+// ippiMinEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, ippiSize(sz)),
// (vBinOp16, IF_SIMD(_VMin16u)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -850,7 +850,7 @@ static void min32f( const float* src1, size_t step1,
vBinOp32f, IF_SIMD(_VMin32f)>(src1, step1, src2, step2, dst, step, sz);
#endif
// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
-// ippiMinEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz),
+// ippiMinEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, ippiSize(sz)),
// (vBinOp32f, IF_SIMD(_VMin32f)>(src1, step1, src2, step2, dst, step, sz)));
}
@@ -866,7 +866,7 @@ static void absdiff8u( const uchar* src1, size_t step1,
uchar* dst, size_t step, Size sz, void* )
{
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step);
- ippiAbsDiff_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz),
+ ippiAbsDiff_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, ippiSize(sz)),
(vBinOp8