This commit is contained in:
commit
723bc3cae9
1
.gitignore
vendored
1
.gitignore
vendored
@ -2,6 +2,7 @@
|
||||
.DS_Store
|
||||
refman.rst
|
||||
OpenCV4Tegra/
|
||||
tegra/
|
||||
*.user
|
||||
.sw[a-z]
|
||||
.*.swp
|
||||
|
1
3rdparty/ffmpeg/ffmpeg_version.cmake
vendored
1
3rdparty/ffmpeg/ffmpeg_version.cmake
vendored
@ -1,5 +1,4 @@
|
||||
set(HAVE_FFMPEG 1)
|
||||
set(NEW_FFMPEG 1)
|
||||
set(HAVE_FFMPEG_CODEC 1)
|
||||
set(HAVE_FFMPEG_FORMAT 1)
|
||||
set(HAVE_FFMPEG_UTIL 1)
|
||||
|
22
3rdparty/tbb/CMakeLists.txt
vendored
22
3rdparty/tbb/CMakeLists.txt
vendored
@ -11,7 +11,7 @@ if (WIN32 AND ARM)
|
||||
set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130613oss_src.tgz")
|
||||
set(tbb_md5 "108c8c1e481b0aaea61878289eb28b6a")
|
||||
set(tbb_version_file "version_string.ver")
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow -Wunused-parameter)
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702)
|
||||
else()
|
||||
# 4.1 update 2 - works fine
|
||||
set(tbb_ver "tbb41_20130116oss")
|
||||
@ -82,24 +82,8 @@ endif()
|
||||
set(tbb_tarball "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_ver}_src.tgz")
|
||||
set(tbb_src_dir "${CMAKE_CURRENT_BINARY_DIR}/${tbb_ver}")
|
||||
|
||||
macro(getMD5 filename varname)
|
||||
if(CMAKE_VERSION VERSION_GREATER 2.8.6)
|
||||
file(MD5 "${filename}" ${varname})
|
||||
else()
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E md5sum "${filename}"
|
||||
RESULT_VARIABLE getMD5_RESULT
|
||||
OUTPUT_VARIABLE getMD5_OUTPUT
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
if(getMD5_RESULT EQUAL 0)
|
||||
string(REGEX MATCH "^[a-z0-9]+" ${varname} "${getMD5_OUTPUT}")
|
||||
else()
|
||||
set(${varname} "invalid_md5")
|
||||
endif()
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
if(EXISTS "${tbb_tarball}")
|
||||
getMD5("${tbb_tarball}" tbb_local_md5)
|
||||
file(MD5 "${tbb_tarball}" tbb_local_md5)
|
||||
if(NOT tbb_local_md5 STREQUAL tbb_md5)
|
||||
message(WARNING "Local copy of TBB source tarball has invalid MD5 hash: ${tbb_local_md5} (expected: ${tbb_md5})")
|
||||
file(REMOVE "${tbb_tarball}")
|
||||
@ -115,7 +99,7 @@ if(NOT EXISTS "${tbb_tarball}")
|
||||
if(NOT __statvar EQUAL 0)
|
||||
message(FATAL_ERROR "Failed to download TBB sources: ${tbb_url}")
|
||||
endif()
|
||||
getMD5("${tbb_tarball}" tbb_local_md5)
|
||||
file(MD5 "${tbb_tarball}" tbb_local_md5)
|
||||
if(NOT tbb_local_md5 STREQUAL tbb_md5)
|
||||
file(REMOVE "${tbb_tarball}")
|
||||
message(FATAL_ERROR "Downloaded TBB source tarball has invalid MD5 hash: ${tbb_local_md5} (expected: ${tbb_md5})")
|
||||
|
@ -4,20 +4,14 @@
|
||||
# From the off-tree build directory, invoke:
|
||||
# $ cmake <PATH_TO_OPENCV_ROOT>
|
||||
#
|
||||
#
|
||||
# - OCT-2008: Initial version <joseluisblancoc@gmail.com>
|
||||
#
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS true)
|
||||
include(cmake/OpenCVMinDepVersions.cmake)
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# Indicate CMake 2.7 and above that we don't want to mix relative
|
||||
# and absolute paths in linker lib lists.
|
||||
# Run "cmake --help-policy CMP0003" for more information.
|
||||
# --------------------------------------------------------------
|
||||
if(COMMAND cmake_policy)
|
||||
cmake_policy(SET CMP0003 NEW)
|
||||
if(CMAKE_GENERATOR MATCHES Xcode AND XCODE_VERSION VERSION_GREATER 4.3)
|
||||
cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)
|
||||
else()
|
||||
cmake_minimum_required(VERSION "${MIN_VER_CMAKE}" FATAL_ERROR)
|
||||
endif()
|
||||
|
||||
# Following block can broke build in case of cross-compilng
|
||||
@ -41,20 +35,10 @@ else(NOT CMAKE_TOOLCHAIN_FILE)
|
||||
set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install" CACHE PATH "Installation Directory")
|
||||
endif(NOT CMAKE_TOOLCHAIN_FILE)
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# Top level OpenCV project
|
||||
# --------------------------------------------------------------
|
||||
if(CMAKE_GENERATOR MATCHES Xcode AND XCODE_VERSION VERSION_GREATER 4.3)
|
||||
cmake_minimum_required(VERSION 2.8.8)
|
||||
elseif(IOS)
|
||||
cmake_minimum_required(VERSION 2.8.0)
|
||||
else()
|
||||
cmake_minimum_required(VERSION 2.6.3)
|
||||
endif()
|
||||
|
||||
# must go before the project command
|
||||
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Configs" FORCE)
|
||||
if(DEFINED CMAKE_BUILD_TYPE AND CMAKE_VERSION VERSION_GREATER "2.8")
|
||||
if(DEFINED CMAKE_BUILD_TYPE)
|
||||
set_property( CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES} )
|
||||
endif()
|
||||
|
||||
@ -126,10 +110,10 @@ endif()
|
||||
OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON IF (NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O" ON IF IOS)
|
||||
OCV_OPTION(WITH_CARBON "Use Carbon for UI instead of Cocoa" OFF IF APPLE )
|
||||
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OFF IF (CMAKE_VERSION VERSION_GREATER "2.8" AND NOT ANDROID AND NOT IOS AND NOT APPLE) )
|
||||
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (NOT ANDROID AND NOT IOS) )
|
||||
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OFF IF (NOT ANDROID AND NOT IOS AND NOT APPLE) )
|
||||
OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON)
|
||||
OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 )
|
||||
OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS))
|
||||
@ -200,7 +184,7 @@ OCV_OPTION(INSTALL_TO_MANGLED_PATHS "Enables mangled install paths, that help wi
|
||||
# OpenCV build options
|
||||
# ===================================================
|
||||
OCV_OPTION(ENABLE_PRECOMPILED_HEADERS "Use precompiled headers" ON IF (NOT IOS) )
|
||||
OCV_OPTION(ENABLE_SOLUTION_FOLDERS "Solution folder in Visual Studio or in other IDEs" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) IF (CMAKE_VERSION VERSION_GREATER "2.8.0") )
|
||||
OCV_OPTION(ENABLE_SOLUTION_FOLDERS "Solution folder in Visual Studio or in other IDEs" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) )
|
||||
OCV_OPTION(ENABLE_PROFILING "Enable profiling in the GCC compiler (Add flags: -g -pg)" OFF IF CMAKE_COMPILER_IS_GNUCXX )
|
||||
OCV_OPTION(ENABLE_OMIT_FRAME_POINTER "Enable -fomit-frame-pointer for GCC" ON IF CMAKE_COMPILER_IS_GNUCXX AND NOT (APPLE AND CMAKE_COMPILER_IS_CLANGCXX) )
|
||||
OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" ON IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES powerpc.*) )
|
||||
@ -214,7 +198,7 @@ OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions"
|
||||
OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
|
||||
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
|
||||
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
|
||||
|
||||
OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
|
||||
|
||||
# uncategorized options
|
||||
# ===================================================
|
||||
@ -362,14 +346,11 @@ endif(WIN32 AND NOT MINGW)
|
||||
# CHECK FOR SYSTEM LIBRARIES, OPTIONS, ETC..
|
||||
# ----------------------------------------------------------------------------
|
||||
if(UNIX)
|
||||
include(cmake/OpenCVFindPkgConfig.cmake OPTIONAL)
|
||||
find_package(PkgConfig QUIET)
|
||||
include(CheckFunctionExists)
|
||||
include(CheckIncludeFile)
|
||||
|
||||
if(NOT APPLE)
|
||||
CHECK_INCLUDE_FILE(alloca.h HAVE_ALLOCA_H)
|
||||
CHECK_FUNCTION_EXISTS(alloca HAVE_ALLOCA)
|
||||
CHECK_INCLUDE_FILE(unistd.h HAVE_UNISTD_H)
|
||||
CHECK_INCLUDE_FILE(pthread.h HAVE_LIBPTHREAD)
|
||||
if(ANDROID)
|
||||
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log)
|
||||
@ -379,7 +360,7 @@ if(UNIX)
|
||||
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m pthread rt)
|
||||
endif()
|
||||
else()
|
||||
add_definitions(-DHAVE_ALLOCA -DHAVE_ALLOCA_H -DHAVE_LIBPTHREAD -DHAVE_UNISTD_H)
|
||||
set(HAVE_LIBPTHREAD YES)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@ -604,6 +585,16 @@ if(ANDROID)
|
||||
status(" Android examples:" BUILD_ANDROID_EXAMPLES AND CAN_BUILD_ANDROID_PROJECTS THEN YES ELSE NO)
|
||||
endif()
|
||||
|
||||
# ================== Windows RT features ==================
|
||||
if(WIN32)
|
||||
status("")
|
||||
status(" Windows RT support:" HAVE_WINRT THEN YES ELSE NO)
|
||||
if (ENABLE_WINRT_MODE)
|
||||
status(" Windows SDK v8.0:" ${WINDOWS_SDK_PATH})
|
||||
status(" Visual Studio 2012:" ${VISUAL_STUDIO_PATH})
|
||||
endif()
|
||||
endif(WIN32)
|
||||
|
||||
# ========================== GUI ==========================
|
||||
status("")
|
||||
status(" GUI: ")
|
||||
|
17
README
17
README
@ -1,17 +0,0 @@
|
||||
OpenCV: open source computer vision library
|
||||
|
||||
Homepage: http://opencv.org
|
||||
Online docs: http://docs.opencv.org
|
||||
Q&A forum: http://answers.opencv.org
|
||||
Dev zone: http://code.opencv.org
|
||||
|
||||
Please read before starting work on a pull request:
|
||||
http://code.opencv.org/projects/opencv/wiki/How_to_contribute
|
||||
|
||||
Summary of guidelines:
|
||||
|
||||
* One pull request per issue;
|
||||
* Choose the right base branch;
|
||||
* Include tests and documentation;
|
||||
* Clean up "oops" commits before submitting;
|
||||
* Follow the coding style guide.
|
23
README.md
Normal file
23
README.md
Normal file
@ -0,0 +1,23 @@
|
||||
### OpenCV: Open Source Computer Vision Library
|
||||
|
||||
#### Resources
|
||||
|
||||
* Homepage: <http://opencv.org>
|
||||
* Docs: <http://docs.opencv.org>
|
||||
* Q&A forum: <http://answers.opencv.org>
|
||||
* Issue tracking: <http://code.opencv.org>
|
||||
|
||||
#### Contributing
|
||||
|
||||
Please read before starting work on a pull request: <http://code.opencv.org/projects/opencv/wiki/How_to_contribute>
|
||||
|
||||
Summary of guidelines:
|
||||
|
||||
* One pull request per issue;
|
||||
* Choose the right base branch;
|
||||
* Include tests and documentation;
|
||||
* Clean up "oops" commits before submitting;
|
||||
* Follow the coding style guide.
|
||||
|
||||
[](https://www.gittip.com/OpenCV/)
|
||||
[](https://www.paypal.com/cgi-bin/webscr?item_name=Donation+to+OpenCV&cmd=_donations&business=accountant%40opencv.org)
|
@ -1,138 +0,0 @@
|
||||
# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords> <multi_value_keywords> args...)
|
||||
#
|
||||
# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for
|
||||
# parsing the arguments given to that macro or function.
|
||||
# It processes the arguments and defines a set of variables which hold the
|
||||
# values of the respective options.
|
||||
#
|
||||
# The <options> argument contains all options for the respective macro,
|
||||
# i.e. keywords which can be used when calling the macro without any value
|
||||
# following, like e.g. the OPTIONAL keyword of the install() command.
|
||||
#
|
||||
# The <one_value_keywords> argument contains all keywords for this macro
|
||||
# which are followed by one value, like e.g. DESTINATION keyword of the
|
||||
# install() command.
|
||||
#
|
||||
# The <multi_value_keywords> argument contains all keywords for this macro
|
||||
# which can be followed by more than one value, like e.g. the TARGETS or
|
||||
# FILES keywords of the install() command.
|
||||
#
|
||||
# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the
|
||||
# keywords listed in <options>, <one_value_keywords> and
|
||||
# <multi_value_keywords> a variable composed of the given <prefix>
|
||||
# followed by "_" and the name of the respective keyword.
|
||||
# These variables will then hold the respective value from the argument list.
|
||||
# For the <options> keywords this will be TRUE or FALSE.
|
||||
#
|
||||
# All remaining arguments are collected in a variable
|
||||
# <prefix>_UNPARSED_ARGUMENTS, this can be checked afterwards to see whether
|
||||
# your macro was called with unrecognized parameters.
|
||||
#
|
||||
# As an example here a my_install() macro, which takes similar arguments as the
|
||||
# real install() command:
|
||||
#
|
||||
# function(MY_INSTALL)
|
||||
# set(options OPTIONAL FAST)
|
||||
# set(oneValueArgs DESTINATION RENAME)
|
||||
# set(multiValueArgs TARGETS CONFIGURATIONS)
|
||||
# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
|
||||
# ...
|
||||
#
|
||||
# Assume my_install() has been called like this:
|
||||
# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub)
|
||||
#
|
||||
# After the cmake_parse_arguments() call the macro will have set the following
|
||||
# variables:
|
||||
# MY_INSTALL_OPTIONAL = TRUE
|
||||
# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install()
|
||||
# MY_INSTALL_DESTINATION = "bin"
|
||||
# MY_INSTALL_RENAME = "" (was not used)
|
||||
# MY_INSTALL_TARGETS = "foo;bar"
|
||||
# MY_INSTALL_CONFIGURATIONS = "" (was not used)
|
||||
# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL"
|
||||
#
|
||||
# You can the continue and process these variables.
|
||||
#
|
||||
# Keywords terminate lists of values, e.g. if directly after a one_value_keyword
|
||||
# another recognized keyword follows, this is interpreted as the beginning of
|
||||
# the new option.
|
||||
# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in
|
||||
# MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would
|
||||
# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefor.
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2010 Alexander Neundorf <neundorf@kde.org>
|
||||
#
|
||||
# Distributed under the OSI-approved BSD License (the "License");
|
||||
# see accompanying file Copyright.txt for details.
|
||||
#
|
||||
# This software is distributed WITHOUT ANY WARRANTY; without even the
|
||||
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the License for more information.
|
||||
#=============================================================================
|
||||
# (To distribute this file outside of CMake, substitute the full
|
||||
# License text for the above reference.)
|
||||
|
||||
|
||||
if(__CMAKE_PARSE_ARGUMENTS_INCLUDED)
|
||||
return()
|
||||
endif()
|
||||
set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE)
|
||||
|
||||
|
||||
function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames)
|
||||
# first set all result variables to empty/FALSE
|
||||
foreach(arg_name ${_singleArgNames} ${_multiArgNames})
|
||||
set(${prefix}_${arg_name})
|
||||
endforeach(arg_name)
|
||||
|
||||
foreach(option ${_optionNames})
|
||||
set(${prefix}_${option} FALSE)
|
||||
endforeach(option)
|
||||
|
||||
set(${prefix}_UNPARSED_ARGUMENTS)
|
||||
|
||||
set(insideValues FALSE)
|
||||
set(currentArgName)
|
||||
|
||||
# now iterate over all arguments and fill the result variables
|
||||
foreach(currentArg ${ARGN})
|
||||
list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword
|
||||
list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword
|
||||
list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword
|
||||
|
||||
if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1)
|
||||
if(insideValues)
|
||||
if("${insideValues}" STREQUAL "SINGLE")
|
||||
set(${prefix}_${currentArgName} ${currentArg})
|
||||
set(insideValues FALSE)
|
||||
elseif("${insideValues}" STREQUAL "MULTI")
|
||||
list(APPEND ${prefix}_${currentArgName} ${currentArg})
|
||||
endif()
|
||||
else(insideValues)
|
||||
list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg})
|
||||
endif(insideValues)
|
||||
else()
|
||||
if(NOT ${optionIndex} EQUAL -1)
|
||||
set(${prefix}_${currentArg} TRUE)
|
||||
set(insideValues FALSE)
|
||||
elseif(NOT ${singleArgIndex} EQUAL -1)
|
||||
set(currentArgName ${currentArg})
|
||||
set(${prefix}_${currentArgName})
|
||||
set(insideValues "SINGLE")
|
||||
elseif(NOT ${multiArgIndex} EQUAL -1)
|
||||
set(currentArgName ${currentArg})
|
||||
set(${prefix}_${currentArgName})
|
||||
set(insideValues "MULTI")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
endforeach(currentArg)
|
||||
|
||||
# propagate the result variables to the caller:
|
||||
foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames})
|
||||
set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE)
|
||||
endforeach(arg_name)
|
||||
set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE)
|
||||
|
||||
endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs)
|
@ -2,6 +2,40 @@ if(NOT MSVC)
|
||||
message(FATAL_ERROR "CRT options are available only for MSVC")
|
||||
endif()
|
||||
|
||||
#INCLUDE (CheckIncludeFiles)
|
||||
|
||||
if (ENABLE_WINRT_MODE)
|
||||
set(HAVE_WINRT True)
|
||||
|
||||
# search Windows Platform SDK
|
||||
message(STATUS "Checking for Windows Platfrom SDK")
|
||||
GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE)
|
||||
if (WINDOWS_SDK_PATH STREQUAL "")
|
||||
message(ERROR "Windows Platform SDK 8.0 was not found!")
|
||||
set(HAVE_WINRT False)
|
||||
endif()
|
||||
|
||||
#search for Visual Studio 11.0 install directory
|
||||
message(STATUS "Checking for Visual Studio 2012")
|
||||
GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE)
|
||||
if (VISUAL_STUDIO_PATH STREQUAL "")
|
||||
message(ERROR "Visual Studio 2012 was not found!")
|
||||
set(HAVE_WINRT False)
|
||||
endif()
|
||||
|
||||
if (HAVE_WINRT)
|
||||
TRY_COMPILE(HAVE_WINRT
|
||||
"${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
|
||||
"${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp"
|
||||
CMAKE_FLAGS "\"kernel.lib\" \"user32.lib\""
|
||||
OUTPUT_VARIABLE OUTPUT)
|
||||
endif()
|
||||
|
||||
if (HAVE_WINRT)
|
||||
add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602)
|
||||
endif()
|
||||
endif(ENABLE_WINRT_MODE)
|
||||
|
||||
if(NOT BUILD_SHARED_LIBS AND BUILD_WITH_STATIC_CRT)
|
||||
foreach(flag_var
|
||||
CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
|
||||
|
@ -1,159 +0,0 @@
|
||||
# ===================================================================================
|
||||
# The OpenCV CMake configuration file
|
||||
#
|
||||
# ** File generated automatically, do not modify **
|
||||
#
|
||||
# Usage from an external project:
|
||||
# In your CMakeLists.txt, add these lines:
|
||||
#
|
||||
# FIND_PACKAGE(OpenCV REQUIRED)
|
||||
# TARGET_LINK_LIBRARIES(MY_TARGET_NAME ${OpenCV_LIBS})
|
||||
#
|
||||
# Or you can search for specific OpenCV modules:
|
||||
#
|
||||
# FIND_PACKAGE(OpenCV REQUIRED core highgui)
|
||||
#
|
||||
# If the module is found then OPENCV_<MODULE>_FOUND is set to TRUE.
|
||||
#
|
||||
# This file will define the following variables:
|
||||
# - OpenCV_LIBS : The list of libraries to links against.
|
||||
# - OpenCV_LIB_DIR : The directory(es) where lib files are. Calling LINK_DIRECTORIES
|
||||
# with this path is NOT needed.
|
||||
# - OpenCV_INCLUDE_DIRS : The OpenCV include directories.
|
||||
# - OpenCV_COMPUTE_CAPABILITIES : The version of compute capability
|
||||
# - OpenCV_ANDROID_NATIVE_API_LEVEL : Minimum required level of Android API
|
||||
# - OpenCV_VERSION : The version of this OpenCV build. Example: "2.4.0"
|
||||
# - OpenCV_VERSION_MAJOR : Major version part of OpenCV_VERSION. Example: "2"
|
||||
# - OpenCV_VERSION_MINOR : Minor version part of OpenCV_VERSION. Example: "4"
|
||||
# - OpenCV_VERSION_PATCH : Patch version part of OpenCV_VERSION. Example: "0"
|
||||
#
|
||||
# Advanced variables:
|
||||
# - OpenCV_SHARED
|
||||
# - OpenCV_CONFIG_PATH
|
||||
# - OpenCV_LIB_COMPONENTS
|
||||
#
|
||||
# ===================================================================================
|
||||
#
|
||||
# Windows pack specific options:
|
||||
# - OpenCV_STATIC
|
||||
# - OpenCV_CUDA
|
||||
|
||||
if(CMAKE_VERSION VERSION_GREATER 2.6)
|
||||
get_property(OpenCV_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
|
||||
if(NOT ";${OpenCV_LANGUAGES};" MATCHES ";CXX;")
|
||||
enable_language(CXX)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED OpenCV_STATIC)
|
||||
# look for global setting
|
||||
if(NOT DEFINED BUILD_SHARED_LIBS OR BUILD_SHARED_LIBS)
|
||||
set(OpenCV_STATIC OFF)
|
||||
else()
|
||||
set(OpenCV_STATIC ON)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED OpenCV_CUDA)
|
||||
# if user' app uses CUDA, then it probably wants CUDA-enabled OpenCV binaries
|
||||
if(CUDA_FOUND)
|
||||
set(OpenCV_CUDA ON)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
if(CMAKE_CL_64)
|
||||
set(OpenCV_ARCH x64)
|
||||
set(OpenCV_TBB_ARCH intel64)
|
||||
else()
|
||||
set(OpenCV_ARCH x86)
|
||||
set(OpenCV_TBB_ARCH ia32)
|
||||
endif()
|
||||
if(MSVC_VERSION EQUAL 1400)
|
||||
set(OpenCV_RUNTIME vc8)
|
||||
elseif(MSVC_VERSION EQUAL 1500)
|
||||
set(OpenCV_RUNTIME vc9)
|
||||
elseif(MSVC_VERSION EQUAL 1600)
|
||||
set(OpenCV_RUNTIME vc10)
|
||||
elseif(MSVC_VERSION EQUAL 1700)
|
||||
set(OpenCV_RUNTIME vc11)
|
||||
endif()
|
||||
elseif(MINGW)
|
||||
set(OpenCV_RUNTIME mingw)
|
||||
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
|
||||
OUTPUT_VARIABLE OPENCV_GCC_TARGET_MACHINE
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
if(CMAKE_OPENCV_GCC_TARGET_MACHINE MATCHES "64")
|
||||
set(MINGW64 1)
|
||||
set(OpenCV_ARCH x64)
|
||||
else()
|
||||
set(OpenCV_ARCH x86)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(CMAKE_VERSION VERSION_GREATER 2.6.2)
|
||||
unset(OpenCV_CONFIG_PATH CACHE)
|
||||
endif()
|
||||
|
||||
get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH CACHE)
|
||||
if(OpenCV_RUNTIME AND OpenCV_ARCH)
|
||||
if(OpenCV_STATIC AND EXISTS "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib/OpenCVConfig.cmake")
|
||||
if(OpenCV_CUDA AND EXISTS "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib/OpenCVConfig.cmake")
|
||||
set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib")
|
||||
else()
|
||||
set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib")
|
||||
endif()
|
||||
elseif(EXISTS "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib/OpenCVConfig.cmake")
|
||||
if(OpenCV_CUDA AND EXISTS "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib/OpenCVConfig.cmake")
|
||||
set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib")
|
||||
else()
|
||||
set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(OpenCV_LIB_PATH AND EXISTS "${OpenCV_LIB_PATH}/OpenCVConfig.cmake")
|
||||
set(OpenCV_LIB_DIR_OPT "${OpenCV_LIB_PATH}" CACHE PATH "Path where release OpenCV libraries are located" FORCE)
|
||||
set(OpenCV_LIB_DIR_DBG "${OpenCV_LIB_PATH}" CACHE PATH "Path where debug OpenCV libraries are located" FORCE)
|
||||
set(OpenCV_3RDPARTY_LIB_DIR_OPT "${OpenCV_LIB_PATH}" CACHE PATH "Path where release 3rdpaty OpenCV dependencies are located" FORCE)
|
||||
set(OpenCV_3RDPARTY_LIB_DIR_DBG "${OpenCV_LIB_PATH}" CACHE PATH "Path where debug 3rdpaty OpenCV dependencies are located" FORCE)
|
||||
|
||||
include("${OpenCV_LIB_PATH}/OpenCVConfig.cmake")
|
||||
|
||||
if(OpenCV_CUDA)
|
||||
set(_OpenCV_LIBS "")
|
||||
foreach(_lib ${OpenCV_LIBS})
|
||||
string(REPLACE "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}" "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}" _lib2 "${_lib}")
|
||||
if(NOT EXISTS "${_lib}" AND EXISTS "${_lib2}")
|
||||
list(APPEND _OpenCV_LIBS "${_lib2}")
|
||||
else()
|
||||
list(APPEND _OpenCV_LIBS "${_lib}")
|
||||
endif()
|
||||
endforeach()
|
||||
set(OpenCV_LIBS ${_OpenCV_LIBS})
|
||||
endif()
|
||||
set(OpenCV_FOUND TRUE CACHE BOOL "" FORCE)
|
||||
set(OPENCV_FOUND TRUE CACHE BOOL "" FORCE)
|
||||
|
||||
if(NOT OpenCV_FIND_QUIETLY)
|
||||
message(STATUS "Found OpenCV ${OpenCV_VERSION} in ${OpenCV_LIB_PATH}")
|
||||
if(NOT OpenCV_LIB_PATH MATCHES "/staticlib")
|
||||
get_filename_component(_OpenCV_LIB_PATH "${OpenCV_LIB_PATH}/../bin" ABSOLUTE)
|
||||
file(TO_NATIVE_PATH "${_OpenCV_LIB_PATH}" _OpenCV_LIB_PATH)
|
||||
message(STATUS "You might need to add ${_OpenCV_LIB_PATH} to your PATH to be able to run your applications.")
|
||||
if(OpenCV_LIB_PATH MATCHES "/gpu/")
|
||||
string(REPLACE "\\gpu" "" _OpenCV_LIB_PATH2 "${_OpenCV_LIB_PATH}")
|
||||
message(STATUS "GPU support is enabled so you might also need ${_OpenCV_LIB_PATH2} in your PATH (it must go after the ${_OpenCV_LIB_PATH}).")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
if(NOT OpenCV_FIND_QUIETLY)
|
||||
message(WARNING "Found OpenCV 2.4.3 Windows Super Pack but it has not binaries compatible with your configuration.
|
||||
You should manually point CMake variable OpenCV_DIR to your build of OpenCV library.")
|
||||
endif()
|
||||
set(OpenCV_FOUND FALSE CACHE BOOL "" FORCE)
|
||||
set(OPENCV_FOUND FALSE CACHE BOOL "" FORCE)
|
||||
endif()
|
||||
|
@ -124,7 +124,7 @@ if(ANDROID_EXECUTABLE)
|
||||
if(NOT ANDROID_SDK_TARGET)
|
||||
set(ANDROID_SDK_TARGET "" CACHE STRING "Android SDK target for the OpenCV Java API and samples")
|
||||
endif()
|
||||
if(ANDROID_SDK_TARGETS AND CMAKE_VERSION VERSION_GREATER "2.8")
|
||||
if(ANDROID_SDK_TARGETS)
|
||||
set_property( CACHE ANDROID_SDK_TARGET PROPERTY STRINGS ${ANDROID_SDK_TARGETS} )
|
||||
endif()
|
||||
endif(ANDROID_EXECUTABLE)
|
||||
|
@ -1,8 +1,3 @@
|
||||
if(${CMAKE_VERSION} VERSION_LESS "2.8.3")
|
||||
message(STATUS "WITH_CUDA flag requires CMake 2.8.3 or newer. CUDA support is disabled.")
|
||||
return()
|
||||
endif()
|
||||
|
||||
if(WIN32 AND NOT MSVC)
|
||||
message(STATUS "CUDA compilation is disabled (due to only Visual Studio compiler supported on your platform).")
|
||||
return()
|
||||
|
@ -20,10 +20,24 @@ else(APPLE)
|
||||
DOC "OpenCL include directory"
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
if (X86_64)
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib/Win64 lib/x86_64 lib/x64)
|
||||
elseif (X86)
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib/Win32 lib/x86)
|
||||
if(WIN32)
|
||||
if(X86_64)
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib/Win64 lib/x86_64 lib/x64)
|
||||
elseif(X86)
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib/Win32 lib/x86)
|
||||
else()
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib)
|
||||
endif()
|
||||
elseif(UNIX)
|
||||
if(X86_64)
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib64 lib)
|
||||
elseif(X86)
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib32 lib)
|
||||
else()
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib)
|
||||
endif()
|
||||
else()
|
||||
set(OPENCL_POSSIBLE_LIB_SUFFIXES lib)
|
||||
endif()
|
||||
|
||||
find_library(OPENCL_LIBRARY
|
||||
|
@ -15,7 +15,7 @@ endif(WITH_WIN32UI)
|
||||
# --- QT4 ---
|
||||
ocv_clear_vars(HAVE_QT HAVE_QT5)
|
||||
if(WITH_QT)
|
||||
if(NOT CMAKE_VERSION VERSION_LESS 2.8.3 AND NOT WITH_QT EQUAL 4)
|
||||
if(NOT WITH_QT EQUAL 4)
|
||||
find_package(Qt5Core)
|
||||
find_package(Qt5Gui)
|
||||
find_package(Qt5Widgets)
|
||||
@ -65,3 +65,12 @@ if(WITH_OPENGL)
|
||||
endif()
|
||||
endif()
|
||||
endif(WITH_OPENGL)
|
||||
|
||||
# --- Carbon & Cocoa ---
|
||||
if(APPLE)
|
||||
if(WITH_CARBON)
|
||||
set(HAVE_CARBON YES)
|
||||
elif(NOT IOS)
|
||||
set(HAVE_COCOA YES)
|
||||
endif()
|
||||
endif()
|
||||
|
@ -36,56 +36,59 @@ if(WITH_TIFF)
|
||||
ocv_parse_header("${TIFF_INCLUDE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_TIFF AND NOT TIFF_FOUND)
|
||||
ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
|
||||
if(NOT TIFF_FOUND)
|
||||
ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
|
||||
|
||||
set(TIFF_LIBRARY libtiff)
|
||||
set(TIFF_LIBRARIES ${TIFF_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
|
||||
set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
|
||||
ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
|
||||
endif()
|
||||
|
||||
if(TIFF_VERSION_CLASSIC AND NOT TIFF_VERSION)
|
||||
set(TIFF_VERSION ${TIFF_VERSION_CLASSIC})
|
||||
endif()
|
||||
|
||||
if(TIFF_BIGTIFF_VERSION AND NOT TIFF_VERSION_BIG)
|
||||
set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
|
||||
endif()
|
||||
|
||||
if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
|
||||
list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
|
||||
if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
|
||||
file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
|
||||
string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
|
||||
unset(tiff_version_str)
|
||||
set(TIFF_LIBRARY libtiff)
|
||||
set(TIFF_LIBRARIES ${TIFF_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
|
||||
set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
|
||||
ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
|
||||
endif()
|
||||
unset(_TIFF_INCLUDE_DIR)
|
||||
|
||||
if(TIFF_VERSION_CLASSIC AND NOT TIFF_VERSION)
|
||||
set(TIFF_VERSION ${TIFF_VERSION_CLASSIC})
|
||||
endif()
|
||||
|
||||
if(TIFF_BIGTIFF_VERSION AND NOT TIFF_VERSION_BIG)
|
||||
set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
|
||||
endif()
|
||||
|
||||
if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
|
||||
list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
|
||||
if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
|
||||
file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
|
||||
string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
|
||||
unset(tiff_version_str)
|
||||
endif()
|
||||
unset(_TIFF_INCLUDE_DIR)
|
||||
endif()
|
||||
|
||||
set(HAVE_TIFF YES)
|
||||
endif()
|
||||
|
||||
# --- libjpeg (optional) ---
|
||||
if(WITH_JPEG AND NOT IOS)
|
||||
if(WITH_JPEG)
|
||||
if(BUILD_JPEG)
|
||||
ocv_clear_vars(JPEG_FOUND)
|
||||
else()
|
||||
include(FindJPEG)
|
||||
endif()
|
||||
|
||||
if(NOT JPEG_FOUND)
|
||||
ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
|
||||
|
||||
set(JPEG_LIBRARY libjpeg)
|
||||
set(JPEG_LIBRARIES ${JPEG_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
|
||||
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
|
||||
endif()
|
||||
|
||||
ocv_parse_header("${JPEG_INCLUDE_DIR}/jpeglib.h" JPEG_VERSION_LINES JPEG_LIB_VERSION)
|
||||
set(HAVE_JPEG YES)
|
||||
endif()
|
||||
|
||||
if(WITH_JPEG AND NOT JPEG_FOUND)
|
||||
ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
|
||||
|
||||
set(JPEG_LIBRARY libjpeg)
|
||||
set(JPEG_LIBRARIES ${JPEG_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
|
||||
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
|
||||
endif()
|
||||
|
||||
ocv_parse_header("${JPEG_INCLUDE_DIR}/jpeglib.h" JPEG_VERSION_LINES JPEG_LIB_VERSION)
|
||||
|
||||
# --- libwebp (optional) ---
|
||||
|
||||
if(WITH_WEBP)
|
||||
@ -129,19 +132,21 @@ if(WITH_JASPER)
|
||||
else()
|
||||
include(FindJasper)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_JASPER AND NOT JASPER_FOUND)
|
||||
ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
|
||||
if(NOT JASPER_FOUND)
|
||||
ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
|
||||
|
||||
set(JASPER_LIBRARY libjasper)
|
||||
set(JASPER_LIBRARIES ${JASPER_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
|
||||
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
|
||||
endif()
|
||||
set(JASPER_LIBRARY libjasper)
|
||||
set(JASPER_LIBRARIES ${JASPER_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
|
||||
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
|
||||
endif()
|
||||
|
||||
if(NOT JASPER_VERSION_STRING)
|
||||
ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
|
||||
set(HAVE_JASPER YES)
|
||||
|
||||
if(NOT JASPER_VERSION_STRING)
|
||||
ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# --- libpng (optional, should be searched after zlib) ---
|
||||
@ -152,30 +157,30 @@ if(WITH_PNG AND NOT IOS)
|
||||
include(FindPNG)
|
||||
if(PNG_FOUND)
|
||||
include(CheckIncludeFile)
|
||||
check_include_file("${PNG_PNG_INCLUDE_DIR}/png.h" HAVE_PNG_H)
|
||||
check_include_file("${PNG_PNG_INCLUDE_DIR}/libpng/png.h" HAVE_LIBPNG_PNG_H)
|
||||
if(HAVE_PNG_H)
|
||||
ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
|
||||
elseif(HAVE_LIBPNG_PNG_H)
|
||||
if(HAVE_LIBPNG_PNG_H)
|
||||
ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/libpng/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
|
||||
else()
|
||||
ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT PNG_FOUND)
|
||||
ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
|
||||
|
||||
set(PNG_LIBRARY libpng)
|
||||
set(PNG_LIBRARIES ${PNG_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
|
||||
set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
|
||||
set(PNG_DEFINITIONS "")
|
||||
ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
|
||||
endif()
|
||||
|
||||
set(HAVE_PNG YES)
|
||||
set(PNG_VERSION "${PNG_LIBPNG_VER_MAJOR}.${PNG_LIBPNG_VER_MINOR}.${PNG_LIBPNG_VER_RELEASE}")
|
||||
endif()
|
||||
|
||||
if(WITH_PNG AND NOT PNG_FOUND)
|
||||
ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_PNG_H HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
|
||||
|
||||
set(PNG_LIBRARY libpng)
|
||||
set(PNG_LIBRARIES ${PNG_LIBRARY})
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
|
||||
set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
|
||||
set(PNG_DEFINITIONS "")
|
||||
ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
|
||||
endif()
|
||||
|
||||
set(PNG_VERSION "${PNG_LIBPNG_VER_MAJOR}.${PNG_LIBPNG_VER_MINOR}.${PNG_LIBPNG_VER_RELEASE}")
|
||||
|
||||
# --- OpenEXR (optional) ---
|
||||
if(WITH_OPENEXR)
|
||||
if(BUILD_OPENEXR)
|
||||
@ -183,17 +188,14 @@ if(WITH_OPENEXR)
|
||||
else()
|
||||
include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindOpenEXR.cmake")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENEXR AND NOT OPENEXR_FOUND)
|
||||
ocv_clear_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION)
|
||||
if(NOT OPENEXR_FOUND)
|
||||
ocv_clear_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION)
|
||||
|
||||
set(OPENEXR_LIBRARIES IlmImf)
|
||||
set(OPENEXR_ILMIMF_LIBRARY IlmImf)
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/openexr")
|
||||
endif()
|
||||
set(OPENEXR_LIBRARIES IlmImf)
|
||||
set(OPENEXR_ILMIMF_LIBRARY IlmImf)
|
||||
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/openexr")
|
||||
endif()
|
||||
|
||||
#cmake 2.8.2 bug - it fails to determine zlib version
|
||||
if(ZLIB_FOUND)
|
||||
ocv_parse_header2(ZLIB "${ZLIB_INCLUDE_DIR}/zlib.h" ZLIB_VERSION)
|
||||
set(HAVE_OPENEXR YES)
|
||||
endif()
|
||||
|
@ -87,7 +87,14 @@ if(WITH_PVAPI)
|
||||
set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_OPENCV_GCC_VERSION_MAJOR}.${CMAKE_OPENCV_GCC_VERSION_MINOR}")
|
||||
endif()
|
||||
|
||||
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
|
||||
if(WIN32)
|
||||
if(MINGW)
|
||||
set(PVAPI_DEFINITIONS "-DPVDECL=__stdcall")
|
||||
endif(MINGW)
|
||||
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/PvAPI.lib" CACHE PATH "The PvAPI library")
|
||||
else(WIN32)
|
||||
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
|
||||
endif(WIN32)
|
||||
if(EXISTS "${PVAPI_LIBRARY}")
|
||||
set(HAVE_PVAPI TRUE)
|
||||
endif()
|
||||
@ -257,3 +264,13 @@ if(WIN32)
|
||||
list(APPEND HIGHGUI_LIBRARIES winmm)
|
||||
endif()
|
||||
endif(WIN32)
|
||||
|
||||
# --- Apple AV Foundation ---
|
||||
if(WITH_AVFOUNDATION)
|
||||
set(HAVE_AVFOUNDATION YES)
|
||||
endif()
|
||||
|
||||
# --- QuickTime ---
|
||||
if(WITH_QUICKTIME)
|
||||
set(HAVE_QUICKTIME YES)
|
||||
endif()
|
||||
|
@ -1,365 +0,0 @@
|
||||
#
|
||||
# OpenCV note: the file has been extracted from CMake 2.6.2 distribution.
|
||||
# It is used to build OpenCV with CMake 2.4.x
|
||||
#
|
||||
|
||||
# - a pkg-config module for CMake
|
||||
#
|
||||
# Usage:
|
||||
# pkg_check_modules(<PREFIX> [REQUIRED] <MODULE> [<MODULE>]*)
|
||||
# checks for all the given modules
|
||||
#
|
||||
# pkg_search_module(<PREFIX> [REQUIRED] <MODULE> [<MODULE>]*)
|
||||
# checks for given modules and uses the first working one
|
||||
#
|
||||
# When the 'REQUIRED' argument was set, macros will fail with an error
|
||||
# when module(s) could not be found
|
||||
#
|
||||
# It sets the following variables:
|
||||
# PKG_CONFIG_FOUND ... true if pkg-config works on the system
|
||||
# PKG_CONFIG_EXECUTABLE ... pathname of the pkg-config program
|
||||
# <PREFIX>_FOUND ... set to 1 if module(s) exist
|
||||
#
|
||||
# For the following variables two sets of values exist; first one is the
|
||||
# common one and has the given PREFIX. The second set contains flags
|
||||
# which are given out when pkgconfig was called with the '--static'
|
||||
# option.
|
||||
# <XPREFIX>_LIBRARIES ... only the libraries (w/o the '-l')
|
||||
# <XPREFIX>_LIBRARY_DIRS ... the paths of the libraries (w/o the '-L')
|
||||
# <XPREFIX>_LDFLAGS ... all required linker flags
|
||||
# <XPREFIX>_LDFLAGS_OTHER ... all other linker flags
|
||||
# <XPREFIX>_INCLUDE_DIRS ... the '-I' preprocessor flags (w/o the '-I')
|
||||
# <XPREFIX>_CFLAGS ... all required cflags
|
||||
# <XPREFIX>_CFLAGS_OTHER ... the other compiler flags
|
||||
#
|
||||
# <XPREFIX> = <PREFIX> for common case
|
||||
# <XPREFIX> = <PREFIX>_STATIC for static linking
|
||||
#
|
||||
# There are some special variables whose prefix depends on the count
|
||||
# of given modules. When there is only one module, <PREFIX> stays
|
||||
# unchanged. When there are multiple modules, the prefix will be
|
||||
# changed to <PREFIX>_<MODNAME>:
|
||||
# <XPREFIX>_VERSION ... version of the module
|
||||
# <XPREFIX>_PREFIX ... prefix-directory of the module
|
||||
# <XPREFIX>_INCLUDEDIR ... include-dir of the module
|
||||
# <XPREFIX>_LIBDIR ... lib-dir of the module
|
||||
#
|
||||
# <XPREFIX> = <PREFIX> when |MODULES| == 1, else
|
||||
# <XPREFIX> = <PREFIX>_<MODNAME>
|
||||
#
|
||||
# A <MODULE> parameter can have the following formats:
|
||||
# {MODNAME} ... matches any version
|
||||
# {MODNAME}>={VERSION} ... at least version <VERSION> is required
|
||||
# {MODNAME}={VERSION} ... exactly version <VERSION> is required
|
||||
# {MODNAME}<={VERSION} ... modules must not be newer than <VERSION>
|
||||
#
|
||||
# Examples
|
||||
# pkg_check_modules (GLIB2 glib-2.0)
|
||||
#
|
||||
# pkg_check_modules (GLIB2 glib-2.0>=2.10)
|
||||
# requires at least version 2.10 of glib2 and defines e.g.
|
||||
# GLIB2_VERSION=2.10.3
|
||||
#
|
||||
# pkg_check_modules (FOO glib-2.0>=2.10 gtk+-2.0)
|
||||
# requires both glib2 and gtk2, and defines e.g.
|
||||
# FOO_glib-2.0_VERSION=2.10.3
|
||||
# FOO_gtk+-2.0_VERSION=2.8.20
|
||||
#
|
||||
# pkg_check_modules (XRENDER REQUIRED xrender)
|
||||
# defines e.g.:
|
||||
# XRENDER_LIBRARIES=Xrender;X11
|
||||
# XRENDER_STATIC_LIBRARIES=Xrender;X11;pthread;Xau;Xdmcp
|
||||
#
|
||||
# pkg_search_module (BAR libxml-2.0 libxml2 libxml>=2)
|
||||
|
||||
|
||||
# Copyright (C) 2006 Enrico Scholz <enrico.scholz@informatik.tu-chemnitz.de>
|
||||
#
|
||||
# Redistribution and use, with or without modification, are permitted
|
||||
# provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
# 2. The name of the author may not be used to endorse or promote
|
||||
# products derived from this software without specific prior
|
||||
# written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
||||
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
|
||||
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
### Common stuff ####
|
||||
set(PKG_CONFIG_VERSION 1)
|
||||
set(PKG_CONFIG_FOUND 0)
|
||||
|
||||
find_program(PKG_CONFIG_EXECUTABLE NAMES pkg-config DOC "pkg-config executable")
|
||||
mark_as_advanced(PKG_CONFIG_EXECUTABLE)
|
||||
|
||||
if(PKG_CONFIG_EXECUTABLE)
|
||||
set(PKG_CONFIG_FOUND 1)
|
||||
endif(PKG_CONFIG_EXECUTABLE)
|
||||
|
||||
|
||||
# Unsets the given variables
|
||||
macro(_pkgconfig_unset var)
|
||||
set(${var} "" CACHE INTERNAL "")
|
||||
endmacro(_pkgconfig_unset)
|
||||
|
||||
macro(_pkgconfig_set var value)
|
||||
set(${var} ${value} CACHE INTERNAL "")
|
||||
endmacro(_pkgconfig_set)
|
||||
|
||||
# Invokes pkgconfig, cleans up the result and sets variables
|
||||
macro(_pkgconfig_invoke _pkglist _prefix _varname _regexp)
|
||||
set(_pkgconfig_invoke_result)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${PKG_CONFIG_EXECUTABLE} ${ARGN} ${_pkglist}
|
||||
OUTPUT_VARIABLE _pkgconfig_invoke_result
|
||||
RESULT_VARIABLE _pkgconfig_failed)
|
||||
|
||||
if (_pkgconfig_failed)
|
||||
set(_pkgconfig_${_varname} "")
|
||||
_pkgconfig_unset(${_prefix}_${_varname})
|
||||
else(_pkgconfig_failed)
|
||||
string(REGEX REPLACE "[\r\n]" " " _pkgconfig_invoke_result "${_pkgconfig_invoke_result}")
|
||||
string(REGEX REPLACE " +$" "" _pkgconfig_invoke_result "${_pkgconfig_invoke_result}")
|
||||
|
||||
if (NOT ${_regexp} STREQUAL "")
|
||||
string(REGEX REPLACE "${_regexp}" " " _pkgconfig_invoke_result "${_pkgconfig_invoke_result}")
|
||||
endif(NOT ${_regexp} STREQUAL "")
|
||||
|
||||
separate_arguments(_pkgconfig_invoke_result)
|
||||
|
||||
#message(STATUS " ${_varname} ... ${_pkgconfig_invoke_result}")
|
||||
set(_pkgconfig_${_varname} ${_pkgconfig_invoke_result})
|
||||
_pkgconfig_set(${_prefix}_${_varname} "${_pkgconfig_invoke_result}")
|
||||
endif(_pkgconfig_failed)
|
||||
endmacro(_pkgconfig_invoke)
|
||||
|
||||
# Invokes pkgconfig two times; once without '--static' and once with
|
||||
# '--static'
|
||||
macro(_pkgconfig_invoke_dyn _pkglist _prefix _varname cleanup_regexp)
|
||||
_pkgconfig_invoke("${_pkglist}" ${_prefix} ${_varname} "${cleanup_regexp}" ${ARGN})
|
||||
_pkgconfig_invoke("${_pkglist}" ${_prefix} STATIC_${_varname} "${cleanup_regexp}" --static ${ARGN})
|
||||
endmacro(_pkgconfig_invoke_dyn)
|
||||
|
||||
# Splits given arguments into options and a package list
|
||||
macro(_pkgconfig_parse_options _result _is_req)
|
||||
set(${_is_req} 0)
|
||||
|
||||
foreach(_pkg ${ARGN})
|
||||
if (_pkg STREQUAL "REQUIRED")
|
||||
set(${_is_req} 1)
|
||||
endif (_pkg STREQUAL "REQUIRED")
|
||||
endforeach(_pkg ${ARGN})
|
||||
|
||||
set(${_result} ${ARGN})
|
||||
list(REMOVE_ITEM ${_result} "REQUIRED")
|
||||
endmacro(_pkgconfig_parse_options)
|
||||
|
||||
###
|
||||
macro(_pkg_check_modules_internal _is_required _is_silent _prefix)
|
||||
_pkgconfig_unset(${_prefix}_FOUND)
|
||||
_pkgconfig_unset(${_prefix}_VERSION)
|
||||
_pkgconfig_unset(${_prefix}_PREFIX)
|
||||
_pkgconfig_unset(${_prefix}_INCLUDEDIR)
|
||||
_pkgconfig_unset(${_prefix}_LIBDIR)
|
||||
_pkgconfig_unset(${_prefix}_LIBS)
|
||||
_pkgconfig_unset(${_prefix}_LIBS_L)
|
||||
_pkgconfig_unset(${_prefix}_LIBS_PATHS)
|
||||
_pkgconfig_unset(${_prefix}_LIBS_OTHER)
|
||||
_pkgconfig_unset(${_prefix}_CFLAGS)
|
||||
_pkgconfig_unset(${_prefix}_CFLAGS_I)
|
||||
_pkgconfig_unset(${_prefix}_CFLAGS_OTHER)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_LIBDIR)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_LIBS)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_LIBS_L)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_LIBS_PATHS)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_LIBS_OTHER)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_CFLAGS)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_CFLAGS_I)
|
||||
_pkgconfig_unset(${_prefix}_STATIC_CFLAGS_OTHER)
|
||||
|
||||
# create a better addressable variable of the modules and calculate its size
|
||||
set(_pkg_check_modules_list ${ARGN})
|
||||
list(LENGTH _pkg_check_modules_list _pkg_check_modules_cnt)
|
||||
|
||||
if(PKG_CONFIG_EXECUTABLE)
|
||||
# give out status message telling checked module
|
||||
if (NOT ${_is_silent})
|
||||
if (_pkg_check_modules_cnt EQUAL 1)
|
||||
message(STATUS "checking for module '${_pkg_check_modules_list}'")
|
||||
else(_pkg_check_modules_cnt EQUAL 1)
|
||||
message(STATUS "checking for modules '${_pkg_check_modules_list}'")
|
||||
endif(_pkg_check_modules_cnt EQUAL 1)
|
||||
endif(NOT ${_is_silent})
|
||||
|
||||
set(_pkg_check_modules_packages)
|
||||
set(_pkg_check_modules_failed)
|
||||
|
||||
# iterate through module list and check whether they exist and match the required version
|
||||
foreach (_pkg_check_modules_pkg ${_pkg_check_modules_list})
|
||||
set(_pkg_check_modules_exist_query)
|
||||
|
||||
# check whether version is given
|
||||
if (_pkg_check_modules_pkg MATCHES ".*(>=|=|<=).*")
|
||||
string(REGEX REPLACE "(.*[^><])(>=|=|<=)(.*)" "\\1" _pkg_check_modules_pkg_name "${_pkg_check_modules_pkg}")
|
||||
string(REGEX REPLACE "(.*[^><])(>=|=|<=)(.*)" "\\2" _pkg_check_modules_pkg_op "${_pkg_check_modules_pkg}")
|
||||
string(REGEX REPLACE "(.*[^><])(>=|=|<=)(.*)" "\\3" _pkg_check_modules_pkg_ver "${_pkg_check_modules_pkg}")
|
||||
else(_pkg_check_modules_pkg MATCHES ".*(>=|=|<=).*")
|
||||
set(_pkg_check_modules_pkg_name "${_pkg_check_modules_pkg}")
|
||||
set(_pkg_check_modules_pkg_op)
|
||||
set(_pkg_check_modules_pkg_ver)
|
||||
endif(_pkg_check_modules_pkg MATCHES ".*(>=|=|<=).*")
|
||||
|
||||
# handle the operands
|
||||
if (_pkg_check_modules_pkg_op STREQUAL ">=")
|
||||
list(APPEND _pkg_check_modules_exist_query --atleast-version)
|
||||
endif(_pkg_check_modules_pkg_op STREQUAL ">=")
|
||||
|
||||
if (_pkg_check_modules_pkg_op STREQUAL "=")
|
||||
list(APPEND _pkg_check_modules_exist_query --exact-version)
|
||||
endif(_pkg_check_modules_pkg_op STREQUAL "=")
|
||||
|
||||
if (_pkg_check_modules_pkg_op STREQUAL "<=")
|
||||
list(APPEND _pkg_check_modules_exist_query --max-version)
|
||||
endif(_pkg_check_modules_pkg_op STREQUAL "<=")
|
||||
|
||||
# create the final query which is of the format:
|
||||
# * --atleast-version <version> <pkg-name>
|
||||
# * --exact-version <version> <pkg-name>
|
||||
# * --max-version <version> <pkg-name>
|
||||
# * --exists <pkg-name>
|
||||
if (_pkg_check_modules_pkg_op)
|
||||
list(APPEND _pkg_check_modules_exist_query "${_pkg_check_modules_pkg_ver}")
|
||||
else(_pkg_check_modules_pkg_op)
|
||||
list(APPEND _pkg_check_modules_exist_query --exists)
|
||||
endif(_pkg_check_modules_pkg_op)
|
||||
|
||||
_pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_VERSION)
|
||||
_pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_PREFIX)
|
||||
_pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_INCLUDEDIR)
|
||||
_pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_LIBDIR)
|
||||
|
||||
list(APPEND _pkg_check_modules_exist_query "${_pkg_check_modules_pkg_name}")
|
||||
list(APPEND _pkg_check_modules_packages "${_pkg_check_modules_pkg_name}")
|
||||
|
||||
# execute the query
|
||||
execute_process(
|
||||
COMMAND ${PKG_CONFIG_EXECUTABLE} ${_pkg_check_modules_exist_query}
|
||||
RESULT_VARIABLE _pkgconfig_retval)
|
||||
|
||||
# evaluate result and tell failures
|
||||
if (_pkgconfig_retval)
|
||||
if(NOT ${_is_silent})
|
||||
message(STATUS " package '${_pkg_check_modules_pkg}' not found")
|
||||
endif(NOT ${_is_silent})
|
||||
|
||||
set(_pkg_check_modules_failed 1)
|
||||
endif(_pkgconfig_retval)
|
||||
endforeach(_pkg_check_modules_pkg)
|
||||
|
||||
if(_pkg_check_modules_failed)
|
||||
# fail when requested
|
||||
if (${_is_required})
|
||||
message(SEND_ERROR "A required package was not found")
|
||||
endif (${_is_required})
|
||||
else(_pkg_check_modules_failed)
|
||||
# when we are here, we checked whether requested modules
|
||||
# exist. Now, go through them and set variables
|
||||
|
||||
_pkgconfig_set(${_prefix}_FOUND 1)
|
||||
list(LENGTH _pkg_check_modules_packages pkg_count)
|
||||
|
||||
# iterate through all modules again and set individual variables
|
||||
foreach (_pkg_check_modules_pkg ${_pkg_check_modules_packages})
|
||||
# handle case when there is only one package required
|
||||
if (pkg_count EQUAL 1)
|
||||
set(_pkg_check_prefix "${_prefix}")
|
||||
else(pkg_count EQUAL 1)
|
||||
set(_pkg_check_prefix "${_prefix}_${_pkg_check_modules_pkg}")
|
||||
endif(pkg_count EQUAL 1)
|
||||
|
||||
_pkgconfig_invoke(${_pkg_check_modules_pkg} "${_pkg_check_prefix}" VERSION "" --modversion )
|
||||
_pkgconfig_invoke(${_pkg_check_modules_pkg} "${_pkg_check_prefix}" PREFIX "" --variable=prefix )
|
||||
_pkgconfig_invoke(${_pkg_check_modules_pkg} "${_pkg_check_prefix}" INCLUDEDIR "" --variable=includedir )
|
||||
_pkgconfig_invoke(${_pkg_check_modules_pkg} "${_pkg_check_prefix}" LIBDIR "" --variable=libdir )
|
||||
|
||||
message(STATUS " found ${_pkg_check_modules_pkg}, version ${_pkgconfig_VERSION}")
|
||||
endforeach(_pkg_check_modules_pkg)
|
||||
|
||||
# set variables which are combined for multiple modules
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LIBRARIES "(^| )-l" --libs-only-l )
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LIBRARY_DIRS "(^| )-L" --libs-only-L )
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LDFLAGS "" --libs )
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LDFLAGS_OTHER "" --libs-only-other )
|
||||
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" INCLUDE_DIRS "(^| )-I" --cflags-only-I )
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" CFLAGS "" --cflags )
|
||||
_pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" CFLAGS_OTHER "" --cflags-only-other )
|
||||
endif(_pkg_check_modules_failed)
|
||||
else(PKG_CONFIG_EXECUTABLE)
|
||||
if (${_is_required})
|
||||
message(SEND_ERROR "pkg-config tool not found")
|
||||
endif (${_is_required})
|
||||
endif(PKG_CONFIG_EXECUTABLE)
|
||||
endmacro(_pkg_check_modules_internal)
|
||||
|
||||
###
|
||||
### User visible macros start here
|
||||
###
|
||||
|
||||
###
|
||||
macro(pkg_check_modules _prefix _module0)
|
||||
# check cached value
|
||||
if (NOT DEFINED __pkg_config_checked_${_prefix} OR __pkg_config_checked_${_prefix} LESS ${PKG_CONFIG_VERSION} OR NOT ${_prefix}_FOUND)
|
||||
_pkgconfig_parse_options (_pkg_modules _pkg_is_required "${_module0}" ${ARGN})
|
||||
_pkg_check_modules_internal("${_pkg_is_required}" 0 "${_prefix}" ${_pkg_modules})
|
||||
|
||||
_pkgconfig_set(__pkg_config_checked_${_prefix} ${PKG_CONFIG_VERSION})
|
||||
endif(NOT DEFINED __pkg_config_checked_${_prefix} OR __pkg_config_checked_${_prefix} LESS ${PKG_CONFIG_VERSION} OR NOT ${_prefix}_FOUND)
|
||||
endmacro(pkg_check_modules)
|
||||
|
||||
###
|
||||
macro(pkg_search_module _prefix _module0)
|
||||
# check cached value
|
||||
if (NOT DEFINED __pkg_config_checked_${_prefix} OR __pkg_config_checked_${_prefix} LESS ${PKG_CONFIG_VERSION} OR NOT ${_prefix}_FOUND)
|
||||
set(_pkg_modules_found 0)
|
||||
_pkgconfig_parse_options(_pkg_modules_alt _pkg_is_required "${_module0}" ${ARGN})
|
||||
|
||||
message(STATUS "checking for one of the modules '${_pkg_modules_alt}'")
|
||||
|
||||
# iterate through all modules and stop at the first working one.
|
||||
foreach(_pkg_alt ${_pkg_modules_alt})
|
||||
if(NOT _pkg_modules_found)
|
||||
_pkg_check_modules_internal(0 1 "${_prefix}" "${_pkg_alt}")
|
||||
endif(NOT _pkg_modules_found)
|
||||
|
||||
if (${_prefix}_FOUND)
|
||||
set(_pkg_modules_found 1)
|
||||
endif(${_prefix}_FOUND)
|
||||
endforeach(_pkg_alt)
|
||||
|
||||
if (NOT ${_prefix}_FOUND)
|
||||
if(${_pkg_is_required})
|
||||
message(SEND_ERROR "None of the required '${_pkg_modules_alt}' found")
|
||||
endif(${_pkg_is_required})
|
||||
endif(NOT ${_prefix}_FOUND)
|
||||
|
||||
_pkgconfig_set(__pkg_config_checked_${_prefix} ${PKG_CONFIG_VERSION})
|
||||
endif(NOT DEFINED __pkg_config_checked_${_prefix} OR __pkg_config_checked_${_prefix} LESS ${PKG_CONFIG_VERSION} OR NOT ${_prefix}_FOUND)
|
||||
endmacro(pkg_search_module)
|
||||
|
||||
### Local Variables:
|
||||
### mode: cmake
|
||||
### End:
|
@ -84,7 +84,7 @@ macro(ocv_generate_dependencies_map_configcmake suffix configuration)
|
||||
|
||||
set(OPENCV_DEPENDENCIES_MAP_${suffix} "${OPENCV_DEPENDENCIES_MAP_${suffix}}set(OpenCV_${__ocv_lib}_LIBNAME_${suffix} \"${__libname}\")\n")
|
||||
set(OPENCV_DEPENDENCIES_MAP_${suffix} "${OPENCV_DEPENDENCIES_MAP_${suffix}}set(OpenCV_${__ocv_lib}_DEPS_${suffix} ${__mod_deps})\n")
|
||||
set(OPENCV_DEPENDENCIES_MAP_${suffix} "${OPENCV_DEPENDENCIES_MAP_${suffix}}set(OpenCV_${__ocv_lib}_EXTRA_DEPS_${suffix} ${__ext_deps})\n")
|
||||
set(OPENCV_DEPENDENCIES_MAP_${suffix} "${OPENCV_DEPENDENCIES_MAP_${suffix}}set(OpenCV_${__ocv_lib}_EXTRA_DEPS_${suffix} \"${__ext_deps}\")\n")
|
||||
|
||||
list(APPEND OPENCV_PROCESSED_LIBS ${__ocv_lib})
|
||||
list(APPEND OPENCV_LIBS_TO_PROCESS ${${__ocv_lib}_MODULE_DEPS_${suffix}})
|
||||
|
@ -1,13 +1,3 @@
|
||||
# ----------------------------------------------------------------------------
|
||||
# Variables for cvconfig.h.cmake
|
||||
# ----------------------------------------------------------------------------
|
||||
set(PACKAGE "opencv")
|
||||
set(PACKAGE_BUGREPORT "opencvlibrary-devel@lists.sourceforge.net")
|
||||
set(PACKAGE_NAME "opencv")
|
||||
set(PACKAGE_STRING "${PACKAGE} ${OPENCV_VERSION}")
|
||||
set(PACKAGE_TARNAME "${PACKAGE}")
|
||||
set(PACKAGE_VERSION "${OPENCV_VERSION}")
|
||||
|
||||
# platform-specific config file
|
||||
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/cvconfig.h.cmake" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h")
|
||||
|
||||
|
@ -12,7 +12,6 @@ set(prefix "${CMAKE_INSTALL_PREFIX}")
|
||||
set(exec_prefix "\${prefix}")
|
||||
set(libdir "") #TODO: need link paths for OpenCV_EXTRA_COMPONENTS
|
||||
set(includedir "\${prefix}/${OPENCV_INCLUDE_INSTALL_PATH}")
|
||||
set(VERSION ${OPENCV_VERSION})
|
||||
|
||||
if(CMAKE_BUILD_TYPE MATCHES "Release")
|
||||
set(ocv_optkind OPT)
|
||||
|
1
cmake/OpenCVMinDepVersions.cmake
Normal file
@ -0,0 +1 @@
|
||||
set(MIN_VER_CMAKE 2.8.7)
|
@ -511,7 +511,8 @@ macro(ocv_create_module)
|
||||
)
|
||||
endif()
|
||||
|
||||
if(BUILD_SHARED_LIBS)
|
||||
if((NOT DEFINED OPENCV_MODULE_TYPE AND BUILD_SHARED_LIBS)
|
||||
OR (DEFINED OPENCV_MODULE_TYPE AND OPENCV_MODULE_TYPE STREQUAL SHARED))
|
||||
if(MSVC)
|
||||
set_target_properties(${the_module} PROPERTIES DEFINE_SYMBOL CVAPI_EXPORTS)
|
||||
else()
|
||||
|
6
cmake/checks/winrttest.cpp
Normal file
@ -0,0 +1,6 @@
|
||||
#include <wrl/client.h>
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
return 0;
|
||||
}
|
@ -1,20 +1,20 @@
|
||||
/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
|
||||
systems. This function is required for `alloca.c' support on those systems.
|
||||
*/
|
||||
#cmakedefine CRAY_STACKSEG_END
|
||||
/* OpenCV compiled as static or dynamic libs */
|
||||
#cmakedefine BUILD_SHARED_LIBS
|
||||
|
||||
/* Define to 1 if using `alloca.c'. */
|
||||
#cmakedefine C_ALLOCA
|
||||
/* Compile for 'real' NVIDIA GPU architectures */
|
||||
#define CUDA_ARCH_BIN "${OPENCV_CUDA_ARCH_BIN}"
|
||||
|
||||
/* Define to 1 if you have `alloca', as a function or macro. */
|
||||
#cmakedefine HAVE_ALLOCA 1
|
||||
/* Create PTX or BIN for 1.0 compute capability */
|
||||
#cmakedefine CUDA_ARCH_BIN_OR_PTX_10
|
||||
|
||||
/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
|
||||
*/
|
||||
#cmakedefine HAVE_ALLOCA_H 1
|
||||
/* NVIDIA GPU features are used */
|
||||
#define CUDA_ARCH_FEATURES "${OPENCV_CUDA_ARCH_FEATURES}"
|
||||
|
||||
/* Video for Windows support */
|
||||
#cmakedefine HAVE_VFW
|
||||
/* Compile for 'virtual' NVIDIA PTX architectures */
|
||||
#define CUDA_ARCH_PTX "${OPENCV_CUDA_ARCH_PTX}"
|
||||
|
||||
/* AVFoundation video libraries */
|
||||
#cmakedefine HAVE_AVFOUNDATION
|
||||
|
||||
/* V4L capturing support */
|
||||
#cmakedefine HAVE_CAMV4L
|
||||
@ -22,15 +22,33 @@
|
||||
/* V4L2 capturing support */
|
||||
#cmakedefine HAVE_CAMV4L2
|
||||
|
||||
/* V4L2 capturing support in videoio.h */
|
||||
#cmakedefine HAVE_VIDEOIO
|
||||
|
||||
/* V4L/V4L2 capturing support via libv4l */
|
||||
#cmakedefine HAVE_LIBV4L
|
||||
|
||||
/* Carbon windowing environment */
|
||||
#cmakedefine HAVE_CARBON
|
||||
|
||||
/* AMD's Basic Linear Algebra Subprograms Library*/
|
||||
#cmakedefine HAVE_CLAMDBLAS
|
||||
|
||||
/* AMD's OpenCL Fast Fourier Transform Library*/
|
||||
#cmakedefine HAVE_CLAMDFFT
|
||||
|
||||
/* Clp support */
|
||||
#cmakedefine HAVE_CLP
|
||||
|
||||
/* Cocoa API */
|
||||
#cmakedefine HAVE_COCOA
|
||||
|
||||
/* C= */
|
||||
#cmakedefine HAVE_CSTRIPES
|
||||
|
||||
/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/
|
||||
#cmakedefine HAVE_CUBLAS
|
||||
|
||||
/* NVidia Cuda Runtime API*/
|
||||
#cmakedefine HAVE_CUDA
|
||||
|
||||
/* NVidia Cuda Fast Fourier Transform (FFT) API*/
|
||||
#cmakedefine HAVE_CUFFT
|
||||
|
||||
/* IEEE1394 capturing support */
|
||||
#cmakedefine HAVE_DC1394
|
||||
|
||||
@ -40,194 +58,105 @@
|
||||
/* IEEE1394 capturing support - libdc1394 v2.x */
|
||||
#cmakedefine HAVE_DC1394_2
|
||||
|
||||
/* DirectShow Video Capture library */
|
||||
#cmakedefine HAVE_DSHOW
|
||||
|
||||
/* Eigen Matrix & Linear Algebra Library */
|
||||
#cmakedefine HAVE_EIGEN
|
||||
|
||||
/* FFMpeg video library */
|
||||
#cmakedefine HAVE_FFMPEG
|
||||
|
||||
/* ffmpeg's libswscale */
|
||||
#cmakedefine HAVE_FFMPEG_SWSCALE
|
||||
|
||||
/* ffmpeg in Gentoo */
|
||||
#cmakedefine HAVE_GENTOO_FFMPEG
|
||||
|
||||
/* FFMpeg video library */
|
||||
#cmakedefine HAVE_FFMPEG
|
||||
|
||||
/* FFMpeg version flag */
|
||||
#cmakedefine NEW_FFMPEG
|
||||
|
||||
/* ffmpeg's libswscale */
|
||||
#cmakedefine HAVE_FFMPEG_SWSCALE
|
||||
|
||||
/* GStreamer multimedia framework */
|
||||
#cmakedefine HAVE_GSTREAMER
|
||||
#cmakedefine HAVE_GSTREAMER
|
||||
|
||||
/* GTK+ 2.0 Thread support */
|
||||
#cmakedefine HAVE_GTHREAD
|
||||
|
||||
/* Win32 UI */
|
||||
#cmakedefine HAVE_WIN32UI
|
||||
#cmakedefine HAVE_GTHREAD
|
||||
|
||||
/* GTK+ 2.x toolkit */
|
||||
#cmakedefine HAVE_GTK
|
||||
|
||||
/* OpenEXR codec */
|
||||
#cmakedefine HAVE_ILMIMF
|
||||
#cmakedefine HAVE_GTK
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#cmakedefine HAVE_INTTYPES_H 1
|
||||
|
||||
/* JPEG-2000 codec */
|
||||
#cmakedefine HAVE_JASPER
|
||||
|
||||
/* IJG JPEG codec */
|
||||
#cmakedefine HAVE_JPEG
|
||||
|
||||
/* Define to 1 if you have the `dl' library (-ldl). */
|
||||
#cmakedefine HAVE_LIBDL 1
|
||||
|
||||
/* Define to 1 if you have the `gomp' library (-lgomp). */
|
||||
#cmakedefine HAVE_LIBGOMP 1
|
||||
|
||||
/* Define to 1 if you have the `m' library (-lm). */
|
||||
#cmakedefine HAVE_LIBM 1
|
||||
|
||||
/* libpng/png.h needs to be included */
|
||||
#cmakedefine HAVE_LIBPNG_PNG_H
|
||||
|
||||
/* Define to 1 if you have the `pthread' library (-lpthread). */
|
||||
#cmakedefine HAVE_LIBPTHREAD 1
|
||||
|
||||
/* Define to 1 if you have the `lrint' function. */
|
||||
#cmakedefine HAVE_LRINT 1
|
||||
|
||||
/* PNG codec */
|
||||
#cmakedefine HAVE_PNG
|
||||
|
||||
/* Define to 1 if you have the `png_get_valid' function. */
|
||||
#cmakedefine HAVE_PNG_GET_VALID 1
|
||||
|
||||
/* png.h needs to be included */
|
||||
#cmakedefine HAVE_PNG_H
|
||||
|
||||
/* Define to 1 if you have the `png_set_tRNS_to_alpha' function. */
|
||||
#cmakedefine HAVE_PNG_SET_TRNS_TO_ALPHA 1
|
||||
|
||||
/* QuickTime video libraries */
|
||||
#cmakedefine HAVE_QUICKTIME
|
||||
|
||||
/* AVFoundation video libraries */
|
||||
#cmakedefine HAVE_AVFOUNDATION
|
||||
|
||||
/* TIFF codec */
|
||||
#cmakedefine HAVE_TIFF
|
||||
|
||||
/* Unicap video capture library */
|
||||
#cmakedefine HAVE_UNICAP
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#cmakedefine HAVE_UNISTD_H 1
|
||||
|
||||
/* Xine video library */
|
||||
#cmakedefine HAVE_XINE
|
||||
|
||||
/* OpenNI library */
|
||||
#cmakedefine HAVE_OPENNI
|
||||
|
||||
/* LZ77 compression/decompression library (used for PNG) */
|
||||
#cmakedefine HAVE_ZLIB
|
||||
#cmakedefine HAVE_INTTYPES_H 1
|
||||
|
||||
/* Intel Integrated Performance Primitives */
|
||||
#cmakedefine HAVE_IPP
|
||||
#cmakedefine HAVE_IPP
|
||||
|
||||
/* OpenCV compiled as static or dynamic libs */
|
||||
#cmakedefine BUILD_SHARED_LIBS
|
||||
/* JPEG-2000 codec */
|
||||
#cmakedefine HAVE_JASPER
|
||||
|
||||
/* Name of package */
|
||||
#define PACKAGE "${PACKAGE}"
|
||||
/* IJG JPEG codec */
|
||||
#cmakedefine HAVE_JPEG
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}"
|
||||
/* libpng/png.h needs to be included */
|
||||
#cmakedefine HAVE_LIBPNG_PNG_H
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME "${PACKAGE_NAME}"
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING "${PACKAGE_STRING}"
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME "${PACKAGE_TARNAME}"
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION "${PACKAGE_VERSION}"
|
||||
|
||||
/* If using the C implementation of alloca, define if you know the
|
||||
direction of stack growth for your system; otherwise it will be
|
||||
automatically deduced at runtime.
|
||||
STACK_DIRECTION > 0 => grows toward higher addresses
|
||||
STACK_DIRECTION < 0 => grows toward lower addresses
|
||||
STACK_DIRECTION = 0 => direction of growth unknown */
|
||||
#cmakedefine STACK_DIRECTION
|
||||
|
||||
/* Version number of package */
|
||||
#define VERSION "${PACKAGE_VERSION}"
|
||||
|
||||
/* Define to 1 if your processor stores words with the most significant byte
|
||||
first (like Motorola and SPARC, unlike Intel and VAX). */
|
||||
#cmakedefine WORDS_BIGENDIAN
|
||||
|
||||
/* Intel Threading Building Blocks */
|
||||
#cmakedefine HAVE_TBB
|
||||
|
||||
/* C= */
|
||||
#cmakedefine HAVE_CSTRIPES
|
||||
|
||||
/* Eigen Matrix & Linear Algebra Library */
|
||||
#cmakedefine HAVE_EIGEN
|
||||
|
||||
/* NVidia Cuda Runtime API*/
|
||||
#cmakedefine HAVE_CUDA
|
||||
|
||||
/* NVidia Cuda Fast Fourier Transform (FFT) API*/
|
||||
#cmakedefine HAVE_CUFFT
|
||||
|
||||
/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/
|
||||
#cmakedefine HAVE_CUBLAS
|
||||
|
||||
/* NVidia Video Decoding API*/
|
||||
#cmakedefine HAVE_NVCUVID
|
||||
|
||||
/* Compile for 'real' NVIDIA GPU architectures */
|
||||
#define CUDA_ARCH_BIN "${OPENCV_CUDA_ARCH_BIN}"
|
||||
|
||||
/* Compile for 'virtual' NVIDIA PTX architectures */
|
||||
#define CUDA_ARCH_PTX "${OPENCV_CUDA_ARCH_PTX}"
|
||||
|
||||
/* NVIDIA GPU features are used */
|
||||
#define CUDA_ARCH_FEATURES "${OPENCV_CUDA_ARCH_FEATURES}"
|
||||
|
||||
/* Create PTX or BIN for 1.0 compute capability */
|
||||
#cmakedefine CUDA_ARCH_BIN_OR_PTX_10
|
||||
|
||||
/* OpenCL Support */
|
||||
#cmakedefine HAVE_OPENCL
|
||||
|
||||
/* AMD's OpenCL Fast Fourier Transform Library*/
|
||||
#cmakedefine HAVE_CLAMDFFT
|
||||
|
||||
/* AMD's Basic Linear Algebra Subprograms Library*/
|
||||
#cmakedefine HAVE_CLAMDBLAS
|
||||
|
||||
/* DirectShow Video Capture library */
|
||||
#cmakedefine HAVE_DSHOW
|
||||
/* V4L/V4L2 capturing support via libv4l */
|
||||
#cmakedefine HAVE_LIBV4L
|
||||
|
||||
/* Microsoft Media Foundation Capture library */
|
||||
#cmakedefine HAVE_MSMF
|
||||
|
||||
/* XIMEA camera support */
|
||||
#cmakedefine HAVE_XIMEA
|
||||
/* NVidia Video Decoding API*/
|
||||
#cmakedefine HAVE_NVCUVID
|
||||
|
||||
/* OpenCL Support */
|
||||
#cmakedefine HAVE_OPENCL
|
||||
|
||||
/* OpenEXR codec */
|
||||
#cmakedefine HAVE_OPENEXR
|
||||
|
||||
/* OpenGL support*/
|
||||
#cmakedefine HAVE_OPENGL
|
||||
|
||||
/* Clp support */
|
||||
#cmakedefine HAVE_CLP
|
||||
/* OpenNI library */
|
||||
#cmakedefine HAVE_OPENNI
|
||||
|
||||
/* PNG codec */
|
||||
#cmakedefine HAVE_PNG
|
||||
|
||||
/* Qt support */
|
||||
#cmakedefine HAVE_QT
|
||||
|
||||
/* Qt OpenGL support */
|
||||
#cmakedefine HAVE_QT_OPENGL
|
||||
|
||||
/* QuickTime video libraries */
|
||||
#cmakedefine HAVE_QUICKTIME
|
||||
|
||||
/* Intel Threading Building Blocks */
|
||||
#cmakedefine HAVE_TBB
|
||||
|
||||
/* TIFF codec */
|
||||
#cmakedefine HAVE_TIFF
|
||||
|
||||
/* Unicap video capture library */
|
||||
#cmakedefine HAVE_UNICAP
|
||||
|
||||
/* Video for Windows support */
|
||||
#cmakedefine HAVE_VFW
|
||||
|
||||
/* V4L2 capturing support in videoio.h */
|
||||
#cmakedefine HAVE_VIDEOIO
|
||||
|
||||
/* Win32 UI */
|
||||
#cmakedefine HAVE_WIN32UI
|
||||
|
||||
/* Windows Runtime support */
|
||||
#cmakedefine HAVE_WINRT
|
||||
|
||||
/* XIMEA camera support */
|
||||
#cmakedefine HAVE_XIMEA
|
||||
|
||||
/* Xine video library */
|
||||
#cmakedefine HAVE_XINE
|
||||
|
||||
/* Define to 1 if your processor stores words with the most significant byte
|
||||
first (like Motorola and SPARC, unlike Intel and VAX). */
|
||||
#cmakedefine WORDS_BIGENDIAN
|
||||
|
@ -8,6 +8,6 @@ includedir_new=@includedir@
|
||||
|
||||
Name: OpenCV
|
||||
Description: Open Source Computer Vision Library
|
||||
Version: @VERSION@
|
||||
Version: @OPENCV_VERSION@
|
||||
Libs: @OpenCV_LIB_COMPONENTS@
|
||||
Cflags: -I${includedir_old} -I${includedir_new}
|
||||
|
@ -312,9 +312,13 @@ First we set an environment variable to make our work easier. This will hold the
|
||||
|
||||
::
|
||||
|
||||
setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10
|
||||
setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10 (suggested for Visual Studio 2010 - 32 bit Windows)
|
||||
setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc10 (suggested for Visual Studio 2010 - 64 bit Windows)
|
||||
|
||||
Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You may have a different platform (e.g. x64 instead of x86) or compiler type, so substitute the appropriate value. Inside this directory you should have folders like *bin* and *include*. The -m option should be added if you wish to make the setting machine-wide rather than per-user.
|
||||
setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc11 (suggested for Visual Studio 2012 - 32 bit Windows)
|
||||
setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc11 (suggested for Visual Studio 2012 - 64 bit Windows)
|
||||
|
||||
Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You may have a different platform (e.g. x64 instead of x86) or compiler type, so substitute the appropriate value. Inside this directory you should have two folders called *lib* and *bin*. The -m option should be added if you wish to make the setting machine-wide rather than per-user.
|
||||
|
||||
If you built static libraries then you are done. Otherwise, you need to add the *bin* folder's path to the system's **PATH**. This is because you will use the OpenCV library in the form of *\"Dynamic-link libraries\"* (also known as **DLL**). Inside these are stored all the algorithms and information the OpenCV library contains. The operating system will load them only on demand, during runtime. However, to do this it needs to know where they are. The system's **PATH** contains a list of folders where DLLs can be found. Add the OpenCV library path to this and the OS will know where to look whenever it needs the OpenCV binaries. Otherwise, you will need to copy the used DLLs right beside the application's executable file (*exe*) for the OS to find them, which is highly unpleasant if you work on many projects. To do this start up the |PathEditor|_ again and add the following new entry (right click in the application to bring up the menu):
|
||||
|
||||
|
@ -108,6 +108,12 @@ Here is an overview of the abstract Retina interface, allocate one instance with
|
||||
cv::Ptr<Retina> createRetina (Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
|
||||
}} // cv and bioinspired namespaces end
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on retina tone mapping can be found at opencv_source_code/samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp
|
||||
* : An example on retina tone mapping on video input can be found at opencv_source_code/samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp
|
||||
* : A complete example illustrating the retina interface can be found at opencv_source_code/samples/cpp/retinaDemo.cpp
|
||||
|
||||
Description
|
||||
+++++++++++
|
||||
|
||||
|
@ -109,7 +109,16 @@ The functions below use the above model to do the following:
|
||||
|
||||
* Estimate the relative position and orientation of the stereo camera "heads" and compute the *rectification* transformation that makes the camera optical axes parallel.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A calibration sample for 3 cameras in horizontal position can be found at opencv_source_code/samples/cpp/3calibration.cpp
|
||||
* : A calibration sample based on a sequence of images can be found at opencv_source_code/samples/cpp/calibration.cpp
|
||||
* : A calibration sample in order to do 3D reconstruction can be found at opencv_source_code/samples/cpp/build3dmodel.cpp
|
||||
* : A calibration sample of an artificially generated camera and chessboard patterns can be found at opencv_source_code/samples/cpp/calibration_artificial.cpp
|
||||
* : A calibration example on stereo calibration can be found at opencv_source_code/samples/cpp/stereo_calib.cpp
|
||||
* : A calibration example on stereo matching can be found at opencv_source_code/samples/cpp/stereo_match.cpp
|
||||
|
||||
* : PYTHON : A camera calibration sample can be found at opencv_source_code/samples/python2/calibrate.py
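
As a rough illustration of the single-camera case (an editor's sketch, not one of the samples above; the per-view object and image points are assumed to have been collected already, e.g. from detected chessboard corners)::

    #include <opencv2/calib3d.hpp>   // opencv2/calib3d/calib3d.hpp on the 2.4 branch
    #include <vector>

    // objectPoints: 3D corner positions per view; imagePoints: detected 2D corners per view
    cv::Mat calibrateFromCorners(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                                 const std::vector<std::vector<cv::Point2f> >& imagePoints,
                                 cv::Size imageSize)
    {
        cv::Mat cameraMatrix, distCoeffs;
        std::vector<cv::Mat> rvecs, tvecs;   // per-view rotations and translations
        double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                         cameraMatrix, distCoeffs, rvecs, tvecs);
        // rms is the RMS reprojection error; values around one pixel or below usually indicate a good run
        (void)rms;
        return cameraMatrix;
    }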
|
||||
|
||||
calibrateCamera
|
||||
---------------
|
||||
@ -577,7 +586,9 @@ Finds an object pose from 3D-2D point correspondences.
|
||||
|
||||
The function estimates the object pose given a set of object points, their corresponding image projections, as well as the camera matrix and the distortion coefficients.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example of how to use solvePNP for planar augmented reality can be found at opencv_source_code/samples/python2/plane_ar.py
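
A minimal C++ fragment showing the basic call (an illustrative sketch; the point containers and the calibration data are assumed to be filled elsewhere)::

    #include <opencv2/calib3d.hpp>
    #include <vector>

    void estimatePose(const std::vector<cv::Point3f>& objectPoints,  // known 3D model points
                      const std::vector<cv::Point2f>& imagePoints,   // their detected projections
                      const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                      cv::Mat& rvec, cv::Mat& tvec)
    {
        // rvec/tvec bring model points into the camera coordinate frame
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

        cv::Mat R;
        cv::Rodrigues(rvec, R);   // expand to a 3x3 rotation matrix if needed
    }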
|
||||
|
||||
solvePnPRansac
|
||||
------------------
|
||||
@ -879,6 +890,9 @@ Homography matrix is determined up to a scale. Thus, it is normalized so that
|
||||
:ocv:func:`warpPerspective`,
|
||||
:ocv:func:`perspectiveTransform`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on calculating a homography for image matching can be found at opencv_source_code/samples/cpp/video_homography.cpp
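
A short illustrative fragment (editor's sketch; ``RANSAC`` is spelled ``CV_RANSAC`` on older branches, and the matched point vectors are assumed to come from feature matching)::

    #include <opencv2/calib3d.hpp>
    #include <vector>

    cv::Mat mapPoints(const std::vector<cv::Point2f>& srcPoints,   // matched points in image 1
                      const std::vector<cv::Point2f>& dstPoints,   // corresponding points in image 2
                      std::vector<cv::Point2f>& projected)
    {
        cv::Mat H = cv::findHomography(srcPoints, dstPoints, cv::RANSAC, 3.0);
        if (!H.empty())
            cv::perspectiveTransform(srcPoints, projected, H);     // map image-1 points into image 2
        return H;
    }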
|
||||
|
||||
estimateAffine3D
|
||||
--------------------
|
||||
@ -1168,6 +1182,9 @@ StereoBM
|
||||
|
||||
Class for computing stereo correspondence using the block matching algorithm, introduced and contributed to OpenCV by K. Konolige.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : OCL : An example for using the stereoBM matching algorithm can be found at opencv_source_code/samples/ocl/stereo_match.cpp
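
A rough usage sketch (assuming the ``createStereoBM`` factory described below and a rectified 8-bit grayscale pair; the parameter values are illustrative only)::

    #include <opencv2/calib3d.hpp>

    void computeDisparity(const cv::Mat& left, const cv::Mat& right, cv::Mat& disparity)
    {
        // numDisparities must be a multiple of 16; blockSize must be odd
        cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(64, 21);
        bm->compute(left, right, disparity);   // CV_16S disparities, scaled by 16
    }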
|
||||
|
||||
createStereoBM
|
||||
------------------
|
||||
@ -1199,6 +1216,9 @@ The class implements the modified H. Hirschmuller algorithm [HH08]_ that differs
|
||||
|
||||
* Some pre- and post- processing steps from K. Konolige algorithm ``StereoBM`` are included, for example: pre-filtering (``StereoBM::PREFILTER_XSOBEL`` type) and post-filtering (uniqueness check, quadratic interpolation and speckle filtering).
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : PYTHON : An example illustrating the use of the StereoSGBM matching algorithm can be found at opencv_source_code/samples/python2/stereo_match.py
|
||||
|
||||
createStereoSGBM
|
||||
--------------------------
|
||||
|
@ -3,6 +3,12 @@ FaceRecognizer
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the FaceRecognizer class can be found at opencv_source_code/samples/cpp/facerec_demo.cpp
|
||||
|
||||
* : PYTHON : An example using the FaceRecognizer class can be found at opencv_source_code/samples/python2/facerec_demo.py
|
||||
|
||||
FaceRecognizer
|
||||
--------------
|
||||
|
||||
|
@ -9,6 +9,10 @@ FAB-MAP is an approach to appearance-based place recognition. FAB-MAP compares i
|
||||
|
||||
openFABMAP requires training data (e.g. a collection of images from a similar but not identical environment) to construct a visual vocabulary for the visual bag-of-words model, along with a Chow-Liu tree representation of feature likelihood and for use in the Sampled new place method (see below).
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the openFABMAP package can be found at opencv_source_code/samples/cpp/fabmap_sample.cpp
|
||||
|
||||
of2::FabMap
|
||||
--------------------
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
|
||||
#include "opencv2/contrib.hpp"
|
||||
#include "cvconfig.h"
|
||||
|
||||
#if defined(WIN32) || defined(_WIN32)
|
||||
#include <windows.h>
|
||||
@ -16,10 +16,22 @@ namespace cv
|
||||
list.clear();
|
||||
String path_f = path + "/" + exten;
|
||||
#ifdef WIN32
|
||||
WIN32_FIND_DATA FindFileData;
|
||||
HANDLE hFind;
|
||||
#ifdef HAVE_WINRT
|
||||
WIN32_FIND_DATAW FindFileData;
|
||||
#else
|
||||
WIN32_FIND_DATAA FindFileData;
|
||||
#endif
|
||||
HANDLE hFind;
|
||||
|
||||
hFind = FindFirstFile((LPCSTR)path_f.c_str(), &FindFileData);
|
||||
#ifdef HAVE_WINRT
|
||||
size_t size = mbstowcs(NULL, path_f.c_str(), path_f.size());
|
||||
Ptr<wchar_t> wpath = new wchar_t[size+1];
|
||||
wpath[size] = 0;
|
||||
mbstowcs(wpath, path_f.c_str(), path_f.size());
|
||||
hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0);
|
||||
#else
|
||||
hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData);
|
||||
#endif
|
||||
if (hFind == INVALID_HANDLE_VALUE)
|
||||
{
|
||||
return list;
|
||||
@ -34,13 +46,26 @@ namespace cv
|
||||
FindFileData.dwFileAttributes == FILE_ATTRIBUTE_SYSTEM ||
|
||||
FindFileData.dwFileAttributes == FILE_ATTRIBUTE_READONLY)
|
||||
{
|
||||
cv::Ptr<char> fname;
|
||||
#ifdef HAVE_WINRT
|
||||
size_t asize = wcstombs(NULL, FindFileData.cFileName, 0);
|
||||
fname = new char[asize+1];
|
||||
fname[asize] = 0;
|
||||
wcstombs(fname, FindFileData.cFileName, asize);
|
||||
#else
|
||||
fname = FindFileData.cFileName;
|
||||
#endif
|
||||
if (addPath)
|
||||
list.push_back(path + "/" + FindFileData.cFileName);
|
||||
list.push_back(path + "/" + String(fname));
|
||||
else
|
||||
list.push_back(FindFileData.cFileName);
|
||||
list.push_back(String(fname));
|
||||
}
|
||||
}
|
||||
while(FindNextFile(hFind, &FindFileData));
|
||||
#ifdef HAVE_WINRT
|
||||
while(FindNextFileW(hFind, &FindFileData));
|
||||
#else
|
||||
while(FindNextFileA(hFind, &FindFileData));
|
||||
#endif
|
||||
FindClose(hFind);
|
||||
}
|
||||
#else
|
||||
@ -75,10 +100,22 @@ namespace cv
|
||||
String path_f = path + "/" + exten;
|
||||
list.clear();
|
||||
#ifdef WIN32
|
||||
WIN32_FIND_DATA FindFileData;
|
||||
#ifdef HAVE_WINRT
|
||||
WIN32_FIND_DATAW FindFileData;
|
||||
#else
|
||||
WIN32_FIND_DATAA FindFileData;
|
||||
#endif
|
||||
HANDLE hFind;
|
||||
|
||||
hFind = FindFirstFile((LPCSTR)path_f.c_str(), &FindFileData);
|
||||
#ifdef HAVE_WINRT
|
||||
size_t size = mbstowcs(NULL, path_f.c_str(), path_f.size());
|
||||
Ptr<wchar_t> wpath = new wchar_t[size+1];
|
||||
wpath[size] = 0;
|
||||
mbstowcs(wpath, path_f.c_str(), path_f.size());
|
||||
hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0);
|
||||
#else
|
||||
hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData);
|
||||
#endif
|
||||
if (hFind == INVALID_HANDLE_VALUE)
|
||||
{
|
||||
return list;
|
||||
@ -87,17 +124,37 @@ namespace cv
|
||||
{
|
||||
do
|
||||
{
|
||||
#ifdef HAVE_WINRT
|
||||
if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY &&
|
||||
wcscmp(FindFileData.cFileName, L".") != 0 &&
|
||||
wcscmp(FindFileData.cFileName, L"..") != 0)
|
||||
#else
|
||||
if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY &&
|
||||
strcmp(FindFileData.cFileName, ".") != 0 &&
|
||||
strcmp(FindFileData.cFileName, "..") != 0)
|
||||
#endif
|
||||
{
|
||||
cv::Ptr<char> fname;
|
||||
#ifdef HAVE_WINRT
|
||||
size_t asize = wcstombs(NULL, FindFileData.cFileName, 0);
|
||||
fname = new char[asize+1];
|
||||
fname[asize] = 0;
|
||||
wcstombs(fname, FindFileData.cFileName, asize);
|
||||
#else
|
||||
fname = FindFileData.cFileName;
|
||||
#endif
|
||||
|
||||
if (addPath)
|
||||
list.push_back(path + "/" + FindFileData.cFileName);
|
||||
list.push_back(path + "/" + String(fname));
|
||||
else
|
||||
list.push_back(FindFileData.cFileName);
|
||||
list.push_back(String(fname));
|
||||
}
|
||||
}
|
||||
while(FindNextFile(hFind, &FindFileData));
|
||||
#ifdef HAVE_WINRT
|
||||
while(FindNextFileW(hFind, &FindFileData));
|
||||
#else
|
||||
while(FindNextFileA(hFind, &FindFileData));
|
||||
#endif
|
||||
FindClose(hFind);
|
||||
}
|
||||
|
||||
|
@ -2,6 +2,10 @@ set(the_description "The Core Functionality")
|
||||
ocv_add_module(core ${ZLIB_LIBRARIES})
|
||||
ocv_module_include_directories(${ZLIB_INCLUDE_DIR})
|
||||
|
||||
if (HAVE_WINRT)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW /GS /Gm- /AI\"${WINDOWS_SDK_PATH}/References/CommonConfiguration/Neutral\" /AI\"${VISUAL_STUDIO_PATH}/vcpackages\"")
|
||||
endif()
|
||||
|
||||
if(HAVE_CUDA)
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
|
||||
endif()
|
||||
|
@ -884,6 +884,9 @@ Finally, there are STL-style iterators that are smart enough to skip gaps betwee
|
||||
|
||||
The matrix iterators are random-access iterators, so they can be passed to any STL algorithm, including ``std::sort()`` .
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example demonstrating the serial out capabilities of cv::Mat can be found at opencv_source_code/samples/cpp/cout_mat.cpp
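
For instance (a small editor's sketch, not part of the original samples), the iterators of a ``Mat_`` can be handed straight to ``std::sort``::

    #include <algorithm>
    #include <opencv2/core.hpp>   // opencv2/core/core.hpp on the 2.4 branch

    void sortPixels()
    {
        cv::Mat_<uchar> img(10, 10);
        cv::randu(img, cv::Scalar(0), cv::Scalar(255));   // fill with random values
        std::sort(img.begin(), img.end());                // sorts all pixels in row-major order
    }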
|
||||
|
||||
.. _MatrixExpressions:
|
||||
|
||||
|
@ -64,6 +64,12 @@ Basically, you can use only the core of the function, set the number of
|
||||
attempts to 1, initialize labels each time using a custom algorithm, pass them with the
|
||||
( ``flags`` = ``KMEANS_USE_INITIAL_LABELS`` ) flag, and then choose the best (most-compact) clustering.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp
|
||||
|
||||
* : PYTHON : An example on K-means clustering can be found at opencv_source_code/samples/python2/kmeans.py
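
A sketch of that pattern (illustrative only; the random labeling stands in for whatever custom initialization the application provides)::

    #include <cfloat>
    #include <opencv2/core.hpp>

    void bestOfTenRuns(const cv::Mat& points /* N x dims, CV_32F */, int K,
                       cv::Mat& bestLabels, cv::Mat& bestCenters)
    {
        double bestCompactness = DBL_MAX;
        for (int run = 0; run < 10; run++)
        {
            cv::Mat labels(points.rows, 1, CV_32S), centers;
            cv::randu(labels, cv::Scalar::all(0), cv::Scalar::all(K));  // stand-in for a real custom init
            double compactness = cv::kmeans(points, K, labels,
                    cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 1.0),
                    1,                                   // attempts = 1: restarts are driven by this loop
                    cv::KMEANS_USE_INITIAL_LABELS, centers);
            if (compactness < bestCompactness)           // keep the most compact clustering
            {
                bestCompactness = compactness;
                labels.copyTo(bestLabels);
                centers.copyTo(bestCenters);
            }
        }
    }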
|
||||
|
||||
partition
|
||||
-------------
|
||||
Splits an element set into equivalency classes.
|
||||
|
@ -15,4 +15,5 @@ core. The Core Functionality
|
||||
old_xml_yaml_persistence
|
||||
clustering
|
||||
utility_and_system_functions_and_macros
|
||||
opengl_interop
|
||||
|
||||
|
@ -26,6 +26,10 @@ If a drawn figure is partially or completely outside the image, the drawing func
|
||||
|
||||
.. note:: The functions do not support alpha-transparency when the target image is 4-channel. In this case, the ``color[3]`` is simply copied to the repainted pixels. Thus, if you want to paint semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main image.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on using various drawing functions like line, rectangle, ... can be found at opencv_source_code/samples/cpp/drawing.cpp
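
For the semi-transparent case mentioned in the note above, one possible sketch (illustrative values; ``circle`` and ``addWeighted`` live in the core module on this branch)::

    #include <opencv2/core.hpp>   // opencv2/core/core.hpp on the 2.4 branch

    void drawTranslucentCircle(cv::Mat& image /* BGR, CV_8UC3 */)
    {
        cv::Mat overlay = image.clone();                  // paint into a separate buffer
        cv::circle(overlay, cv::Point(100, 100), 40, cv::Scalar(0, 0, 255), -1 /* filled */);

        const double alpha = 0.4;                         // opacity of the painted shape
        cv::addWeighted(overlay, alpha, image, 1.0 - alpha, 0.0, image);
    }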
|
||||
|
||||
circle
|
||||
----------
|
||||
Draws a circle.
|
||||
@ -555,6 +559,12 @@ The function draws contour outlines in the image if
|
||||
waitKey(0);
|
||||
}
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the drawContour functionality can be found at opencv_source_code/samples/cpp/contours2.cpp
|
||||
* : An example using drawContours to clean up a background segmentation result at opencv_source_code/samples/cpp/segment_objects.cpp
|
||||
|
||||
* : PYTHON : An example using the drawContour functionality can be found at opencv_source/samples/python2/contours.py
|
||||
|
||||
|
||||
putText
|
||||
@ -592,4 +602,3 @@ The function ``putText`` renders the specified text string in the image.
|
||||
Symbols that cannot be rendered using the specified font are
|
||||
replaced by question marks. See
|
||||
:ocv:func:`getTextSize` for a text rendering code example.
|
||||
|
||||
|
539
modules/core/doc/opengl_interop.rst
Normal file
@ -0,0 +1,539 @@
|
||||
OpenGL interoperability
|
||||
=======================
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
|
||||
|
||||
General Information
|
||||
-------------------
|
||||
This section describes OpenGL interoperability.
|
||||
|
||||
To enable OpenGL support, configure OpenCV using ``CMake`` with ``WITH_OPENGL=ON`` .
|
||||
Currently OpenGL is supported only with WIN32, GTK and Qt backends on Windows and Linux (MacOS and Android are not supported).
|
||||
For the GTK backend, the ``gtkglext-1.0`` library is required.

To use OpenGL functionality you should first create an OpenGL context (window or frame buffer).
You can do this with the :ocv:func:`namedWindow` function or with another OpenGL toolkit (GLUT, for example).
|
||||
|
||||
|
||||
|
||||
ogl::Buffer
|
||||
-----------
|
||||
Smart pointer for OpenGL buffer object with reference counting.
|
||||
|
||||
.. ocv:class:: ogl::Buffer
|
||||
|
||||
Buffer Objects are OpenGL objects that store an array of unformatted memory allocated by the OpenGL context.
|
||||
These can be used to store vertex data, pixel data retrieved from images or the framebuffer, and a variety of other things.
|
||||
|
||||
``ogl::Buffer`` has an interface similar to the :ocv:class:`Mat` interface and represents 2D array memory.
|
||||
|
||||
``ogl::Buffer`` supports memory transfers between host and device and also can be mapped to CUDA memory.
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::Target
|
||||
-------------------
|
||||
The target defines how you intend to use the buffer object.
|
||||
|
||||
.. ocv:enum:: ogl::Buffer::Target
|
||||
|
||||
.. ocv:emember:: ARRAY_BUFFER
|
||||
|
||||
The buffer will be used as a source for vertex data.
|
||||
|
||||
.. ocv:emember:: ELEMENT_ARRAY_BUFFER
|
||||
|
||||
The buffer will be used for indices (in ``glDrawElements`` or :ocv:func:`ogl::render`, for example).
|
||||
|
||||
.. ocv:emember:: PIXEL_PACK_BUFFER
|
||||
|
||||
The buffer will be used for reading from OpenGL textures.
|
||||
|
||||
.. ocv:emember:: PIXEL_UNPACK_BUFFER
|
||||
|
||||
The buffer will be used for writing to OpenGL textures.
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::Buffer
|
||||
-------------------
|
||||
The constructors.
|
||||
|
||||
.. ocv:function:: ogl::Buffer::Buffer()
|
||||
|
||||
.. ocv:function:: ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Buffer::Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Buffer::Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Buffer::Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false)
|
||||
|
||||
:param arows: Number of rows in a 2D array.
|
||||
|
||||
:param acols: Number of columns in a 2D array.
|
||||
|
||||
:param asize: 2D array size.
|
||||
|
||||
:param atype: Array type ( ``CV_8UC1, ..., CV_64FC4`` ). See :ocv:class:`Mat` for details.
|
||||
|
||||
:param abufId: Buffer object name.
|
||||
|
||||
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or ``std::vector`` ).
|
||||
|
||||
:param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
|
||||
|
||||
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
Creates an empty ``ogl::Buffer`` object, creates an ``ogl::Buffer`` object from an existing buffer ( ``abufId`` parameter),
allocates memory for an ``ogl::Buffer`` object, or copies from host/device memory.
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::create
|
||||
-------------------
|
||||
Allocates memory for ``ogl::Buffer`` object.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
|
||||
|
||||
:param arows: Number of rows in a 2D array.
|
||||
|
||||
:param acols: Number of columns in a 2D array.
|
||||
|
||||
:param asize: 2D array size.
|
||||
|
||||
:param atype: Array type ( ``CV_8UC1, ..., CV_64FC4`` ). See :ocv:class:`Mat` for details.
|
||||
|
||||
:param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
|
||||
|
||||
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::release
|
||||
--------------------
|
||||
Decrements the reference counter and destroys the buffer object if needed.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::release()
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::setAutoRelease
|
||||
---------------------------
|
||||
Sets auto release mode.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::setAutoRelease(bool flag)
|
||||
|
||||
:param flag: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
The lifetime of the OpenGL object is tied to the lifetime of the context.
If the OpenGL context was bound to a window, it can be released at any time (the user can close the window).
If the object's destructor is called after the context has been destroyed, it will cause an error.
Thus ``ogl::Buffer`` doesn't destroy the OpenGL object in its destructor by default (all OpenGL resources are released together with the OpenGL context).
This function can force the ``ogl::Buffer`` destructor to destroy the OpenGL object.
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::copyFrom
|
||||
---------------------
|
||||
Copies from host/device memory to OpenGL buffer.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false)
|
||||
|
||||
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or ``std::vector`` ).
|
||||
|
||||
:param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
|
||||
|
||||
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::copyTo
|
||||
-------------------
|
||||
Copies from OpenGL buffer to host/device memory or another OpenGL buffer object.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::copyTo(OutputArray arr) const
|
||||
|
||||
:param arr: Destination array (host or device memory, can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` , ``std::vector`` or ``ogl::Buffer`` ).
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::clone
|
||||
------------------
|
||||
Creates a full copy of the buffer object and the underlying data.
|
||||
|
||||
.. ocv:function:: Buffer ogl::Buffer::clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const
|
||||
|
||||
:param target: Buffer usage for destination buffer.
|
||||
|
||||
:param autoRelease: Auto release mode for destination buffer.
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::bind
|
||||
-----------------
|
||||
Binds OpenGL buffer to the specified buffer binding point.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::bind(Target target) const
|
||||
|
||||
:param target: Binding point. See :ocv:enum:`ogl::Buffer::Target` .
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::unbind
|
||||
-------------------
|
||||
Unbinds any buffers from the specified binding point.
|
||||
|
||||
.. ocv:function:: static void ogl::Buffer::unbind(Target target)
|
||||
|
||||
:param target: Binding point. See :ocv:enum:`ogl::Buffer::Target` .
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::mapHost
|
||||
--------------------
|
||||
Maps OpenGL buffer to host memory.
|
||||
|
||||
.. ocv:function:: Mat ogl::Buffer::mapHost(Access access)
|
||||
|
||||
:param access: Access policy, indicating whether it will be possible to read from, write to, or both read from and write to the buffer object's mapped data store. The symbolic constant must be ``ogl::Buffer::READ_ONLY`` , ``ogl::Buffer::WRITE_ONLY`` or ``ogl::Buffer::READ_WRITE`` .
|
||||
|
||||
``mapHost`` maps the entire data store of the buffer object into the client's address space.
The data can then be directly read and/or written relative to the returned pointer, depending on the specified ``access`` policy.
|
||||
|
||||
A mapped data store must be unmapped with :ocv:func:`ogl::Buffer::unmapHost` before its buffer object is used.
|
||||
|
||||
This operation can lead to memory transfers between host and device.
|
||||
|
||||
Only one buffer object can be mapped at a time.
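
A minimal host-mapping sketch (an editor's illustration; it assumes OpenCV was built with ``WITH_OPENGL=ON``, that an OpenGL context already exists, e.g. a window created with ``namedWindow("win", WINDOW_OPENGL)``, and that the ``ogl`` classes are declared in the header named below)::

    #include <opencv2/core/opengl.hpp>   // assumed header for the ogl:: classes on this branch

    void fillGreen()
    {
        cv::ogl::Buffer buf(480, 640, CV_8UC3, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER);

        cv::Mat mapped = buf.mapHost(cv::ogl::Buffer::WRITE_ONLY);  // host-visible view of the data store
        mapped.setTo(cv::Scalar(0, 255, 0));                        // write directly into the buffer
        buf.unmapHost();                                            // unmap before the buffer is used again
    }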
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::unmapHost
|
||||
----------------------
|
||||
Unmaps OpenGL buffer.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::unmapHost()
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::mapDevice
|
||||
----------------------
|
||||
Maps OpenGL buffer to CUDA device memory.
|
||||
|
||||
.. ocv:function:: gpu::GpuMat ogl::Buffer::mapDevice()
|
||||
|
||||
This operation doesn't copy data.
|
||||
Several buffer objects can be mapped to CUDA memory at a time.
|
||||
|
||||
A mapped data store must be unmapped with :ocv:func:`ogl::Buffer::unmapDevice` before its buffer object is used.
|
||||
|
||||
|
||||
|
||||
ogl::Buffer::unmapDevice
|
||||
------------------------
|
||||
Unmaps OpenGL buffer.
|
||||
|
||||
.. ocv:function:: void ogl::Buffer::unmapDevice()
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D
|
||||
--------------
|
||||
Smart pointer for OpenGL 2D texture memory with reference counting.
|
||||
|
||||
.. ocv:class:: ogl::Texture2D
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::Format
|
||||
----------------------
|
||||
An Image Format describes the way that the images in Textures store their data.
|
||||
|
||||
.. ocv:enum:: ogl::Texture2D::Format
|
||||
|
||||
.. ocv:emember:: NONE
|
||||
.. ocv:emember:: DEPTH_COMPONENT
|
||||
.. ocv:emember:: RGB
|
||||
.. ocv:emember:: RGBA
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::Texture2D
|
||||
-------------------------
|
||||
The constructors.
|
||||
|
||||
.. ocv:function:: ogl::Texture2D::Texture2D()
|
||||
|
||||
.. ocv:function:: ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease = false)
|
||||
|
||||
:param arows: Number of rows.
|
||||
|
||||
:param acols: Number of columns.
|
||||
|
||||
:param asize: 2D array size.
|
||||
|
||||
:param aformat: Image format. See :ocv:enum:`ogl::Texture2D::Format` .
|
||||
|
||||
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or :ocv:class:`ogl::Buffer` ).
|
||||
|
||||
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
Creates an empty ``ogl::Texture2D`` object, allocates memory for an ``ogl::Texture2D`` object, or copies from host/device memory.
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::create
|
||||
----------------------
|
||||
Allocates memory for ``ogl::Texture2D`` object.
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease = false)
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::create(Size asize, Format aformat, bool autoRelease = false)
|
||||
|
||||
:param arows: Number of rows.
|
||||
|
||||
:param acols: Number of columns.
|
||||
|
||||
:param asize: 2D array size.
|
||||
|
||||
:param aformat: Image format. See :ocv:enum:`ogl::Texture2D::Format` .
|
||||
|
||||
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::release
|
||||
-----------------------
|
||||
Decrements the reference counter and destroys the texture object if needed.
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::release()
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::setAutoRelease
|
||||
------------------------------
|
||||
Sets auto release mode.
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::setAutoRelease(bool flag)
|
||||
|
||||
:param flag: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
The lifetime of the OpenGL object is tied to the lifetime of the context.
If the OpenGL context was bound to a window, it can be released at any time (the user can close the window).
If the object's destructor is called after the context has been destroyed, it will cause an error.
Thus ``ogl::Texture2D`` doesn't destroy the OpenGL object in its destructor by default (all OpenGL resources are released together with the OpenGL context).
This function can force the ``ogl::Texture2D`` destructor to destroy the OpenGL object.
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::copyFrom
|
||||
------------------------
|
||||
Copies from host/device memory to OpenGL texture.
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease = false)
|
||||
|
||||
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or :ocv:class:`ogl::Buffer` ).
|
||||
|
||||
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::copyTo
|
||||
----------------------
|
||||
Copies from OpenGL texture to host/device memory or another OpenGL texture object.
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const
|
||||
|
||||
:param arr: Destination array (host or device memory, can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` , :ocv:class:`ogl::Buffer` or ``ogl::Texture2D`` ).
|
||||
|
||||
:param ddepth: Destination depth.
|
||||
|
||||
:param autoRelease: Auto release mode for destination buffer (if ``arr`` is OpenGL buffer or texture).
|
||||
|
||||
|
||||
|
||||
ogl::Texture2D::bind
|
||||
--------------------
|
||||
Binds texture to current active texture unit for ``GL_TEXTURE_2D`` target.
|
||||
|
||||
.. ocv:function:: void ogl::Texture2D::bind() const
|
||||
|
||||
|
||||
|
||||
ogl::Arrays
|
||||
-----------
|
||||
Wrapper for OpenGL Client-Side Vertex arrays.
|
||||
|
||||
.. ocv:class:: ogl::Arrays
|
||||
|
||||
``ogl::Arrays`` stores vertex data in :ocv:class:`ogl::Buffer` objects.
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::setVertexArray
|
||||
---------------------------
|
||||
Sets an array of vertex coordinates.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::setVertexArray(InputArray vertex)
|
||||
|
||||
:param vertex: array with vertex coordinates, can be both host and device memory.
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::resetVertexArray
|
||||
-----------------------------
|
||||
Resets vertex coordinates.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::resetVertexArray()
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::setColorArray
|
||||
--------------------------
|
||||
Sets an array of vertex colors.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::setColorArray(InputArray color)
|
||||
|
||||
:param color: array with vertex colors, can be both host and device memory.
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::resetColorArray
|
||||
----------------------------
|
||||
Resets vertex colors.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::resetColorArray()
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::setNormalArray
|
||||
---------------------------
|
||||
Sets an array of vertex normals.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::setNormalArray(InputArray normal)
|
||||
|
||||
:param normal: array with vertex normals, can be both host and device memory.
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::resetNormalArray
|
||||
-----------------------------
|
||||
Resets vertex normals.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::resetNormalArray()
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::setTexCoordArray
|
||||
-----------------------------
|
||||
Sets an array of vertex texture coordinates.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::setTexCoordArray(InputArray texCoord)
|
||||
|
||||
:param texCoord: array with vertex texture coordinates, can be both host and device memory.
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::resetTexCoordArray
|
||||
-------------------------------
|
||||
Resets vertex texture coordinates.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::resetTexCoordArray()
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::release
|
||||
--------------------
|
||||
Releases all inner buffers.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::release()
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::setAutoRelease
|
||||
---------------------------
|
||||
Sets auto release mode for all inner buffers.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::setAutoRelease(bool flag)
|
||||
|
||||
:param flag: Auto release mode.
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::bind
|
||||
-----------------
|
||||
Binds all vertex arrays.
|
||||
|
||||
.. ocv:function:: void ogl::Arrays::bind() const
|
||||
|
||||
|
||||
|
||||
ogl::Arrays::size
|
||||
-----------------
|
||||
Returns the vertex count.
|
||||
|
||||
.. ocv:function:: int ogl::Arrays::size() const
|
||||
|
||||
|
||||
|
||||
ogl::render
|
||||
-----------
|
||||
Renders OpenGL texture or primitives.
|
||||
|
||||
.. ocv:function:: void ogl::render(const Texture2D& tex, Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0), Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0))
|
||||
|
||||
.. ocv:function:: void ogl::render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255))
|
||||
|
||||
.. ocv:function:: void ogl::render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255))
|
||||
|
||||
:param tex: Texture to draw.
|
||||
|
||||
:param wndRect: Region of window, where to draw a texture (normalized coordinates).
|
||||
|
||||
:param texRect: Region of texture to draw (normalized coordinates).
|
||||
|
||||
:param arr: Array of primitive vertices.

:param indices: Array of vertex indices (host or device memory).
|
||||
|
||||
:param mode: Render mode. Available options:
|
||||
|
||||
* **POINTS**
|
||||
* **LINES**
|
||||
* **LINE_LOOP**
|
||||
* **LINE_STRIP**
|
||||
* **TRIANGLES**
|
||||
* **TRIANGLE_STRIP**
|
||||
* **TRIANGLE_FAN**
|
||||
* **QUADS**
|
||||
* **QUAD_STRIP**
|
||||
* **POLYGON**
|
||||
|
||||
:param color: Color for all vertices. Will be used if ``arr`` doesn't contain color array.
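
A tiny illustrative call (editor's sketch; it assumes an OpenGL-enabled window is current, typically inside the draw callback registered with ``setOpenGlDrawCallback``, and the same assumed header as above)::

    #include <opencv2/core/opengl.hpp>   // assumed header name

    void showFrame(const cv::Mat& frame)
    {
        cv::ogl::Texture2D tex(frame);   // upload the image to an OpenGL texture
        cv::ogl::render(tex);            // default rectangles cover the whole window and texture
    }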
|
||||
|
||||
|
||||
|
||||
gpu::setGlDevice
|
||||
----------------
|
||||
Sets a CUDA device and initializes it for the current thread with OpenGL interoperability.
|
||||
|
||||
.. ocv:function:: void gpu::setGlDevice( int device = 0 )
|
||||
|
||||
:param device: System index of a GPU device starting with 0.
|
||||
|
||||
This function should be explicitly called after OpenGL context creation and before any CUDA calls.
|
@ -512,7 +512,7 @@ Performs the per-element comparison of two arrays or an array and scalar value.
|
||||
|
||||
:param value: scalar value.
|
||||
|
||||
:param dst: output array that has the same size as the input arrays and type= ``CV_8UC1`` .
|
||||
:param dst: output array that has the same size and type as the input arrays.
|
||||
|
||||
:param cmpop: a flag, that specifies correspondence between the arrays:
|
||||
|
||||
@ -971,6 +971,12 @@ All of the above improvements have been implemented in :ocv:func:`matchTemplate`
|
||||
|
||||
.. seealso:: :ocv:func:`dct` , :ocv:func:`getOptimalDFTSize` , :ocv:func:`mulSpectrums`, :ocv:func:`filter2D` , :ocv:func:`matchTemplate` , :ocv:func:`flip` , :ocv:func:`cartToPolar` , :ocv:func:`magnitude` , :ocv:func:`phase`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the discrete fourier transform can be found at opencv_source_code/samples/cpp/dft.cpp
|
||||
|
||||
* : PYTHON : An example using the dft functionality to perform Wiener deconvolution can be found at opencv_source/samples/python2/deconvolution.py
|
||||
* : PYTHON : An example rearranging the quadrants of a Fourier image can be found at opencv_source/samples/python2/dft.py
|
||||
|
||||
|
||||
divide
|
||||
@ -2161,7 +2167,9 @@ The sample below is the function that takes two matrices. The first function sto
|
||||
:ocv:func:`dft`,
|
||||
:ocv:func:`dct`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using PCA for dimensionality reduction while maintaining an amount of variance can be found at opencv_source_code/samples/cpp/pca.cpp
|
||||
|
||||
PCA::PCA
|
||||
--------
|
||||
|
@ -91,6 +91,10 @@ Several things can be noted by looking at the sample code and the output:
|
||||
*
|
||||
In YAML (but not XML), mappings and sequences can be written in a compact Python-like inline form. In the sample above, matrix elements, as well as each feature, including its lbp value, are stored in such inline form. To store a mapping/sequence in a compact form, put ":" after the opening character, e.g. use **"{:"** instead of **"{"** and **"[:"** instead of **"["**. When the data is written to XML, those extra ":" are ignored.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A complete example using the FileStorage interface can be found at opencv_source_code/samples/cpp/filestorage.cpp
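
A small sketch of the compact notation (the field names are illustrative only)::

    #include <opencv2/core.hpp>

    void writeCompact()
    {
        cv::FileStorage fs("test.yml", cv::FileStorage::WRITE);

        fs << "features" << "[";                     // regular sequence
        for (int i = 0; i < 3; i++)
        {
            fs << "{:" << "x" << i << "y" << i * 2;  // "{:" starts a compact (inline) mapping
            fs << "lbp" << "[:";                     // "[:" starts a compact (inline) sequence
            for (int j = 0; j < 8; j++)
                fs << ((i + j) % 2);
            fs << "]" << "}";
        }
        fs << "]";
        fs.release();
    }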
|
||||
|
||||
|
||||
Reading data from a file storage.
|
||||
---------------------------------
|
||||
|
@ -149,7 +149,12 @@
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __ARM_NEON__
|
||||
#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
|
||||
# include <Intrin.h>
|
||||
# include "arm_neon.h"
|
||||
# define CV_NEON 1
|
||||
# define CPU_HAS_NEON_FEATURE (true)
|
||||
#elif defined(__ARM_NEON__)
|
||||
# include <arm_neon.h>
|
||||
# define CV_NEON 1
|
||||
#endif
|
||||
@ -364,7 +369,7 @@ CV_INLINE int cvRound( double value )
|
||||
return t;
|
||||
#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION
|
||||
TEGRA_ROUND(value);
|
||||
#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__
|
||||
#elif defined CV_ICC || defined __GNUC__
|
||||
# ifdef HAVE_TEGRA_OPTIMIZATION
|
||||
TEGRA_ROUND(value);
|
||||
# else
|
||||
|
@ -94,9 +94,20 @@ void fastFree(void* ptr)
|
||||
#define STAT(stmt)
|
||||
|
||||
#ifdef WIN32
|
||||
#if (_WIN32_WINNT >= 0x0602)
|
||||
#include <synchapi.h>
|
||||
#endif
|
||||
|
||||
struct CriticalSection
|
||||
{
|
||||
CriticalSection() { InitializeCriticalSection(&cs); }
|
||||
CriticalSection()
|
||||
{
|
||||
#if (_WIN32_WINNT >= 0x0600)
|
||||
InitializeCriticalSectionEx(&cs, 1000, 0);
|
||||
#else
|
||||
InitializeCriticalSection(&cs);
|
||||
#endif
|
||||
}
|
||||
~CriticalSection() { DeleteCriticalSection(&cs); }
|
||||
void lock() { EnterCriticalSection(&cs); }
|
||||
void unlock() { LeaveCriticalSection(&cs); }
|
||||
|
@ -886,12 +886,14 @@ void ellipse2Poly( Point center, Size axes, int angle,
|
||||
Point pt;
|
||||
pt.x = cvRound( cx + x * alpha - y * beta );
|
||||
pt.y = cvRound( cy + x * beta + y * alpha );
|
||||
if( pt != prevPt )
|
||||
if( pt != prevPt ){
|
||||
pts.push_back(pt);
|
||||
prevPt = pt;
|
||||
}
|
||||
}
|
||||
|
||||
// If there are no points, it's a zero-size polygon
|
||||
if( pts.size() < 2) {
|
||||
if( pts.size() == 1) {
|
||||
pts.assign(2,center);
|
||||
}
|
||||
}
|
||||
|
@ -56,16 +56,39 @@ namespace
|
||||
|
||||
struct DIR
|
||||
{
|
||||
#ifdef HAVE_WINRT
|
||||
WIN32_FIND_DATAW data;
|
||||
#else
|
||||
WIN32_FIND_DATA data;
|
||||
#endif
|
||||
HANDLE handle;
|
||||
dirent ent;
|
||||
#ifdef HAVE_WINRT
|
||||
DIR() {};
|
||||
~DIR()
|
||||
{
|
||||
if (ent.d_name)
|
||||
delete[] ent.d_name;
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
DIR* opendir(const char* path)
|
||||
{
|
||||
DIR* dir = new DIR;
|
||||
dir->ent.d_name = 0;
|
||||
dir->handle = ::FindFirstFileA((cv::String(path) + "\\*").c_str(), &dir->data);
|
||||
#ifdef HAVE_WINRT
|
||||
cv::String full_path = cv::String(path) + "\\*";
|
||||
size_t size = mbstowcs(NULL, full_path.c_str(), full_path.size());
|
||||
cv::Ptr<wchar_t> wfull_path = new wchar_t[size+1];
|
||||
wfull_path[size] = 0;
|
||||
mbstowcs(wfull_path, full_path.c_str(), full_path.size());
|
||||
dir->handle = ::FindFirstFileExW(wfull_path, FindExInfoStandard,
|
||||
&dir->data, FindExSearchNameMatch, NULL, 0);
|
||||
#else
|
||||
dir->handle = ::FindFirstFileExA((cv::String(path) + "\\*").c_str(),
|
||||
FindExInfoStandard, &dir->data, FindExSearchNameMatch, NULL, 0);
|
||||
#endif
|
||||
if(dir->handle == INVALID_HANDLE_VALUE)
|
||||
{
|
||||
/*closedir will do all cleanup*/
|
||||
@ -76,12 +99,25 @@ namespace
|
||||
|
||||
dirent* readdir(DIR* dir)
|
||||
{
|
||||
#ifdef HAVE_WINRT
|
||||
if (dir->ent.d_name != 0)
|
||||
{
|
||||
if (::FindNextFile(dir->handle, &dir->data) != TRUE)
|
||||
if (::FindNextFileW(dir->handle, &dir->data) != TRUE)
|
||||
return 0;
|
||||
}
|
||||
size_t asize = wcstombs(NULL, dir->data.cFileName, 0);
|
||||
char* aname = new char[asize+1];
|
||||
aname[asize] = 0;
|
||||
wcstombs(aname, dir->data.cFileName, asize);
|
||||
dir->ent.d_name = aname;
|
||||
#else
|
||||
if (dir->ent.d_name != 0)
|
||||
{
|
||||
if (::FindNextFileA(dir->handle, &dir->data) != TRUE)
|
||||
return 0;
|
||||
}
|
||||
dir->ent.d_name = dir->data.cFileName;
|
||||
#endif
|
||||
return &dir->ent;
|
||||
}
|
||||
|
||||
@ -107,7 +143,19 @@ static bool isDir(const cv::String& path, DIR* dir)
|
||||
if (dir)
|
||||
attributes = dir->data.dwFileAttributes;
|
||||
else
|
||||
attributes = ::GetFileAttributes(path.c_str());
|
||||
{
|
||||
WIN32_FILE_ATTRIBUTE_DATA all_attrs;
|
||||
#ifdef HAVE_WINRT
|
||||
size_t size = mbstowcs(NULL, path.c_str(), path.size());
|
||||
cv::Ptr<wchar_t> wpath = new wchar_t[size+1];
|
||||
wpath[size] = 0;
|
||||
mbstowcs(wpath, path.c_str(), path.size());
|
||||
::GetFileAttributesExW(wpath, GetFileExInfoStandard, &all_attrs);
|
||||
#else
|
||||
::GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &all_attrs);
|
||||
#endif
|
||||
attributes = all_attrs.dwFileAttributes;
|
||||
}
|
||||
|
||||
return (attributes != INVALID_FILE_ATTRIBUTES) && ((attributes & FILE_ATTRIBUTE_DIRECTORY) != 0);
|
||||
#else
|
||||
@ -241,4 +289,4 @@ void cv::glob(String pattern, std::vector<String>& result, bool recursive)
|
||||
|
||||
glob_rec(path, wildchart, result, recursive);
|
||||
std::sort(result.begin(), result.end());
|
||||
}
|
||||
}
|
||||
|
@ -453,7 +453,11 @@ int cv::getNumberOfCPUs(void)
|
||||
{
|
||||
#if defined WIN32 || defined _WIN32
|
||||
SYSTEM_INFO sysinfo;
|
||||
#if defined(_M_ARM) || defined(_M_X64) || defined(HAVE_WINRT)
|
||||
GetNativeSystemInfo( &sysinfo );
|
||||
#else
|
||||
GetSystemInfo( &sysinfo );
|
||||
#endif
|
||||
|
||||
return (int)sysinfo.dwNumberOfProcessors;
|
||||
#elif defined ANDROID
|
||||
|
@ -58,7 +58,6 @@
|
||||
#endif
|
||||
|
||||
#if USE_ZLIB
|
||||
# undef HAVE_UNISTD_H //to avoid redefinition
|
||||
# ifndef _LFS64_LARGEFILE
|
||||
# define _LFS64_LARGEFILE 0
|
||||
# endif
|
||||
|
@ -728,33 +728,54 @@ void RNG::fill( InputOutputArray _mat, int disttype,
|
||||
}
|
||||
|
||||
#ifdef WIN32
|
||||
|
||||
|
||||
#ifdef HAVE_WINRT
|
||||
// using C++11 thread attribute for local thread data
|
||||
__declspec( thread ) RNG* rng = NULL;
|
||||
|
||||
void deleteThreadRNGData()
|
||||
{
|
||||
if (rng)
|
||||
delete rng;
|
||||
}
|
||||
|
||||
RNG& theRNG()
|
||||
{
|
||||
if (!rng)
|
||||
{
|
||||
rng = new RNG;
|
||||
}
|
||||
return *rng;
|
||||
}
|
||||
#else
|
||||
#ifdef WINCE
|
||||
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
|
||||
#endif
|
||||
static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES;
|
||||
|
||||
void deleteThreadRNGData()
|
||||
{
|
||||
if( tlsRNGKey != TLS_OUT_OF_INDEXES )
|
||||
delete (RNG*)TlsGetValue( tlsRNGKey );
|
||||
void deleteThreadRNGData()
|
||||
{
|
||||
if( tlsRNGKey != TLS_OUT_OF_INDEXES )
|
||||
delete (RNG*)TlsGetValue( tlsRNGKey );
|
||||
}
|
||||
|
||||
RNG& theRNG()
|
||||
{
|
||||
if( tlsRNGKey == TLS_OUT_OF_INDEXES )
|
||||
{
|
||||
tlsRNGKey = TlsAlloc();
|
||||
CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
|
||||
tlsRNGKey = TlsAlloc();
|
||||
CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
|
||||
}
|
||||
RNG* rng = (RNG*)TlsGetValue( tlsRNGKey );
|
||||
if( !rng )
|
||||
{
|
||||
rng = new RNG;
|
||||
TlsSetValue( tlsRNGKey, rng );
|
||||
rng = new RNG;
|
||||
TlsSetValue( tlsRNGKey, rng );
|
||||
}
|
||||
return *rng;
|
||||
}
|
||||
|
||||
#endif //HAVE_WINRT
|
||||
#else
|
||||
|
||||
static pthread_key_t tlsRNGKey = 0;
|
||||
|
@ -47,6 +47,9 @@
|
||||
#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
|
||||
#endif
|
||||
#include <windows.h>
|
||||
#if (_WIN32_WINNT >= 0x0602)
|
||||
#include <synchapi.h>
|
||||
#endif
|
||||
#undef small
|
||||
#undef min
|
||||
#undef max
|
||||
@ -75,6 +78,30 @@
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_WINRT
|
||||
#include <wrl/client.h>
|
||||
|
||||
std::wstring GetTempPathWinRT()
|
||||
{
|
||||
return std::wstring(Windows::Storage::ApplicationData::Current->TemporaryFolder->Path->Data());
|
||||
}
|
||||
|
||||
std::wstring GetTempFileNameWinRT(std::wstring prefix)
|
||||
{
|
||||
wchar_t guidStr[40];
|
||||
GUID g;
|
||||
CoCreateGuid(&g);
|
||||
wchar_t* mask = L"%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x";
|
||||
swprintf(&guidStr[0], sizeof(guidStr)/sizeof(wchar_t), mask,
|
||||
g.Data1, g.Data2, g.Data3, UINT(g.Data4[0]), UINT(g.Data4[1]),
|
||||
UINT(g.Data4[2]), UINT(g.Data4[3]), UINT(g.Data4[4]),
|
||||
UINT(g.Data4[5]), UINT(g.Data4[6]), UINT(g.Data4[7]));
|
||||
|
||||
return prefix + std::wstring(guidStr);
|
||||
}
|
||||
|
||||
#endif
|
||||
#else
|
||||
#include <pthread.h>
|
||||
#include <sys/time.h>
|
||||
@ -371,10 +398,38 @@ String format( const char* fmt, ... )
|
||||
|
||||
String tempfile( const char* suffix )
|
||||
{
|
||||
#ifdef HAVE_WINRT
|
||||
std::wstring temp_dir = L"";
|
||||
const wchar_t* opencv_temp_dir = _wgetenv(L"OPENCV_TEMP_PATH");
|
||||
if (opencv_temp_dir)
|
||||
temp_dir = std::wstring(opencv_temp_dir);
|
||||
#else
|
||||
const char *temp_dir = getenv("OPENCV_TEMP_PATH");
|
||||
String fname;
|
||||
#endif
|
||||
|
||||
#if defined WIN32 || defined _WIN32
|
||||
#ifdef HAVE_WINRT
|
||||
RoInitialize(RO_INIT_MULTITHREADED);
|
||||
std::wstring temp_dir2;
|
||||
if (temp_dir.empty())
|
||||
temp_dir = GetTempPathWinRT();
|
||||
|
||||
std::wstring temp_file;
|
||||
temp_file = GetTempFileNameWinRT(L"ocv");
|
||||
if (temp_file.empty())
|
||||
return std::string();
|
||||
|
||||
temp_file = temp_dir + std::wstring(L"\\") + temp_file;
|
||||
DeleteFileW(temp_file.c_str());
|
||||
|
||||
size_t asize = wcstombs(NULL, temp_file.c_str(), 0);
|
||||
Ptr<char> aname = new char[asize+1];
|
||||
aname[asize] = 0;
|
||||
wcstombs(aname, temp_file.c_str(), asize);
|
||||
fname = std::string(aname);
|
||||
RoUninitialize();
|
||||
#else
|
||||
char temp_dir2[MAX_PATH + 1] = { 0 };
|
||||
char temp_file[MAX_PATH + 1] = { 0 };
|
||||
|
||||
@ -389,6 +444,7 @@ String tempfile( const char* suffix )
|
||||
DeleteFileA(temp_file);
|
||||
|
||||
fname = temp_file;
|
||||
#endif
|
||||
# else
|
||||
# ifdef ANDROID
|
||||
//char defaultTemplate[] = "/mnt/sdcard/__opencv_temp.XXXXXX";
|
||||
@ -486,40 +542,6 @@ redirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata)
|
||||
|
||||
}
|
||||
|
||||
/*CV_IMPL int
|
||||
cvGuiBoxReport( int code, const char *func_name, const char *err_msg,
|
||||
const char *file, int line, void* )
|
||||
{
|
||||
#if (!defined WIN32 && !defined _WIN32) || defined WINCE
|
||||
return cvStdErrReport( code, func_name, err_msg, file, line, 0 );
|
||||
#else
|
||||
if( code != CV_StsBackTrace && code != CV_StsAutoTrace )
|
||||
{
|
||||
size_t msg_len = strlen(err_msg ? err_msg : "") + 1024;
|
||||
char* message = (char*)alloca(msg_len);
|
||||
char title[100];
|
||||
|
||||
wsprintf( message, "%s (%s)\nin function %s, %s(%d)\n\n"
|
||||
"Press \"Abort\" to terminate application.\n"
|
||||
"Press \"Retry\" to debug (if the app is running under debugger).\n"
|
||||
"Press \"Ignore\" to continue (this is not safe).\n",
|
||||
cvErrorStr(code), err_msg ? err_msg : "no description",
|
||||
func_name, file, line );
|
||||
|
||||
wsprintf( title, "OpenCV GUI Error Handler" );
|
||||
|
||||
int answer = MessageBox( NULL, message, title, MB_ICONERROR|MB_ABORTRETRYIGNORE|MB_SYSTEMMODAL );
|
||||
|
||||
if( answer == IDRETRY )
|
||||
{
|
||||
CV_DBG_BREAK();
|
||||
}
|
||||
return answer != IDIGNORE;
|
||||
}
|
||||
return 0;
|
||||
#endif
|
||||
}*/
|
||||
|
||||
CV_IMPL int cvCheckHardwareSupport(int feature)
|
||||
{
|
||||
CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE );
|
||||
@ -677,7 +699,11 @@ cvErrorFromIppStatus( int status )
|
||||
}
|
||||
|
||||
|
||||
#if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE
|
||||
#if defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE
|
||||
#ifdef HAVE_WINRT
|
||||
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
|
||||
#endif
|
||||
|
||||
BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID );
|
||||
|
||||
BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID )
|
||||
@ -698,7 +724,15 @@ namespace cv
|
||||
|
||||
struct Mutex::Impl
|
||||
{
|
||||
Impl() { InitializeCriticalSection(&cs); refcount = 1; }
|
||||
Impl()
|
||||
{
|
||||
#if (_WIN32_WINNT >= 0x0600)
|
||||
::InitializeCriticalSectionEx(&cs, 1000, 0);
|
||||
#else
|
||||
::InitializeCriticalSection(&cs);
|
||||
#endif
|
||||
refcount = 1;
|
||||
}
|
||||
~Impl() { DeleteCriticalSection(&cs); }
|
||||
|
||||
void lock() { EnterCriticalSection(&cs); }
|
||||
@ -791,4 +825,4 @@ bool Mutex::trylock() { return impl->trylock(); }
|
||||
|
||||
}
|
||||
|
||||
/* End of file. */
|
||||
|
@ -1,3 +1,7 @@
|
||||
#ifdef HAVE_WINRT
|
||||
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
|
||||
#endif
|
||||
|
||||
#include "test_precomp.hpp"
|
||||
|
||||
CV_TEST_MAIN("cv")
|
||||
|
@ -9,7 +9,10 @@ represented as vectors in a multidimensional space. All objects that implement t
|
||||
descriptor extractors inherit the
|
||||
:ocv:class:`DescriptorExtractor` interface.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example explaining keypoint extraction can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
|
||||
* : An example on descriptor evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_evaluation.cpp
|
||||
|
||||
DescriptorExtractor
|
||||
-------------------
|
||||
@ -82,9 +85,10 @@ The current implementation supports the following types of a descriptor extracto
|
||||
|
||||
* ``"SIFT"`` -- :ocv:class:`SIFT`
|
||||
* ``"SURF"`` -- :ocv:class:`SURF`
|
||||
* ``"ORB"`` -- :ocv:class:`ORB`
|
||||
* ``"BRISK"`` -- :ocv:class:`BRISK`
|
||||
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
|
||||
* ``"BRISK"`` -- :ocv:class:`BRISK`
|
||||
* ``"ORB"`` -- :ocv:class:`ORB`
|
||||
* ``"FREAK"`` -- :ocv:class:`FREAK`
|
||||
|
||||
A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
|
||||
:ocv:class:`OpponentColorDescriptorExtractor` ) + descriptor extractor name (see above),
|
||||
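For illustration only (not part of this patch), a minimal sketch of creating a detector and an extractor by the names listed above and computing descriptors for one image; the ``img`` variable and the chosen algorithm names are assumptions. ::

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    #include <vector>

    void describeImage(const cv::Mat& img)
    {
        // Detect keypoints first, then describe them.
        std::vector<cv::KeyPoint> keypoints;
        cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("ORB");
        detector->detect(img, keypoints);

        // "OpponentORB" would select the Opponent-color adapter mentioned above.
        cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("ORB");
        cv::Mat descriptors;                      // one row per keypoint
        extractor->compute(img, keypoints, descriptors);
    }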
@ -141,4 +145,6 @@ Strecha C., Fua P. *BRIEF: Binary Robust Independent Elementary Features* ,
|
||||
...
|
||||
};
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A complete BRIEF extractor sample can be found at opencv_source_code/samples/cpp/brief_match_test.cpp
|
||||
|
@ -9,6 +9,11 @@ that are represented as vectors in a multidimensional space. All objects that im
|
||||
descriptor matchers inherit the
|
||||
:ocv:class:`DescriptorMatcher` interface.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example explaining keypoint matching can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
|
||||
* : An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
|
||||
* : An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp
|
||||
|
||||
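For illustration only (not part of this patch), a minimal matching sketch; it assumes two descriptor matrices produced by a binary extractor such as ORB or BRISK. ::

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    #include <vector>

    std::vector<cv::DMatch> matchDescriptors(const cv::Mat& queryDescriptors,
                                             const cv::Mat& trainDescriptors)
    {
        // Hamming distance suits binary descriptors; use NORM_L2 for SIFT/SURF.
        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<cv::DMatch> matches;
        matcher.match(queryDescriptors, trainDescriptors, matches);
        return matches;
    }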
DescriptorMatcher
|
||||
-----------------
|
||||
@ -271,4 +276,3 @@ Flann-based descriptor matcher. This matcher trains :ocv:class:`flann::Index_` o
|
||||
};
|
||||
|
||||
..
|
||||
|
||||
|
@ -8,6 +8,9 @@ between different algorithms solving the same problem. All objects that implemen
|
||||
inherit the
|
||||
:ocv:class:`FeatureDetector` interface.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example explaining keypoint detection can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
|
||||
|
||||
FeatureDetector
|
||||
---------------
|
||||
@ -166,7 +169,7 @@ StarFeatureDetector
|
||||
-------------------
|
||||
.. ocv:class:: StarFeatureDetector : public FeatureDetector
|
||||
|
||||
The class implements the keypoint detector introduced by K. Konolige, synonym of ``StarDetector``. ::
|
||||
The class implements the keypoint detector introduced by [Agrawal08]_, synonym of ``StarDetector``. ::
|
||||
|
||||
class StarFeatureDetector : public FeatureDetector
|
||||
{
|
||||
@ -180,6 +183,9 @@ The class implements the keypoint detector introduced by K. Konolige, synonym of
|
||||
...
|
||||
};
|
||||
|
||||
.. [Agrawal08] Agrawal, M., Konolige, K., & Blas, M. R. (2008). CenSurE: Center surround extremas for realtime feature detection and matching. In Computer Vision–ECCV 2008 (pp. 102-115). Springer Berlin Heidelberg.
|
||||
|
||||
|
||||
DenseFeatureDetector
|
||||
--------------------
|
||||
.. ocv:class:: DenseFeatureDetector : public FeatureDetector
|
||||
|
@ -11,7 +11,11 @@ Every descriptor with the
|
||||
:ocv:class:`VectorDescriptorMatcher` ).
|
||||
There are descriptors such as the One-way descriptor and Ferns that have the ``GenericDescriptorMatcher`` interface implemented but do not support ``DescriptorExtractor``.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example explaining keypoint description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
|
||||
* : An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
|
||||
* : An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp
|
||||
|
||||
GenericDescriptorMatcher
|
||||
------------------------
|
||||
|
@ -3,6 +3,10 @@ Feature Detection and Description
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example explaining keypoint detection and description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
|
||||
|
||||
FAST
|
||||
----
|
||||
Detects corners using the FAST algorithm
|
||||
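For illustration only (not part of this patch), a minimal sketch of the typical call; the ``gray`` image variable and the threshold value are assumptions. ::

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    #include <vector>

    void detectFastCorners(const cv::Mat& gray)
    {
        std::vector<cv::KeyPoint> keypoints;
        const int threshold = 20;            // intensity difference threshold (assumed value)
        const bool nonmaxSuppression = true; // keep only local maxima
        cv::FAST(gray, keypoints, threshold, nonmaxSuppression);
    }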
@ -58,6 +62,10 @@ Maximally stable extremal region extractor. ::
|
||||
The class encapsulates all the parameters of the MSER extraction algorithm (see
|
||||
http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http://code.opencv.org/projects/opencv/wiki/MSER for useful comments and parameters description.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : PYTHON : A complete example showing the use of the MSER detector can be found at opencv_source_code/samples/python2/mser.py
|
||||
|
||||
|
||||
ORB
|
||||
---
|
||||
@ -182,6 +190,10 @@ Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, descr
|
||||
|
||||
.. [AOV12] A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award Winner.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on how to use the FREAK descriptor can be found at opencv_source_code/samples/cpp/freak_demo.cpp
|
||||
|
||||
FREAK::FREAK
|
||||
------------
|
||||
The FREAK constructor
|
||||
|
@ -5,6 +5,12 @@ Object Categorization
|
||||
|
||||
This section describes approaches based on local 2D features and used to categorize objects.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A complete Bag-Of-Words sample can be found at opencv_source_code/samples/cpp/bagofwords_classification.cpp
|
||||
|
||||
* : PYTHON : An example using the features2D framework to perform object categorization can be found at opencv_source_code/samples/python2/find_obj.py
|
||||
|
||||
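For illustration only (not part of this patch), a minimal vocabulary-building sketch; it assumes ``allDescriptors`` holds floating-point descriptors (e.g. SIFT or SURF) collected from a training set. ::

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>

    cv::Mat buildVocabulary(const cv::Mat& allDescriptors)
    {
        const int vocabularySize = 100;               // assumed number of visual words
        cv::BOWKMeansTrainer trainer(vocabularySize); // k-means based trainer
        trainer.add(allDescriptors);
        return trainer.cluster();                     // one row per visual word
    }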
BOWTrainer
|
||||
----------
|
||||
.. ocv:class:: BOWTrainer
|
||||
|
@ -43,8 +43,12 @@ typedef unsigned __int64 uint64_t;
|
||||
|
||||
#include "defines.h"
|
||||
|
||||
#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
|
||||
# include <Intrin.h>
|
||||
#endif
|
||||
|
||||
#ifdef __ARM_NEON__
|
||||
#include "arm_neon.h"
|
||||
# include "arm_neon.h"
|
||||
#endif
|
||||
|
||||
namespace cvflann
|
||||
|
@ -23,4 +23,3 @@
|
||||
#include "opencv2/core/private.hpp"
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -62,7 +62,12 @@ The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detec
|
||||
|
||||
Interfaces of all methods are kept similar to the ``CPU HOG`` descriptor and detector analogues as much as possible.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/cpp/peopledetect.cpp
|
||||
* : A GPU example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/gpu/hog.cpp
|
||||
|
||||
* : PYTHON : An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/python2/peopledetect.py
|
||||
|
||||
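For illustration only (not part of this patch), a minimal sketch of the CPU analogue mentioned above, using the default people detector; the ``frame`` variable is an assumption. ::

    #include <opencv2/core.hpp>
    #include <opencv2/objdetect.hpp>
    #include <vector>

    void detectPeople(const cv::Mat& frame)
    {
        cv::HOGDescriptor hog;
        hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
        std::vector<cv::Rect> found;   // detected people as rectangles
        hog.detectMultiScale(frame, found);
    }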
gpu::HOGDescriptor::HOGDescriptor
|
||||
-------------------------------------
|
||||
@ -229,7 +234,10 @@ Cascade classifier class used for object detection. Supports HAAR and LBP cascad
|
||||
Size getClassifierSize() const;
|
||||
};
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier.cpp
|
||||
* : An NVIDIA API-specific cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
|
||||
|
||||
gpu::CascadeClassifier_GPU::CascadeClassifier_GPU
|
||||
-----------------------------------------------------
|
||||
|
@ -52,7 +52,12 @@
|
||||
#include "opencv2/ts.hpp"
|
||||
#include "opencv2/ts/gpu_perf.hpp"
|
||||
|
||||
CV_PERF_TEST_CUDA_MAIN(gpu_perf4au)
|
||||
static const char * impls[] = {
|
||||
"cuda",
|
||||
"plain"
|
||||
};
|
||||
|
||||
CV_PERF_TEST_MAIN_WITH_IMPLS(gpu_perf4au, impls, perf::printCudaInfo())
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// HoughLinesP
|
||||
|
@ -6,4 +6,4 @@ set(the_description "GPU-accelerated Background Segmentation")
|
||||
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations)
|
||||
|
||||
ocv_define_module(gpubgsegm opencv_video opencv_imgproc opencv_legacy opencv_gpuarithm opencv_gpufilters opencv_gpuimgproc)
|
||||
ocv_define_module(gpubgsegm opencv_video OPTIONAL opencv_legacy opencv_imgproc opencv_gpuarithm opencv_gpufilters opencv_gpuimgproc)
|
||||
|
@ -5,404 +5,123 @@ Background Segmentation
|
||||
|
||||
|
||||
|
||||
gpu::FGDStatModel
|
||||
-----------------
|
||||
.. ocv:class:: gpu::FGDStatModel
|
||||
gpu::BackgroundSubtractorMOG
|
||||
----------------------------
|
||||
Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
|
||||
|
||||
Class used for background/foreground segmentation. ::
|
||||
|
||||
class FGDStatModel
|
||||
{
|
||||
public:
|
||||
struct Params
|
||||
{
|
||||
...
|
||||
};
|
||||
|
||||
explicit FGDStatModel(int out_cn = 3);
|
||||
explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
|
||||
|
||||
~FGDStatModel();
|
||||
|
||||
void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
|
||||
void release();
|
||||
|
||||
int update(const cv::gpu::GpuMat& curFrame);
|
||||
|
||||
//8UC3 or 8UC4 reference background image
|
||||
cv::gpu::GpuMat background;
|
||||
|
||||
//8UC1 foreground image
|
||||
cv::gpu::GpuMat foreground;
|
||||
|
||||
std::vector< std::vector<cv::Point> > foreground_regions;
|
||||
};
|
||||
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [FGD2003]_.
|
||||
|
||||
The results are available through the class fields:
|
||||
|
||||
.. ocv:member:: cv::gpu::GpuMat background
|
||||
|
||||
The output background image.
|
||||
|
||||
.. ocv:member:: cv::gpu::GpuMat foreground
|
||||
|
||||
The output foreground mask as an 8-bit binary image.
|
||||
|
||||
.. ocv:member:: cv::gpu::GpuMat foreground_regions
|
||||
|
||||
The output foreground regions calculated by :ocv:func:`findContours`.
|
||||
|
||||
|
||||
|
||||
gpu::FGDStatModel::FGDStatModel
|
||||
-------------------------------
|
||||
Constructors.
|
||||
|
||||
.. ocv:function:: gpu::FGDStatModel::FGDStatModel(int out_cn = 3)
|
||||
.. ocv:function:: gpu::FGDStatModel::FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3)
|
||||
|
||||
:param firstFrame: First frame from video stream. Supports 3- and 4-channels input ( ``CV_8UC3`` and ``CV_8UC4`` ).
|
||||
|
||||
:param params: Algorithm's parameters. See [FGD2003]_ for explanation.
|
||||
|
||||
:param out_cn: Channels count in output result and inner buffers. Can be 3 or 4. 4-channels version requires more memory, but works a bit faster.
|
||||
|
||||
.. seealso:: :ocv:func:`gpu::FGDStatModel::create`
|
||||
|
||||
|
||||
|
||||
gpu::FGDStatModel::create
|
||||
-------------------------
|
||||
Initializes background model.
|
||||
|
||||
.. ocv:function:: void gpu::FGDStatModel::create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params())
|
||||
|
||||
:param firstFrame: First frame from video stream. Supports 3- and 4-channels input ( ``CV_8UC3`` and ``CV_8UC4`` ).
|
||||
|
||||
:param params: Algorithm's parameters. See [FGD2003]_ for explanation.
|
||||
|
||||
|
||||
|
||||
gpu::FGDStatModel::release
|
||||
--------------------------
|
||||
Releases all inner buffer's memory.
|
||||
|
||||
.. ocv:function:: void gpu::FGDStatModel::release()
|
||||
|
||||
|
||||
|
||||
gpu::FGDStatModel::update
|
||||
--------------------------
|
||||
Updates the background model and returns foreground regions count.
|
||||
|
||||
.. ocv:function:: int gpu::FGDStatModel::update(const cv::gpu::GpuMat& curFrame)
|
||||
|
||||
:param curFrame: Next video frame.
|
||||
|
||||
|
||||
|
||||
gpu::MOG_GPU
|
||||
------------
|
||||
.. ocv:class:: gpu::MOG_GPU
|
||||
|
||||
Gaussian Mixture-based Background/Foreground Segmentation Algorithm. ::
|
||||
|
||||
class MOG_GPU
|
||||
{
|
||||
public:
|
||||
MOG_GPU(int nmixtures = -1);
|
||||
|
||||
void initialize(Size frameSize, int frameType);
|
||||
|
||||
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
|
||||
|
||||
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
|
||||
|
||||
void release();
|
||||
|
||||
int history;
|
||||
float varThreshold;
|
||||
float backgroundRatio;
|
||||
float noiseSigma;
|
||||
};
|
||||
.. ocv:class:: gpu::BackgroundSubtractorMOG : public cv::BackgroundSubtractorMOG
|
||||
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel that does not fit this model is deemed to be foreground. The class implements the algorithm described in [MOG2001]_.
|
||||
|
||||
.. seealso:: :ocv:class:`BackgroundSubtractorMOG`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example of Gaussian mixture-based background/foreground segmentation can be found at opencv_source_code/samples/gpu/bgfg_segm.cpp
|
||||
|
||||
|
||||
gpu::MOG_GPU::MOG_GPU
|
||||
---------------------
|
||||
The constructor.
|
||||
gpu::createBackgroundSubtractorMOG
|
||||
----------------------------------
|
||||
Creates mixture-of-gaussian background subtractor
|
||||
|
||||
.. ocv:function:: gpu::MOG_GPU::MOG_GPU(int nmixtures = -1)
|
||||
.. ocv:function:: Ptr<gpu::BackgroundSubtractorMOG> gpu::createBackgroundSubtractorMOG(int history=200, int nmixtures=5, double backgroundRatio=0.7, double noiseSigma=0)
|
||||
|
||||
:param history: Length of the history.
|
||||
|
||||
:param nmixtures: Number of Gaussian mixtures.
|
||||
|
||||
Default constructor sets all parameters to default values.
|
||||
:param backgroundRatio: Background ratio.
|
||||
|
||||
:param noiseSigma: Noise strength (standard deviation of the brightness or each color channel). 0 means some automatic value.
|
||||
|
||||
|
||||
|
||||
gpu::MOG_GPU::operator()
|
||||
------------------------
|
||||
Updates the background model and returns the foreground mask.
|
||||
gpu::BackgroundSubtractorMOG2
|
||||
-----------------------------
|
||||
Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
|
||||
|
||||
.. ocv:function:: void gpu::MOG_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null())
|
||||
.. ocv:class:: gpu::BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
|
||||
|
||||
:param frame: Next video frame.
|
||||
|
||||
:param fgmask: The output foreground mask as an 8-bit binary image.
|
||||
|
||||
:param stream: Stream for the asynchronous version.
|
||||
|
||||
|
||||
|
||||
gpu::MOG_GPU::getBackgroundImage
|
||||
--------------------------------
|
||||
Computes a background image.
|
||||
|
||||
.. ocv:function:: void gpu::MOG_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const
|
||||
|
||||
:param backgroundImage: The output background image.
|
||||
|
||||
:param stream: Stream for the asynchronous version.
|
||||
|
||||
|
||||
|
||||
gpu::MOG_GPU::release
|
||||
---------------------
|
||||
Releases all inner buffer's memory.
|
||||
|
||||
.. ocv:function:: void gpu::MOG_GPU::release()
|
||||
|
||||
|
||||
|
||||
gpu::MOG2_GPU
|
||||
-------------
|
||||
.. ocv:class:: gpu::MOG2_GPU
|
||||
|
||||
Gaussian Mixture-based Background/Foreground Segmentation Algorithm. ::
|
||||
|
||||
class MOG2_GPU
|
||||
{
|
||||
public:
|
||||
MOG2_GPU(int nmixtures = -1);
|
||||
|
||||
void initialize(Size frameSize, int frameType);
|
||||
|
||||
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
|
||||
|
||||
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
|
||||
|
||||
void release();
|
||||
|
||||
// parameters
|
||||
...
|
||||
};
|
||||
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [MOG2004]_.
|
||||
|
||||
Here are important members of the class that control the algorithm, which you can set after constructing the class instance:
|
||||
|
||||
.. ocv:member:: float backgroundRatio
|
||||
|
||||
Threshold defining whether the component is significant enough to be included into the background model (corresponds to ``TB=1-cf`` from [MOG2004]_). ``cf=0.1 => TB=0.9`` is default. For ``alpha=0.001``, it means that the mode should exist for approximately 105 frames before it is considered foreground.
|
||||
|
||||
.. ocv:member:: float varThreshold
|
||||
|
||||
Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the existing components (corresponds to ``Tg``). If it is not close to any component, a new component is generated. ``3 sigma => Tg=3*3=9`` is default. A smaller ``Tg`` value generates more components. A higher ``Tg`` value may result in a small number of components but they can grow too large.
|
||||
|
||||
.. ocv:member:: float fVarInit
|
||||
|
||||
Initial variance for the newly generated components. It affects the speed of adaptation. The parameter value is based on your estimate of the typical standard deviation from the images. OpenCV uses 15 as a reasonable value.
|
||||
|
||||
.. ocv:member:: float fVarMin
|
||||
|
||||
Parameter used to further control the variance.
|
||||
|
||||
.. ocv:member:: float fVarMax
|
||||
|
||||
Parameter used to further control the variance.
|
||||
|
||||
.. ocv:member:: float fCT
|
||||
|
||||
Complexity reduction parameter. This parameter defines the number of samples needed to accept to prove the component exists. ``CT=0.05`` is a default value for all the samples. By setting ``CT=0`` you get an algorithm very similar to the standard Stauffer&Grimson algorithm.
|
||||
|
||||
.. ocv:member:: uchar nShadowDetection
|
||||
|
||||
The value for marking shadow pixels in the output foreground mask. Default value is 127.
|
||||
|
||||
.. ocv:member:: float fTau
|
||||
|
||||
Shadow threshold. The shadow is detected if the pixel is a darker version of the background. ``Tau`` is a threshold defining how much darker the shadow can be. ``Tau= 0.5`` means that if a pixel is more than twice darker then it is not shadow. See [ShadowDetect2003]_.
|
||||
|
||||
.. ocv:member:: bool bShadowDetection
|
||||
|
||||
Parameter defining whether shadow detection should be enabled.
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel that does not fit this model is deemed to be foreground. The class implements the algorithm described in [MOG2004]_.
|
||||
|
||||
.. seealso:: :ocv:class:`BackgroundSubtractorMOG2`
|
||||
|
||||
|
||||
|
||||
gpu::MOG2_GPU::MOG2_GPU
|
||||
-----------------------
|
||||
The constructor.
|
||||
gpu::createBackgroundSubtractorMOG2
|
||||
-----------------------------------
|
||||
Creates MOG2 Background Subtractor
|
||||
|
||||
.. ocv:function:: gpu::MOG2_GPU::MOG2_GPU(int nmixtures = -1)
|
||||
.. ocv:function:: Ptr<gpu::BackgroundSubtractorMOG2> gpu::createBackgroundSubtractorMOG2( int history=500, double varThreshold=16, bool detectShadows=true )
|
||||
|
||||
:param nmixtures: Number of Gaussian mixtures.
|
||||
:param history: Length of the history.
|
||||
|
||||
Default constructor sets all parameters to default values.
|
||||
:param varThreshold: Threshold on the squared Mahalanobis distance between the pixel and the model to decide whether a pixel is well described by the background model. This parameter does not affect the background update.
|
||||
|
||||
:param detectShadows: If true, the algorithm will detect shadows and mark them. It decreases the speed a bit, so if you do not need this feature, set the parameter to false.
|
||||
|
||||
|
||||
|
||||
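For illustration only (not part of this patch), a minimal usage sketch that mirrors the calls in the updated performance test below; the frame container is an assumption. ::

    #include <opencv2/core.hpp>
    #include <opencv2/gpubgsegm.hpp>
    #include <vector>

    void segmentSequence(const std::vector<cv::Mat>& frames)
    {
        cv::Ptr<cv::gpu::BackgroundSubtractorMOG2> mog2 =
            cv::gpu::createBackgroundSubtractorMOG2(500, 16, true);

        cv::gpu::GpuMat d_frame, d_fgmask, d_background;
        for (size_t i = 0; i < frames.size(); ++i)
        {
            d_frame.upload(frames[i]);
            mog2->apply(d_frame, d_fgmask);     // default (automatic) learning rate
        }
        mog2->getBackgroundImage(d_background); // mean of the background Gaussians
    }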
gpu::MOG2_GPU::operator()
|
||||
-------------------------
|
||||
Updates the background model and returns the foreground mask.
|
||||
gpu::BackgroundSubtractorGMG
|
||||
----------------------------
|
||||
Background/Foreground Segmentation Algorithm.
|
||||
|
||||
.. ocv:function:: void gpu::MOG2_GPU::operator()( const GpuMat& frame, GpuMat& fgmask, float learningRate=-1.0f, Stream& stream=Stream::Null() )
|
||||
.. ocv:class:: gpu::BackgroundSubtractorGMG : public cv::BackgroundSubtractorGMG
|
||||
|
||||
:param frame: Next video frame.
|
||||
|
||||
:param fgmask: The output foreground mask as an 8-bit binary image.
|
||||
|
||||
:param stream: Stream for the asynchronous version.
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel that does not fit this model is deemed to be foreground. The class implements the algorithm described in [GMG2012]_.
|
||||
|
||||
|
||||
|
||||
gpu::MOG2_GPU::getBackgroundImage
|
||||
---------------------------------
|
||||
Computes a background image.
|
||||
gpu::createBackgroundSubtractorGMG
|
||||
----------------------------------
|
||||
Creates GMG Background Subtractor
|
||||
|
||||
.. ocv:function:: void gpu::MOG2_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const
|
||||
.. ocv:function:: Ptr<gpu::BackgroundSubtractorGMG> gpu::createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8)
|
||||
|
||||
:param backgroundImage: The output background image.
|
||||
:param initializationFrames: Number of frames of video to use to initialize histograms.
|
||||
|
||||
:param stream: Stream for the asynchronous version.
|
||||
:param decisionThreshold: Value above which pixel is determined to be FG.
|
||||
|
||||
|
||||
|
||||
gpu::MOG2_GPU::release
|
||||
----------------------
|
||||
Releases all inner buffer's memory.
|
||||
gpu::BackgroundSubtractorFGD
|
||||
----------------------------
|
||||
|
||||
.. ocv:function:: void gpu::MOG2_GPU::release()
|
||||
.. ocv:class:: gpu::BackgroundSubtractorFGD : public cv::BackgroundSubtractor
|
||||
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel that does not fit this model is deemed to be foreground. The class implements the algorithm described in [FGD2003]_. ::
|
||||
|
||||
|
||||
gpu::GMG_GPU
|
||||
------------
|
||||
.. ocv:class:: gpu::GMG_GPU
|
||||
|
||||
Class used for background/foreground segmentation. ::
|
||||
|
||||
class GMG_GPU_GPU
|
||||
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
|
||||
{
|
||||
public:
|
||||
GMG_GPU();
|
||||
|
||||
void initialize(Size frameSize, float min = 0.0f, float max = 255.0f);
|
||||
|
||||
void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
|
||||
|
||||
void release();
|
||||
|
||||
int maxFeatures;
|
||||
float learningRate;
|
||||
int numInitializationFrames;
|
||||
int quantizationLevels;
|
||||
float backgroundPrior;
|
||||
float decisionThreshold;
|
||||
int smoothingRadius;
|
||||
|
||||
...
|
||||
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
|
||||
};
|
||||
|
||||
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements algorithm described in [GMG2012]_.
|
||||
|
||||
Here are important members of the class that control the algorithm, which you can set after constructing the class instance:
|
||||
|
||||
.. ocv:member:: int maxFeatures
|
||||
|
||||
Total number of distinct colors to maintain in histogram.
|
||||
|
||||
.. ocv:member:: float learningRate
|
||||
|
||||
Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
|
||||
|
||||
.. ocv:member:: int numInitializationFrames
|
||||
|
||||
Number of frames of video to use to initialize histograms.
|
||||
|
||||
.. ocv:member:: int quantizationLevels
|
||||
|
||||
Number of discrete levels in each channel to be used in histograms.
|
||||
|
||||
.. ocv:member:: float backgroundPrior
|
||||
|
||||
Prior probability that any given pixel is a background pixel. A sensitivity parameter.
|
||||
|
||||
.. ocv:member:: float decisionThreshold
|
||||
|
||||
Value above which pixel is determined to be FG.
|
||||
|
||||
.. ocv:member:: float smoothingRadius
|
||||
|
||||
Smoothing radius, in pixels, for cleaning up FG image.
|
||||
.. seealso:: :ocv:class:`BackgroundSubtractor`
|
||||
|
||||
|
||||
|
||||
gpu::GMG_GPU::GMG_GPU
|
||||
---------------------
|
||||
The default constructor.
|
||||
gpu::BackgroundSubtractorFGD::getForegroundRegions
|
||||
--------------------------------------------------
|
||||
Returns the output foreground regions calculated by :ocv:func:`findContours`.
|
||||
|
||||
.. ocv:function:: gpu::GMG_GPU::GMG_GPU()
|
||||
.. ocv:function:: void gpu::BackgroundSubtractorFGD::getForegroundRegions(OutputArrayOfArrays foreground_regions)
|
||||
|
||||
Default constructor sets all parameters to default values.
|
||||
:param foreground_regions: Output array (CPU memory).
|
||||
|
||||
|
||||
|
||||
gpu::GMG_GPU::initialize
|
||||
------------------------
|
||||
Initialize background model and allocates all inner buffers.
|
||||
gpu::createBackgroundSubtractorFGD
|
||||
----------------------------------
|
||||
Creates FGD Background Subtractor
|
||||
|
||||
.. ocv:function:: void gpu::GMG_GPU::initialize(Size frameSize, float min = 0.0f, float max = 255.0f)
|
||||
.. ocv:function:: Ptr<gpu::BackgroundSubtractorFGD> gpu::createBackgroundSubtractorFGD(const FGDParams& params = FGDParams())
|
||||
|
||||
:param frameSize: Input frame size.
|
||||
|
||||
:param min: Minimum value taken on by pixels in image sequence. Usually 0.
|
||||
|
||||
:param max: Maximum value taken on by pixels in image sequence, e.g. 1.0 or 255.
|
||||
|
||||
|
||||
|
||||
gpu::GMG_GPU::operator()
|
||||
------------------------
|
||||
Updates the background model and returns the foreground mask
|
||||
|
||||
.. ocv:function:: void gpu::GMG_GPU::operator ()( const GpuMat& frame, GpuMat& fgmask, float learningRate=-1.0f, Stream& stream=Stream::Null() )
|
||||
|
||||
:param frame: Next video frame.
|
||||
|
||||
:param fgmask: The output foreground mask as an 8-bit binary image.
|
||||
|
||||
:param stream: Stream for the asynchronous version.
|
||||
|
||||
|
||||
|
||||
gpu::GMG_GPU::release
|
||||
---------------------
|
||||
Releases all inner buffer's memory.
|
||||
|
||||
.. ocv:function:: void gpu::GMG_GPU::release()
|
||||
:param params: Algorithm's parameters. See [FGD2003]_ for explanation.
|
||||
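For illustration only (not part of this patch), a minimal usage sketch that mirrors the calls in the updated performance test below; the frame container is an assumption. ::

    #include <opencv2/core.hpp>
    #include <opencv2/gpubgsegm.hpp>
    #include <vector>

    void runFGD(const std::vector<cv::Mat>& frames)
    {
        cv::Ptr<cv::gpu::BackgroundSubtractorFGD> fgd =
            cv::gpu::createBackgroundSubtractorFGD();   // default FGDParams

        cv::gpu::GpuMat d_frame, d_fgmask, d_background;
        std::vector< std::vector<cv::Point> > regions;
        for (size_t i = 0; i < frames.size(); ++i)
        {
            d_frame.upload(frames[i]);
            fgd->apply(d_frame, d_fgmask);              // 8-bit foreground mask
        }
        fgd->getBackgroundImage(d_background);          // 8UC3/8UC4 background estimate
        fgd->getForegroundRegions(regions);             // contours, returned on the CPU
    }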
|
||||
|
||||
|
||||
.. [FGD2003] Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. *Foreground Object Detection from Videos Containing Complex Background*. ACM MM2003 9p, 2003.
|
||||
.. [MOG2001] P. KadewTraKuPong and R. Bowden. *An improved adaptive background mixture model for real-time tracking with shadow detection*. Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001
|
||||
.. [MOG2004] Z. Zivkovic. *Improved adaptive Gaussian mixture model for background subtraction*. International Conference on Pattern Recognition, UK, August, 2004

.. [ShadowDetect2003] Prati, Mikic, Trivedi and Cucchiara. *Detecting Moving Shadows...*. IEEE PAMI, 2003
|
||||
.. [GMG2012] A. Godbehere, A. Matsukawa and K. Goldberg. *Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive Audio Art Installation*. American Control Conference, Montreal, June 2012
|
||||
|
@ -47,284 +47,106 @@
|
||||
# error gpubgsegm.hpp header must be compiled as C++
|
||||
#endif
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "opencv2/core/gpu.hpp"
|
||||
#include "opencv2/gpufilters.hpp"
|
||||
#include "opencv2/video/background_segm.hpp"
|
||||
|
||||
namespace cv { namespace gpu {
|
||||
|
||||
// Foreground Object Detection from Videos Containing Complex Background.
|
||||
// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
|
||||
// ACM MM2003 9p
|
||||
class CV_EXPORTS FGDStatModel
|
||||
////////////////////////////////////////////////////
|
||||
// MOG
|
||||
|
||||
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractorMOG
|
||||
{
|
||||
public:
|
||||
struct CV_EXPORTS Params
|
||||
{
|
||||
int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
|
||||
int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
|
||||
int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
|
||||
// Used to allow the first N1c vectors to adapt over time to changing background.
|
||||
using cv::BackgroundSubtractorMOG::apply;
|
||||
using cv::BackgroundSubtractorMOG::getBackgroundImage;
|
||||
|
||||
int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
|
||||
int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
|
||||
int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
|
||||
// Used to allow the first N1cc vectors to adapt over time to changing background.
|
||||
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
|
||||
|
||||
bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
|
||||
int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
|
||||
// These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
|
||||
|
||||
float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
|
||||
float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
|
||||
float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
|
||||
|
||||
float delta; // Affects color and color co-occurrence quantization, typically set to 2.
|
||||
float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
|
||||
float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
|
||||
|
||||
// default Params
|
||||
Params();
|
||||
};
|
||||
|
||||
// out_cn - channels count in output result (can be 3 or 4)
|
||||
// 4-channels require more memory, but a bit faster
|
||||
explicit FGDStatModel(int out_cn = 3);
|
||||
explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
|
||||
|
||||
~FGDStatModel();
|
||||
|
||||
void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
|
||||
void release();
|
||||
|
||||
int update(const cv::gpu::GpuMat& curFrame);
|
||||
|
||||
//8UC3 or 8UC4 reference background image
|
||||
cv::gpu::GpuMat background;
|
||||
|
||||
//8UC1 foreground image
|
||||
cv::gpu::GpuMat foreground;
|
||||
|
||||
std::vector< std::vector<cv::Point> > foreground_regions;
|
||||
|
||||
private:
|
||||
FGDStatModel(const FGDStatModel&);
|
||||
FGDStatModel& operator=(const FGDStatModel&);
|
||||
|
||||
class Impl;
|
||||
std::auto_ptr<Impl> impl_;
|
||||
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
|
||||
};
|
||||
|
||||
/*!
|
||||
Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm
|
||||
CV_EXPORTS Ptr<gpu::BackgroundSubtractorMOG>
|
||||
createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5,
|
||||
double backgroundRatio = 0.7, double noiseSigma = 0);
|
||||
|
||||
The class implements the following algorithm:
|
||||
"An improved adaptive background mixture model for real-time tracking with shadow detection"
|
||||
P. KadewTraKuPong and R. Bowden,
|
||||
Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
|
||||
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
|
||||
*/
|
||||
class CV_EXPORTS MOG_GPU
|
||||
////////////////////////////////////////////////////
|
||||
// MOG2
|
||||
|
||||
class CV_EXPORTS BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
|
||||
{
|
||||
public:
|
||||
//! the default constructor
|
||||
MOG_GPU(int nmixtures = -1);
|
||||
using cv::BackgroundSubtractorMOG2::apply;
|
||||
using cv::BackgroundSubtractorMOG2::getBackgroundImage;
|
||||
|
||||
//! re-initiaization method
|
||||
void initialize(Size frameSize, int frameType);
|
||||
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
|
||||
|
||||
//! the update operator
|
||||
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
|
||||
|
||||
//! computes a background image which are the mean of all background gaussians
|
||||
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
|
||||
|
||||
//! releases all inner buffers
|
||||
void release();
|
||||
|
||||
int history;
|
||||
float varThreshold;
|
||||
float backgroundRatio;
|
||||
float noiseSigma;
|
||||
|
||||
private:
|
||||
int nmixtures_;
|
||||
|
||||
Size frameSize_;
|
||||
int frameType_;
|
||||
int nframes_;
|
||||
|
||||
GpuMat weight_;
|
||||
GpuMat sortKey_;
|
||||
GpuMat mean_;
|
||||
GpuMat var_;
|
||||
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
|
||||
};
|
||||
|
||||
/*!
|
||||
The class implements the following algorithm:
|
||||
"Improved adaptive Gausian mixture model for background subtraction"
|
||||
Z.Zivkovic
|
||||
International Conference Pattern Recognition, UK, August, 2004.
|
||||
http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
|
||||
*/
|
||||
class CV_EXPORTS MOG2_GPU
|
||||
CV_EXPORTS Ptr<gpu::BackgroundSubtractorMOG2>
|
||||
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
|
||||
bool detectShadows = true);
|
||||
|
||||
////////////////////////////////////////////////////
|
||||
// GMG
|
||||
|
||||
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractorGMG
|
||||
{
|
||||
public:
|
||||
//! the default constructor
|
||||
MOG2_GPU(int nmixtures = -1);
|
||||
using cv::BackgroundSubtractorGMG::apply;
|
||||
|
||||
//! re-initiaization method
|
||||
void initialize(Size frameSize, int frameType);
|
||||
|
||||
//! the update operator
|
||||
void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
|
||||
|
||||
//! computes a background image which are the mean of all background gaussians
|
||||
void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
|
||||
|
||||
//! releases all inner buffers
|
||||
void release();
|
||||
|
||||
// parameters
|
||||
// you should call initialize after parameters changes
|
||||
|
||||
int history;
|
||||
|
||||
//! here it is the maximum allowed number of mixture components.
|
||||
//! Actual number is determined dynamically per pixel
|
||||
float varThreshold;
|
||||
// threshold on the squared Mahalanobis distance to decide if it is well described
|
||||
// by the background model or not. Related to Cthr from the paper.
|
||||
// This does not influence the update of the background. A typical value could be 4 sigma
|
||||
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
|
||||
|
||||
/////////////////////////
|
||||
// less important parameters - things you might change but be carefull
|
||||
////////////////////////
|
||||
|
||||
float backgroundRatio;
|
||||
// corresponds to fTB=1-cf from the paper
|
||||
// TB - threshold when the component becomes significant enough to be included into
|
||||
// the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
|
||||
// For alpha=0.001 it means that the mode should exist for approximately 105 frames before
|
||||
// it is considered foreground
|
||||
// float noiseSigma;
|
||||
float varThresholdGen;
|
||||
|
||||
//correspondts to Tg - threshold on the squared Mahalan. dist. to decide
|
||||
//when a sample is close to the existing components. If it is not close
|
||||
//to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
|
||||
//Smaller Tg leads to more generated components and higher Tg might make
|
||||
//lead to small number of components but they can grow too large
|
||||
float fVarInit;
|
||||
float fVarMin;
|
||||
float fVarMax;
|
||||
|
||||
//initial variance for the newly generated components.
|
||||
//It will will influence the speed of adaptation. A good guess should be made.
|
||||
//A simple way is to estimate the typical standard deviation from the images.
|
||||
//I used here 10 as a reasonable value
|
||||
// min and max can be used to further control the variance
|
||||
float fCT; //CT - complexity reduction prior
|
||||
//this is related to the number of samples needed to accept that a component
|
||||
//actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
|
||||
//the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
|
||||
|
||||
//shadow detection parameters
|
||||
bool bShadowDetection; //default 1 - do shadow detection
|
||||
unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value
|
||||
float fTau;
|
||||
// Tau - shadow threshold. The shadow is detected if the pixel is darker
|
||||
//version of the background. Tau is a threshold on how much darker the shadow can be.
|
||||
//Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
|
||||
//See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
|
||||
|
||||
private:
|
||||
int nmixtures_;
|
||||
|
||||
Size frameSize_;
|
||||
int frameType_;
|
||||
int nframes_;
|
||||
|
||||
GpuMat weight_;
|
||||
GpuMat variance_;
|
||||
GpuMat mean_;
|
||||
|
||||
GpuMat bgmodelUsedModes_; //keep track of number of modes per pixel
|
||||
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
|
||||
};
|
||||
|
||||
CV_EXPORTS Ptr<gpu::BackgroundSubtractorGMG>
|
||||
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
|
||||
|
||||
////////////////////////////////////////////////////
|
||||
// FGD
|
||||
|
||||
/**
|
||||
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
|
||||
* images of the same size, where 255 indicates Foreground and 0 represents Background.
|
||||
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
|
||||
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
|
||||
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
|
||||
* Foreground Object Detection from Videos Containing Complex Background.
|
||||
* Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
|
||||
* ACM MM2003 9p
|
||||
*/
|
||||
class CV_EXPORTS GMG_GPU
|
||||
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
|
||||
{
|
||||
public:
|
||||
GMG_GPU();
|
||||
|
||||
/**
|
||||
* Validate parameters and set up data structures for appropriate frame size.
|
||||
* @param frameSize Input frame size
|
||||
* @param min Minimum value taken on by pixels in image sequence. Usually 0
|
||||
* @param max Maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
|
||||
*/
|
||||
void initialize(Size frameSize, float min = 0.0f, float max = 255.0f);
|
||||
|
||||
/**
|
||||
* Performs single-frame background subtraction and builds up a statistical background image
|
||||
* model.
|
||||
* @param frame Input frame
|
||||
* @param fgmask Output mask image representing foreground and background pixels
|
||||
* @param stream Stream for the asynchronous version
|
||||
*/
|
||||
void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
|
||||
|
||||
//! Releases all inner buffers
|
||||
void release();
|
||||
|
||||
//! Total number of distinct colors to maintain in histogram.
|
||||
int maxFeatures;
|
||||
|
||||
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
|
||||
float learningRate;
|
||||
|
||||
//! Number of frames of video to use to initialize histograms.
|
||||
int numInitializationFrames;
|
||||
|
||||
//! Number of discrete levels in each channel to be used in histograms.
|
||||
int quantizationLevels;
|
||||
|
||||
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
|
||||
float backgroundPrior;
|
||||
|
||||
//! Value above which pixel is determined to be FG.
|
||||
float decisionThreshold;
|
||||
|
||||
//! Smoothing radius, in pixels, for cleaning up FG image.
|
||||
int smoothingRadius;
|
||||
|
||||
//! Perform background model update.
|
||||
bool updateBackgroundModel;
|
||||
|
||||
private:
|
||||
float maxVal_, minVal_;
|
||||
|
||||
Size frameSize_;
|
||||
|
||||
int frameNum_;
|
||||
|
||||
GpuMat nfeatures_;
|
||||
GpuMat colors_;
|
||||
GpuMat weights_;
|
||||
|
||||
Ptr<gpu::Filter> boxFilter_;
|
||||
GpuMat buf_;
|
||||
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
|
||||
};
|
||||
|
||||
struct CV_EXPORTS FGDParams
|
||||
{
|
||||
int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
|
||||
int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
|
||||
int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
|
||||
// Used to allow the first N1c vectors to adapt over time to changing background.
|
||||
|
||||
int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
|
||||
int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
|
||||
int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
|
||||
// Used to allow the first N1cc vectors to adapt over time to changing background.
|
||||
|
||||
bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
|
||||
int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
|
||||
// These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
|
||||
|
||||
float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
|
||||
float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
|
||||
float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
|
||||
|
||||
float delta; // Affects color and color co-occurrence quantization, typically set to 2.
|
||||
float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
|
||||
float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
|
||||
|
||||
// default Params
|
||||
FGDParams();
|
||||
};
|
||||
|
||||
CV_EXPORTS Ptr<gpu::BackgroundSubtractorFGD>
|
||||
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
|
||||
|
||||
}} // namespace cv { namespace gpu {
|
||||
|
||||
#endif /* __OPENCV_GPUBGSEGM_HPP__ */
|
||||
|
@ -41,7 +41,14 @@
|
||||
//M*/
|
||||
|
||||
#include "perf_precomp.hpp"
|
||||
#include "opencv2/legacy.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCV_LEGACY
|
||||
# include "opencv2/legacy.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_GPUIMGPROC
|
||||
# include "opencv2/gpuimgproc.hpp"
|
||||
#endif
|
||||
|
||||
using namespace std;
|
||||
using namespace testing;
|
||||
@ -59,6 +66,13 @@ using namespace perf;
|
||||
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// FGDStatModel
|
||||
|
||||
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
|
||||
|
||||
#ifdef HAVE_OPENCV_LEGACY
|
||||
|
||||
namespace cv
|
||||
{
|
||||
template<> void Ptr<CvBGStatModel>::delete_obj()
|
||||
@ -67,10 +81,7 @@ namespace cv
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// FGDStatModel
|
||||
|
||||
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
|
||||
#endif
|
||||
|
||||
DEF_PARAM_TEST_1(Video, string);
|
||||
|
||||
@ -90,10 +101,10 @@ PERF_TEST_P(Video, FGDStatModel,
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::gpu::GpuMat d_frame(frame);
|
||||
cv::gpu::GpuMat d_frame(frame), foreground;
|
||||
|
||||
cv::gpu::FGDStatModel d_model(4);
|
||||
d_model.create(d_frame);
|
||||
cv::Ptr<cv::gpu::BackgroundSubtractorFGD> d_fgd = cv::gpu::createBackgroundSubtractorFGD();
|
||||
d_fgd->apply(d_frame, foreground);
|
||||
|
||||
for (int i = 0; i < 10; ++i)
|
||||
{
|
||||
@ -103,18 +114,22 @@ PERF_TEST_P(Video, FGDStatModel,
|
||||
d_frame.upload(frame);
|
||||
|
||||
startTimer(); next();
|
||||
d_model.update(d_frame);
|
||||
d_fgd->apply(d_frame, foreground);
|
||||
stopTimer();
|
||||
}
|
||||
|
||||
const cv::gpu::GpuMat background = d_model.background;
|
||||
const cv::gpu::GpuMat foreground = d_model.foreground;
|
||||
|
||||
GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
|
||||
GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
|
||||
|
||||
#ifdef HAVE_OPENCV_GPUIMGPROC
|
||||
cv::gpu::GpuMat background3, background;
|
||||
d_fgd->getBackgroundImage(background3);
|
||||
cv::gpu::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
|
||||
GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
#ifdef HAVE_OPENCV_LEGACY
|
||||
IplImage ipl_frame = frame;
|
||||
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
|
||||
|
||||
@ -135,6 +150,9 @@ PERF_TEST_P(Video, FGDStatModel,
|
||||
|
||||
CPU_SANITY_CHECK(background);
|
||||
CPU_SANITY_CHECK(foreground);
|
||||
#else
|
||||
FAIL_NO_CPU();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -176,11 +194,12 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::Ptr<cv::BackgroundSubtractor> d_mog = cv::gpu::createBackgroundSubtractorMOG();
|
||||
|
||||
cv::gpu::GpuMat d_frame(frame);
|
||||
cv::gpu::MOG_GPU d_mog;
|
||||
cv::gpu::GpuMat foreground;
|
||||
|
||||
d_mog(d_frame, foreground, learningRate);
|
||||
d_mog->apply(d_frame, foreground, learningRate);
|
||||
|
||||
for (int i = 0; i < 10; ++i)
|
||||
{
|
||||
@ -200,7 +219,7 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
|
||||
d_frame.upload(frame);
|
||||
|
||||
startTimer(); next();
|
||||
d_mog(d_frame, foreground, learningRate);
|
||||
d_mog->apply(d_frame, foreground, learningRate);
|
||||
stopTimer();
|
||||
}
|
||||
|
||||
@ -273,13 +292,13 @@ PERF_TEST_P(Video_Cn, MOG2,
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::gpu::MOG2_GPU d_mog2;
|
||||
d_mog2.bShadowDetection = false;
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG2> d_mog2 = cv::gpu::createBackgroundSubtractorMOG2();
|
||||
d_mog2->setDetectShadows(false);
|
||||
|
||||
cv::gpu::GpuMat d_frame(frame);
|
||||
cv::gpu::GpuMat foreground;
|
||||
|
||||
d_mog2(d_frame, foreground);
|
||||
d_mog2->apply(d_frame, foreground);
|
||||
|
||||
for (int i = 0; i < 10; ++i)
|
||||
{
|
||||
@ -299,7 +318,7 @@ PERF_TEST_P(Video_Cn, MOG2,
|
||||
d_frame.upload(frame);
|
||||
|
||||
startTimer(); next();
|
||||
d_mog2(d_frame, foreground);
|
||||
d_mog2->apply(d_frame, foreground);
|
||||
stopTimer();
|
||||
}
|
||||
|
||||
@ -307,8 +326,8 @@ PERF_TEST_P(Video_Cn, MOG2,
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
|
||||
mog2->set("detectShadows", false);
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
|
||||
mog2->setDetectShadows(false);
|
||||
|
||||
cv::Mat foreground;
|
||||
|
||||
@ -359,8 +378,9 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::Ptr<cv::BackgroundSubtractor> d_mog2 = cv::gpu::createBackgroundSubtractorMOG2();
|
||||
|
||||
cv::gpu::GpuMat d_frame;
|
||||
cv::gpu::MOG2_GPU d_mog2;
|
||||
cv::gpu::GpuMat d_foreground;
|
||||
|
||||
for (int i = 0; i < 10; ++i)
|
||||
@ -380,12 +400,12 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
|
||||
|
||||
d_frame.upload(frame);
|
||||
|
||||
d_mog2(d_frame, d_foreground);
|
||||
d_mog2->apply(d_frame, d_foreground);
|
||||
}
|
||||
|
||||
cv::gpu::GpuMat background;
|
||||
|
||||
TEST_CYCLE() d_mog2.getBackgroundImage(background);
|
||||
TEST_CYCLE() d_mog2->getBackgroundImage(background);
|
||||
|
||||
GPU_SANITY_CHECK(background, 1);
|
||||
}
|
||||
@ -460,10 +480,10 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
|
||||
cv::gpu::GpuMat d_frame(frame);
|
||||
cv::gpu::GpuMat foreground;
|
||||
|
||||
cv::gpu::GMG_GPU d_gmg;
|
||||
d_gmg.maxFeatures = maxFeatures;
|
||||
cv::Ptr<cv::BackgroundSubtractorGMG> d_gmg = cv::gpu::createBackgroundSubtractorGMG();
|
||||
d_gmg->setMaxFeatures(maxFeatures);
|
||||
|
||||
d_gmg(d_frame, foreground);
|
||||
d_gmg->apply(d_frame, foreground);
|
||||
|
||||
for (int i = 0; i < 150; ++i)
|
||||
{
|
||||
@ -488,7 +508,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
|
||||
d_frame.upload(frame);
|
||||
|
||||
startTimer(); next();
|
||||
d_gmg(d_frame, foreground);
|
||||
d_gmg->apply(d_frame, foreground);
|
||||
stopTimer();
|
||||
}
|
||||
|
||||
@ -499,9 +519,8 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
|
||||
cv::Mat foreground;
|
||||
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
|
||||
gmg->set("maxFeatures", maxFeatures);
|
||||
//gmg.initialize(frame.size(), 0.0, 255.0);
|
||||
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::createBackgroundSubtractorGMG();
|
||||
gmg->setMaxFeatures(maxFeatures);
|
||||
|
||||
gmg->apply(frame, foreground);
|
||||
|
||||
|
@ -57,6 +57,8 @@
|
||||
#include "opencv2/gpubgsegm.hpp"
|
||||
#include "opencv2/video.hpp"
|
||||
|
||||
#include "opencv2/opencv_modules.hpp"
|
||||
|
||||
#ifdef GTEST_CREATE_SHARED_LIBRARY
|
||||
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
|
||||
#endif
|
||||
|
@ -53,7 +53,7 @@
|
||||
using namespace cv::gpu;
|
||||
using namespace cv::gpu::cudev;
|
||||
|
||||
namespace bgfg
|
||||
namespace fgd
|
||||
{
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// calcDiffHistogram
|
||||
|
@ -45,7 +45,7 @@
|
||||
|
||||
#include "opencv2/core/gpu_types.hpp"
|
||||
|
||||
namespace bgfg
|
||||
namespace fgd
|
||||
{
|
||||
struct BGPixelStat
|
||||
{
|
||||
|
@ -47,7 +47,7 @@
|
||||
#include "opencv2/core/cuda/limits.hpp"
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev {
|
||||
namespace bgfg_gmg
|
||||
namespace gmg
|
||||
{
|
||||
__constant__ int c_width;
|
||||
__constant__ int c_height;
|
||||
|
@ -111,14 +111,6 @@ namespace cv { namespace gpu { namespace cudev
|
||||
0.0f);
|
||||
}
|
||||
|
||||
template <class Ptr2D>
|
||||
__device__ __forceinline__ void swap(Ptr2D& ptr, int x, int y, int k, int rows)
|
||||
{
|
||||
typename Ptr2D::elem_type val = ptr(k * rows + y, x);
|
||||
ptr(k * rows + y, x) = ptr((k + 1) * rows + y, x);
|
||||
ptr((k + 1) * rows + y, x) = val;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// MOG without learning
|
||||
|
||||
@ -426,337 +418,6 @@ namespace cv { namespace gpu { namespace cudev
|
||||
|
||||
funcs[cn](weight, mean, dst, nmixtures, backgroundRatio, stream);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// MOG2
|
||||
|
||||
__constant__ int c_nmixtures;
|
||||
__constant__ float c_Tb;
|
||||
__constant__ float c_TB;
|
||||
__constant__ float c_Tg;
|
||||
__constant__ float c_varInit;
|
||||
__constant__ float c_varMin;
|
||||
__constant__ float c_varMax;
|
||||
__constant__ float c_tau;
|
||||
__constant__ unsigned char c_shadowVal;
|
||||
|
||||
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal)
|
||||
{
|
||||
varMin = ::fminf(varMin, varMax);
|
||||
varMax = ::fmaxf(varMin, varMax);
|
||||
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_nmixtures, &nmixtures, sizeof(int)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_Tb, &Tb, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_TB, &TB, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_Tg, &Tg, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_varInit, &varInit, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_varMin, &varMin, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_varMax, &varMax, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_tau, &tau, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_shadowVal, &shadowVal, sizeof(unsigned char)) );
|
||||
}
|
||||
|
||||
template <bool detectShadows, typename SrcT, typename WorkT>
|
||||
__global__ void mog2(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStepb modesUsed,
|
||||
PtrStepf gmm_weight, PtrStepf gmm_variance, PtrStep<WorkT> gmm_mean,
|
||||
const float alphaT, const float alpha1, const float prune)
|
||||
{
|
||||
const int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (x >= frame.cols || y >= frame.rows)
|
||||
return;
|
||||
|
||||
WorkT pix = cvt(frame(y, x));
|
||||
|
||||
//calculate distances to the modes (+ sort)
|
||||
//here we need to go in descending order!!!
|
||||
|
||||
bool background = false; // true - the pixel classified as background
|
||||
|
||||
//internal:
|
||||
|
||||
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
|
||||
|
||||
int nmodes = modesUsed(y, x);
|
||||
int nNewModes = nmodes; //current number of modes in GMM
|
||||
|
||||
float totalWeight = 0.0f;
|
||||
|
||||
//go through all modes
|
||||
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
{
|
||||
//need only weight if fit is found
|
||||
float weight = alpha1 * gmm_weight(mode * frame.rows + y, x) + prune;
|
||||
|
||||
//fit not found yet
|
||||
if (!fitsPDF)
|
||||
{
|
||||
//check if it belongs to some of the remaining modes
|
||||
float var = gmm_variance(mode * frame.rows + y, x);
|
||||
|
||||
WorkT mean = gmm_mean(mode * frame.rows + y, x);
|
||||
|
||||
//calculate difference and distance
|
||||
WorkT diff = mean - pix;
|
||||
float dist2 = sqr(diff);
|
||||
|
||||
//background? - Tb - usually larger than Tg
|
||||
if (totalWeight < c_TB && dist2 < c_Tb * var)
|
||||
background = true;
|
||||
|
||||
//check fit
|
||||
if (dist2 < c_Tg * var)
|
||||
{
|
||||
//belongs to the mode
|
||||
fitsPDF = true;
|
||||
|
||||
//update distribution
|
||||
|
||||
//update weight
|
||||
weight += alphaT;
|
||||
float k = alphaT / weight;
|
||||
|
||||
//update mean
|
||||
gmm_mean(mode * frame.rows + y, x) = mean - k * diff;
|
||||
|
||||
//update variance
|
||||
float varnew = var + k * (dist2 - var);
|
||||
|
||||
//limit the variance
|
||||
varnew = ::fmaxf(varnew, c_varMin);
|
||||
varnew = ::fminf(varnew, c_varMax);
|
||||
|
||||
gmm_variance(mode * frame.rows + y, x) = varnew;
|
||||
|
||||
//sort
|
||||
//all other weights are at the same place and
|
||||
//only the matched (iModes) is higher -> just find the new place for it
|
||||
|
||||
for (int i = mode; i > 0; --i)
|
||||
{
|
||||
//check one up
|
||||
if (weight < gmm_weight((i - 1) * frame.rows + y, x))
|
||||
break;
|
||||
|
||||
//swap one up
|
||||
swap(gmm_weight, x, y, i - 1, frame.rows);
|
||||
swap(gmm_variance, x, y, i - 1, frame.rows);
|
||||
swap(gmm_mean, x, y, i - 1, frame.rows);
|
||||
}
|
||||
|
||||
//belongs to the mode - bFitsPDF becomes 1
|
||||
}
|
||||
} // !fitsPDF
|
||||
|
||||
//check prune
|
||||
if (weight < -prune)
|
||||
{
|
||||
weight = 0.0;
|
||||
nmodes--;
|
||||
}
|
||||
|
||||
gmm_weight(mode * frame.rows + y, x) = weight; //update weight by the calculated value
|
||||
totalWeight += weight;
|
||||
}
|
||||
|
||||
//renormalize weights
|
||||
|
||||
totalWeight = 1.f / totalWeight;
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
gmm_weight(mode * frame.rows + y, x) *= totalWeight;
|
||||
|
||||
nmodes = nNewModes;
|
||||
|
||||
//make new mode if needed and exit
|
||||
|
||||
if (!fitsPDF)
|
||||
{
|
||||
// replace the weakest or add a new one
|
||||
int mode = nmodes == c_nmixtures ? c_nmixtures - 1 : nmodes++;
|
||||
|
||||
if (nmodes == 1)
|
||||
gmm_weight(mode * frame.rows + y, x) = 1.f;
|
||||
else
|
||||
{
|
||||
gmm_weight(mode * frame.rows + y, x) = alphaT;
|
||||
|
||||
// renormalize all other weights
|
||||
|
||||
for (int i = 0; i < nmodes - 1; ++i)
|
||||
gmm_weight(i * frame.rows + y, x) *= alpha1;
|
||||
}
|
||||
|
||||
// init
|
||||
|
||||
gmm_mean(mode * frame.rows + y, x) = pix;
|
||||
gmm_variance(mode * frame.rows + y, x) = c_varInit;
|
||||
|
||||
//sort
|
||||
//find the new place for it
|
||||
|
||||
for (int i = nmodes - 1; i > 0; --i)
|
||||
{
|
||||
// check one up
|
||||
if (alphaT < gmm_weight((i - 1) * frame.rows + y, x))
|
||||
break;
|
||||
|
||||
//swap one up
|
||||
swap(gmm_weight, x, y, i - 1, frame.rows);
|
||||
swap(gmm_variance, x, y, i - 1, frame.rows);
|
||||
swap(gmm_mean, x, y, i - 1, frame.rows);
|
||||
}
|
||||
}
|
||||
|
||||
//set the number of modes
|
||||
modesUsed(y, x) = nmodes;
|
||||
|
||||
bool isShadow = false;
|
||||
if (detectShadows && !background)
|
||||
{
|
||||
float tWeight = 0.0f;
|
||||
|
||||
// check all the components marked as background:
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
{
|
||||
WorkT mean = gmm_mean(mode * frame.rows + y, x);
|
||||
|
||||
WorkT pix_mean = pix * mean;
|
||||
|
||||
float numerator = sum(pix_mean);
|
||||
float denominator = sqr(mean);
|
||||
|
||||
// no division by zero allowed
|
||||
if (denominator == 0)
|
||||
break;
|
||||
|
||||
// if tau < a < 1 then also check the color distortion
|
||||
if (numerator <= denominator && numerator >= c_tau * denominator)
|
||||
{
|
||||
float a = numerator / denominator;
|
||||
|
||||
WorkT dD = a * mean - pix;
|
||||
|
||||
if (sqr(dD) < c_Tb * gmm_variance(mode * frame.rows + y, x) * a * a)
|
||||
{
|
||||
isShadow = true;
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
tWeight += gmm_weight(mode * frame.rows + y, x);
|
||||
if (tWeight > c_TB)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fgmask(y, x) = background ? 0 : isShadow ? c_shadowVal : 255;
|
||||
}
|
||||
|
||||
template <typename SrcT, typename WorkT>
|
||||
void mog2_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
|
||||
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
|
||||
{
|
||||
dim3 block(32, 8);
|
||||
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
|
||||
|
||||
const float alpha1 = 1.0f - alphaT;
|
||||
|
||||
if (detectShadows)
|
||||
{
|
||||
cudaSafeCall( cudaFuncSetCacheConfig(mog2<true, SrcT, WorkT>, cudaFuncCachePreferL1) );
|
||||
|
||||
mog2<true, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
|
||||
weight, variance, (PtrStepSz<WorkT>) mean,
|
||||
alphaT, alpha1, prune);
|
||||
}
|
||||
else
|
||||
{
|
||||
cudaSafeCall( cudaFuncSetCacheConfig(mog2<false, SrcT, WorkT>, cudaFuncCachePreferL1) );
|
||||
|
||||
mog2<false, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
|
||||
weight, variance, (PtrStepSz<WorkT>) mean,
|
||||
alphaT, alpha1, prune);
|
||||
}
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
|
||||
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
|
||||
{
|
||||
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[] =
|
||||
{
|
||||
0, mog2_caller<uchar, float>, 0, mog2_caller<uchar3, float3>, mog2_caller<uchar4, float4>
|
||||
};
|
||||
|
||||
funcs[cn](frame, fgmask, modesUsed, weight, variance, mean, alphaT, prune, detectShadows, stream);
|
||||
}
|
||||
|
||||
template <typename WorkT, typename OutT>
|
||||
__global__ void getBackgroundImage2(const PtrStepSzb modesUsed, const PtrStepf gmm_weight, const PtrStep<WorkT> gmm_mean, PtrStep<OutT> dst)
|
||||
{
|
||||
const int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (x >= modesUsed.cols || y >= modesUsed.rows)
|
||||
return;
|
||||
|
||||
int nmodes = modesUsed(y, x);
|
||||
|
||||
WorkT meanVal = VecTraits<WorkT>::all(0.0f);
|
||||
float totalWeight = 0.0f;
|
||||
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
{
|
||||
float weight = gmm_weight(mode * modesUsed.rows + y, x);
|
||||
|
||||
WorkT mean = gmm_mean(mode * modesUsed.rows + y, x);
|
||||
meanVal = meanVal + weight * mean;
|
||||
|
||||
totalWeight += weight;
|
||||
|
||||
if(totalWeight > c_TB)
|
||||
break;
|
||||
}
|
||||
|
||||
meanVal = meanVal * (1.f / totalWeight);
|
||||
|
||||
dst(y, x) = saturate_cast<OutT>(meanVal);
|
||||
}
|
||||
|
||||
template <typename WorkT, typename OutT>
|
||||
void getBackgroundImage2_caller(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
dim3 block(32, 8);
|
||||
dim3 grid(divUp(modesUsed.cols, block.x), divUp(modesUsed.rows, block.y));
|
||||
|
||||
cudaSafeCall( cudaFuncSetCacheConfig(getBackgroundImage2<WorkT, OutT>, cudaFuncCachePreferL1) );
|
||||
|
||||
getBackgroundImage2<WorkT, OutT><<<grid, block, 0, stream>>>(modesUsed, weight, (PtrStepSz<WorkT>) mean, (PtrStepSz<OutT>) dst);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
typedef void (*func_t)(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[] =
|
||||
{
|
||||
0, getBackgroundImage2_caller<float, uchar>, 0, getBackgroundImage2_caller<float3, uchar3>, getBackgroundImage2_caller<float4, uchar4>
|
||||
};
|
||||
|
||||
funcs[cn](modesUsed, weight, mean, dst, stream);
|
||||
}
|
||||
}
|
||||
}}}
|
||||
|
||||
|
438 modules/gpubgsegm/src/cuda/mog2.cu Normal file
@ -0,0 +1,438 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if !defined CUDA_DISABLER
|
||||
|
||||
#include "opencv2/core/cuda/common.hpp"
|
||||
#include "opencv2/core/cuda/vec_traits.hpp"
|
||||
#include "opencv2/core/cuda/vec_math.hpp"
|
||||
#include "opencv2/core/cuda/limits.hpp"
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev
|
||||
{
|
||||
namespace mog2
|
||||
{
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Utility
|
||||
|
||||
__device__ __forceinline__ float cvt(uchar val)
|
||||
{
|
||||
return val;
|
||||
}
|
||||
__device__ __forceinline__ float3 cvt(const uchar3& val)
|
||||
{
|
||||
return make_float3(val.x, val.y, val.z);
|
||||
}
|
||||
__device__ __forceinline__ float4 cvt(const uchar4& val)
|
||||
{
|
||||
return make_float4(val.x, val.y, val.z, val.w);
|
||||
}
|
||||
|
||||
__device__ __forceinline__ float sqr(float val)
|
||||
{
|
||||
return val * val;
|
||||
}
|
||||
__device__ __forceinline__ float sqr(const float3& val)
|
||||
{
|
||||
return val.x * val.x + val.y * val.y + val.z * val.z;
|
||||
}
|
||||
__device__ __forceinline__ float sqr(const float4& val)
|
||||
{
|
||||
return val.x * val.x + val.y * val.y + val.z * val.z;
|
||||
}
|
||||
|
||||
__device__ __forceinline__ float sum(float val)
|
||||
{
|
||||
return val;
|
||||
}
|
||||
__device__ __forceinline__ float sum(const float3& val)
|
||||
{
|
||||
return val.x + val.y + val.z;
|
||||
}
|
||||
__device__ __forceinline__ float sum(const float4& val)
|
||||
{
|
||||
return val.x + val.y + val.z;
|
||||
}
|
||||
|
||||
template <class Ptr2D>
|
||||
__device__ __forceinline__ void swap(Ptr2D& ptr, int x, int y, int k, int rows)
|
||||
{
|
||||
typename Ptr2D::elem_type val = ptr(k * rows + y, x);
|
||||
ptr(k * rows + y, x) = ptr((k + 1) * rows + y, x);
|
||||
ptr((k + 1) * rows + y, x) = val;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// MOG2
|
||||
|
||||
__constant__ int c_nmixtures;
|
||||
__constant__ float c_Tb;
|
||||
__constant__ float c_TB;
|
||||
__constant__ float c_Tg;
|
||||
__constant__ float c_varInit;
|
||||
__constant__ float c_varMin;
|
||||
__constant__ float c_varMax;
|
||||
__constant__ float c_tau;
|
||||
__constant__ unsigned char c_shadowVal;
|
||||
|
||||
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal)
|
||||
{
|
||||
varMin = ::fminf(varMin, varMax);
|
||||
varMax = ::fmaxf(varMin, varMax);
|
||||
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_nmixtures, &nmixtures, sizeof(int)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_Tb, &Tb, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_TB, &TB, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_Tg, &Tg, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_varInit, &varInit, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_varMin, &varMin, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_varMax, &varMax, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_tau, &tau, sizeof(float)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(c_shadowVal, &shadowVal, sizeof(unsigned char)) );
|
||||
}
|
||||
|
||||
template <bool detectShadows, typename SrcT, typename WorkT>
|
||||
__global__ void mog2(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStepb modesUsed,
|
||||
PtrStepf gmm_weight, PtrStepf gmm_variance, PtrStep<WorkT> gmm_mean,
|
||||
const float alphaT, const float alpha1, const float prune)
|
||||
{
|
||||
const int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (x >= frame.cols || y >= frame.rows)
|
||||
return;
|
||||
|
||||
WorkT pix = cvt(frame(y, x));
|
||||
|
||||
//calculate distances to the modes (+ sort)
|
||||
//here we need to go in descending order!!!
|
||||
|
||||
bool background = false; // true - the pixel classified as background
|
||||
|
||||
//internal:
|
||||
|
||||
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
|
||||
|
||||
int nmodes = modesUsed(y, x);
|
||||
int nNewModes = nmodes; //current number of modes in GMM
|
||||
|
||||
float totalWeight = 0.0f;
|
||||
|
||||
//go through all modes
|
||||
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
{
|
||||
//need only weight if fit is found
|
||||
float weight = alpha1 * gmm_weight(mode * frame.rows + y, x) + prune;
|
||||
|
||||
//fit not found yet
|
||||
if (!fitsPDF)
|
||||
{
|
||||
//check if it belongs to some of the remaining modes
|
||||
float var = gmm_variance(mode * frame.rows + y, x);
|
||||
|
||||
WorkT mean = gmm_mean(mode * frame.rows + y, x);
|
||||
|
||||
//calculate difference and distance
|
||||
WorkT diff = mean - pix;
|
||||
float dist2 = sqr(diff);
|
||||
|
||||
//background? - Tb - usually larger than Tg
|
||||
if (totalWeight < c_TB && dist2 < c_Tb * var)
|
||||
background = true;
|
||||
|
||||
//check fit
|
||||
if (dist2 < c_Tg * var)
|
||||
{
|
||||
//belongs to the mode
|
||||
fitsPDF = true;
|
||||
|
||||
//update distribution
|
||||
|
||||
//update weight
|
||||
weight += alphaT;
|
||||
float k = alphaT / weight;
|
||||
|
||||
//update mean
|
||||
gmm_mean(mode * frame.rows + y, x) = mean - k * diff;
|
||||
|
||||
//update variance
|
||||
float varnew = var + k * (dist2 - var);
|
||||
|
||||
//limit the variance
|
||||
varnew = ::fmaxf(varnew, c_varMin);
|
||||
varnew = ::fminf(varnew, c_varMax);
|
||||
|
||||
gmm_variance(mode * frame.rows + y, x) = varnew;
|
||||
|
||||
//sort
|
||||
//all other weights are at the same place and
|
||||
//only the matched (iModes) is higher -> just find the new place for it
|
||||
|
||||
for (int i = mode; i > 0; --i)
|
||||
{
|
||||
//check one up
|
||||
if (weight < gmm_weight((i - 1) * frame.rows + y, x))
|
||||
break;
|
||||
|
||||
//swap one up
|
||||
swap(gmm_weight, x, y, i - 1, frame.rows);
|
||||
swap(gmm_variance, x, y, i - 1, frame.rows);
|
||||
swap(gmm_mean, x, y, i - 1, frame.rows);
|
||||
}
|
||||
|
||||
//belongs to the mode - bFitsPDF becomes 1
|
||||
}
|
||||
} // !fitsPDF
|
||||
|
||||
//check prune
|
||||
if (weight < -prune)
|
||||
{
|
||||
weight = 0.0;
|
||||
nmodes--;
|
||||
}
|
||||
|
||||
gmm_weight(mode * frame.rows + y, x) = weight; //update weight by the calculated value
|
||||
totalWeight += weight;
|
||||
}
|
||||
|
||||
//renormalize weights
|
||||
|
||||
totalWeight = 1.f / totalWeight;
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
gmm_weight(mode * frame.rows + y, x) *= totalWeight;
|
||||
|
||||
nmodes = nNewModes;
|
||||
|
||||
//make new mode if needed and exit
|
||||
|
||||
if (!fitsPDF)
|
||||
{
|
||||
// replace the weakest or add a new one
|
||||
int mode = nmodes == c_nmixtures ? c_nmixtures - 1 : nmodes++;
|
||||
|
||||
if (nmodes == 1)
|
||||
gmm_weight(mode * frame.rows + y, x) = 1.f;
|
||||
else
|
||||
{
|
||||
gmm_weight(mode * frame.rows + y, x) = alphaT;
|
||||
|
||||
// renormalize all other weights
|
||||
|
||||
for (int i = 0; i < nmodes - 1; ++i)
|
||||
gmm_weight(i * frame.rows + y, x) *= alpha1;
|
||||
}
|
||||
|
||||
// init
|
||||
|
||||
gmm_mean(mode * frame.rows + y, x) = pix;
|
||||
gmm_variance(mode * frame.rows + y, x) = c_varInit;
|
||||
|
||||
//sort
|
||||
//find the new place for it
|
||||
|
||||
for (int i = nmodes - 1; i > 0; --i)
|
||||
{
|
||||
// check one up
|
||||
if (alphaT < gmm_weight((i - 1) * frame.rows + y, x))
|
||||
break;
|
||||
|
||||
//swap one up
|
||||
swap(gmm_weight, x, y, i - 1, frame.rows);
|
||||
swap(gmm_variance, x, y, i - 1, frame.rows);
|
||||
swap(gmm_mean, x, y, i - 1, frame.rows);
|
||||
}
|
||||
}
|
||||
|
||||
//set the number of modes
|
||||
modesUsed(y, x) = nmodes;
|
||||
|
||||
bool isShadow = false;
|
||||
if (detectShadows && !background)
|
||||
{
|
||||
float tWeight = 0.0f;
|
||||
|
||||
// check all the components marked as background:
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
{
|
||||
WorkT mean = gmm_mean(mode * frame.rows + y, x);
|
||||
|
||||
WorkT pix_mean = pix * mean;
|
||||
|
||||
float numerator = sum(pix_mean);
|
||||
float denominator = sqr(mean);
|
||||
|
||||
// no division by zero allowed
|
||||
if (denominator == 0)
|
||||
break;
|
||||
|
||||
// if tau < a < 1 then also check the color distortion
|
||||
if (numerator <= denominator && numerator >= c_tau * denominator)
|
||||
{
|
||||
float a = numerator / denominator;
|
||||
|
||||
WorkT dD = a * mean - pix;
|
||||
|
||||
if (sqr(dD) < c_Tb * gmm_variance(mode * frame.rows + y, x) * a * a)
|
||||
{
|
||||
isShadow = true;
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
tWeight += gmm_weight(mode * frame.rows + y, x);
|
||||
if (tWeight > c_TB)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fgmask(y, x) = background ? 0 : isShadow ? c_shadowVal : 255;
|
||||
}
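For readers of the kernel above: the shadow branch is the usual MOG2 colour-distortion test. With p the converted pixel, mu_k the mean of mode k and sigma_k^2 its variance, the code forms the brightness ratio a = <p, mu_k> / ||mu_k||^2 (the numerator / denominator pair) and flags the pixel as shadow when

    \tau \le a \le 1 \quad\text{and}\quad \lVert a\,\mu_k - p \rVert^2 < T_b\,\sigma_k^2\,a^2 ,

where c_tau and c_Tb play the roles of tau and T_b, and dD in the code is a*mu_k - p.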
|
||||
|
||||
template <typename SrcT, typename WorkT>
|
||||
void mog2_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
|
||||
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
|
||||
{
|
||||
dim3 block(32, 8);
|
||||
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
|
||||
|
||||
const float alpha1 = 1.0f - alphaT;
|
||||
|
||||
if (detectShadows)
|
||||
{
|
||||
cudaSafeCall( cudaFuncSetCacheConfig(mog2<true, SrcT, WorkT>, cudaFuncCachePreferL1) );
|
||||
|
||||
mog2<true, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
|
||||
weight, variance, (PtrStepSz<WorkT>) mean,
|
||||
alphaT, alpha1, prune);
|
||||
}
|
||||
else
|
||||
{
|
||||
cudaSafeCall( cudaFuncSetCacheConfig(mog2<false, SrcT, WorkT>, cudaFuncCachePreferL1) );
|
||||
|
||||
mog2<false, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, modesUsed,
|
||||
weight, variance, (PtrStepSz<WorkT>) mean,
|
||||
alphaT, alpha1, prune);
|
||||
}
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
|
||||
float alphaT, float prune, bool detectShadows, cudaStream_t stream)
|
||||
{
|
||||
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[] =
|
||||
{
|
||||
0, mog2_caller<uchar, float>, 0, mog2_caller<uchar3, float3>, mog2_caller<uchar4, float4>
|
||||
};
|
||||
|
||||
funcs[cn](frame, fgmask, modesUsed, weight, variance, mean, alphaT, prune, detectShadows, stream);
|
||||
}
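A small aside on the dispatch used by mog2_gpu above: the function table is indexed directly by the channel count, so the 0- and 2-channel slots are null and only cn = 1, 3 or 4 is valid (the host wrappers guarantee this). A stripped-down, hypothetical sketch of the same idiom:

    #include <cstdio>

    typedef void (*channel_func_t)(int cn);

    static void process1(int cn) { std::printf("1-channel path (cn=%d)\n", cn); }
    static void process3(int cn) { std::printf("3-channel path (cn=%d)\n", cn); }
    static void process4(int cn) { std::printf("4-channel path (cn=%d)\n", cn); }

    // index == channel count; unsupported counts hold null pointers
    static const channel_func_t channel_table[] = { 0, process1, 0, process3, process4 };

    void dispatch_by_channels(int cn)
    {
        channel_table[cn](cn);   // caller must pass a supported count, as mog2_gpu does
    }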
|
||||
|
||||
template <typename WorkT, typename OutT>
|
||||
__global__ void getBackgroundImage2(const PtrStepSzb modesUsed, const PtrStepf gmm_weight, const PtrStep<WorkT> gmm_mean, PtrStep<OutT> dst)
|
||||
{
|
||||
const int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
const int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (x >= modesUsed.cols || y >= modesUsed.rows)
|
||||
return;
|
||||
|
||||
int nmodes = modesUsed(y, x);
|
||||
|
||||
WorkT meanVal = VecTraits<WorkT>::all(0.0f);
|
||||
float totalWeight = 0.0f;
|
||||
|
||||
for (int mode = 0; mode < nmodes; ++mode)
|
||||
{
|
||||
float weight = gmm_weight(mode * modesUsed.rows + y, x);
|
||||
|
||||
WorkT mean = gmm_mean(mode * modesUsed.rows + y, x);
|
||||
meanVal = meanVal + weight * mean;
|
||||
|
||||
totalWeight += weight;
|
||||
|
||||
if(totalWeight > c_TB)
|
||||
break;
|
||||
}
|
||||
|
||||
meanVal = meanVal * (1.f / totalWeight);
|
||||
|
||||
dst(y, x) = saturate_cast<OutT>(meanVal);
|
||||
}
|
||||
|
||||
template <typename WorkT, typename OutT>
|
||||
void getBackgroundImage2_caller(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
dim3 block(32, 8);
|
||||
dim3 grid(divUp(modesUsed.cols, block.x), divUp(modesUsed.rows, block.y));
|
||||
|
||||
cudaSafeCall( cudaFuncSetCacheConfig(getBackgroundImage2<WorkT, OutT>, cudaFuncCachePreferL1) );
|
||||
|
||||
getBackgroundImage2<WorkT, OutT><<<grid, block, 0, stream>>>(modesUsed, weight, (PtrStepSz<WorkT>) mean, (PtrStepSz<OutT>) dst);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
typedef void (*func_t)(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[] =
|
||||
{
|
||||
0, getBackgroundImage2_caller<float, uchar>, 0, getBackgroundImage2_caller<float3, uchar3>, getBackgroundImage2_caller<float4, uchar4>
|
||||
};
|
||||
|
||||
funcs[cn](modesUsed, weight, mean, dst, stream);
|
||||
}
|
||||
}
|
||||
}}}
|
||||
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
File diff suppressed because it is too large
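One detail worth spelling out before the host-side wrappers that follow: the kernels in mog2.cu above address mode k of pixel (x, y) as (k * rows + y, x), i.e. the per-mode planes are stacked vertically in one matrix. A hypothetical host-side sketch of that layout, mirroring the weight_/variance_/mean_ allocations that appear later in this commit; the header name is an assumption for this branch.

    #include "opencv2/core/gpu.hpp"   // assumed location of cv::gpu::GpuMat on this branch

    // For an H x W frame with N mixture modes, each state matrix is (N*H) x W and
    // mode k of pixel (x, y) lives at row k*H + y, matching the kernel indexing.
    struct Mog2DeviceState
    {
        cv::gpu::GpuMat weight, variance, mean;

        void create(cv::Size frameSize, int nmixtures, int channels)
        {
            weight.create(frameSize.height * nmixtures, frameSize.width, CV_32FC1);
            variance.create(frameSize.height * nmixtures, frameSize.width, CV_32FC1);
            mean.create(frameSize.height * nmixtures, frameSize.width, CV_32FC(channels));
        }
    };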
@ -42,17 +42,17 @@
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::gpu;
|
||||
|
||||
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
|
||||
|
||||
cv::gpu::GMG_GPU::GMG_GPU() { throw_no_cuda(); }
|
||||
void cv::gpu::GMG_GPU::initialize(cv::Size, float, float) { throw_no_cuda(); }
|
||||
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, float, cv::gpu::Stream&) { throw_no_cuda(); }
|
||||
void cv::gpu::GMG_GPU::release() {}
|
||||
Ptr<gpu::BackgroundSubtractorGMG> cv::gpu::createBackgroundSubtractorGMG(int, double) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorGMG>(); }
|
||||
|
||||
#else
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev {
|
||||
namespace bgfg_gmg
|
||||
namespace gmg
|
||||
{
|
||||
void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior,
|
||||
float decisionThreshold, int maxFeatures, int numInitializationFrames);
|
||||
@ -63,103 +63,215 @@ namespace cv { namespace gpu { namespace cudev {
|
||||
}
|
||||
}}}
|
||||
|
||||
cv::gpu::GMG_GPU::GMG_GPU()
|
||||
namespace
|
||||
{
|
||||
maxFeatures = 64;
|
||||
learningRate = 0.025f;
|
||||
numInitializationFrames = 120;
|
||||
quantizationLevels = 16;
|
||||
backgroundPrior = 0.8f;
|
||||
decisionThreshold = 0.8f;
|
||||
smoothingRadius = 7;
|
||||
updateBackgroundModel = true;
|
||||
}
|
||||
|
||||
void cv::gpu::GMG_GPU::initialize(cv::Size frameSize, float min, float max)
|
||||
{
|
||||
using namespace cv::gpu::cudev::bgfg_gmg;
|
||||
|
||||
CV_Assert(min < max);
|
||||
CV_Assert(maxFeatures > 0);
|
||||
CV_Assert(learningRate >= 0.0f && learningRate <= 1.0f);
|
||||
CV_Assert(numInitializationFrames >= 1);
|
||||
CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
|
||||
CV_Assert(backgroundPrior >= 0.0f && backgroundPrior <= 1.0f);
|
||||
|
||||
minVal_ = min;
|
||||
maxVal_ = max;
|
||||
|
||||
frameSize_ = frameSize;
|
||||
|
||||
frameNum_ = 0;
|
||||
|
||||
nfeatures_.create(frameSize_, CV_32SC1);
|
||||
colors_.create(maxFeatures * frameSize_.height, frameSize_.width, CV_32SC1);
|
||||
weights_.create(maxFeatures * frameSize_.height, frameSize_.width, CV_32FC1);
|
||||
|
||||
nfeatures_.setTo(cv::Scalar::all(0));
|
||||
|
||||
if (smoothingRadius > 0)
|
||||
boxFilter_ = cv::gpu::createBoxFilter(CV_8UC1, -1, cv::Size(smoothingRadius, smoothingRadius));
|
||||
|
||||
loadConstants(frameSize_.width, frameSize_.height, minVal_, maxVal_, quantizationLevels, backgroundPrior, decisionThreshold, maxFeatures, numInitializationFrames);
|
||||
}
|
||||
|
||||
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float newLearningRate, cv::gpu::Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::cudev::bgfg_gmg;
|
||||
|
||||
typedef void (*func_t)(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
|
||||
int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
|
||||
static const func_t funcs[6][4] =
|
||||
class GMGImpl : public gpu::BackgroundSubtractorGMG
|
||||
{
|
||||
{update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
|
||||
{0,0,0,0},
|
||||
{update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
|
||||
{0,0,0,0},
|
||||
{0,0,0,0},
|
||||
{update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
|
||||
public:
|
||||
GMGImpl(int initializationFrames, double decisionThreshold);
|
||||
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream);
|
||||
|
||||
void getBackgroundImage(OutputArray backgroundImage) const;
|
||||
|
||||
int getMaxFeatures() const { return maxFeatures_; }
|
||||
void setMaxFeatures(int maxFeatures) { maxFeatures_ = maxFeatures; }
|
||||
|
||||
double getDefaultLearningRate() const { return learningRate_; }
|
||||
void setDefaultLearningRate(double lr) { learningRate_ = (float) lr; }
|
||||
|
||||
int getNumFrames() const { return numInitializationFrames_; }
|
||||
void setNumFrames(int nframes) { numInitializationFrames_ = nframes; }
|
||||
|
||||
int getQuantizationLevels() const { return quantizationLevels_; }
|
||||
void setQuantizationLevels(int nlevels) { quantizationLevels_ = nlevels; }
|
||||
|
||||
double getBackgroundPrior() const { return backgroundPrior_; }
|
||||
void setBackgroundPrior(double bgprior) { backgroundPrior_ = (float) bgprior; }
|
||||
|
||||
int getSmoothingRadius() const { return smoothingRadius_; }
|
||||
void setSmoothingRadius(int radius) { smoothingRadius_ = radius; }
|
||||
|
||||
double getDecisionThreshold() const { return decisionThreshold_; }
|
||||
void setDecisionThreshold(double thresh) { decisionThreshold_ = (float) thresh; }
|
||||
|
||||
bool getUpdateBackgroundModel() const { return updateBackgroundModel_; }
|
||||
void setUpdateBackgroundModel(bool update) { updateBackgroundModel_ = update; }
|
||||
|
||||
double getMinVal() const { return minVal_; }
|
||||
void setMinVal(double val) { minVal_ = (float) val; }
|
||||
|
||||
double getMaxVal() const { return maxVal_; }
|
||||
void setMaxVal(double val) { maxVal_ = (float) val; }
|
||||
|
||||
private:
|
||||
void initialize(Size frameSize, float min, float max);
|
||||
|
||||
//! Total number of distinct colors to maintain in histogram.
|
||||
int maxFeatures_;
|
||||
|
||||
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
|
||||
float learningRate_;
|
||||
|
||||
//! Number of frames of video to use to initialize histograms.
|
||||
int numInitializationFrames_;
|
||||
|
||||
//! Number of discrete levels in each channel to be used in histograms.
|
||||
int quantizationLevels_;
|
||||
|
||||
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
|
||||
float backgroundPrior_;
|
||||
|
||||
//! Smoothing radius, in pixels, for cleaning up FG image.
|
||||
int smoothingRadius_;
|
||||
|
||||
//! Value above which pixel is determined to be FG.
|
||||
float decisionThreshold_;
|
||||
|
||||
//! Perform background model update.
|
||||
bool updateBackgroundModel_;
|
||||
|
||||
float minVal_, maxVal_;
|
||||
|
||||
Size frameSize_;
|
||||
int frameNum_;
|
||||
|
||||
GpuMat nfeatures_;
|
||||
GpuMat colors_;
|
||||
GpuMat weights_;
|
||||
|
||||
#if defined(HAVE_OPENCV_GPUFILTERS) && defined(HAVE_OPENCV_GPUARITHM)
|
||||
Ptr<gpu::Filter> boxFilter_;
|
||||
GpuMat buf_;
|
||||
#endif
|
||||
};
|
||||
|
||||
CV_Assert(frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F);
|
||||
CV_Assert(frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4);
|
||||
|
||||
if (newLearningRate != -1.0f)
|
||||
GMGImpl::GMGImpl(int initializationFrames, double decisionThreshold)
|
||||
{
|
||||
CV_Assert(newLearningRate >= 0.0f && newLearningRate <= 1.0f);
|
||||
learningRate = newLearningRate;
|
||||
maxFeatures_ = 64;
|
||||
learningRate_ = 0.025f;
|
||||
numInitializationFrames_ = initializationFrames;
|
||||
quantizationLevels_ = 16;
|
||||
backgroundPrior_ = 0.8f;
|
||||
decisionThreshold_ = (float) decisionThreshold;
|
||||
smoothingRadius_ = 7;
|
||||
updateBackgroundModel_ = true;
|
||||
minVal_ = maxVal_ = 0;
|
||||
}
|
||||
|
||||
if (frame.size() != frameSize_)
|
||||
initialize(frame.size(), 0.0f, frame.depth() == CV_8U ? 255.0f : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0f);
|
||||
|
||||
fgmask.create(frameSize_, CV_8UC1);
|
||||
fgmask.setTo(cv::Scalar::all(0), stream);
|
||||
|
||||
funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_, learningRate, updateBackgroundModel, cv::gpu::StreamAccessor::getStream(stream));
|
||||
|
||||
// medianBlur
|
||||
if (smoothingRadius > 0)
|
||||
void GMGImpl::apply(InputArray image, OutputArray fgmask, double learningRate)
|
||||
{
|
||||
boxFilter_->apply(fgmask, buf_, stream);
|
||||
int minCount = (smoothingRadius * smoothingRadius + 1) / 2;
|
||||
double thresh = 255.0 * minCount / (smoothingRadius * smoothingRadius);
|
||||
cv::gpu::threshold(buf_, fgmask, thresh, 255.0, cv::THRESH_BINARY, stream);
|
||||
apply(image, fgmask, learningRate, Stream::Null());
|
||||
}
|
||||
|
||||
// keep track of how many frames we have processed
|
||||
++frameNum_;
|
||||
void GMGImpl::apply(InputArray _frame, OutputArray _fgmask, double newLearningRate, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::cudev::gmg;
|
||||
|
||||
typedef void (*func_t)(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
|
||||
int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
|
||||
static const func_t funcs[6][4] =
|
||||
{
|
||||
{update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
|
||||
{0,0,0,0},
|
||||
{update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
|
||||
{0,0,0,0},
|
||||
{0,0,0,0},
|
||||
{update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
|
||||
};
|
||||
|
||||
GpuMat frame = _frame.getGpuMat();
|
||||
|
||||
CV_Assert( frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F );
|
||||
CV_Assert( frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4 );
|
||||
|
||||
if (newLearningRate != -1.0)
|
||||
{
|
||||
CV_Assert( newLearningRate >= 0.0 && newLearningRate <= 1.0 );
|
||||
learningRate_ = (float) newLearningRate;
|
||||
}
|
||||
|
||||
if (frame.size() != frameSize_)
|
||||
{
|
||||
double minVal = minVal_;
|
||||
double maxVal = maxVal_;
|
||||
|
||||
if (minVal_ == 0 && maxVal_ == 0)
|
||||
{
|
||||
minVal = 0;
|
||||
maxVal = frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0;
|
||||
}
|
||||
|
||||
initialize(frame.size(), (float) minVal, (float) maxVal);
|
||||
}
|
||||
|
||||
_fgmask.create(frameSize_, CV_8UC1);
|
||||
GpuMat fgmask = _fgmask.getGpuMat();
|
||||
|
||||
fgmask.setTo(Scalar::all(0), stream);
|
||||
|
||||
funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_,
|
||||
learningRate_, updateBackgroundModel_, StreamAccessor::getStream(stream));
|
||||
|
||||
#if defined(HAVE_OPENCV_GPUFILTERS) && defined(HAVE_OPENCV_GPUARITHM)
|
||||
// medianBlur
|
||||
if (smoothingRadius_ > 0)
|
||||
{
|
||||
boxFilter_->apply(fgmask, buf_, stream);
|
||||
const int minCount = (smoothingRadius_ * smoothingRadius_ + 1) / 2;
|
||||
const double thresh = 255.0 * minCount / (smoothingRadius_ * smoothingRadius_);
|
||||
gpu::threshold(buf_, fgmask, thresh, 255.0, THRESH_BINARY, stream);
|
||||
}
|
||||
#endif
|
||||
|
||||
// keep track of how many frames we have processed
|
||||
++frameNum_;
|
||||
}
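A note on the smoothing block inside GMGImpl::apply above (the step the code labels "// medianBlur"): box-filtering the 0/255 mask with an r x r kernel and then thresholding approximates a majority filter. With r = smoothingRadius_,

    \text{minCount} = \frac{r^2 + 1}{2}, \qquad \text{thresh} = 255\cdot\frac{\text{minCount}}{r^2},

so after THRESH_BINARY a pixel stays foreground only when more than roughly half of its r x r neighbourhood was foreground.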
|
||||
|
||||
void GMGImpl::getBackgroundImage(OutputArray backgroundImage) const
|
||||
{
|
||||
(void) backgroundImage;
|
||||
CV_Error(Error::StsNotImplemented, "Not implemented");
|
||||
}
|
||||
|
||||
void GMGImpl::initialize(Size frameSize, float min, float max)
|
||||
{
|
||||
using namespace cv::gpu::cudev::gmg;
|
||||
|
||||
CV_Assert( maxFeatures_ > 0 );
|
||||
CV_Assert( learningRate_ >= 0.0f && learningRate_ <= 1.0f);
|
||||
CV_Assert( numInitializationFrames_ >= 1);
|
||||
CV_Assert( quantizationLevels_ >= 1 && quantizationLevels_ <= 255);
|
||||
CV_Assert( backgroundPrior_ >= 0.0f && backgroundPrior_ <= 1.0f);
|
||||
|
||||
minVal_ = min;
|
||||
maxVal_ = max;
|
||||
CV_Assert( minVal_ < maxVal_ );
|
||||
|
||||
frameSize_ = frameSize;
|
||||
|
||||
frameNum_ = 0;
|
||||
|
||||
nfeatures_.create(frameSize_, CV_32SC1);
|
||||
colors_.create(maxFeatures_ * frameSize_.height, frameSize_.width, CV_32SC1);
|
||||
weights_.create(maxFeatures_ * frameSize_.height, frameSize_.width, CV_32FC1);
|
||||
|
||||
nfeatures_.setTo(Scalar::all(0));
|
||||
|
||||
#if defined(HAVE_OPENCV_GPUFILTERS) && defined(HAVE_OPENCV_GPUARITHM)
|
||||
if (smoothingRadius_ > 0)
|
||||
boxFilter_ = gpu::createBoxFilter(CV_8UC1, -1, Size(smoothingRadius_, smoothingRadius_));
|
||||
#endif
|
||||
|
||||
loadConstants(frameSize_.width, frameSize_.height, minVal_, maxVal_,
|
||||
quantizationLevels_, backgroundPrior_, decisionThreshold_, maxFeatures_, numInitializationFrames_);
|
||||
}
|
||||
}
|
||||
|
||||
void cv::gpu::GMG_GPU::release()
|
||||
Ptr<gpu::BackgroundSubtractorGMG> cv::gpu::createBackgroundSubtractorGMG(int initializationFrames, double decisionThreshold)
|
||||
{
|
||||
frameSize_ = Size();
|
||||
|
||||
nfeatures_.release();
|
||||
colors_.release();
|
||||
weights_.release();
|
||||
boxFilter_.release();
|
||||
buf_.release();
|
||||
return new GMGImpl(initializationFrames, decisionThreshold);
|
||||
}
|
||||
|
||||
#endif
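Illustrative only (not part of the patch): driving the GMGImpl wrapper defined above through the public factory, assuming a CUDA-enabled build. The constructor arguments shown match the old GMG_GPU defaults listed earlier in this file (120 initialization frames, decision threshold 0.8).

    #include "opencv2/gpubgsegm.hpp"

    void gmg_usage_sketch(const cv::Mat& frame)
    {
        cv::Ptr<cv::gpu::BackgroundSubtractorGMG> d_gmg =
            cv::gpu::createBackgroundSubtractorGMG(120, 0.8);   // initializationFrames, decisionThreshold
        d_gmg->setMaxFeatures(64);

        cv::gpu::GpuMat d_frame(frame), d_foreground;
        d_gmg->apply(d_frame, d_foreground);   // replaces the old d_gmg(d_frame, foreground) call
    }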
|
||||
|
@ -42,19 +42,12 @@
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::gpu;
|
||||
|
||||
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
|
||||
|
||||
cv::gpu::MOG_GPU::MOG_GPU(int) { throw_no_cuda(); }
|
||||
void cv::gpu::MOG_GPU::initialize(cv::Size, int) { throw_no_cuda(); }
|
||||
void cv::gpu::MOG_GPU::operator()(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, float, Stream&) { throw_no_cuda(); }
|
||||
void cv::gpu::MOG_GPU::getBackgroundImage(GpuMat&, Stream&) const { throw_no_cuda(); }
|
||||
void cv::gpu::MOG_GPU::release() {}
|
||||
|
||||
cv::gpu::MOG2_GPU::MOG2_GPU(int) { throw_no_cuda(); }
|
||||
void cv::gpu::MOG2_GPU::initialize(cv::Size, int) { throw_no_cuda(); }
|
||||
void cv::gpu::MOG2_GPU::operator()(const GpuMat&, GpuMat&, float, Stream&) { throw_no_cuda(); }
|
||||
void cv::gpu::MOG2_GPU::getBackgroundImage(GpuMat&, Stream&) const { throw_no_cuda(); }
|
||||
void cv::gpu::MOG2_GPU::release() {}
|
||||
Ptr<gpu::BackgroundSubtractorMOG> cv::gpu::createBackgroundSubtractorMOG(int, int, double, double) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorMOG>(); }
|
||||
|
||||
#else
|
||||
|
||||
@ -66,14 +59,10 @@ namespace cv { namespace gpu { namespace cudev
|
||||
int nmixtures, float varThreshold, float learningRate, float backgroundRatio, float noiseSigma,
|
||||
cudaStream_t stream);
|
||||
void getBackgroundImage_gpu(int cn, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, int nmixtures, float backgroundRatio, cudaStream_t stream);
|
||||
|
||||
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal);
|
||||
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
|
||||
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
|
||||
}
|
||||
}}}
|
||||
|
||||
namespace mog
|
||||
namespace
|
||||
{
|
||||
const int defaultNMixtures = 5;
|
||||
const int defaultHistory = 200;
|
||||
@ -81,199 +70,140 @@ namespace mog
|
||||
const float defaultVarThreshold = 2.5f * 2.5f;
|
||||
const float defaultNoiseSigma = 30.0f * 0.5f;
|
||||
const float defaultInitialWeight = 0.05f;
|
||||
|
||||
class MOGImpl : public gpu::BackgroundSubtractorMOG
|
||||
{
|
||||
public:
|
||||
MOGImpl(int history, int nmixtures, double backgroundRatio, double noiseSigma);
|
||||
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream);
|
||||
|
||||
void getBackgroundImage(OutputArray backgroundImage) const;
|
||||
void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const;
|
||||
|
||||
int getHistory() const { return history_; }
|
||||
void setHistory(int nframes) { history_ = nframes; }
|
||||
|
||||
int getNMixtures() const { return nmixtures_; }
|
||||
void setNMixtures(int nmix) { nmixtures_ = nmix; }
|
||||
|
||||
double getBackgroundRatio() const { return backgroundRatio_; }
|
||||
void setBackgroundRatio(double backgroundRatio) { backgroundRatio_ = (float) backgroundRatio; }
|
||||
|
||||
double getNoiseSigma() const { return noiseSigma_; }
|
||||
void setNoiseSigma(double noiseSigma) { noiseSigma_ = (float) noiseSigma; }
|
||||
|
||||
private:
|
||||
//! re-initiaization method
|
||||
void initialize(Size frameSize, int frameType);
|
||||
|
||||
int history_;
|
||||
int nmixtures_;
|
||||
float backgroundRatio_;
|
||||
float noiseSigma_;
|
||||
|
||||
float varThreshold_;
|
||||
|
||||
Size frameSize_;
|
||||
int frameType_;
|
||||
int nframes_;
|
||||
|
||||
GpuMat weight_;
|
||||
GpuMat sortKey_;
|
||||
GpuMat mean_;
|
||||
GpuMat var_;
|
||||
};
|
||||
|
||||
MOGImpl::MOGImpl(int history, int nmixtures, double backgroundRatio, double noiseSigma) :
|
||||
frameSize_(0, 0), frameType_(0), nframes_(0)
|
||||
{
|
||||
history_ = history > 0 ? history : defaultHistory;
|
||||
nmixtures_ = std::min(nmixtures > 0 ? nmixtures : defaultNMixtures, 8);
|
||||
backgroundRatio_ = backgroundRatio > 0 ? (float) backgroundRatio : defaultBackgroundRatio;
|
||||
noiseSigma_ = noiseSigma > 0 ? (float) noiseSigma : defaultNoiseSigma;
|
||||
|
||||
varThreshold_ = defaultVarThreshold;
|
||||
}
|
||||
|
||||
void MOGImpl::apply(InputArray image, OutputArray fgmask, double learningRate)
|
||||
{
|
||||
apply(image, fgmask, learningRate, Stream::Null());
|
||||
}
|
||||
|
||||
void MOGImpl::apply(InputArray _frame, OutputArray _fgmask, double learningRate, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
GpuMat frame = _frame.getGpuMat();
|
||||
|
||||
CV_Assert( frame.depth() == CV_8U );
|
||||
|
||||
int ch = frame.channels();
|
||||
int work_ch = ch;
|
||||
|
||||
if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
|
||||
initialize(frame.size(), frame.type());
|
||||
|
||||
_fgmask.create(frameSize_, CV_8UC1);
|
||||
GpuMat fgmask = _fgmask.getGpuMat();
|
||||
|
||||
++nframes_;
|
||||
learningRate = learningRate >= 0 && nframes_ > 1 ? learningRate : 1.0 / std::min(nframes_, history_);
|
||||
CV_Assert( learningRate >= 0 );
|
||||
|
||||
mog_gpu(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_,
|
||||
varThreshold_, (float) learningRate, backgroundRatio_, noiseSigma_,
|
||||
StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void MOGImpl::getBackgroundImage(OutputArray backgroundImage) const
|
||||
{
|
||||
getBackgroundImage(backgroundImage, Stream::Null());
|
||||
}
|
||||
|
||||
void MOGImpl::getBackgroundImage(OutputArray _backgroundImage, Stream& stream) const
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
_backgroundImage.create(frameSize_, frameType_);
|
||||
GpuMat backgroundImage = _backgroundImage.getGpuMat();
|
||||
|
||||
getBackgroundImage_gpu(backgroundImage.channels(), weight_, mean_, backgroundImage, nmixtures_, backgroundRatio_, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void MOGImpl::initialize(Size frameSize, int frameType)
|
||||
{
|
||||
CV_Assert( frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4 );
|
||||
|
||||
frameSize_ = frameSize;
|
||||
frameType_ = frameType;
|
||||
|
||||
int ch = CV_MAT_CN(frameType);
|
||||
int work_ch = ch;
|
||||
|
||||
// for each gaussian mixture of each pixel bg model we store
|
||||
// the mixture sort key (w/sum_of_variances), the mixture weight (w),
|
||||
// the mean (nchannels values) and
|
||||
// the diagonal covariance matrix (another nchannels values)
|
||||
|
||||
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
sortKey_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
|
||||
var_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
|
||||
|
||||
weight_.setTo(cv::Scalar::all(0));
|
||||
sortKey_.setTo(cv::Scalar::all(0));
|
||||
mean_.setTo(cv::Scalar::all(0));
|
||||
var_.setTo(cv::Scalar::all(0));
|
||||
|
||||
nframes_ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
cv::gpu::MOG_GPU::MOG_GPU(int nmixtures) :
|
||||
frameSize_(0, 0), frameType_(0), nframes_(0)
|
||||
Ptr<gpu::BackgroundSubtractorMOG> cv::gpu::createBackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma)
|
||||
{
|
||||
nmixtures_ = std::min(nmixtures > 0 ? nmixtures : mog::defaultNMixtures, 8);
|
||||
history = mog::defaultHistory;
|
||||
varThreshold = mog::defaultVarThreshold;
|
||||
backgroundRatio = mog::defaultBackgroundRatio;
|
||||
noiseSigma = mog::defaultNoiseSigma;
|
||||
}
|
||||
|
||||
void cv::gpu::MOG_GPU::initialize(cv::Size frameSize, int frameType)
|
||||
{
|
||||
CV_Assert(frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4);
|
||||
|
||||
frameSize_ = frameSize;
|
||||
frameType_ = frameType;
|
||||
|
||||
int ch = CV_MAT_CN(frameType);
|
||||
int work_ch = ch;
|
||||
|
||||
// for each gaussian mixture of each pixel bg model we store
|
||||
// the mixture sort key (w/sum_of_variances), the mixture weight (w),
|
||||
// the mean (nchannels values) and
|
||||
// the diagonal covariance matrix (another nchannels values)
|
||||
|
||||
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
sortKey_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
|
||||
var_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
|
||||
|
||||
weight_.setTo(cv::Scalar::all(0));
|
||||
sortKey_.setTo(cv::Scalar::all(0));
|
||||
mean_.setTo(cv::Scalar::all(0));
|
||||
var_.setTo(cv::Scalar::all(0));
|
||||
|
||||
nframes_ = 0;
|
||||
}
|
||||
|
||||
void cv::gpu::MOG_GPU::operator()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float learningRate, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
CV_Assert(frame.depth() == CV_8U);
|
||||
|
||||
int ch = frame.channels();
|
||||
int work_ch = ch;
|
||||
|
||||
if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
|
||||
initialize(frame.size(), frame.type());
|
||||
|
||||
fgmask.create(frameSize_, CV_8UC1);
|
||||
|
||||
++nframes_;
|
||||
learningRate = learningRate >= 0.0f && nframes_ > 1 ? learningRate : 1.0f / std::min(nframes_, history);
|
||||
CV_Assert(learningRate >= 0.0f);
|
||||
|
||||
mog_gpu(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_,
|
||||
varThreshold, learningRate, backgroundRatio, noiseSigma,
|
||||
StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void cv::gpu::MOG_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream) const
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
backgroundImage.create(frameSize_, frameType_);
|
||||
|
||||
getBackgroundImage_gpu(backgroundImage.channels(), weight_, mean_, backgroundImage, nmixtures_, backgroundRatio, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void cv::gpu::MOG_GPU::release()
|
||||
{
|
||||
frameSize_ = Size(0, 0);
|
||||
frameType_ = 0;
|
||||
nframes_ = 0;
|
||||
|
||||
weight_.release();
|
||||
sortKey_.release();
|
||||
mean_.release();
|
||||
var_.release();
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// MOG2
|
||||
|
||||
namespace mog2
|
||||
{
|
||||
// default parameters of gaussian background detection algorithm
|
||||
const int defaultHistory = 500; // Learning rate; alpha = 1/defaultHistory2
|
||||
const float defaultVarThreshold = 4.0f * 4.0f;
|
||||
const int defaultNMixtures = 5; // maximal number of Gaussians in mixture
|
||||
const float defaultBackgroundRatio = 0.9f; // threshold sum of weights for background test
|
||||
const float defaultVarThresholdGen = 3.0f * 3.0f;
|
||||
const float defaultVarInit = 15.0f; // initial variance for new components
|
||||
const float defaultVarMax = 5.0f * defaultVarInit;
|
||||
const float defaultVarMin = 4.0f;
|
||||
|
||||
// additional parameters
|
||||
const float defaultfCT = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components
|
||||
const unsigned char defaultnShadowDetection = 127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
|
||||
const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
|
||||
}
|
||||
|
||||
cv::gpu::MOG2_GPU::MOG2_GPU(int nmixtures) :
|
||||
frameSize_(0, 0), frameType_(0), nframes_(0)
|
||||
{
|
||||
nmixtures_ = nmixtures > 0 ? nmixtures : mog2::defaultNMixtures;
|
||||
|
||||
history = mog2::defaultHistory;
|
||||
varThreshold = mog2::defaultVarThreshold;
|
||||
bShadowDetection = true;
|
||||
|
||||
backgroundRatio = mog2::defaultBackgroundRatio;
|
||||
fVarInit = mog2::defaultVarInit;
|
||||
fVarMax = mog2::defaultVarMax;
|
||||
fVarMin = mog2::defaultVarMin;
|
||||
|
||||
varThresholdGen = mog2::defaultVarThresholdGen;
|
||||
fCT = mog2::defaultfCT;
|
||||
nShadowDetection = mog2::defaultnShadowDetection;
|
||||
fTau = mog2::defaultfTau;
|
||||
}
|
||||
|
||||
void cv::gpu::MOG2_GPU::initialize(cv::Size frameSize, int frameType)
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
CV_Assert(frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4);
|
||||
|
||||
frameSize_ = frameSize;
|
||||
frameType_ = frameType;
|
||||
nframes_ = 0;
|
||||
|
||||
int ch = CV_MAT_CN(frameType);
|
||||
int work_ch = ch;
|
||||
|
||||
// for each gaussian mixture of each pixel bg model we store ...
|
||||
// the mixture weight (w),
|
||||
// the mean (nchannels values) and
|
||||
// the covariance
|
||||
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
variance_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
|
||||
|
||||
//make the array for keeping track of the used modes per pixel - all zeros at start
|
||||
bgmodelUsedModes_.create(frameSize_, CV_8UC1);
|
||||
bgmodelUsedModes_.setTo(cv::Scalar::all(0));
|
||||
|
||||
loadConstants(nmixtures_, varThreshold, backgroundRatio, varThresholdGen, fVarInit, fVarMin, fVarMax, fTau, nShadowDetection);
|
||||
}
|
||||
|
||||
void cv::gpu::MOG2_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
int ch = frame.channels();
|
||||
int work_ch = ch;
|
||||
|
||||
if (nframes_ == 0 || learningRate >= 1.0f || frame.size() != frameSize_ || work_ch != mean_.channels())
|
||||
initialize(frame.size(), frame.type());
|
||||
|
||||
fgmask.create(frameSize_, CV_8UC1);
|
||||
fgmask.setTo(cv::Scalar::all(0));
|
||||
|
||||
++nframes_;
|
||||
learningRate = learningRate >= 0.0f && nframes_ > 1 ? learningRate : 1.0f / std::min(2 * nframes_, history);
|
||||
CV_Assert(learningRate >= 0.0f);
|
||||
|
||||
mog2_gpu(frame, frame.channels(), fgmask, bgmodelUsedModes_, weight_, variance_, mean_, learningRate, -learningRate * fCT, bShadowDetection, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void cv::gpu::MOG2_GPU::getBackgroundImage(GpuMat& backgroundImage, Stream& stream) const
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog;
|
||||
|
||||
backgroundImage.create(frameSize_, frameType_);
|
||||
|
||||
getBackgroundImage2_gpu(backgroundImage.channels(), bgmodelUsedModes_, weight_, mean_, backgroundImage, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void cv::gpu::MOG2_GPU::release()
|
||||
{
|
||||
frameSize_ = Size(0, 0);
|
||||
frameType_ = 0;
|
||||
nframes_ = 0;
|
||||
|
||||
weight_.release();
|
||||
variance_.release();
|
||||
mean_.release();
|
||||
|
||||
bgmodelUsedModes_.release();
|
||||
return new MOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
|
||||
}
|
||||
|
||||
#endif
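Illustrative only (not part of the patch): driving the MOGImpl wrapper above through the new factory, assuming a CUDA-enabled build. As the MOGImpl constructor shows, non-positive arguments fall back to the defaults from the anonymous namespace (history, nmixtures, backgroundRatio, noiseSigma).

    #include "opencv2/gpubgsegm.hpp"

    void mog_usage_sketch(const cv::Mat& frame)
    {
        cv::Ptr<cv::gpu::BackgroundSubtractorMOG> d_mog =
            cv::gpu::createBackgroundSubtractorMOG(0, 0, 0.0, 0.0);   // <= 0 means "use the defaults"

        cv::gpu::GpuMat d_frame(frame), d_foreground, d_background;
        d_mog->apply(d_frame, d_foreground);
        d_mog->getBackgroundImage(d_background);
    }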
|
||||
|
253 modules/gpubgsegm/src/mog2.cpp Normal file
@ -0,0 +1,253 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::gpu;
|
||||
|
||||
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
|
||||
|
||||
Ptr<gpu::BackgroundSubtractorMOG2> cv::gpu::createBackgroundSubtractorMOG2(int, double, bool) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorMOG2>(); }
|
||||
|
||||
#else
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev
|
||||
{
|
||||
namespace mog2
|
||||
{
|
||||
void loadConstants(int nmixtures, float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal);
|
||||
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, cudaStream_t stream);
|
||||
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, cudaStream_t stream);
|
||||
}
|
||||
}}}
|
||||
|
||||
namespace
|
||||
{
|
||||
// default parameters of gaussian background detection algorithm
|
||||
const int defaultHistory = 500; // Learning rate; alpha = 1/defaultHistory2
|
||||
const float defaultVarThreshold = 4.0f * 4.0f;
|
||||
const int defaultNMixtures = 5; // maximal number of Gaussians in mixture
|
||||
const float defaultBackgroundRatio = 0.9f; // threshold sum of weights for background test
|
||||
const float defaultVarThresholdGen = 3.0f * 3.0f;
|
||||
const float defaultVarInit = 15.0f; // initial variance for new components
|
||||
const float defaultVarMax = 5.0f * defaultVarInit;
|
||||
const float defaultVarMin = 4.0f;
|
||||
|
||||
// additional parameters
|
||||
const float defaultCT = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components
|
||||
const unsigned char defaultShadowValue = 127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
|
||||
const float defaultShadowThreshold = 0.5f; // Tau - shadow threshold, see the paper for explanation
|
||||
|
||||
class MOG2Impl : public gpu::BackgroundSubtractorMOG2
|
||||
{
|
||||
public:
|
||||
MOG2Impl(int history, double varThreshold, bool detectShadows);
|
||||
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream);
|
||||
|
||||
void getBackgroundImage(OutputArray backgroundImage) const;
|
||||
void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const;
|
||||
|
||||
int getHistory() const { return history_; }
|
||||
void setHistory(int history) { history_ = history; }
|
||||
|
||||
int getNMixtures() const { return nmixtures_; }
|
||||
void setNMixtures(int nmixtures) { nmixtures_ = nmixtures; }
|
||||
|
||||
double getBackgroundRatio() const { return backgroundRatio_; }
|
||||
void setBackgroundRatio(double ratio) { backgroundRatio_ = (float) ratio; }
|
||||
|
||||
double getVarThreshold() const { return varThreshold_; }
|
||||
void setVarThreshold(double varThreshold) { varThreshold_ = (float) varThreshold; }
|
||||
|
||||
double getVarThresholdGen() const { return varThresholdGen_; }
|
||||
void setVarThresholdGen(double varThresholdGen) { varThresholdGen_ = (float) varThresholdGen; }
|
||||
|
||||
double getVarInit() const { return varInit_; }
|
||||
void setVarInit(double varInit) { varInit_ = (float) varInit; }
|
||||
|
||||
double getVarMin() const { return varMin_; }
|
||||
void setVarMin(double varMin) { varMin_ = (float) varMin; }
|
||||
|
||||
double getVarMax() const { return varMax_; }
|
||||
void setVarMax(double varMax) { varMax_ = (float) varMax; }
|
||||
|
||||
double getComplexityReductionThreshold() const { return ct_; }
|
||||
void setComplexityReductionThreshold(double ct) { ct_ = (float) ct; }
|
||||
|
||||
bool getDetectShadows() const { return detectShadows_; }
|
||||
void setDetectShadows(bool detectShadows) { detectShadows_ = detectShadows; }
|
||||
|
||||
int getShadowValue() const { return shadowValue_; }
|
||||
void setShadowValue(int value) { shadowValue_ = (uchar) value; }
|
||||
|
||||
double getShadowThreshold() const { return shadowThreshold_; }
|
||||
void setShadowThreshold(double threshold) { shadowThreshold_ = (float) threshold; }
|
||||
|
||||
private:
|
||||
void initialize(Size frameSize, int frameType);
|
||||
|
||||
int history_;
|
||||
int nmixtures_;
|
||||
float backgroundRatio_;
|
||||
float varThreshold_;
|
||||
float varThresholdGen_;
|
||||
float varInit_;
|
||||
float varMin_;
|
||||
float varMax_;
|
||||
float ct_;
|
||||
bool detectShadows_;
|
||||
uchar shadowValue_;
|
||||
float shadowThreshold_;
|
||||
|
||||
Size frameSize_;
|
||||
int frameType_;
|
||||
int nframes_;
|
||||
|
||||
GpuMat weight_;
|
||||
GpuMat variance_;
|
||||
GpuMat mean_;
|
||||
|
||||
//keep track of number of modes per pixel
|
||||
GpuMat bgmodelUsedModes_;
|
||||
};
|
||||
|
||||
MOG2Impl::MOG2Impl(int history, double varThreshold, bool detectShadows) :
|
||||
frameSize_(0, 0), frameType_(0), nframes_(0)
|
||||
{
|
||||
history_ = history > 0 ? history : defaultHistory;
|
||||
varThreshold_ = varThreshold > 0 ? (float) varThreshold : defaultVarThreshold;
|
||||
detectShadows_ = detectShadows;
|
||||
|
||||
nmixtures_ = defaultNMixtures;
|
||||
backgroundRatio_ = defaultBackgroundRatio;
|
||||
varInit_ = defaultVarInit;
|
||||
varMax_ = defaultVarMax;
|
||||
varMin_ = defaultVarMin;
|
||||
varThresholdGen_ = defaultVarThresholdGen;
|
||||
ct_ = defaultCT;
|
||||
shadowValue_ = defaultShadowValue;
|
||||
shadowThreshold_ = defaultShadowThreshold;
|
||||
}
|
||||
|
||||
void MOG2Impl::apply(InputArray image, OutputArray fgmask, double learningRate)
|
||||
{
|
||||
apply(image, fgmask, learningRate, Stream::Null());
|
||||
}
|
||||
|
||||
void MOG2Impl::apply(InputArray _frame, OutputArray _fgmask, double learningRate, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog2;
|
||||
|
||||
GpuMat frame = _frame.getGpuMat();
|
||||
|
||||
int ch = frame.channels();
|
||||
int work_ch = ch;
|
||||
|
||||
if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
|
||||
initialize(frame.size(), frame.type());
|
||||
|
||||
_fgmask.create(frameSize_, CV_8UC1);
|
||||
GpuMat fgmask = _fgmask.getGpuMat();
|
||||
|
||||
fgmask.setTo(Scalar::all(0), stream);
|
||||
|
||||
++nframes_;
|
||||
learningRate = learningRate >= 0 && nframes_ > 1 ? learningRate : 1.0 / std::min(2 * nframes_, history_);
|
||||
CV_Assert( learningRate >= 0 );
|
||||
|
||||
mog2_gpu(frame, frame.channels(), fgmask, bgmodelUsedModes_, weight_, variance_, mean_,
|
||||
(float) learningRate, static_cast<float>(-learningRate * ct_), detectShadows_, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void MOG2Impl::getBackgroundImage(OutputArray backgroundImage) const
|
||||
{
|
||||
getBackgroundImage(backgroundImage, Stream::Null());
|
||||
}
|
||||
|
||||
void MOG2Impl::getBackgroundImage(OutputArray _backgroundImage, Stream& stream) const
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog2;
|
||||
|
||||
_backgroundImage.create(frameSize_, frameType_);
|
||||
GpuMat backgroundImage = _backgroundImage.getGpuMat();
|
||||
|
||||
getBackgroundImage2_gpu(backgroundImage.channels(), bgmodelUsedModes_, weight_, mean_, backgroundImage, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
void MOG2Impl::initialize(cv::Size frameSize, int frameType)
|
||||
{
|
||||
using namespace cv::gpu::cudev::mog2;
|
||||
|
||||
CV_Assert( frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4 );
|
||||
|
||||
frameSize_ = frameSize;
|
||||
frameType_ = frameType;
|
||||
nframes_ = 0;
|
||||
|
||||
int ch = CV_MAT_CN(frameType);
|
||||
int work_ch = ch;
|
||||
|
||||
// for each gaussian mixture of each pixel bg model we store ...
|
||||
// the mixture weight (w),
|
||||
// the mean (nchannels values) and
|
||||
// the covariance
|
||||
weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
variance_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1);
|
||||
mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch));
|
||||
|
||||
//make the array for keeping track of the used modes per pixel - all zeros at start
|
||||
bgmodelUsedModes_.create(frameSize_, CV_8UC1);
|
||||
bgmodelUsedModes_.setTo(Scalar::all(0));
|
||||
|
||||
loadConstants(nmixtures_, varThreshold_, backgroundRatio_, varThresholdGen_, varInit_, varMin_, varMax_, shadowThreshold_, shadowValue_);
|
||||
}
|
||||
}
|
||||
|
||||
Ptr<gpu::BackgroundSubtractorMOG2> cv::gpu::createBackgroundSubtractorMOG2(int history, double varThreshold, bool detectShadows)
|
||||
{
|
||||
return new MOG2Impl(history, varThreshold, detectShadows);
|
||||
}
|
||||
|
||||
#endif
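
For orientation, a minimal usage sketch of the factory interface defined above (illustrative only: the video file name is a stand-in and the exact include set may differ slightly on this branch):

    #include "opencv2/core.hpp"
    #include "opencv2/highgui.hpp"      // VideoCapture lives in highgui on this branch
    #include "opencv2/gpubgsegm.hpp"

    int main()
    {
        cv::VideoCapture cap("768x576.avi");                        // any video source
        cv::Ptr<cv::gpu::BackgroundSubtractorMOG2> mog2 =
            cv::gpu::createBackgroundSubtractorMOG2(500, 16.0, true);

        cv::Mat frame;
        cv::gpu::GpuMat d_frame, d_fgmask, d_background;

        while (cap.read(frame))
        {
            d_frame.upload(frame);
            mog2->apply(d_frame, d_fgmask);                         // learningRate defaults to -1
            mog2->getBackgroundImage(d_background);
        }
        return 0;
    }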
|
@ -46,10 +46,21 @@
|
||||
#include <limits>
|
||||
|
||||
#include "opencv2/gpubgsegm.hpp"
|
||||
#include "opencv2/gpuarithm.hpp"
|
||||
#include "opencv2/gpufilters.hpp"
|
||||
#include "opencv2/gpuimgproc.hpp"
|
||||
|
||||
#include "opencv2/core/private.gpu.hpp"
|
||||
|
||||
#include "opencv2/opencv_modules.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCV_GPUARITHM
|
||||
# include "opencv2/gpuarithm.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_GPUFILTERS
|
||||
# include "opencv2/gpufilters.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_GPUIMGPROC
|
||||
# include "opencv2/gpuimgproc.hpp"
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCV_PRECOMP_H__ */
|
||||
|
@ -41,7 +41,10 @@
|
||||
//M*/
|
||||
|
||||
#include "test_precomp.hpp"
|
||||
#include "opencv2/legacy.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCV_LEGACY
|
||||
# include "opencv2/legacy.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_CUDA
|
||||
|
||||
@ -62,7 +65,7 @@ using namespace cvtest;
|
||||
//////////////////////////////////////////////////////
|
||||
// FGDStatModel
|
||||
|
||||
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
|
||||
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined(HAVE_OPENCV_LEGACY)
|
||||
|
||||
namespace cv
|
||||
{
|
||||
@ -72,11 +75,10 @@ namespace cv
|
||||
}
|
||||
}
|
||||
|
||||
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
|
||||
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string)
|
||||
{
|
||||
cv::gpu::DeviceInfo devInfo;
|
||||
std::string inputFile;
|
||||
int out_cn;
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
@ -84,8 +86,6 @@ PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
|
||||
cv::gpu::setDevice(devInfo.deviceID());
|
||||
|
||||
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
|
||||
|
||||
out_cn = GET_PARAM(2);
|
||||
}
|
||||
};
|
||||
|
||||
@ -102,15 +102,10 @@ GPU_TEST_P(FGDStatModel, Update)
|
||||
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
|
||||
|
||||
cv::gpu::GpuMat d_frame(frame);
|
||||
cv::gpu::FGDStatModel d_model(out_cn);
|
||||
d_model.create(d_frame);
|
||||
|
||||
cv::Mat h_background;
|
||||
cv::Mat h_foreground;
|
||||
cv::Mat h_background3;
|
||||
|
||||
cv::Mat backgroundDiff;
|
||||
cv::Mat foregroundDiff;
|
||||
cv::Ptr<cv::gpu::BackgroundSubtractorFGD> d_fgd = cv::gpu::createBackgroundSubtractorFGD();
|
||||
cv::gpu::GpuMat d_foreground, d_background;
|
||||
std::vector< std::vector<cv::Point> > foreground_regions;
|
||||
d_fgd->apply(d_frame, d_foreground);
|
||||
|
||||
for (int i = 0; i < 5; ++i)
|
||||
{
|
||||
@ -121,32 +116,23 @@ GPU_TEST_P(FGDStatModel, Update)
|
||||
int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
|
||||
|
||||
d_frame.upload(frame);
|
||||
|
||||
int count = d_model.update(d_frame);
|
||||
|
||||
ASSERT_EQ(gold_count, count);
|
||||
d_fgd->apply(d_frame, d_foreground);
|
||||
d_fgd->getBackgroundImage(d_background);
|
||||
d_fgd->getForegroundRegions(foreground_regions);
|
||||
int count = (int) foreground_regions.size();
|
||||
|
||||
cv::Mat gold_background = cv::cvarrToMat(model->background);
|
||||
cv::Mat gold_foreground = cv::cvarrToMat(model->foreground);
|
||||
|
||||
if (out_cn == 3)
|
||||
d_model.background.download(h_background3);
|
||||
else
|
||||
{
|
||||
d_model.background.download(h_background);
|
||||
cv::cvtColor(h_background, h_background3, cv::COLOR_BGRA2BGR);
|
||||
}
|
||||
d_model.foreground.download(h_foreground);
|
||||
|
||||
ASSERT_MAT_NEAR(gold_background, h_background3, 1.0);
|
||||
ASSERT_MAT_NEAR(gold_foreground, h_foreground, 0.0);
|
||||
ASSERT_MAT_NEAR(gold_background, d_background, 1.0);
|
||||
ASSERT_MAT_NEAR(gold_foreground, d_foreground, 0.0);
|
||||
ASSERT_EQ(gold_count, count);
|
||||
}
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(GPU_BgSegm, FGDStatModel, testing::Combine(
|
||||
ALL_DEVICES,
|
||||
testing::Values(std::string("768x576.avi")),
|
||||
testing::Values(Channels(3), Channels(4))));
|
||||
testing::Values(std::string("768x576.avi"))));
|
||||
|
||||
#endif
|
||||
|
||||
@ -193,7 +179,7 @@ GPU_TEST_P(MOG, Update)
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
cv::gpu::MOG_GPU mog;
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG> mog = cv::gpu::createBackgroundSubtractorMOG();
|
||||
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG> mog_gold = cv::createBackgroundSubtractorMOG();
|
||||
@ -211,7 +197,7 @@ GPU_TEST_P(MOG, Update)
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
mog(loadMat(frame, useRoi), foreground, (float)learningRate);
|
||||
mog->apply(loadMat(frame, useRoi), foreground, learningRate);
|
||||
|
||||
mog_gold->apply(frame, foreground_gold, learningRate);
|
||||
|
||||
@ -267,8 +253,8 @@ GPU_TEST_P(MOG2, Update)
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
cv::gpu::MOG2_GPU mog2;
|
||||
mog2.bShadowDetection = detectShadow;
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::gpu::createBackgroundSubtractorMOG2();
|
||||
mog2->setDetectShadows(detectShadow);
|
||||
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
|
||||
@ -287,7 +273,7 @@ GPU_TEST_P(MOG2, Update)
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
mog2(loadMat(frame, useRoi), foreground);
|
||||
mog2->apply(loadMat(frame, useRoi), foreground);
|
||||
|
||||
mog2_gold->apply(frame, foreground_gold);
|
||||
|
||||
@ -312,8 +298,8 @@ GPU_TEST_P(MOG2, getBackgroundImage)
|
||||
|
||||
cv::Mat frame;
|
||||
|
||||
cv::gpu::MOG2_GPU mog2;
|
||||
mog2.bShadowDetection = detectShadow;
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::gpu::createBackgroundSubtractorMOG2();
|
||||
mog2->setDetectShadows(detectShadow);
|
||||
cv::gpu::GpuMat foreground;
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
|
||||
@ -325,13 +311,13 @@ GPU_TEST_P(MOG2, getBackgroundImage)
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
mog2(loadMat(frame, useRoi), foreground);
|
||||
mog2->apply(loadMat(frame, useRoi), foreground);
|
||||
|
||||
mog2_gold->apply(frame, foreground_gold);
|
||||
}
|
||||
|
||||
cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
|
||||
mog2.getBackgroundImage(background);
|
||||
mog2->getBackgroundImage(background);
|
||||
|
||||
cv::Mat background_gold;
|
||||
mog2_gold->getBackgroundImage(background_gold);
|
||||
@ -372,16 +358,15 @@ GPU_TEST_P(GMG, Accuracy)
|
||||
cv::Mat frame = randomMat(size, type, 0, 100);
|
||||
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
|
||||
|
||||
cv::gpu::GMG_GPU gmg;
|
||||
gmg.numInitializationFrames = 5;
|
||||
gmg.smoothingRadius = 0;
|
||||
gmg.initialize(d_frame.size(), 0, 255);
|
||||
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::gpu::createBackgroundSubtractorGMG();
|
||||
gmg->setNumFrames(5);
|
||||
gmg->setSmoothingRadius(0);
|
||||
|
||||
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
|
||||
|
||||
for (int i = 0; i < gmg.numInitializationFrames; ++i)
|
||||
for (int i = 0; i < gmg->getNumFrames(); ++i)
|
||||
{
|
||||
gmg(d_frame, d_fgmask);
|
||||
gmg->apply(d_frame, d_fgmask);
|
||||
|
||||
// fgmask should be entirely background during training
|
||||
ASSERT_MAT_NEAR(zeros, d_fgmask, 0);
|
||||
@ -389,7 +374,7 @@ GPU_TEST_P(GMG, Accuracy)
|
||||
|
||||
frame = randomMat(size, type, 160, 255);
|
||||
d_frame = loadMat(frame, useRoi);
|
||||
gmg(d_frame, d_fgmask);
|
||||
gmg->apply(d_frame, d_fgmask);
|
||||
|
||||
// now fgmask should be entirely foreground
|
||||
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
|
||||
|
@ -59,4 +59,6 @@
|
||||
#include "opencv2/gpubgsegm.hpp"
|
||||
#include "opencv2/video.hpp"
|
||||
|
||||
#include "opencv2/opencv_modules.hpp"
|
||||
|
||||
#endif
|
||||
|
@ -11,6 +11,9 @@ Video reader interface.
|
||||
|
||||
.. ocv:class:: gpucodec::VideoReader
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on how to use the videoReader class can be found at opencv_source_code/samples/gpu/video_reader.cpp
|
||||
|
||||
|
||||
gpucodec::VideoReader::nextFrame
|
||||
|
@ -15,6 +15,9 @@ The implementation uses H264 video codec.
|
||||
|
||||
.. note:: Currently only Windows platform is supported.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on how to use the videoWriter class can be found at opencv_source_code/samples/gpu/video_writer.cpp
|
||||
|
||||
|
||||
gpucodec::VideoWriter::write
|
||||
|
@ -5,7 +5,9 @@ Image Filtering
|
||||
|
||||
Functions and classes described in this section are used to perform various linear or non-linear filtering operations on 2D images.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example containing all basic morphology operators like erode and dilate can be found at opencv_source_code/samples/gpu/morphology.cpp
|
||||
|
||||
gpu::Filter
|
||||
-----------
|
||||
|
@ -123,6 +123,9 @@ Composites two images using alpha opacity values contained in each image.
|
||||
|
||||
:param stream: Stream for the asynchronous version.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example demonstrating the use of alphaComp can be found at opencv_source_code/samples/gpu/alpha_comp.cpp
|
||||
|
||||
|
||||
.. [MHT2011] Pascal Getreuer, Malvar-He-Cutler Linear Image Demosaicking, Image Processing On Line, 2011
|
||||
|
@ -108,6 +108,9 @@ Base class for line segments detector algorithm. ::
|
||||
virtual int getMaxLines() const = 0;
|
||||
};
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the Hough segment detector can be found at opencv_source_code/samples/gpu/houghlines.cpp
|
||||
|
||||
|
||||
gpu::HoughSegmentDetector::detect
|
||||
|
@ -109,6 +109,86 @@ namespace hist
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////
|
||||
|
||||
namespace hist
|
||||
{
|
||||
__device__ __forceinline__ void histEvenInc(int* shist, uint data, int binSize, int lowerLevel, int upperLevel)
|
||||
{
|
||||
if (data >= lowerLevel && data <= upperLevel)
|
||||
{
|
||||
const uint ind = (data - lowerLevel) / binSize;
|
||||
Emulation::smem::atomicAdd(shist + ind, 1);
|
||||
}
|
||||
}
|
||||
|
||||
__global__ void histEven8u(const uchar* src, const size_t step, const int rows, const int cols,
|
||||
int* hist, const int binCount, const int binSize, const int lowerLevel, const int upperLevel)
|
||||
{
|
||||
extern __shared__ int shist[];
|
||||
|
||||
const int y = blockIdx.x * blockDim.y + threadIdx.y;
|
||||
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
if (tid < binCount)
|
||||
shist[tid] = 0;
|
||||
|
||||
__syncthreads();
|
||||
|
||||
if (y < rows)
|
||||
{
|
||||
const uchar* rowPtr = src + y * step;
|
||||
const uint* rowPtr4 = (uint*) rowPtr;
|
||||
|
||||
const int cols_4 = cols / 4;
|
||||
for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
|
||||
{
|
||||
const uint data = rowPtr4[x];
|
||||
|
||||
histEvenInc(shist, (data >> 0) & 0xFFU, binSize, lowerLevel, upperLevel);
|
||||
histEvenInc(shist, (data >> 8) & 0xFFU, binSize, lowerLevel, upperLevel);
|
||||
histEvenInc(shist, (data >> 16) & 0xFFU, binSize, lowerLevel, upperLevel);
|
||||
histEvenInc(shist, (data >> 24) & 0xFFU, binSize, lowerLevel, upperLevel);
|
||||
}
|
||||
|
||||
if (cols % 4 != 0 && threadIdx.x == 0)
|
||||
{
|
||||
for (int x = cols_4 * 4; x < cols; ++x)
|
||||
{
|
||||
const uchar data = rowPtr[x];
|
||||
histEvenInc(shist, data, binSize, lowerLevel, upperLevel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__syncthreads();
|
||||
|
||||
if (tid < binCount)
|
||||
{
|
||||
const int histVal = shist[tid];
|
||||
|
||||
if (histVal > 0)
|
||||
::atomicAdd(hist + tid, histVal);
|
||||
}
|
||||
}
|
||||
|
||||
void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream)
|
||||
{
|
||||
const dim3 block(32, 8);
|
||||
const dim3 grid(divUp(src.rows, block.y));
|
||||
|
||||
const int binSize = divUp(upperLevel - lowerLevel, binCount);
|
||||
|
||||
const size_t smem_size = binCount * sizeof(int);
|
||||
|
||||
histEven8u<<<grid, block, smem_size, stream>>>(src.data, src.step, src.rows, src.cols, hist, binCount, binSize, lowerLevel, upperLevel);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////
|
||||
|
||||
namespace hist
|
||||
{
|
||||
__constant__ int c_lut[256];
|
||||
|
@ -478,6 +478,21 @@ void cv::gpu::evenLevels(OutputArray _levels, int nLevels, int lowerLevel, int u
    _levels.getGpuMatRef().upload(host_levels);
}

namespace hist
{
    void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream);
}

namespace
{
    void histEven8u(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)
    {
        hist.create(1, histSize, CV_32S);
        cudaSafeCall( cudaMemsetAsync(hist.data, 0, histSize * sizeof(int), stream) );
        hist::histEven8u(src, hist.ptr<int>(), histSize, lowerLevel, upperLevel, stream);
    }
}

void cv::gpu::histEven(InputArray _src, OutputArray hist, InputOutputArray buf, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
    typedef void (*hist_t)(const GpuMat& src, OutputArray hist, InputOutputArray buf, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);
@ -491,6 +506,12 @@ void cv::gpu::histEven(InputArray _src, OutputArray hist, InputOutputArray buf,

    GpuMat src = _src.getGpuMat();

    if (src.depth() == CV_8U && deviceSupports(FEATURE_SET_COMPUTE_30))
    {
        histEven8u(src, hist.getGpuMatRef(), histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
        return;
    }

    CV_Assert( src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );

    hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
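
As a usage note, the tests below exercise the same entry point roughly like this (sketch only; the source matrix is assumed to be a CV_8UC1 GpuMat already uploaded to the device):

    cv::gpu::GpuMat d_src;                        // 8-bit, single-channel input on the GPU
    cv::gpu::GpuMat d_hist;                       // receives a 1 x 256 CV_32S row of counts
    cv::gpu::histEven(d_src, d_hist, 256, 0, 256);

    cv::Mat hist;
    d_hist.download(hist);                        // copy the counts back to the host if needed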
@ -49,13 +49,16 @@ using namespace cvtest;
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// HistEven
|
||||
|
||||
struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
|
||||
PARAM_TEST_CASE(HistEven, cv::gpu::DeviceInfo, cv::Size)
|
||||
{
|
||||
cv::gpu::DeviceInfo devInfo;
|
||||
|
||||
cv::Size size;
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
devInfo = GetParam();
|
||||
devInfo = GET_PARAM(0);
|
||||
size = GET_PARAM(1);
|
||||
|
||||
cv::gpu::setDevice(devInfo.deviceID());
|
||||
}
|
||||
@ -63,57 +66,34 @@ struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
|
||||
|
||||
GPU_TEST_P(HistEven, Accuracy)
|
||||
{
|
||||
cv::Mat img = readImage("stereobm/aloe-L.png");
|
||||
ASSERT_FALSE(img.empty());
|
||||
|
||||
cv::Mat hsv;
|
||||
cv::cvtColor(img, hsv, cv::COLOR_BGR2HSV);
|
||||
cv::Mat src = randomMat(size, CV_8UC1);
|
||||
|
||||
int hbins = 30;
|
||||
float hranges[] = {0.0f, 180.0f};
|
||||
|
||||
std::vector<cv::Mat> srcs;
|
||||
cv::split(hsv, srcs);
|
||||
float hranges[] = {50.0f, 200.0f};
|
||||
|
||||
cv::gpu::GpuMat hist;
|
||||
cv::gpu::histEven(loadMat(srcs[0]), hist, hbins, (int)hranges[0], (int)hranges[1]);
|
||||
cv::gpu::histEven(loadMat(src), hist, hbins, (int) hranges[0], (int) hranges[1]);
|
||||
|
||||
cv::Mat hist_gold;
|
||||
|
||||
cv::MatND histnd;
|
||||
int histSize[] = {hbins};
|
||||
const float* ranges[] = {hranges};
|
||||
int channels[] = {0};
|
||||
cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
|
||||
cv::calcHist(&src, 1, channels, cv::Mat(), hist_gold, 1, histSize, ranges);
|
||||
|
||||
cv::Mat hist_gold = histnd;
|
||||
hist_gold = hist_gold.t();
|
||||
hist_gold.convertTo(hist_gold, CV_32S);
|
||||
|
||||
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
|
||||
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, testing::Combine(
|
||||
ALL_DEVICES,
|
||||
DIFFERENT_SIZES));
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// CalcHist
|
||||
|
||||
namespace
|
||||
{
|
||||
void calcHistGold(const cv::Mat& src, cv::Mat& hist)
|
||||
{
|
||||
hist.create(1, 256, CV_32SC1);
|
||||
hist.setTo(cv::Scalar::all(0));
|
||||
|
||||
int* hist_row = hist.ptr<int>();
|
||||
for (int y = 0; y < src.rows; ++y)
|
||||
{
|
||||
const uchar* src_row = src.ptr(y);
|
||||
|
||||
for (int x = 0; x < src.cols; ++x)
|
||||
++hist_row[src_row[x]];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
|
||||
{
|
||||
cv::gpu::DeviceInfo devInfo;
|
||||
@ -137,7 +117,16 @@ GPU_TEST_P(CalcHist, Accuracy)
|
||||
cv::gpu::calcHist(loadMat(src), hist);
|
||||
|
||||
cv::Mat hist_gold;
|
||||
calcHistGold(src, hist_gold);
|
||||
|
||||
const int hbins = 256;
|
||||
const float hranges[] = {0.0f, 256.0f};
|
||||
const int histSize[] = {hbins};
|
||||
const float* ranges[] = {hranges};
|
||||
const int channels[] = {0};
|
||||
|
||||
cv::calcHist(&src, 1, channels, cv::Mat(), hist_gold, 1, histSize, ranges);
|
||||
hist_gold = hist_gold.reshape(1, 1);
|
||||
hist_gold.convertTo(hist_gold, CV_32S);
|
||||
|
||||
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
|
||||
}
|
||||
|
@ -3,6 +3,10 @@ Optical Flow
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A general optical flow example can be found at opencv_source_code/samples/gpu/optical_flow.cpp
|
||||
* : A general optical flow example using the nvidia API can be found at opencv_source_code/samples/gpu/opticalflow_nvidia_api.cpp
|
||||
|
||||
|
||||
gpu::BroxOpticalFlow
|
||||
@ -44,6 +48,9 @@ Class computing the optical flow for two images using Brox et al Optical Flow al
|
||||
GpuMat buf;
|
||||
};
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example illustrating the Brox et al optical flow algorithm can be found at opencv_source_code/samples/gpu/brox_optical_flow.cpp
|
||||
|
||||
|
||||
gpu::FarnebackOpticalFlow
|
||||
@ -138,6 +145,9 @@ The class can calculate an optical flow for a sparse feature set or dense optica
|
||||
|
||||
.. seealso:: :ocv:func:`calcOpticalFlowPyrLK`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example of the Lucas Kanade optical flow algorithm can be found at opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
|
||||
|
||||
|
||||
gpu::PyrLKOpticalFlow::sparse
|
||||
|
@ -3,7 +3,11 @@ Stereo Correspondence
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A basic stereo matching example can be found at opencv_source_code/samples/gpu/stereo_match.cpp
|
||||
* : A stereo matching example using several GPU's can be found at opencv_source_code/samples/gpu/stereo_multi.cpp
|
||||
* : A stereo matching example using several GPU's and driver API can be found at opencv_source_code/samples/gpu/driver_api_stereo_multi.cpp
|
||||
|
||||
gpu::StereoBM
|
||||
-------------
|
||||
|
@ -9,13 +9,12 @@ ocv_add_module(highgui opencv_imgproc OPTIONAL opencv_androidcamera)
|
||||
|
||||
ocv_clear_vars(GRFMT_LIBS)
|
||||
|
||||
if(WITH_PNG OR WITH_TIFF OR WITH_OPENEXR)
|
||||
if(HAVE_PNG OR HAVE_TIFF OR HAVE_OPENEXR)
|
||||
ocv_include_directories(${ZLIB_INCLUDE_DIR})
|
||||
list(APPEND GRFMT_LIBS ${ZLIB_LIBRARIES})
|
||||
endif()
|
||||
|
||||
if(WITH_JPEG)
|
||||
add_definitions(-DHAVE_JPEG)
|
||||
if(HAVE_JPEG)
|
||||
ocv_include_directories(${JPEG_INCLUDE_DIR})
|
||||
list(APPEND GRFMT_LIBS ${JPEG_LIBRARIES})
|
||||
endif()
|
||||
@ -26,27 +25,23 @@ if(WITH_WEBP)
|
||||
list(APPEND GRFMT_LIBS ${WEBP_LIBRARIES})
|
||||
endif()
|
||||
|
||||
if(WITH_PNG)
|
||||
add_definitions(-DHAVE_PNG)
|
||||
if(HAVE_PNG)
|
||||
add_definitions(${PNG_DEFINITIONS})
|
||||
ocv_include_directories(${PNG_INCLUDE_DIR})
|
||||
list(APPEND GRFMT_LIBS ${PNG_LIBRARIES})
|
||||
endif()
|
||||
|
||||
if(WITH_TIFF)
|
||||
add_definitions(-DHAVE_TIFF)
|
||||
if(HAVE_TIFF)
|
||||
ocv_include_directories(${TIFF_INCLUDE_DIR})
|
||||
list(APPEND GRFMT_LIBS ${TIFF_LIBRARIES})
|
||||
endif()
|
||||
|
||||
if(WITH_JASPER)
|
||||
add_definitions(-DHAVE_JASPER)
|
||||
if(HAVE_JASPER)
|
||||
ocv_include_directories(${JASPER_INCLUDE_DIR})
|
||||
list(APPEND GRFMT_LIBS ${JASPER_LIBRARIES})
|
||||
endif()
|
||||
|
||||
if(WITH_OPENEXR)
|
||||
add_definitions(-DHAVE_OPENEXR)
|
||||
if(HAVE_OPENEXR)
|
||||
include_directories(SYSTEM ${OPENEXR_INCLUDE_PATHS})
|
||||
list(APPEND GRFMT_LIBS ${OPENEXR_LIBRARIES})
|
||||
endif()
|
||||
@ -114,16 +109,12 @@ elseif(HAVE_WIN32UI)
|
||||
list(APPEND highgui_srcs src/window_w32.cpp)
|
||||
elseif(HAVE_GTK)
|
||||
list(APPEND highgui_srcs src/window_gtk.cpp)
|
||||
elseif(APPLE)
|
||||
if(WITH_CARBON)
|
||||
add_definitions(-DHAVE_CARBON=1)
|
||||
list(APPEND highgui_srcs src/window_carbon.cpp)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime")
|
||||
elseif(NOT IOS)
|
||||
add_definitions(-DHAVE_COCOA=1)
|
||||
list(APPEND highgui_srcs src/window_cocoa.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
|
||||
endif()
|
||||
elseif(HAVE_CARBON)
|
||||
list(APPEND highgui_srcs src/window_carbon.cpp)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime")
|
||||
elseif(HAVE_COCOA)
|
||||
list(APPEND highgui_srcs src/window_cocoa.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
|
||||
endif()
|
||||
|
||||
if(WIN32 AND NOT ARM)
|
||||
@ -203,6 +194,7 @@ endif(HAVE_FFMPEG)
|
||||
|
||||
if(HAVE_PVAPI)
|
||||
add_definitions(-DHAVE_PVAPI)
|
||||
add_definitions(${PVAPI_DEFINITIONS})
|
||||
ocv_include_directories(${PVAPI_INCLUDE_PATH})
|
||||
set(highgui_srcs src/cap_pvapi.cpp ${highgui_srcs})
|
||||
list(APPEND HIGHGUI_LIBRARIES ${PVAPI_LIBRARY})
|
||||
@ -216,19 +208,17 @@ if(HAVE_GIGE_API)
|
||||
list(APPEND highgui_srcs src/cap_giganetix.cpp)
|
||||
endif(HAVE_GIGE_API)
|
||||
|
||||
if(WITH_AVFOUNDATION)
|
||||
add_definitions(-DHAVE_AVFOUNDATION=1)
|
||||
if(HAVE_AVFOUNDATION)
|
||||
list(APPEND highgui_srcs src/cap_avfoundation.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework AVFoundation" "-framework QuartzCore")
|
||||
endif()
|
||||
|
||||
if(HAVE_QUICKTIME)
|
||||
list(APPEND highgui_srcs src/cap_qt.cpp)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
|
||||
elseif(APPLE)
|
||||
add_definitions(-DHAVE_QUICKTIME=1)
|
||||
if(WITH_QUICKTIME)
|
||||
list(APPEND highgui_srcs src/cap_qt.cpp)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
|
||||
else()
|
||||
list(APPEND highgui_srcs src/cap_qtkit.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
|
||||
endif()
|
||||
list(APPEND highgui_srcs src/cap_qtkit.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
|
||||
endif()
|
||||
|
||||
if(IOS)
|
||||
@ -303,11 +293,6 @@ if(WIN32 AND WITH_FFMPEG)
|
||||
set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
|
||||
set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}")
|
||||
|
||||
#if(MSVC AND CMAKE_VERSION VERSION_GREATER "2.8.2")
|
||||
# add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
# COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/$<CONFIGURATION>/${ffmpeg_bare_name_ver}"
|
||||
# COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
#else
|
||||
if(MSVC_IDE)
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}"
|
||||
|
@ -223,6 +223,15 @@ The class provides C++ API for capturing video from cameras or for reading video
|
||||
|
||||
.. note:: In C API the black-box structure ``CvCapture`` is used instead of ``VideoCapture``.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : A basic sample on using the VideoCapture interface can be found at opencv_source_code/samples/cpp/starter_video.cpp
|
||||
* : Another basic video processing sample can be found at opencv_source_code/samples/cpp/video_dmtx.cpp
|
||||
|
||||
* : PYTHON : A basic sample on using the VideoCapture interface can be found at opencv_source_code/samples/python2/video.py
|
||||
* : PYTHON : basic video processing sample can be found at opencv_source_code/samples/python2/video_dmtx.py
|
||||
* : PYTHON : A multi threaded video processing sample can be found at opencv_source_code/samples/python2/video_threaded.py
|
||||
|
||||
|
||||
VideoCapture::VideoCapture
|
||||
------------------------------
|
||||
|
@ -33,6 +33,10 @@ The function ``createTrackbar`` creates a trackbar (a slider or range control) w

Clicking the label of each trackbar enables editing the trackbar values manually.

.. Sample code::

* : An example of using the trackbar functionality can be found at opencv_source_code/samples/cpp/connected_components.cpp

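For reference, a minimal trackbar sketch in the spirit of the sample cited above (window, trackbar and variable names are illustrative):

    static int threshval = 100;                       // value edited through the trackbar

    static void onTrackbar(int, void*)
    {
        // re-run whatever processing depends on threshval here
    }

    // ... after the window has been created:
    cv::namedWindow("result");
    cv::createTrackbar("threshold", "result", &threshval, 255, onTrackbar);
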
getTrackbarPos
|
||||
------------------
|
||||
Returns the trackbar position.
|
||||
@ -75,6 +79,7 @@ The function ``imshow`` displays an image in the specified window. If the window
|
||||
|
||||
* If the image is 32-bit floating-point, the pixel values are multiplied by 255. That is, the value range [0,1] is mapped to [0,255].
|
||||
|
||||
If the window was created with OpenGL support, ``imshow`` also supports :ocv:class:`ogl::Buffer` , :ocv:class:`ogl::Texture2D` and :ocv:class:`gpu::GpuMat` as input.
|
||||
|
||||
namedWindow
|
||||
---------------
|
||||
@ -88,7 +93,13 @@ Creates a window.

:param name: Name of the window in the window caption that may be used as a window identifier.

:param flags: Flags of the window. Currently the only supported flag is ``CV_WINDOW_AUTOSIZE`` . If this is set, the window size is automatically adjusted to fit the displayed image (see :ocv:func:`imshow` ), and you cannot change the window size manually.
:param flags: Flags of the window. The supported flags are:

* **WINDOW_NORMAL** If this is set, the user can resize the window (no constraint).

* **WINDOW_AUTOSIZE** If this is set, the window size is automatically adjusted to fit the displayed image (see :ocv:func:`imshow` ), and you cannot change the window size manually.

* **WINDOW_OPENGL** If this is set, the window will be created with OpenGL support.

The function ``namedWindow`` creates a window that can be used as a placeholder for images and trackbars. Created windows are referred to by their names.

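As an illustration of the flags listed above (sketch only; the image path is hypothetical):

    cv::namedWindow("preview", cv::WINDOW_NORMAL);   // user-resizable window
    cv::Mat img = cv::imread("lena.jpg");
    if (!img.empty())
        cv::imshow("preview", img);
    cv::waitKey(0);
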
@ -238,3 +249,31 @@ The function ``waitKey`` waits for a key event infinitely (when
|
||||
.. note::
|
||||
|
||||
The function only works if there is at least one HighGUI window created and the window is active. If there are several HighGUI windows, any of them can be active.
|
||||
|
||||
setOpenGlDrawCallback
|
||||
---------------------
|
||||
Set OpenGL render handler for the specified window.
|
||||
|
||||
.. ocv:function:: void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0)
|
||||
|
||||
:param winname: Window name
|
||||
|
||||
:param onOpenGlDraw: Draw callback.
|
||||
|
||||
:param userdata: The optional parameter passed to the callback.
|
||||
|
||||
setOpenGlContext
|
||||
----------------
|
||||
Sets the specified window as current OpenGL context.
|
||||
|
||||
.. ocv:function:: void setOpenGlContext(const String& winname)
|
||||
|
||||
:param winname: Window name
|
||||
|
||||
updateWindow
|
||||
------------
|
||||
Force window to redraw its context and call draw callback ( :ocv:func:`setOpenGlDrawCallback` ).
|
||||
|
||||
.. ocv:function:: void updateWindow(const String& winname)
|
||||
|
||||
:param winname: Window name
|
||||
|
@ -2124,7 +2124,7 @@ long videoDevices::initDevices(IMFAttributes *pAttributes)
|
||||
return hr;
|
||||
}
|
||||
|
||||
size_t videoDevices::getCount()
|
||||
unsigned int videoDevices::getCount()
|
||||
{
|
||||
return vds_Devices.size();
|
||||
}
|
||||
|
@ -52,6 +52,8 @@ void CvCaptureCAM_XIMEA::init()
|
||||
{
|
||||
xiGetNumberDevices( &numDevices);
|
||||
hmv = NULL;
|
||||
frame = NULL;
|
||||
timeout = 0;
|
||||
memset(&image, 0, sizeof(XI_IMG));
|
||||
}
|
||||
|
||||
@ -60,6 +62,8 @@ void CvCaptureCAM_XIMEA::init()
|
||||
// Initialize camera input
|
||||
bool CvCaptureCAM_XIMEA::open( int wIndex )
|
||||
{
|
||||
#define HandleXiResult(res) if (res!=XI_OK) goto error;
|
||||
|
||||
int mvret = XI_OK;
|
||||
|
||||
if(numDevices == 0)
|
||||
@ -73,26 +77,42 @@ bool CvCaptureCAM_XIMEA::open( int wIndex )
|
||||
|
||||
// always use auto exposure/gain
|
||||
mvret = xiSetParamInt( hmv, XI_PRM_AEAG, 1);
|
||||
if(mvret != XI_OK) goto error;
|
||||
|
||||
// always use auto white balance
|
||||
mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, 1);
|
||||
if(mvret != XI_OK) goto error;
|
||||
|
||||
// default image format RGB24
|
||||
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_RGB24);
|
||||
if(mvret != XI_OK) goto error;
|
||||
HandleXiResult(mvret);
|
||||
|
||||
int width = 0;
|
||||
mvret = xiGetParamInt( hmv, XI_PRM_WIDTH, &width);
|
||||
if(mvret != XI_OK) goto error;
|
||||
HandleXiResult(mvret);
|
||||
|
||||
int height = 0;
|
||||
mvret = xiGetParamInt( hmv, XI_PRM_HEIGHT, &height);
|
||||
if(mvret != XI_OK) goto error;
|
||||
HandleXiResult(mvret);
|
||||
|
||||
// allocate frame buffer for RGB24 image
|
||||
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 3);
|
||||
int isColor = 0;
|
||||
mvret = xiGetParamInt(hmv, XI_PRM_IMAGE_IS_COLOR, &isColor);
|
||||
HandleXiResult(mvret);
|
||||
|
||||
if(isColor) // for color cameras
|
||||
{
|
||||
// default image format RGB24
|
||||
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_RGB24);
|
||||
HandleXiResult(mvret);
|
||||
|
||||
// always use auto white balance for color cameras
|
||||
mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, 1);
|
||||
HandleXiResult(mvret);
|
||||
|
||||
// allocate frame buffer for RGB24 image
|
||||
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 3);
|
||||
}
|
||||
else // for mono cameras
|
||||
{
|
||||
// default image format MONO8
|
||||
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_MONO8);
|
||||
HandleXiResult(mvret);
|
||||
|
||||
// allocate frame buffer for MONO8 image
|
||||
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 1);
|
||||
}
|
||||
|
||||
//default capture timeout 10s
|
||||
timeout = 10000;
|
||||
@ -119,8 +139,11 @@ void CvCaptureCAM_XIMEA::close()
|
||||
if(frame)
|
||||
cvReleaseImage(&frame);
|
||||
|
||||
xiStopAcquisition(hmv);
|
||||
xiCloseDevice(hmv);
|
||||
if(hmv)
|
||||
{
|
||||
xiStopAcquisition(hmv);
|
||||
xiCloseDevice(hmv);
|
||||
}
|
||||
hmv = NULL;
|
||||
}
|
||||
|
||||
@ -153,11 +176,11 @@ IplImage* CvCaptureCAM_XIMEA::retrieveFrame(int)
|
||||
{
|
||||
// update cvImage after format has changed
|
||||
resetCvImage();
|
||||
|
||||
|
||||
// copy pixel data
|
||||
switch( image.frm)
|
||||
{
|
||||
case XI_MONO8 :
|
||||
case XI_MONO8 :
|
||||
case XI_RAW8 : memcpy( frame->imageData, image.bp, image.width*image.height); break;
|
||||
case XI_MONO16 :
|
||||
case XI_RAW16 : memcpy( frame->imageData, image.bp, image.width*image.height*sizeof(WORD)); break;
|
||||
@ -187,9 +210,9 @@ void CvCaptureCAM_XIMEA::resetCvImage()
|
||||
{
|
||||
case XI_MONO8 :
|
||||
case XI_RAW8 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 1); break;
|
||||
case XI_MONO16 :
|
||||
case XI_MONO16 :
|
||||
case XI_RAW16 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_16U, 1); break;
|
||||
case XI_RGB24 :
|
||||
case XI_RGB24 :
|
||||
case XI_RGB_PLANAR : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 3); break;
|
||||
case XI_RGB32 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 4); break;
|
||||
default :
|
||||
@ -315,9 +338,9 @@ int CvCaptureCAM_XIMEA::getBpp()
|
||||
{
|
||||
case XI_MONO8 :
|
||||
case XI_RAW8 : return 1;
|
||||
case XI_MONO16 :
|
||||
case XI_MONO16 :
|
||||
case XI_RAW16 : return 2;
|
||||
case XI_RGB24 :
|
||||
case XI_RGB24 :
|
||||
case XI_RGB_PLANAR : return 3;
|
||||
case XI_RGB32 : return 4;
|
||||
default :
|
||||
|
@ -53,12 +53,6 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#undef PACKAGE
|
||||
#undef PACKAGE_BUGREPORT
|
||||
#undef PACKAGE_NAME
|
||||
#undef PACKAGE_STRING
|
||||
#undef PACKAGE_TARNAME
|
||||
#undef PACKAGE_VERSION
|
||||
#undef VERSION
|
||||
|
||||
#include <jasper/jasper.h>
|
||||
|
@ -51,7 +51,6 @@
|
||||
and png2bmp sample from libpng distribution (Copyright (C) 1999-2001 MIYASAKA Masaru)
|
||||
\****************************************************************************************/
|
||||
|
||||
#undef HAVE_UNISTD_H //to avoid redefinition
|
||||
#ifndef _LFS64_LARGEFILE
|
||||
# define _LFS64_LARGEFILE 0
|
||||
#endif
|
||||
|
@ -30,7 +30,11 @@ Finds edges in an image using the [Canny86]_ algorithm.

The function finds edges in the input image ``image`` and marks them in the output map ``edges`` using the Canny algorithm. The smallest value between ``threshold1`` and ``threshold2`` is used for edge linking. The largest value is used to find initial segments of strong edges. See
http://en.wikipedia.org/wiki/Canny_edge_detector

.. Sample code::

* : An example on using the canny edge detector can be found at opencv_source_code/samples/cpp/edge.cpp

* : PYTHON : An example on using the canny edge detector can be found at opencv_source_code/samples/cpp/edge.py

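To make the two-threshold convention concrete, a short sketch (the input file name is illustrative):

    cv::Mat gray = cv::imread("building.jpg", 0);   // load as single-channel grayscale
    cv::Mat edges;
    cv::Canny(gray, edges, 50, 150);                // 50 links weak edges, 150 seeds strong ones
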
cornerEigenValsAndVecs
|
||||
----------------------
|
||||
@ -81,7 +85,9 @@ The output of the function can be used for robust edge or corner detection.
|
||||
:ocv:func:`cornerHarris`,
|
||||
:ocv:func:`preCornerDetect`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : PYTHON : An example on how to use eigenvectors and eigenvalues to estimate image texture flow direction can be found at opencv_source_code/samples/python2/texture_flow.py
|
||||
|
||||
cornerHarris
|
||||
------------
|
||||
@ -344,6 +350,9 @@ Example: ::
|
||||
:ocv:func:`fitEllipse`,
|
||||
:ocv:func:`minEnclosingCircle`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the Hough circle detector can be found at opencv_source_code/samples/cpp/houghcircles.cpp
|
||||
|
||||
HoughLines
|
||||
----------
|
||||
@ -398,6 +407,10 @@ Finds lines in a binary image using the standard Hough transform.
|
||||
The function implements the standard or standard multi-scale Hough transform algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm for a good explanation of Hough transform.
|
||||
See also the example in :ocv:func:`HoughLinesP` description.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the Hough line detector can be found at opencv_source_code/samples/cpp/houghlines.cpp
|
||||
|
||||
HoughLinesP
|
||||
-----------
|
||||
Finds line segments in a binary image using the probabilistic Hough transform.
|
||||
|
@ -22,6 +22,10 @@ OpenCV enables you to specify the extrapolation method. For details, see the fun
|
||||
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
|
||||
*/
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : PYTHON : A complete example illustrating different morphological operations like erode/dilate, open/close, blackhat/tophat ... can be found at opencv_source_code/samples/python2/morphology.py
|
||||
|
||||
BaseColumnFilter
|
||||
----------------
|
||||
.. ocv:class:: BaseColumnFilter
|
||||
@ -779,6 +783,9 @@ The function supports the in-place mode. Dilation can be applied several ( ``ite
|
||||
:ocv:func:`morphologyEx`,
|
||||
:ocv:func:`createMorphologyFilter`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the morphological dilate operation can be found at opencv_source_code/samples/cpp/morphology2.cpp
|
||||
|
||||
erode
|
||||
-----
|
||||
@ -818,7 +825,9 @@ The function supports the in-place mode. Erosion can be applied several ( ``iter
|
||||
:ocv:func:`morphologyEx`,
|
||||
:ocv:func:`createMorphologyFilter`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the morphological erode operation can be found at opencv_source_code/samples/cpp/morphology2.cpp
|
||||
|
||||
filter2D
|
||||
--------
|
||||
@ -1150,6 +1159,9 @@ Any of the operations can be done in-place. In case of multi-channel images, eac
|
||||
:ocv:func:`erode`,
|
||||
:ocv:func:`createMorphologyFilter`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the morphologyEx function for the morphological opening and closing operations can be found at opencv_source_code/samples/cpp/morphology2.cpp
|
||||
|
||||
Laplacian
|
||||
---------
|
||||
@ -1193,7 +1205,9 @@ This is done when ``ksize > 1`` . When ``ksize == 1`` , the Laplacian is compute
|
||||
:ocv:func:`Sobel`,
|
||||
:ocv:func:`Scharr`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the Laplace transformation for edge detection can be found at opencv_source_code/samples/cpp/laplace.cpp
|
||||
|
||||
pyrDown
|
||||
-------
|
||||
@ -1250,6 +1264,10 @@ Upsamples an image and then blurs it.
|
||||
The function performs the upsampling step of the Gaussian pyramid construction, though it can actually be used to construct the Laplacian pyramid. First, it upsamples the source image by injecting even zero rows and columns and then convolves the result with the same kernel as in
|
||||
:ocv:func:`pyrDown` multiplied by 4.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : PYTHON : An example of Laplacian Pyramid construction and merging can be found at opencv_source_code/samples/python2/lappyr.py
|
||||
|
||||
|
||||
pyrMeanShiftFiltering
|
||||
---------------------
|
||||
@ -1297,6 +1315,9 @@ After the iterations over, the color components of the initial pixel (that is, t
|
||||
|
||||
When ``maxLevel > 0``, the gaussian pyramid of ``maxLevel+1`` levels is built, and the above procedure is run on the smallest layer first. After that, the results are propagated to the larger layer and the iterations are run again only on those pixels where the layer colors differ by more than ``sr`` from the lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the results will be actually different from the ones obtained by running the meanshift procedure on the whole original image (i.e. when ``maxLevel==0``).
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using mean-shift image segmentation can be found at opencv_source_code/samples/cpp/meanshift_segmentation.cpp
|
||||
|
||||
sepFilter2D
|
||||
-----------
|
||||
|
@ -298,6 +298,9 @@ where
|
||||
|
||||
The function emulates the human "foveal" vision and can be used for fast scale and rotation-invariant template matching, for object tracking and so forth. The function can not operate in-place.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the geometric logpolar operation in 4 applications can be found at opencv_source_code/samples/cpp/logpolar_bsm.cpp
|
||||
|
||||
remap
|
||||
-----
|
||||
|
@ -98,7 +98,12 @@ input arrays at the same location. The sample below shows how to compute a 2D Hu
|
||||
waitKey();
|
||||
}
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example for creating histograms of an image can be found at opencv_source_code/samples/cpp/demhist.cpp
|
||||
|
||||
* : PYTHON : An example for creating color histograms can be found at opencv_source/samples/python2/color_histogram.py
|
||||
* : PYTHON : An example illustrating RGB and grayscale histogram plotting can be found at opencv_source/samples/python2/hist.py
|
||||
|
||||
|
||||
calcBackProject
|
||||
|
@ -476,6 +476,12 @@ In this mode, the complexity is still linear.
|
||||
That is, the function provides a very fast way to compute the Voronoi diagram for a binary image.
|
||||
Currently, the second variant can use only the approximate distance transform algorithm, i.e. ``maskSize=CV_DIST_MASK_PRECISE`` is not supported yet.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example on using the distance transform can be found at opencv_source_code/samples/cpp/distrans.cpp
|
||||
|
||||
* : PYTHON : An example on using the distance transform can be found at opencv_source/samples/python2/distrans.py
|
||||
|
||||
floodFill
|
||||
---------
|
||||
Fills a connected component with the given color.
|
||||
@ -574,11 +580,15 @@ where

*
Color/brightness of the seed point in case of a fixed range.

Use these functions to either mark a connected component with the specified color in-place, or build a mask and then extract the contour, or copy the region to another image, and so on. Various modes of the function are demonstrated in the ``floodfill.cpp`` sample.
Use these functions to either mark a connected component with the specified color in-place, or build a mask and then extract the contour, or copy the region to another image, and so on.

.. seealso:: :ocv:func:`findContours`

.. Sample code::

* : An example using the FloodFill technique can be found at opencv_source_code/samples/cpp/ffilldemo.cpp

* : PYTHON : An example using the FloodFill technique can be found at opencv_source_code/samples/python2/floodfill.cpp

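A compact sketch of the in-place variant described above (seed point, fill colour and tolerances are illustrative):

    cv::Mat img = cv::imread("fruits.jpg");
    cv::Rect filled;                                 // receives the bounding box of the repainted area
    int area = cv::floodFill(img, cv::Point(50, 50), cv::Scalar(0, 0, 255),
                             &filled, cv::Scalar(20, 20, 20), cv::Scalar(20, 20, 20));
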
integral
|
||||
--------
|
||||
@ -738,6 +748,12 @@ Visual demonstration and usage example of the function can be found in the OpenC
|
||||
|
||||
.. seealso:: :ocv:func:`findContours`
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the watershed algorithm can be found at opencv_source_code/samples/cpp/watershed.cpp
|
||||
|
||||
* : PYTHON : An example using the watershed algorithm can be found at opencv_source_code/samples/python2/watershed.py
|
||||
|
||||
grabCut
-------
Runs the GrabCut algorithm.
@ -784,3 +800,9 @@ See the sample ``grabcut.cpp`` to learn how to use the function.
.. [Meyer92] Meyer, F. *Color Image Segmentation*, ICIP92, 1992

.. [Telea04] Alexandru Telea, *An Image Inpainting Technique Based on the Fast Marching Method*. Journal of Graphics, GPU, and Game Tools 9 1, pp 23-34 (2004)

.. Sample code::

* : An example using the GrabCut algorithm can be found at opencv_source_code/samples/cpp/grabcut.cpp

* : PYTHON : An example using the GrabCut algorithm can be found at opencv_source_code/samples/python2/grabcut.py

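For completeness, a minimal grabCut sketch along the lines of the cited sample (the rectangle stands in for a user selection):

    cv::Mat img = cv::imread("person.jpg");
    cv::Rect rect(50, 50, 200, 300);                               // rough foreground selection
    cv::Mat mask, bgdModel, fgdModel;
    cv::grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv::GC_INIT_WITH_RECT);
    cv::Mat fgmask = (mask == cv::GC_FGD) | (mask == cv::GC_PR_FGD);   // binary foreground mask
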
@ -73,3 +73,6 @@ image patch:

After the function finishes the comparison, the best matches can be found as global minimums (when ``CV_TM_SQDIFF`` was used) or maximums (when ``CV_TM_CCORR`` or ``CV_TM_CCOEFF`` was used) using the
:ocv:func:`minMaxLoc` function. In case of a color image, template summation in the numerator and each sum in the denominator is done over all of the channels and separate mean values are used for each channel. That is, the function can take a color template and a color image. The result will still be a single-channel image, which is easier to analyze.

.. Sample code::

* : PYTHON : An example on how to match mouse selected regions in an image can be found at opencv_source_code/samples/python2/mouse_and_match.py

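A short sketch of the min/max convention spelled out above (``image`` and ``templ`` are assumed to be already loaded):

    cv::Mat result;
    cv::matchTemplate(image, templ, result, CV_TM_SQDIFF);

    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);

    cv::Point best = minLoc;    // for CV_TM_CCORR / CV_TM_CCOEFF the best match is maxLoc instead
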
@ -192,6 +192,14 @@ The function retrieves contours from the binary image using the algorithm
|
||||
|
||||
.. note:: If you use the new Python interface then the ``CV_`` prefix has to be omitted in contour retrieval mode and contour approximation method parameters (for example, use ``cv2.RETR_LIST`` and ``cv2.CHAIN_APPROX_NONE`` parameters). If you use the old Python interface then these parameters have the ``CV_`` prefix (for example, use ``cv.CV_RETR_LIST`` and ``cv.CV_CHAIN_APPROX_NONE``).
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the findContour functionality can be found at opencv_source_code/samples/cpp/contours2.cpp
|
||||
* : An example using findContours to clean up a background segmentation result at opencv_source_code/samples/cpp/segment_objects.cpp
|
||||
|
||||
* : PYTHON : An example using the findContour functionality can be found at opencv_source/samples/python2/contours.py
|
||||
* : PYTHON : An example of detecting squares in an image can be found at opencv_source/samples/python2/squares.py
|
||||
|
||||
|
||||
approxPolyDP
|
||||
----------------
|
||||
@ -353,6 +361,10 @@ The functions find the convex hull of a 2D point set using the Sklansky's algori
|
||||
that has
|
||||
*O(N logN)* complexity in the current implementation. See the OpenCV sample ``convexhull.cpp`` that demonstrates the usage of different function variants.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the convexHull functionality can be found at opencv_source_code/samples/cpp/convexhull.cpp
|
||||
|
||||
|
||||
convexityDefects
|
||||
----------------
|
||||
@ -406,6 +418,11 @@ Fits an ellipse around a set of 2D points.
|
||||
|
||||
The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of all. It returns the rotated rectangle in which the ellipse is inscribed. The algorithm [Fitzgibbon95]_ is used.
|
||||
|
||||
.. Sample code::
|
||||
|
||||
* : An example using the fitEllipse technique can be found at opencv_source_code/samples/cpp/fitellipse.cpp
|
||||
|
||||
|
||||
fitLine
|
||||
-----------
|
||||
Fits a line to a 2D or 3D point set.
|
||||
@ -476,6 +493,9 @@ http://en.wikipedia.org/wiki/M-estimator
|
||||
:math:`w_i` are adjusted to be inversely proportional to
|
||||
:math:`\rho(r_i)` .
|
||||
|
||||
.. Sample code:
|
||||
|
||||
* : PYTHON : An example of robust line fitting can be found at opencv_source_code/samples/python2/fitline.py
|
||||
|
||||
|
||||
isContourConvex
|
||||
|
@ -258,7 +258,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u,
|
||||
declare.time(100);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
int runs = sz.width <= 320 ? 70 : 5;
|
||||
int runs = sz.width <= 320 ? 100 : 5;
|
||||
TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn);
|
||||
|
||||
SANITY_CHECK(dst, 1);
|
||||
|
@ -28,14 +28,14 @@ PERF_TEST_P(Size_Source, calcHist1d,
|
||||
int dims = 1;
|
||||
int numberOfImages = 1;
|
||||
|
||||
const float r[] = {rangeLow, rangeHight};
|
||||
const float* ranges[] = {r};
|
||||
const float range[] = {rangeLow, rangeHight};
|
||||
const float* ranges[] = {range};
|
||||
|
||||
randu(source, rangeLow, rangeHight);
|
||||
|
||||
declare.in(source);
|
||||
|
||||
TEST_CYCLE()
|
||||
TEST_CYCLE_MULTIRUN(3)
|
||||
{
|
||||
calcHist(&source, numberOfImages, channels, Mat(), hist, dims, histSize, ranges);
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.