fixed docs
@@ -5,11 +5,11 @@ Camera Calibration and 3D Reconstruction

gpu::solvePnPRansac
-------------------

cuda::solvePnPRansac
--------------------

Finds the object pose from 3D-2D point correspondences.

.. ocv:function:: void gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, int num_iters=100, float max_dist=8.0, int min_inlier_count=100, vector<int>* inliers=NULL)

.. ocv:function:: void cuda::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, int num_iters=100, float max_dist=8.0, int min_inlier_count=100, vector<int>* inliers=NULL)

:param object: Single-row matrix of object points.
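For illustration, a minimal call following the signature above might look like this (the input matrices are hypothetical placeholders; ``object`` and ``image`` are single-row point matrices)::

    using namespace cv;

    Mat object;       // 1xN object points, e.g. CV_32FC3
    Mat image;        // 1xN image projections, e.g. CV_32FC2
    Mat camera_mat;   // 3x3 intrinsic camera matrix
    Mat dist_coef;    // distortion coefficients (may be empty)
    Mat rvec, tvec;
    std::vector<int> inliers;

    cuda::solvePnPRansac(object, image, camera_mat, dist_coef, rvec, tvec,
                         false /*use_extrinsic_guess*/, 100 /*num_iters*/,
                         8.0f /*max_dist*/, 100 /*min_inlier_count*/, &inliers);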
@@ -1,6 +1,6 @@

************************************
gpu. GPU-accelerated Computer Vision
************************************

**************************************
cuda. CUDA-accelerated Computer Vision
**************************************

.. toctree::
    :maxdepth: 1

@@ -5,9 +5,9 @@ Data Structures

gpu::PtrStepSz
--------------
.. ocv:class:: gpu::PtrStepSz

cuda::PtrStepSz
---------------
.. ocv:class:: cuda::PtrStepSz

Lightweight class encapsulating pitched memory on a GPU and passed to nvcc-compiled code (CUDA kernels). Typically, it is used internally by OpenCV and by users who write device code. You can call its members from both host and device code. ::

@@ -30,11 +30,11 @@ Lightweight class encapsulating pitched memory on a GPU and passed to nvcc-compi

gpu::PtrStep
------------
.. ocv:class:: gpu::PtrStep

cuda::PtrStep
-------------
.. ocv:class:: cuda::PtrStep

Structure similar to :ocv:class:`gpu::PtrStepSz` but containing only a pointer and row step. Width and height fields are excluded due to performance reasons. The structure is intended for internal use or for users who write device code. ::

Structure similar to :ocv:class:`cuda::PtrStepSz` but containing only a pointer and row step. Width and height fields are excluded due to performance reasons. The structure is intended for internal use or for users who write device code. ::

    template <typename T> struct PtrStep : public DevPtr<T>
    {

@@ -57,9 +57,9 @@ Structure similar to :ocv:class:`gpu::PtrStepSz` but containing only a pointer a

gpu::GpuMat
-----------
.. ocv:class:: gpu::GpuMat

cuda::GpuMat
------------
.. ocv:class:: cuda::GpuMat

Base storage class for GPU memory with reference counting. Its interface matches the :ocv:class:`Mat` interface with the following limitations:

* no functions that return references to their data (because references on GPU are not valid for CPU)
* no expression templates technique support

Beware that the latter limitation may lead to overloaded matrix operators that cause memory allocations. The ``GpuMat`` class is convertible to :ocv:class:`gpu::PtrStepSz` and :ocv:class:`gpu::PtrStep` so it can be passed directly to the kernel.

Beware that the latter limitation may lead to overloaded matrix operators that cause memory allocations. The ``GpuMat`` class is convertible to :ocv:class:`cuda::PtrStepSz` and :ocv:class:`cuda::PtrStep` so it can be passed directly to the kernel.

.. note:: In contrast with :ocv:class:`Mat`, in most cases ``GpuMat::isContinuous() == false`` . This means that rows are aligned to a size depending on the hardware. Single-row ``GpuMat`` is always a continuous matrix.
@@ -76,34 +76,34 @@ Beware that the latter limitation may lead to overloaded matrix operators that c

    class CV_EXPORTS GpuMat
    {
    public:
        //! default constructor
        GpuMat();

        //! constructs GpuMat of the specified size and type
        GpuMat(int rows, int cols, int type);
        GpuMat(Size size, int type);

        .....

        //! builds GpuMat from host memory (Blocking call)
        explicit GpuMat(InputArray arr);

        //! returns lightweight PtrStepSz structure for passing
        //! to nvcc-compiled code. Contains size, data ptr and step.
        template <class T> operator PtrStepSz<T>() const;
        template <class T> operator PtrStep<T>() const;

        //! performs upload of data to GpuMat (Blocking call)
        void upload(InputArray arr);

        //! performs upload of data to GpuMat (Non-Blocking call)
        void upload(InputArray arr, Stream& stream);

        //! performs download of data from device to host memory (Blocking call)
        void download(OutputArray dst) const;

        //! performs download of data from device to host memory (Non-Blocking call)
        void download(OutputArray dst, Stream& stream) const;
    };
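For illustration, a minimal blocking round trip between host and device memory might look like this (the image path and the processing step are placeholders)::

    Mat host_img = imread("image.png", IMREAD_GRAYSCALE);

    cuda::GpuMat d_img;
    d_img.upload(host_img);      // blocking host-to-device copy

    // ... run CUDA-accelerated functions on d_img here ...

    Mat result;
    d_img.download(result);      // blocking device-to-host copy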
@@ -113,11 +113,11 @@ Beware that the latter limitation may lead to overloaded matrix operators that c

gpu::createContinuous
---------------------

cuda::createContinuous
----------------------

Creates a continuous matrix.

.. ocv:function:: void gpu::createContinuous(int rows, int cols, int type, OutputArray arr)

.. ocv:function:: void cuda::createContinuous(int rows, int cols, int type, OutputArray arr)

:param rows: Row count.
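For example, a sketch allocating a continuous single-channel matrix (the size and type are arbitrary)::

    cuda::GpuMat buf;
    cuda::createContinuous(480, 640, CV_8UC1, buf);
    CV_Assert(buf.isContinuous());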
@@ -131,11 +131,11 @@ Matrix is called continuous if its elements are stored continuously, that is, wi

gpu::ensureSizeIsEnough
-----------------------

cuda::ensureSizeIsEnough
------------------------

Ensures that the size of a matrix is big enough and the matrix has a proper type.

.. ocv:function:: void gpu::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)

.. ocv:function:: void cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)

:param rows: Minimum desired number of rows.

@@ -149,9 +149,9 @@ The function does not reallocate memory if the matrix has proper attributes alre

gpu::CudaMem
------------
.. ocv:class:: gpu::CudaMem

cuda::CudaMem
-------------
.. ocv:class:: cuda::CudaMem

Class with reference counting wrapping special memory type allocation functions from CUDA. Its interface is also :ocv:func:`Mat`-like but with additional memory type parameters.
@@ -191,47 +191,47 @@ Class with reference counting wrapping special memory type allocation functions

gpu::CudaMem::createMatHeader
-----------------------------
Creates a header without reference counting to :ocv:class:`gpu::CudaMem` data.

cuda::CudaMem::createMatHeader
------------------------------
Creates a header without reference counting to :ocv:class:`cuda::CudaMem` data.

.. ocv:function:: Mat gpu::CudaMem::createMatHeader() const

.. ocv:function:: Mat cuda::CudaMem::createMatHeader() const

gpu::CudaMem::createGpuMatHeader
--------------------------------
Maps CPU memory to GPU address space and creates the :ocv:class:`gpu::GpuMat` header without reference counting for it.

cuda::CudaMem::createGpuMatHeader
---------------------------------
Maps CPU memory to GPU address space and creates the :ocv:class:`cuda::GpuMat` header without reference counting for it.

.. ocv:function:: GpuMat gpu::CudaMem::createGpuMatHeader() const

.. ocv:function:: GpuMat cuda::CudaMem::createGpuMatHeader() const

This can be done only if memory was allocated with the ``SHARED`` flag and if it is supported by the hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which eliminates an extra copy.
gpu::registerPageLocked
-----------------------

cuda::registerPageLocked
------------------------

Page-locks the memory of a matrix and maps it for the device(s).

.. ocv:function:: void gpu::registerPageLocked(Mat& m)

.. ocv:function:: void cuda::registerPageLocked(Mat& m)

:param m: Input matrix.

gpu::unregisterPageLocked
-------------------------

cuda::unregisterPageLocked
--------------------------

Unmaps the memory of a matrix and makes it pageable again.

.. ocv:function:: void gpu::unregisterPageLocked(Mat& m)

.. ocv:function:: void cuda::unregisterPageLocked(Mat& m)

:param m: Input matrix.
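For illustration, page-locking an existing host matrix before repeated transfers might look like this (the matrix size and contents are placeholders)::

    Mat frame(480, 640, CV_8UC1);

    cuda::registerPageLocked(frame);    // pin the memory for faster DMA transfers

    // ... repeatedly upload/download frame to/from the device ...

    cuda::unregisterPageLocked(frame);  // make the memory pageable again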
gpu::Stream
-----------
.. ocv:class:: gpu::Stream

cuda::Stream
------------
.. ocv:class:: cuda::Stream

This class encapsulates a queue of asynchronous calls.

@@ -265,45 +265,45 @@ This class encapsulates a queue of asynchronous calls.

gpu::Stream::queryIfComplete
----------------------------

cuda::Stream::queryIfComplete
-----------------------------

Returns ``true`` if the current stream queue is finished. Otherwise, it returns ``false``.

.. ocv:function:: bool gpu::Stream::queryIfComplete()

.. ocv:function:: bool cuda::Stream::queryIfComplete()

gpu::Stream::waitForCompletion
------------------------------

cuda::Stream::waitForCompletion
-------------------------------

Blocks the current CPU thread until all operations in the stream are complete.

.. ocv:function:: void gpu::Stream::waitForCompletion()

.. ocv:function:: void cuda::Stream::waitForCompletion()

gpu::Stream::waitEvent
----------------------

cuda::Stream::waitEvent
-----------------------

Makes a compute stream wait on an event.

.. ocv:function:: void gpu::Stream::waitEvent(const Event& event)

.. ocv:function:: void cuda::Stream::waitEvent(const Event& event)

gpu::Stream::enqueueHostCallback
--------------------------------

cuda::Stream::enqueueHostCallback
---------------------------------

Adds a callback to be called on the host after all currently enqueued items in the stream have completed.

.. ocv:function:: void gpu::Stream::enqueueHostCallback(StreamCallback callback, void* userData)

.. ocv:function:: void cuda::Stream::enqueueHostCallback(StreamCallback callback, void* userData)

.. note:: Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization that may depend on outstanding device work or other callbacks that are not mandated to run earlier. Callbacks without a mandated order (in independent streams) execute in undefined order and may be serialized.
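For illustration, a minimal asynchronous upload-process-download sequence using the non-blocking ``GpuMat`` overloads above might look like this (the processing step is a placeholder)::

    cuda::Stream stream;

    Mat host_src(480, 640, CV_8UC1), host_dst;
    cuda::GpuMat d_src;

    d_src.upload(host_src, stream);     // non-blocking host-to-device copy
    // ... enqueue CUDA-accelerated operations on d_src with the same stream ...
    d_src.download(host_dst, stream);   // non-blocking device-to-host copy

    stream.waitForCompletion();         // block until the whole queue has finished

For the transfers to actually overlap with computation, the host matrices would typically be page-locked (see :ocv:func:`cuda::registerPageLocked`).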
gpu::StreamAccessor
-------------------
.. ocv:struct:: gpu::StreamAccessor

cuda::StreamAccessor
--------------------
.. ocv:struct:: cuda::StreamAccessor

Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is declared in ``stream_accessor.hpp`` because it is the only public header that depends on the CUDA Runtime API. Including it brings a dependency to your code. ::

Class that enables getting ``cudaStream_t`` from :ocv:class:`cuda::Stream` and is declared in ``stream_accessor.hpp`` because it is the only public header that depends on the CUDA Runtime API. Including it brings a dependency to your code. ::

    struct StreamAccessor
    {
@@ -5,51 +5,51 @@ Initialization and Information

gpu::getCudaEnabledDeviceCount
------------------------------

cuda::getCudaEnabledDeviceCount
-------------------------------

Returns the number of installed CUDA-enabled devices.

.. ocv:function:: int gpu::getCudaEnabledDeviceCount()

.. ocv:function:: int cuda::getCudaEnabledDeviceCount()

Use this function before any other GPU function calls. If OpenCV is compiled without GPU support, this function returns 0.

Use this function before any other CUDA function calls. If OpenCV is compiled without CUDA support, this function returns 0.

gpu::setDevice
--------------

cuda::setDevice
---------------

Sets a device and initializes it for the current thread.

.. ocv:function:: void gpu::setDevice(int device)

.. ocv:function:: void cuda::setDevice(int device)

:param device: System index of a GPU device starting with 0.

:param device: System index of a CUDA device starting with 0.

If the call of this function is omitted, a default device is initialized at the first GPU usage.

If the call of this function is omitted, a default device is initialized at the first CUDA usage.

gpu::getDevice
--------------
Returns the current device index set by :ocv:func:`gpu::setDevice` or initialized by default.

cuda::getDevice
---------------
Returns the current device index set by :ocv:func:`cuda::setDevice` or initialized by default.

.. ocv:function:: int gpu::getDevice()

.. ocv:function:: int cuda::getDevice()

gpu::resetDevice
----------------

cuda::resetDevice
-----------------

Explicitly destroys and cleans up all resources associated with the current device in the current process.

.. ocv:function:: void gpu::resetDevice()

.. ocv:function:: void cuda::resetDevice()

Any subsequent API call to this device will reinitialize the device.
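For illustration, enumerating the available devices and selecting one for the current thread might look like this::

    int device_count = cuda::getCudaEnabledDeviceCount();

    if (device_count > 0)
    {
        cuda::setDevice(0);                  // use the first CUDA device
        CV_Assert(cuda::getDevice() == 0);

        // ... run CUDA-accelerated code ...

        cuda::resetDevice();                 // optionally release all device resources
    }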
gpu::FeatureSet
---------------
Enumeration providing GPU computing features.

cuda::FeatureSet
----------------
Enumeration providing CUDA computing features.

.. ocv:enum:: gpu::FeatureSet

.. ocv:enum:: cuda::FeatureSet

.. ocv:emember:: FEATURE_SET_COMPUTE_10
.. ocv:emember:: FEATURE_SET_COMPUTE_11

@@ -62,33 +62,34 @@ Enumeration providing GPU computing features.

.. ocv:emember:: NATIVE_DOUBLE

gpu::TargetArchs
----------------
.. ocv:class:: gpu::TargetArchs

Class providing a set of static methods to check what NVIDIA* card architecture the GPU module was built for.

cuda::TargetArchs
-----------------
.. ocv:class:: cuda::TargetArchs

Class providing a set of static methods to check what NVIDIA* card architecture the CUDA module was built for.

The following method checks whether the module was built with the support of the given feature:

.. ocv:function:: static bool gpu::TargetArchs::builtWith( FeatureSet feature_set )

.. ocv:function:: static bool cuda::TargetArchs::builtWith( FeatureSet feature_set )

:param feature_set: Features to be checked. See :ocv:enum:`gpu::FeatureSet`.

:param feature_set: Features to be checked. See :ocv:enum:`cuda::FeatureSet`.

There is a set of methods to check whether the module contains intermediate (PTX) or binary GPU code for the given architecture(s):

There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA code for the given architecture(s):

.. ocv:function:: static bool gpu::TargetArchs::has(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::has(int major, int minor)

.. ocv:function:: static bool gpu::TargetArchs::hasPtx(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::hasPtx(int major, int minor)

.. ocv:function:: static bool gpu::TargetArchs::hasBin(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::hasBin(int major, int minor)

.. ocv:function:: static bool gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::hasEqualOrLessPtx(int major, int minor)

.. ocv:function:: static bool gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::hasEqualOrGreater(int major, int minor)

.. ocv:function:: static bool gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)

.. ocv:function:: static bool gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
.. ocv:function:: static bool cuda::TargetArchs::hasEqualOrGreaterBin(int major, int minor)

:param major: Major compute capability version.
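For illustration, checking what the module was built for might look like this (the feature and compute capability are examples)::

    // was the module built with support for a given feature?
    bool has_native_double = cuda::TargetArchs::builtWith(cuda::NATIVE_DOUBLE);

    // does the module contain binary or loadable PTX code for compute capability 3.0?
    bool cc30_usable = cuda::TargetArchs::hasBin(3, 0) ||
                       cuda::TargetArchs::hasEqualOrLessPtx(3, 0);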
@@ -98,9 +99,9 @@ According to the CUDA C Programming Guide Version 3.2: "PTX code produced for so

gpu::DeviceInfo
---------------
.. ocv:class:: gpu::DeviceInfo

cuda::DeviceInfo
----------------
.. ocv:class:: cuda::DeviceInfo

Class providing functionality for querying the specified GPU properties. ::
@@ -285,90 +286,90 @@ Class providing functionality for querying the specified GPU properties. ::

        //! checks whether device supports the given feature
        bool supports(FeatureSet feature_set) const;

        //! checks whether the GPU module can be run on the given device
        //! checks whether the CUDA module can be run on the given device
        bool isCompatible() const;
    };

gpu::DeviceInfo::DeviceInfo
---------------------------

cuda::DeviceInfo::DeviceInfo
----------------------------

The constructors.

.. ocv:function:: gpu::DeviceInfo::DeviceInfo()
.. ocv:function:: cuda::DeviceInfo::DeviceInfo()

.. ocv:function:: gpu::DeviceInfo::DeviceInfo(int device_id)
.. ocv:function:: cuda::DeviceInfo::DeviceInfo(int device_id)

:param device_id: System index of the GPU device starting with 0.

:param device_id: System index of the CUDA device starting with 0.

Constructs the ``DeviceInfo`` object for the specified device. If the ``device_id`` parameter is omitted, it constructs an object for the current device.

gpu::DeviceInfo::name
---------------------

cuda::DeviceInfo::name
----------------------

Returns the device name.

.. ocv:function:: const char* gpu::DeviceInfo::name() const
.. ocv:function:: const char* cuda::DeviceInfo::name() const

gpu::DeviceInfo::majorVersion
-----------------------------

cuda::DeviceInfo::majorVersion
------------------------------

Returns the major compute capability version.

.. ocv:function:: int gpu::DeviceInfo::majorVersion()
.. ocv:function:: int cuda::DeviceInfo::majorVersion()

gpu::DeviceInfo::minorVersion
-----------------------------

cuda::DeviceInfo::minorVersion
------------------------------

Returns the minor compute capability version.

.. ocv:function:: int gpu::DeviceInfo::minorVersion()
.. ocv:function:: int cuda::DeviceInfo::minorVersion()

gpu::DeviceInfo::freeMemory
---------------------------

cuda::DeviceInfo::freeMemory
----------------------------

Returns the amount of free memory in bytes.

.. ocv:function:: size_t gpu::DeviceInfo::freeMemory()
.. ocv:function:: size_t cuda::DeviceInfo::freeMemory()

gpu::DeviceInfo::totalMemory
----------------------------

cuda::DeviceInfo::totalMemory
-----------------------------

Returns the amount of total memory in bytes.

.. ocv:function:: size_t gpu::DeviceInfo::totalMemory()
.. ocv:function:: size_t cuda::DeviceInfo::totalMemory()

gpu::DeviceInfo::supports
-------------------------
Provides information on GPU feature support.

cuda::DeviceInfo::supports
--------------------------
Provides information on CUDA feature support.

.. ocv:function:: bool gpu::DeviceInfo::supports(FeatureSet feature_set) const
.. ocv:function:: bool cuda::DeviceInfo::supports(FeatureSet feature_set) const

:param feature_set: Features to be checked. See :ocv:enum:`gpu::FeatureSet`.

:param feature_set: Features to be checked. See :ocv:enum:`cuda::FeatureSet`.

This function returns ``true`` if the device has the specified GPU feature. Otherwise, it returns ``false`` .

This function returns ``true`` if the device has the specified CUDA feature. Otherwise, it returns ``false`` .

gpu::DeviceInfo::isCompatible
-----------------------------
Checks the GPU module and device compatibility.

cuda::DeviceInfo::isCompatible
------------------------------
Checks the CUDA module and device compatibility.

.. ocv:function:: bool gpu::DeviceInfo::isCompatible()
.. ocv:function:: bool cuda::DeviceInfo::isCompatible()

This function returns ``true`` if the GPU module can be run on the specified device. Otherwise, it returns ``false`` .

This function returns ``true`` if the CUDA module can be run on the specified device. Otherwise, it returns ``false`` .

gpu::DeviceInfo::deviceID
-------------------------
Returns system index of the GPU device starting with 0.

cuda::DeviceInfo::deviceID
--------------------------
Returns system index of the CUDA device starting with 0.

.. ocv:function:: int gpu::DeviceInfo::deviceID()
.. ocv:function:: int cuda::DeviceInfo::deviceID()
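For illustration, querying the current device and checking whether the module can run on it might look like this::

    cuda::DeviceInfo info(cuda::getDevice());

    std::cout << "Device " << info.deviceID() << ": " << info.name() << std::endl;
    std::cout << "Compute capability: " << info.majorVersion() << "."
              << info.minorVersion() << std::endl;
    std::cout << "Free / total memory: " << info.freeMemory() << " / "
              << info.totalMemory() << " bytes" << std::endl;

    if (!info.isCompatible())
        std::cout << "The CUDA module was not built for this device." << std::endl;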
@@ -1,21 +1,23 @@

GPU Module Introduction
=======================

CUDA Module Introduction
========================

.. highlight:: cpp

General Information
-------------------

The OpenCV GPU module is a set of classes and functions to utilize GPU computational capabilities. It is implemented using NVIDIA* CUDA* Runtime API and supports only NVIDIA GPUs. The OpenCV GPU module includes utility functions, low-level vision primitives, and high-level algorithms. The utility functions and low-level primitives provide a powerful infrastructure for developing fast vision algorithms taking advantage of GPU whereas the high-level functionality includes some state-of-the-art algorithms (such as stereo correspondence, face and people detectors, and others) ready to be used by the application developers.

The OpenCV CUDA module is a set of classes and functions to utilize CUDA computational capabilities. It is implemented using NVIDIA* CUDA* Runtime API and supports only NVIDIA GPUs. The OpenCV CUDA module includes utility functions, low-level vision primitives, and high-level algorithms. The utility functions and low-level primitives provide a powerful infrastructure for developing fast vision algorithms taking advantage of CUDA whereas the high-level functionality includes some state-of-the-art algorithms (such as stereo correspondence, face and people detectors, and others) ready to be used by the application developers.

The GPU module is designed as a host-level API. This means that if you have pre-compiled OpenCV GPU binaries, you are not required to have the CUDA Toolkit installed or write any extra code to make use of the GPU.

The CUDA module is designed as a host-level API. This means that if you have pre-compiled OpenCV CUDA binaries, you are not required to have the CUDA Toolkit installed or write any extra code to make use of CUDA.

The OpenCV GPU module is designed for ease of use and does not require any knowledge of CUDA. Though, such knowledge will certainly be useful to handle non-trivial cases or achieve the highest performance. It is helpful to understand the cost of various operations, what the GPU does, what the preferred data formats are, and so on. The GPU module is an effective instrument for quick implementation of GPU-accelerated computer vision algorithms. However, if your algorithm involves many simple operations, then, for the best possible performance, you may still need to write your own kernels to avoid extra write and read operations on the intermediate results.

The OpenCV CUDA module is designed for ease of use and does not require any knowledge of CUDA. Though, such knowledge will certainly be useful to handle non-trivial cases or achieve the highest performance. It is helpful to understand the cost of various operations, what the GPU does, what the preferred data formats are, and so on. The CUDA module is an effective instrument for quick implementation of CUDA-accelerated computer vision algorithms. However, if your algorithm involves many simple operations, then, for the best possible performance, you may still need to write your own kernels to avoid extra write and read operations on the intermediate results.

To enable CUDA support, configure OpenCV using ``CMake`` with ``WITH_CUDA=ON`` . When the flag is set and if CUDA is installed, the full-featured OpenCV GPU module is built. Otherwise, the module is still built but at runtime all functions from the module throw
To enable CUDA support, configure OpenCV using ``CMake`` with ``WITH_CUDA=ON`` . When the flag is set and if CUDA is installed, the full-featured OpenCV CUDA module is built. Otherwise, the module is still built but at runtime all functions from the module throw
:ocv:class:`Exception` with ``CV_GpuNotSupported`` error code, except for
:ocv:func:`gpu::getCudaEnabledDeviceCount()`. The latter function returns zero GPU count in this case. Building OpenCV without CUDA support does not perform device code compilation, so it does not require the CUDA Toolkit installed. Therefore, using the
:ocv:func:`gpu::getCudaEnabledDeviceCount()` function, you can implement a high-level algorithm that will detect GPU presence at runtime and choose an appropriate implementation (CPU or GPU) accordingly.
:ocv:func:`cuda::getCudaEnabledDeviceCount()`. The latter function returns zero GPU count in this case. Building OpenCV without CUDA support does not perform device code compilation, so it does not require the CUDA Toolkit installed. Therefore, using the
:ocv:func:`cuda::getCudaEnabledDeviceCount()` function, you can implement a high-level algorithm that will detect GPU presence at runtime and choose an appropriate implementation (CPU or GPU) accordingly.
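For illustration, such a runtime dispatch might look like this (the two branches are placeholders)::

    if (cuda::getCudaEnabledDeviceCount() > 0)
    {
        // CUDA-accelerated implementation
    }
    else
    {
        // CPU fallback implementation
    }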
Compilation for Different NVIDIA* Platforms
-------------------------------------------

@@ -23,7 +25,7 @@ Compilation for Different NVIDIA* Platforms

NVIDIA* compiler enables generating binary code (cubin and fatbin) and intermediate code (PTX). Binary code often implies a specific GPU architecture and generation, so the compatibility with other GPUs is not guaranteed. PTX is targeted for a virtual platform that is defined entirely by the set of capabilities or features. Depending on the selected virtual platform, some of the instructions are emulated or disabled, even if the real hardware supports all the features.

At the first call, the PTX code is compiled to binary code for the particular GPU using a JIT compiler. When the target GPU has a compute capability (CC) lower than the PTX code, JIT fails.
By default, the OpenCV GPU module includes:
By default, the OpenCV CUDA module includes:

*
    Binaries for compute capabilities 1.3 and 2.0 (controlled by ``CUDA_ARCH_BIN`` in ``CMake``)

@@ -34,16 +36,16 @@ By default, the OpenCV GPU module includes:

This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
:ocv:class:`Exception`. For platforms where JIT compilation is performed first, the run is slow.

On a GPU with CC 1.0, you can still compile the GPU module and most of the functions will run flawlessly. To achieve this, add "1.0" to the list of binaries, for example, ``CUDA_ARCH_BIN="1.0 1.3 2.0"`` . The functions that cannot be run on CC 1.0 GPUs throw an exception.

On a GPU with CC 1.0, you can still compile the CUDA module and most of the functions will run flawlessly. To achieve this, add "1.0" to the list of binaries, for example, ``CUDA_ARCH_BIN="1.0 1.3 2.0"`` . The functions that cannot be run on CC 1.0 GPUs throw an exception.

You can always determine at runtime whether the OpenCV GPU-built binaries (or PTX code) are compatible with your GPU. The function
:ocv:func:`gpu::DeviceInfo::isCompatible` returns the compatibility status (true/false).
:ocv:func:`cuda::DeviceInfo::isCompatible` returns the compatibility status (true/false).

Utilizing Multiple GPUs
-----------------------

In the current version, each of the OpenCV GPU algorithms can use only a single GPU. So, to utilize multiple GPUs, you have to manually distribute the work between GPUs.
Switching the active device can be done using the :ocv:func:`gpu::setDevice()` function. For more details, please read the CUDA C Programming Guide.

In the current version, each of the OpenCV CUDA algorithms can use only a single GPU. So, to utilize multiple GPUs, you have to manually distribute the work between GPUs.
Switching the active device can be done using the :ocv:func:`cuda::setDevice()` function (a sketch follows at the end of this section). For more details, please read the CUDA C Programming Guide.

While developing algorithms for multiple GPUs, note a data passing overhead. For primitive functions and small images, it can be significant, which may eliminate all the advantages of having multiple GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo Block Matching algorithm has been successfully parallelized using the following algorithm:

@@ -56,7 +58,4 @@ While developing algorithms for multiple GPUs, note a data passing overhead. For

3. Merge the results into a single disparity map.

With this algorithm, a dual GPU gave a 180
%
performance increase comparing to the single Fermi GPU. For a source code example, see
http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/gpu/.

With this algorithm, a dual GPU gave a 180% performance increase compared to the single Fermi GPU. For a source code example, see http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/gpu/.
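For illustration, a minimal sketch of splitting work between two GPUs from two host threads might look like this (the per-device processing is a placeholder, and each host thread keeps its own device current)::

    void processHalf(int device_id, const Mat& host_half, Mat& result_half)
    {
        cuda::setDevice(device_id);          // bind this thread to one GPU

        cuda::GpuMat d_img;
        d_img.upload(host_half);
        // ... run the CUDA-accelerated algorithm in-place on d_img ...
        d_img.download(result_half);
    }

    // Call processHalf(0, top_half, top_result) and processHalf(1, bottom_half, bottom_result)
    // from two separate host threads, then merge the two result halves.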
@@ -5,9 +5,9 @@ Object Detection

gpu::HOGDescriptor
------------------
.. ocv:struct:: gpu::HOGDescriptor

cuda::HOGDescriptor
-------------------
.. ocv:struct:: cuda::HOGDescriptor

The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detector. ::

@@ -65,15 +65,17 @@ Interfaces of all methods are kept similar to the ``CPU HOG`` descriptor and det

.. note::

    * An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/cpp/peopledetect.cpp
    * A GPU example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/gpu/hog.cpp
    * A CUDA example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/gpu/hog.cpp

    * (Python) An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/python2/peopledetect.py

gpu::HOGDescriptor::HOGDescriptor
---------------------------------

cuda::HOGDescriptor::HOGDescriptor
----------------------------------

Creates the ``HOG`` descriptor and detector.

.. ocv:function:: gpu::HOGDescriptor::HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16), Size block_stride=Size(8, 8), Size cell_size=Size(8, 8), int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA, double threshold_L2hys=0.2, bool gamma_correction=true, int nlevels=DEFAULT_NLEVELS)

.. ocv:function:: cuda::HOGDescriptor::HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16), Size block_stride=Size(8, 8), Size cell_size=Size(8, 8), int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA, double threshold_L2hys=0.2, bool gamma_correction=true, int nlevels=DEFAULT_NLEVELS)

:param win_size: Detection window size. Align to block size and block stride.

@@ -95,59 +97,59 @@ Creates the ``HOG`` descriptor and detector.

gpu::HOGDescriptor::getDescriptorSize
-------------------------------------

cuda::HOGDescriptor::getDescriptorSize
--------------------------------------

Returns the number of coefficients required for the classification.

.. ocv:function:: size_t gpu::HOGDescriptor::getDescriptorSize() const
.. ocv:function:: size_t cuda::HOGDescriptor::getDescriptorSize() const

gpu::HOGDescriptor::getBlockHistogramSize
-----------------------------------------

cuda::HOGDescriptor::getBlockHistogramSize
------------------------------------------

Returns the block histogram size.

.. ocv:function:: size_t gpu::HOGDescriptor::getBlockHistogramSize() const
.. ocv:function:: size_t cuda::HOGDescriptor::getBlockHistogramSize() const

gpu::HOGDescriptor::setSVMDetector
----------------------------------

cuda::HOGDescriptor::setSVMDetector
-----------------------------------

Sets coefficients for the linear SVM classifier.

.. ocv:function:: void gpu::HOGDescriptor::setSVMDetector(const vector<float>& detector)
.. ocv:function:: void cuda::HOGDescriptor::setSVMDetector(const vector<float>& detector)

gpu::HOGDescriptor::getDefaultPeopleDetector
--------------------------------------------

cuda::HOGDescriptor::getDefaultPeopleDetector
---------------------------------------------

Returns coefficients of the classifier trained for people detection (for the default window size).

.. ocv:function:: static vector<float> gpu::HOGDescriptor::getDefaultPeopleDetector()
.. ocv:function:: static vector<float> cuda::HOGDescriptor::getDefaultPeopleDetector()

gpu::HOGDescriptor::getPeopleDetector48x96
------------------------------------------

cuda::HOGDescriptor::getPeopleDetector48x96
-------------------------------------------

Returns coefficients of the classifier trained for people detection (for 48x96 windows).

.. ocv:function:: static vector<float> gpu::HOGDescriptor::getPeopleDetector48x96()
.. ocv:function:: static vector<float> cuda::HOGDescriptor::getPeopleDetector48x96()

gpu::HOGDescriptor::getPeopleDetector64x128
-------------------------------------------

cuda::HOGDescriptor::getPeopleDetector64x128
--------------------------------------------

Returns coefficients of the classifier trained for people detection (for 64x128 windows).

.. ocv:function:: static vector<float> gpu::HOGDescriptor::getPeopleDetector64x128()
.. ocv:function:: static vector<float> cuda::HOGDescriptor::getPeopleDetector64x128()

gpu::HOGDescriptor::detect
--------------------------

cuda::HOGDescriptor::detect
---------------------------

Performs object detection without a multi-scale window.

.. ocv:function:: void gpu::HOGDescriptor::detect(const GpuMat& img, vector<Point>& found_locations, double hit_threshold=0, Size win_stride=Size(), Size padding=Size())

.. ocv:function:: void cuda::HOGDescriptor::detect(const GpuMat& img, vector<Point>& found_locations, double hit_threshold=0, Size win_stride=Size(), Size padding=Size())

:param img: Source image. ``CV_8UC1`` and ``CV_8UC4`` types are supported for now.

@@ -161,17 +163,17 @@ Performs object detection without a multi-scale window.

gpu::HOGDescriptor::detectMultiScale
------------------------------------

cuda::HOGDescriptor::detectMultiScale
-------------------------------------

Performs object detection with a multi-scale window.

.. ocv:function:: void gpu::HOGDescriptor::detectMultiScale(const GpuMat& img, vector<Rect>& found_locations, double hit_threshold=0, Size win_stride=Size(), Size padding=Size(), double scale0=1.05, int group_threshold=2)

.. ocv:function:: void cuda::HOGDescriptor::detectMultiScale(const GpuMat& img, vector<Rect>& found_locations, double hit_threshold=0, Size win_stride=Size(), Size padding=Size(), double scale0=1.05, int group_threshold=2)

:param img: Source image. See :ocv:func:`gpu::HOGDescriptor::detect` for type limitations.

:param img: Source image. See :ocv:func:`cuda::HOGDescriptor::detect` for type limitations.

:param found_locations: Detected objects' boundaries.

:param hit_threshold: Threshold for the distance between features and SVM classifying plane. See :ocv:func:`gpu::HOGDescriptor::detect` for details.

:param hit_threshold: Threshold for the distance between features and SVM classifying plane. See :ocv:func:`cuda::HOGDescriptor::detect` for details.

:param win_stride: Window stride. It must be a multiple of block stride.
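For illustration, a typical people-detection pipeline built from the methods above might look like this (the image source is a placeholder)::

    cuda::HOGDescriptor hog;                                 // default 64x128 window
    hog.setSVMDetector(cuda::HOGDescriptor::getDefaultPeopleDetector());

    Mat frame = imread("street.png", IMREAD_GRAYSCALE);      // CV_8UC1 input
    cuda::GpuMat d_frame(frame);

    std::vector<Rect> people;
    hog.detectMultiScale(d_frame, people);

    for (size_t i = 0; i < people.size(); ++i)
        rectangle(frame, people[i], Scalar(255), 2);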
@@ -183,13 +185,13 @@ Performs object detection with a multi-scale window.

gpu::HOGDescriptor::getDescriptors
----------------------------------

cuda::HOGDescriptor::getDescriptors
-----------------------------------

Returns block descriptors computed for the whole image.

.. ocv:function:: void gpu::HOGDescriptor::getDescriptors(const GpuMat& img, Size win_stride, GpuMat& descriptors, int descr_format=DESCR_FORMAT_COL_BY_COL)

.. ocv:function:: void cuda::HOGDescriptor::getDescriptors(const GpuMat& img, Size win_stride, GpuMat& descriptors, int descr_format=DESCR_FORMAT_COL_BY_COL)

:param img: Source image. See :ocv:func:`gpu::HOGDescriptor::detect` for type limitations.

:param img: Source image. See :ocv:func:`cuda::HOGDescriptor::detect` for type limitations.

:param win_stride: Window stride. It must be a multiple of block stride.

@@ -204,18 +206,19 @@ Returns block descriptors computed for the whole image.

The function is mainly used to learn the classifier.

gpu::CascadeClassifier_GPU
--------------------------
.. ocv:class:: gpu::CascadeClassifier_GPU

cuda::CascadeClassifier_CUDA
----------------------------
.. ocv:class:: cuda::CascadeClassifier_CUDA

Cascade classifier class used for object detection. Supports HAAR and LBP cascades. ::

    class CV_EXPORTS CascadeClassifier_GPU
    class CV_EXPORTS CascadeClassifier_CUDA
    {
    public:
        CascadeClassifier_GPU();
        CascadeClassifier_GPU(const String& filename);
        ~CascadeClassifier_GPU();
        CascadeClassifier_CUDA();
        CascadeClassifier_CUDA(const String& filename);
        ~CascadeClassifier_CUDA();

        bool empty() const;
        bool load(const String& filename);

@@ -239,48 +242,51 @@ Cascade classifier class used for object detection. Supports HAAR and LBP cascad

    * A cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier.cpp
    * An NVIDIA API-specific cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp

gpu::CascadeClassifier_GPU::CascadeClassifier_GPU
-------------------------------------------------

cuda::CascadeClassifier_CUDA::CascadeClassifier_CUDA
----------------------------------------------------

Loads the classifier from a file. Cascade type is detected automatically by constructor parameter.

.. ocv:function:: gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const String& filename)

.. ocv:function:: cuda::CascadeClassifier_CUDA::CascadeClassifier_CUDA(const String& filename)

:param filename: Name of the file from which the classifier is loaded. Only the old ``haar`` classifier (trained by the ``haar`` training application) and NVIDIA's ``nvbin`` are supported for HAAR, and only the new type of OpenCV XML cascade is supported for LBP.

gpu::CascadeClassifier_GPU::empty
---------------------------------

cuda::CascadeClassifier_CUDA::empty
-----------------------------------

Checks whether the classifier is loaded or not.

.. ocv:function:: bool gpu::CascadeClassifier_GPU::empty() const

.. ocv:function:: bool cuda::CascadeClassifier_CUDA::empty() const

gpu::CascadeClassifier_GPU::load
--------------------------------

cuda::CascadeClassifier_CUDA::load
----------------------------------

Loads the classifier from a file. The previous content is destroyed.

.. ocv:function:: bool gpu::CascadeClassifier_GPU::load(const String& filename)

.. ocv:function:: bool cuda::CascadeClassifier_CUDA::load(const String& filename)

:param filename: Name of the file from which the classifier is loaded. Only the old ``haar`` classifier (trained by the ``haar`` training application) and NVIDIA's ``nvbin`` are supported for HAAR, and only the new type of OpenCV XML cascade is supported for LBP.

gpu::CascadeClassifier_GPU::release
-----------------------------------

cuda::CascadeClassifier_CUDA::release
-------------------------------------

Destroys the loaded classifier.

.. ocv:function:: void gpu::CascadeClassifier_GPU::release()

.. ocv:function:: void cuda::CascadeClassifier_CUDA::release()

gpu::CascadeClassifier_GPU::detectMultiScale
--------------------------------------------

cuda::CascadeClassifier_CUDA::detectMultiScale
----------------------------------------------

Detects objects of different sizes in the input image.

.. ocv:function:: int gpu::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor=1.2, int minNeighbors=4, Size minSize=Size())

.. ocv:function:: int cuda::CascadeClassifier_CUDA::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor=1.2, int minNeighbors=4, Size minSize=Size())

.. ocv:function:: int gpu::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize = Size(), double scaleFactor = 1.1, int minNeighbors = 4)

.. ocv:function:: int cuda::CascadeClassifier_CUDA::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize = Size(), double scaleFactor = 1.1, int minNeighbors = 4)

:param image: Matrix of type ``CV_8U`` containing an image where objects should be detected.

@@ -298,7 +304,7 @@ The detected objects are returned as a list of rectangles.

The function returns the number of detected objects, so you can retrieve them as in the following example: ::

    gpu::CascadeClassifier_GPU cascade_gpu(...);
    cuda::CascadeClassifier_CUDA cascade_gpu(...);

    Mat image_cpu = imread(...);
    GpuMat image_gpu(image_cpu);
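    // a hedged sketch of the remaining steps (buffer name, scale factor and color are illustrative)
    GpuMat objbuf;
    int detections_number = cascade_gpu.detectMultiScale(image_gpu, objbuf,
                                                         1.2, 4 /*minNeighbors*/);

    Mat obj_host;
    // download only the rectangles that were actually detected
    objbuf.colRange(0, detections_number).download(obj_host);

    Rect* faces = obj_host.ptr<Rect>();
    for (int i = 0; i < detections_number; ++i)
        rectangle(image_cpu, faces[i], Scalar(255));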