Merge remote-tracking branch 'remotes/upstream/2.4' into dc1394_2-1394b

Dominik Rose
2013-06-27 19:43:05 +02:00
428 changed files with 19621 additions and 7425 deletions

View File

@@ -2,4 +2,4 @@ if(NOT OPENCV_MODULES_PATH)
set(OPENCV_MODULES_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
endif()
ocv_glob_modules(${OPENCV_MODULES_PATH})
ocv_glob_modules(${OPENCV_MODULES_PATH} ${OPENCV_EXTRA_MODULES_PATH})

View File

@@ -6,7 +6,7 @@ set(the_description "Auxiliary module for Android native camera support")
set(OPENCV_MODULE_TYPE STATIC)
ocv_define_module(androidcamera INTERNAL opencv_core log dl)
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/camera_wrapper" "${OpenCV_SOURCE_DIR}/android/service/engine/jni/include")
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/camera_wrapper" "${OpenCV_SOURCE_DIR}/platforms/android/service/engine/jni/include")
# Android source tree for native camera
SET (ANDROID_SOURCE_TREE "ANDROID_SOURCE_TREE-NOTFOUND" CACHE PATH

View File

@@ -639,9 +639,9 @@ CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
double param1=3., double param2=0.99);
//! finds coordinates of epipolar lines corresponding the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );
CV_EXPORTS_W void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
InputArray projPoints1, InputArray projPoints2,

View File

@@ -115,31 +115,6 @@ namespace cv
transform(points, modif_points, transformation);
}
class Mutex
{
public:
Mutex() {
}
void lock()
{
#ifdef HAVE_TBB
resultsMutex.lock();
#endif
}
void unlock()
{
#ifdef HAVE_TBB
resultsMutex.unlock();
#endif
}
private:
#ifdef HAVE_TBB
tbb::mutex resultsMutex;
#endif
};
struct CameraParameters
{
void init(Mat _intrinsics, Mat _distCoeffs)

View File

@@ -699,7 +699,7 @@ struct PrefilterInvoker
};
struct FindStereoCorrespInvoker
struct FindStereoCorrespInvoker : ParallelLoopBody
{
FindStereoCorrespInvoker( const Mat& _left, const Mat& _right,
Mat& _disp, CvStereoBMState* _state,
@@ -713,12 +713,12 @@ struct FindStereoCorrespInvoker
validDisparityRect = _validDisparityRect;
}
void operator()( const BlockedRange& range ) const
void operator()( const Range& range ) const
{
int cols = left->cols, rows = left->rows;
int _row0 = min(cvRound(range.begin() * rows / nstripes), rows);
int _row1 = min(cvRound(range.end() * rows / nstripes), rows);
uchar *ptr = state->slidingSumBuf->data.ptr + range.begin() * stripeBufSize;
int _row0 = min(cvRound(range.start * rows / nstripes), rows);
int _row1 = min(cvRound(range.end * rows / nstripes), rows);
uchar *ptr = state->slidingSumBuf->data.ptr + range.start * stripeBufSize;
int FILTERED = (state->minDisparity - 1)*16;
Rect roi = validDisparityRect & Rect(0, _row0, cols, _row1 - _row0);
@@ -871,14 +871,10 @@ static void findStereoCorrespondenceBM( const Mat& left0, const Mat& right0, Mat
const bool useShorts = false;
#endif
#ifdef HAVE_TBB
const double SAD_overhead_coeff = 10.0;
double N0 = 8000000 / (useShorts ? 1 : 4); // approx tbb's min number instructions reasonable for one thread
double maxStripeSize = min(max(N0 / (width * ndisp), (wsz-1) * SAD_overhead_coeff), (double)height);
int nstripes = cvCeil(height / maxStripeSize);
#else
const int nstripes = 1;
#endif
int bufSize = max(bufSize0 * nstripes, max(bufSize1 * 2, bufSize2));
@@ -898,9 +894,9 @@ static void findStereoCorrespondenceBM( const Mat& left0, const Mat& right0, Mat
state->minDisparity, state->numberOfDisparities,
state->SADWindowSize);
parallel_for(BlockedRange(0, nstripes),
FindStereoCorrespInvoker(left, right, disp, state, nstripes,
bufSize0, useShorts, validDisparityRect));
parallel_for_(Range(0, nstripes),
FindStereoCorrespInvoker(left, right, disp, state, nstripes,
bufSize0, useShorts, validDisparityRect));
if( state->speckleRange >= 0 && state->speckleWindowSize > 0 )
{

View File

@@ -1,7 +1,7 @@
#include "opencv2/contrib/contrib.hpp"
#ifdef WIN32
#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#include <tchar.h>
#else

View File

@@ -489,6 +489,9 @@ Various Ptr constructors.
.. ocv:function:: Ptr::Ptr(_Tp* _obj)
.. ocv:function:: Ptr::Ptr(const Ptr& ptr)
:param _obj: Object for copy.
:param ptr: Object for copy.
Ptr::~Ptr
---------
The Ptr destructor.
@@ -501,6 +504,8 @@ Assignment operator.
.. ocv:function:: Ptr& Ptr::operator = (const Ptr& ptr)
:param ptr: Object for assignment.
Decrements own reference counter (with ``release()``) and increments ptr's reference counter.
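The constructor and assignment semantics described above amount to intrusive reference counting. A minimal sketch of that behaviour with the 2.4 C++ API (the wrapped object type is chosen here only for illustration):

```cpp
#include "opencv2/core/core.hpp"

int main()
{
    // Ptr takes ownership of a heap-allocated object and reference-counts it.
    cv::Ptr<cv::Mat> a(new cv::Mat(3, 3, CV_8U, cv::Scalar::all(0)));

    cv::Ptr<cv::Mat> b = a;       // copy constructor: both share the Mat, refcount == 2
    b = cv::Ptr<cv::Mat>();       // assignment releases b's reference; a keeps the object alive

    a->setTo(cv::Scalar::all(7)); // operator-> forwards to the wrapped object
    return 0;
}
```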
Ptr::addref
@@ -1465,6 +1470,7 @@ Adds elements to the bottom of the matrix.
.. ocv:function:: void Mat::push_back( const Mat& m )
:param elem: Added element(s).
:param m: Added line(s).
The methods add one or more elements to the bottom of the matrix. They emulate the corresponding method of the STL vector class. When ``elem`` is ``Mat`` , its type and the number of columns must be the same as in the container matrix.
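A minimal sketch of the ``Mat`` overload of ``push_back`` described above (the data values are illustrative):

```cpp
#include "opencv2/core/core.hpp"
#include <iostream>

int main()
{
    // A 1x3 float row; appended rows must have the same type and column count.
    cv::Mat table = (cv::Mat_<float>(1, 3) << 1, 2, 3);
    cv::Mat extra = (cv::Mat_<float>(2, 3) << 4, 5, 6,
                                              7, 8, 9);

    table.push_back(extra);  // emulates std::vector::push_back
    std::cout << table.rows << "x" << table.cols << std::endl;  // 3x3
    return 0;
}
```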
@@ -1691,7 +1697,7 @@ Returns the depth of a matrix element.
.. ocv:function:: int Mat::depth() const
The method returns the identifier of the matrix element depth (the type of each individual channel). For example, for a 16-bit signed 3-channel array, the method returns ``CV_16S`` . A complete list of matrix types contains the following values:
The method returns the identifier of the matrix element depth (the type of each individual channel). For example, for a 16-bit signed element array, the method returns ``CV_16S`` . A complete list of matrix types contains the following values:
* ``CV_8U`` - 8-bit unsigned integers ( ``0..255`` )
@@ -2160,7 +2166,6 @@ Various SparseMat constructors.
:param dims: Array dimensionality.
:param _sizes: Sparse matrix size on all dimensions.
:param _type: Sparse matrix data type.
:param try1d: if try1d is true and matrix is a single-column matrix (Nx1), then the sparse matrix will be 1-dimensional.
SparseMat::~SparseMat
---------------------
@@ -2175,6 +2180,8 @@ Provides sparse matrix assignment operators.
.. ocv:function:: SparseMat& SparseMat::operator = (const SparseMat& m)
.. ocv:function:: SparseMat& SparseMat::operator = (const Mat& m)
:param m: Matrix for assignment.
The last variant is equivalent to the corresponding constructor with try1d=false.
@@ -2202,6 +2209,10 @@ Convert sparse matrix with possible type change and scaling.
.. ocv:function:: void SparseMat::convertTo( SparseMat& m, int rtype, double alpha=1 ) const
.. ocv:function:: void SparseMat::convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const
:param m: Destination matrix.
:param rtype: Destination matrix type.
:param alpha: Conversion multiplier.
The first version converts arbitrary sparse matrix to dense matrix and multiplies all the matrix elements by the specified scalar.
The second version converts sparse matrix to dense matrix with optional type conversion and scaling.
When rtype=-1, the destination element type will be the same as the sparse matrix element type.
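A minimal sketch of both overloads, assuming the 2.4 ``SparseMat`` API (sizes and values are illustrative):

```cpp
#include "opencv2/core/core.hpp"

int main()
{
    // A 10x10 single-channel float sparse matrix with two non-zero entries.
    int sizes[] = { 10, 10 };
    cv::SparseMat sm(2, sizes, CV_32F);
    sm.ref<float>(1, 2) = 3.5f;
    sm.ref<float>(7, 7) = -1.0f;

    // Sparse -> sparse, keeping the element type (rtype = -1) and scaling by alpha.
    cv::SparseMat scaled;
    sm.convertTo(scaled, -1, 0.5);

    // Sparse -> dense with type conversion and scaling (alpha, beta).
    cv::Mat dense;
    sm.convertTo(dense, CV_64F, 2.0, 1.0);
    return 0;
}
```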
@@ -2294,7 +2305,7 @@ The method returns the number of matrix channels.
SparseMat::size
---------------
Returns the array of sizes or matrix size by i dimention and 0 if the matrix is not allocated.
Returns the array of sizes or matrix size by i dimension and 0 if the matrix is not allocated.
.. ocv:function:: const int* SparseMat::size() const
.. ocv:function:: int SparseMat::size(int i) const
@@ -2322,6 +2333,11 @@ Compute element hash value from the element indices.
.. ocv:function:: size_t SparseMat::hash(int i0, int i1, int i2) const
.. ocv:function:: size_t SparseMat::hash(const int* idx) const
:param i0: The first dimension index.
:param i1: The second dimension index.
:param i2: The third dimension index.
:param idx: Array of element indices for multidimensional matrices.
SparseMat::ptr
--------------
Low-level element-access functions, special variants for 1D, 2D, 3D cases, and the generic one for n-D case.
@@ -2331,6 +2347,12 @@ Low-level element-access functions, special variants for 1D, 2D, 3D cases, and t
.. ocv:function:: uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0)
.. ocv:function:: uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval=0)
:param i0: The first dimension index.
:param i1: The second dimension index.
:param i2: The third dimension index.
:param idx: Array of element indices for multidimensional matrices.
:param createMissing: Create new element with 0 value if it does not exist in SparseMat.
Return pointer to the matrix element. If the element is there (it is non-zero), the pointer to it is returned.
If it is not there and ``createMissing=false``, NULL pointer is returned. If it is not there and ``createMissing=true``,
the new element is created and initialized with 0. Pointer to it is returned. If the optional hashval pointer is not ``NULL``,
@@ -2344,6 +2366,11 @@ Erase the specified matrix element. When there is no such an element, the method
.. ocv:function:: void SparseMat::erase(int i0, int i1, int i2, size_t* hashval=0)
.. ocv:function:: void SparseMat::erase(const int* idx, size_t* hashval=0)
:param i0: The first dimension index.
:param i1: The second dimension index.
:param i2: The third dimension index.
:param idx: Array of element indices for multidimensional matrices.
SparseMat\_
-----------
.. ocv:class:: SparseMat_

View File

@@ -17,12 +17,18 @@ Finds centers of clusters and groups input samples around the clusters.
:param samples: Floating-point matrix of input samples, one row per sample.
:param data: Data for clustering.
:param cluster_count: Number of clusters to split the set by.
:param K: Number of clusters to split the set by.
:param labels: Input/output integer array that stores the cluster indices for every sample.
:param criteria: The algorithm termination criteria, that is, the maximum number of iterations and/or the desired accuracy. The accuracy is specified as ``criteria.epsilon``. As soon as each of the cluster centers moves by less than ``criteria.epsilon`` on some iteration, the algorithm stops.
:param termcrit: The algorithm termination criteria, that is, the maximum number of iterations and/or the desired accuracy.
:param attempts: Flag to specify the number of times the algorithm is executed using different initial labellings. The algorithm returns the labels that yield the best compactness (see the last function parameter).
:param rng: CvRNG state initialized by RNG().
@@ -37,6 +43,8 @@ Finds centers of clusters and groups input samples around the clusters.
:param centers: Output matrix of the cluster centers, one row per each cluster center.
:param _centers: Output matrix of the cluster centers, one row per each cluster center.
:param compactness: The returned value that is described below.
The function ``kmeans`` implements a k-means algorithm that finds the centers of ``cluster_count`` clusters and groups the input samples around the clusters.
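The parameters listed above map onto the C++ overload as in this minimal sketch (sample data and settings are illustrative):

```cpp
#include "opencv2/core/core.hpp"

int main()
{
    // Six 2-D samples, one per row; kmeans expects floating-point input.
    float pts[6][2] = { {1.0f, 1.0f}, {1.2f, 0.9f}, {0.8f, 1.1f},
                        {5.0f, 5.0f}, {5.1f, 4.9f}, {4.8f, 5.2f} };
    cv::Mat samples(6, 2, CV_32F, pts);

    cv::Mat labels, centers;
    cv::TermCriteria criteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0);

    // K = 2 clusters, 3 attempts with different initial labellings;
    // the best (lowest) compactness is returned.
    double compactness = cv::kmeans(samples, 2, labels, criteria, 3,
                                    cv::KMEANS_PP_CENTERS, centers);
    (void)compactness;
    return 0;
}
```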

View File

@@ -234,6 +234,8 @@ Calculates the width and height of a text string.
:param text: Input text string.
:param text_string: Input text string in C format.
:param fontFace: Font to use. See the :ocv:func:`putText` for details.
:param fontScale: Font scale. See the :ocv:func:`putText` for details.
@@ -242,6 +244,12 @@ Calculates the width and height of a text string.
:param baseLine: Output parameter - y-coordinate of the baseline relative to the bottom-most text point.
:param baseline: Output parameter - y-coordinate of the baseline relative to the bottom-most text point.
:param font: Font description in terms of old C API.
:param text_size: Output parameter - The size of a box that contains the specified text.
The function ``getTextSize`` calculates and returns the size of a box that contains the specified text.
That is, the following code renders some text, the tight box surrounding it, and the baseline: ::
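The literal block introduced by the ``::`` marker lies outside this hunk; a sketch of such code, assuming the 2.4 drawing API (text, font and colors chosen here for illustration), might look like:

```cpp
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <string>

int main()
{
    std::string text = "Funny text inside the box";
    int fontFace = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
    double fontScale = 2;
    int thickness = 3;

    cv::Mat img(600, 800, CV_8UC3, cv::Scalar::all(0));

    int baseline = 0;
    cv::Size textSize = cv::getTextSize(text, fontFace, fontScale, thickness, &baseline);
    baseline += thickness;

    // Center the text, then draw the tight box and the baseline around it.
    cv::Point textOrg((img.cols - textSize.width) / 2, (img.rows + textSize.height) / 2);
    cv::rectangle(img, textOrg + cv::Point(0, baseline),
                  textOrg + cv::Point(textSize.width, -textSize.height),
                  cv::Scalar(0, 0, 255));
    cv::line(img, textOrg + cv::Point(0, thickness),
             textOrg + cv::Point(textSize.width, thickness),
             cv::Scalar(0, 0, 255));
    cv::putText(img, text, textOrg, fontFace, fontScale, cv::Scalar::all(255), thickness, 8);

    cv::imwrite("text_box.png", img);
    return 0;
}
```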

View File

@@ -1062,6 +1062,8 @@ Returns the determinant of a square floating-point matrix.
:param mtx: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size.
:param mat: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size.
The function ``determinant`` calculates and returns the determinant of the specified matrix. For small matrices ( ``mtx.cols=mtx.rows<=3`` ),
the direct method is used. For larger matrices, the function uses LU factorization with partial pivoting.
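A quick sketch of the direct (small-matrix) case:

```cpp
#include "opencv2/core/core.hpp"
#include <iostream>

int main()
{
    cv::Mat A = (cv::Mat_<double>(2, 2) << 4, 7,
                                           2, 6);
    std::cout << cv::determinant(A) << std::endl;  // 4*6 - 7*2 = 10
    return 0;
}
```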

View File

@@ -173,6 +173,8 @@ Checks a condition at runtime and throws exception if it fails
.. ocv:function:: CV_Assert(expr)
:param expr: Expression for check.
The macros ``CV_Assert`` (and ``CV_DbgAssert``) evaluate the specified expression. If it is 0, the macros raise an error (see :ocv:func:`error` ). The macro ``CV_Assert`` checks the condition in both Debug and Release configurations while ``CV_DbgAssert`` is only retained in the Debug configuration.
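A minimal sketch of using the macro as a precondition check (the function and data are illustrative):

```cpp
#include "opencv2/core/core.hpp"

static void normalizeInPlace(cv::Mat& m)
{
    // Raises cv::Exception in both Debug and Release builds if the check fails.
    CV_Assert(m.type() == CV_32FC1 && m.rows > 0);
    m = m / cv::norm(m);
}

int main()
{
    cv::Mat v = (cv::Mat_<float>(3, 1) << 3.f, 0.f, 4.f);
    normalizeInPlace(v);  // passes: single-channel float with 3 rows
    return 0;
}
```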
@@ -188,8 +190,14 @@ Signals an error and raises an exception.
:param status: Error code. Normally, it is a negative value. The list of pre-defined error codes can be found in ``cxerror.h`` .
:param func_name: The function name where error occurs.
:param err_msg: Text of the error message.
:param file_name: The file name where error occurs.
:param line: The line number where error occurs.
:param args: ``printf`` -like formatted error message in parentheses.
The function and the helper macros ``CV_Error`` and ``CV_Error_``: ::
@@ -249,6 +257,7 @@ Allocates an aligned memory buffer.
.. ocv:cfunction:: void* cvAlloc( size_t size )
:param size: Allocated buffer size.
:param bufSize: Allocated buffer size.
The function allocates the buffer of the specified size and returns it. When the buffer size is 16 bytes or more, the returned buffer is aligned to 16 bytes.

View File

@@ -181,6 +181,17 @@ Opens a file.
.. ocv:function:: bool FileStorage::open(const string& filename, int flags, const string& encoding=string())
:param filename: Name of the file to open or the text string to read the data from.
Extension of the file (``.xml`` or ``.yml``/``.yaml``) determines its format (XML or YAML respectively).
You can also append ``.gz`` to work with compressed files, for example ``myHugeMatrix.xml.gz``.
If both ``FileStorage::WRITE`` and ``FileStorage::MEMORY`` flags are specified, ``source``
is used just to specify the output file format (e.g. ``mydata.xml``, ``.yml`` etc.).
:param flags: Mode of operation. See FileStorage constructor for more details.
:param encoding: Encoding of the file. Note that UTF-16 XML encoding is not supported currently and you should use 8-bit encoding instead of it.
See description of parameters in :ocv:func:`FileStorage::FileStorage`. The method calls :ocv:func:`FileStorage::release` before opening the file.
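A minimal sketch of opening, writing and re-reading a storage file (file and key names are illustrative):

```cpp
#include "opencv2/core/core.hpp"

int main()
{
    cv::Mat m = cv::Mat::eye(3, 3, CV_64F);

    cv::FileStorage fs;
    fs.open("calib.yml.gz", cv::FileStorage::WRITE);  // .gz suffix => compressed YAML
    fs << "camera_matrix" << m;
    fs.release();

    fs.open("calib.yml.gz", cv::FileStorage::READ);   // open() releases the previous file first
    cv::Mat restored;
    fs["camera_matrix"] >> restored;
    fs.release();
    return 0;
}
```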

View File

@@ -1322,7 +1322,8 @@ public:
EXPR = 6 << KIND_SHIFT,
OPENGL_BUFFER = 7 << KIND_SHIFT,
OPENGL_TEXTURE = 8 << KIND_SHIFT,
GPU_MAT = 9 << KIND_SHIFT
GPU_MAT = 9 << KIND_SHIFT,
OCL_MAT =10 << KIND_SHIFT
};
_InputArray();
@@ -3409,8 +3410,6 @@ public:
//! converts dense 2d matrix to the sparse form
/*!
\param m the input matrix
\param try1d if true and m is a single-column matrix (Nx1),
then the sparse matrix will be 1-dimensional.
*/
explicit SparseMat(const Mat& m);
//! converts old-style sparse matrix to the new-style. All the data is copied
@@ -4813,6 +4812,9 @@ public:
~AutoLock() { mutex->unlock(); }
protected:
Mutex* mutex;
private:
AutoLock(const AutoLock&);
AutoLock& operator = (const AutoLock&);
};
}

View File

@@ -50,6 +50,9 @@
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/core/types_c.h"
#if defined WIN32 || defined _WIN32
# ifndef WIN32
# define WIN32
@@ -251,6 +254,10 @@ namespace cv
body(range);
}
#endif
// Returns a static string if there is a parallel framework,
// NULL otherwise.
CV_EXPORTS const char* currentParallelFramework();
} //namespace cv
#define CV_INIT_ALGORITHM(classname, algname, memberinit) \

View File

@@ -34,7 +34,8 @@ PERF_TEST_P(Size_MatType_ROp, reduceR,
declare.in(src, WARMUP_RNG).out(vec);
declare.time(100);
TEST_CYCLE() reduce(src, vec, 0, reduceOp, ddepth);
int runs = 15;
TEST_CYCLE_MULTIRUN(runs) reduce(src, vec, 0, reduceOp, ddepth);
SANITY_CHECK(vec, 1);
}
@@ -65,4 +66,3 @@ PERF_TEST_P(Size_MatType_ROp, reduceC,
SANITY_CHECK(vec, 1);
}

View File

@@ -2855,9 +2855,9 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComp
if( _mean.data )
{
CV_Assert( _mean.size() == mean_sz );
CV_Assert( _mean.size() == mean_sz );
_mean.convertTo(mean, ctype);
covar_flags |= CV_COVAR_USE_AVG;
covar_flags |= CV_COVAR_USE_AVG;
}
calcCovarMatrix( data, covar, mean, covar_flags, ctype );
@@ -2901,6 +2901,36 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComp
return *this;
}
template <typename T>
int computeCumulativeEnergy(const Mat& eigenvalues, double retainedVariance)
{
CV_DbgAssert( eigenvalues.type() == DataType<T>::type );
Mat g(eigenvalues.size(), DataType<T>::type);
for(int ig = 0; ig < g.rows; ig++)
{
g.at<T>(ig, 0) = 0;
for(int im = 0; im <= ig; im++)
{
g.at<T>(ig,0) += eigenvalues.at<T>(im,0);
}
}
int L;
for(L = 0; L < eigenvalues.rows; L++)
{
double energy = g.at<T>(L, 0) / g.at<T>(g.rows - 1, 0);
if(energy > retainedVariance)
break;
}
L = std::max(2, L);
return L;
}
PCA& PCA::computeVar(InputArray _data, InputArray __mean, int flags, double retainedVariance)
{
Mat data = _data.getMat(), _mean = __mean.getMat();
@@ -2977,26 +3007,11 @@ PCA& PCA::computeVar(InputArray _data, InputArray __mean, int flags, double reta
}
// compute the cumulative energy content for each eigenvector
Mat g(eigenvalues.size(), ctype);
for(int ig = 0; ig < g.rows; ig++)
{
g.at<float>(ig,0) = 0;
for(int im = 0; im <= ig; im++)
{
g.at<float>(ig,0) += eigenvalues.at<float>(im,0);
}
}
int L;
for(L = 0; L < eigenvalues.rows; L++)
{
double energy = g.at<float>(L, 0) / g.at<float>(g.rows - 1, 0);
if(energy > retainedVariance)
break;
}
L = std::max(2, L);
if (ctype == CV_32F)
L = computeCumulativeEnergy<float>(eigenvalues, retainedVariance);
else
L = computeCumulativeEnergy<double>(eigenvalues, retainedVariance);
// use clone() to physically copy the data and thus deallocate the original matrices
eigenvalues = eigenvalues.rowRange(0,L).clone();
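The hunk above factors the cumulative-energy search into a ``computeCumulativeEnergy`` template so it works for both float and double eigenvalues. A minimal sketch of the caller it serves, ``PCA::computeVar`` (the data here is random and only illustrative):

```cpp
#include "opencv2/core/core.hpp"

int main()
{
    // 100 samples of a 10-dimensional signal, one sample per row.
    cv::Mat data(100, 10, CV_32F);
    cv::randn(data, cv::Scalar::all(0), cv::Scalar::all(1));

    // Keep as many leading eigenvectors as needed to retain 95% of the
    // variance (never fewer than 2, per the std::max(2, L) clamp above).
    cv::PCA pca;
    pca.computeVar(data, cv::Mat(), cv::PCA::DATA_AS_ROW, 0.95);

    cv::Mat projected = pca.project(data);  // 100 x L
    return 0;
}
```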

View File

@@ -980,6 +980,11 @@ Mat _InputArray::getMat(int i) const
return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
@@ -1062,6 +1067,11 @@ void _InputArray::getMatVector(vector<Mat>& mv) const
return;
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
@@ -1189,6 +1199,11 @@ Size _InputArray::size(int i) const
return tex->size();
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT )
{
@@ -1303,6 +1318,11 @@ bool _InputArray::empty() const
if( k == OPENGL_TEXTURE )
return ((const ogl::Texture2D*)obj)->empty();
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT )
return ((const gpu::GpuMat*)obj)->empty();
@@ -1523,6 +1543,11 @@ void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool all
return;
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
if( k == NONE )
{
CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
@@ -1634,6 +1659,11 @@ void _OutputArray::release() const
return;
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{

View File

@@ -110,8 +110,16 @@
#endif
#endif
#if defined HAVE_TBB || defined HAVE_CSTRIPES || defined HAVE_OPENMP || defined HAVE_GCD || defined HAVE_CONCURRENCY
#define HAVE_PARALLEL_FRAMEWORK
#if defined HAVE_TBB && TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
# define CV_PARALLEL_FRAMEWORK "tbb"
#elif defined HAVE_CSTRIPES
# define CV_PARALLEL_FRAMEWORK "cstripes"
#elif defined HAVE_OPENMP
# define CV_PARALLEL_FRAMEWORK "openmp"
#elif defined HAVE_GCD
# define CV_PARALLEL_FRAMEWORK "gcd"
#elif defined HAVE_CONCURRENCY
# define CV_PARALLEL_FRAMEWORK "ms-concurrency"
#endif
namespace cv
@@ -121,7 +129,7 @@ namespace cv
namespace
{
#ifdef HAVE_PARALLEL_FRAMEWORK
#ifdef CV_PARALLEL_FRAMEWORK
class ParallelLoopBodyWrapper
{
public:
@@ -218,7 +226,7 @@ public:
static SchedPtr pplScheduler;
#endif
#endif // HAVE_PARALLEL_FRAMEWORK
#endif // CV_PARALLEL_FRAMEWORK
} //namespace
@@ -226,7 +234,7 @@ static SchedPtr pplScheduler;
void cv::parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body, double nstripes)
{
#ifdef HAVE_PARALLEL_FRAMEWORK
#ifdef CV_PARALLEL_FRAMEWORK
if(numThreads != 0)
{
@@ -281,7 +289,7 @@ void cv::parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body,
}
else
#endif // HAVE_PARALLEL_FRAMEWORK
#endif // CV_PARALLEL_FRAMEWORK
{
(void)nstripes;
body(range);
@@ -290,7 +298,7 @@ void cv::parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body,
int cv::getNumThreads(void)
{
#ifdef HAVE_PARALLEL_FRAMEWORK
#ifdef CV_PARALLEL_FRAMEWORK
if(numThreads == 0)
return 1;
@@ -333,7 +341,7 @@ int cv::getNumThreads(void)
void cv::setNumThreads( int threads )
{
(void)threads;
#ifdef HAVE_PARALLEL_FRAMEWORK
#ifdef CV_PARALLEL_FRAMEWORK
numThreads = threads;
#endif
@@ -480,6 +488,14 @@ int cv::getNumberOfCPUs(void)
#endif
}
const char* cv::currentParallelFramework() {
#ifdef CV_PARALLEL_FRAMEWORK
return CV_PARALLEL_FRAMEWORK;
#else
return NULL;
#endif
}
CV_IMPL void cvSetNumThreads(int nt)
{
cv::setNumThreads(nt);
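The new ``cv::currentParallelFramework()`` added above reports which backend ``parallel_for_`` dispatches to. A minimal sketch of the ``Range``/``ParallelLoopBody`` pattern that the rest of this commit migrates to (the loop body is illustrative):

```cpp
#include "opencv2/core/core.hpp"
#include <cstdio>

// Squares every element of a float matrix, one stripe of rows per invocation.
class SquareRows : public cv::ParallelLoopBody
{
public:
    explicit SquareRows(cv::Mat& m) : m_(m) {}

    void operator()(const cv::Range& range) const
    {
        for (int r = range.start; r < range.end; ++r)
        {
            float* p = m_.ptr<float>(r);
            for (int c = 0; c < m_.cols; ++c)
                p[c] *= p[c];
        }
    }

private:
    cv::Mat& m_;
};

int main()
{
    cv::Mat m(1000, 1000, CV_32F, cv::Scalar::all(3));
    cv::parallel_for_(cv::Range(0, m.rows), SquareRows(m));

    const char* fw = cv::currentParallelFramework();
    std::printf("parallel framework: %s\n", fw ? fw : "none");
    return 0;
}
```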

View File

@@ -217,7 +217,7 @@ For each query descriptor, finds the training descriptors not farther than the s
:param compactResult: Parameter used when the mask (or masks) is not empty. If ``compactResult`` is false, the ``matches`` vector has the same size as ``queryDescriptors`` rows. If ``compactResult`` is true, the ``matches`` vector does not contain matches for fully masked-out query descriptors.
:param maxDistance: Threshold for the distance between matched descriptors.
:param maxDistance: Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!
For each query descriptor, the methods find such training descriptors that the distance between the query descriptor and the training descriptor is equal or smaller than ``maxDistance``. Found matches are returned in the distance increasing order.
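A minimal sketch of ``radiusMatch`` with a brute-force Hamming matcher, to underline that ``maxDistance`` is a descriptor-space threshold (the descriptors here are random and only illustrative):

```cpp
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <vector>

int main()
{
    // Two small sets of 32-byte binary descriptors, one descriptor per row.
    cv::Mat query(5, 32, CV_8U), train(20, 32, CV_8U);
    cv::randu(query, cv::Scalar::all(0), cv::Scalar::all(255));
    cv::randu(train, cv::Scalar::all(0), cv::Scalar::all(255));

    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector<std::vector<cv::DMatch> > matches;

    // maxDistance is measured in descriptor space (Hamming distance here),
    // not in pixels between keypoint coordinates.
    matcher.radiusMatch(query, train, matches, 60.0f);
    return 0;
}
```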

View File

@@ -214,7 +214,7 @@ static void keepStrongest( int N, vector<KeyPoint>& keypoints )
}
namespace {
class GridAdaptedFeatureDetectorInvoker
class GridAdaptedFeatureDetectorInvoker : public ParallelLoopBody
{
private:
int gridRows_, gridCols_;
@@ -223,29 +223,24 @@ private:
const Mat& image_;
const Mat& mask_;
const Ptr<FeatureDetector>& detector_;
#ifdef HAVE_TBB
tbb::mutex* kptLock_;
#endif
Mutex* kptLock_;
GridAdaptedFeatureDetectorInvoker& operator=(const GridAdaptedFeatureDetectorInvoker&); // to quiet MSVC
public:
GridAdaptedFeatureDetectorInvoker(const Ptr<FeatureDetector>& detector, const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints, int maxPerCell, int gridRows, int gridCols
#ifdef HAVE_TBB
, tbb::mutex* kptLock
#endif
) : gridRows_(gridRows), gridCols_(gridCols), maxPerCell_(maxPerCell),
keypoints_(keypoints), image_(image), mask_(mask), detector_(detector)
#ifdef HAVE_TBB
, kptLock_(kptLock)
#endif
GridAdaptedFeatureDetectorInvoker(const Ptr<FeatureDetector>& detector, const Mat& image, const Mat& mask,
vector<KeyPoint>& keypoints, int maxPerCell, int gridRows, int gridCols,
cv::Mutex* kptLock)
: gridRows_(gridRows), gridCols_(gridCols), maxPerCell_(maxPerCell),
keypoints_(keypoints), image_(image), mask_(mask), detector_(detector),
kptLock_(kptLock)
{
}
void operator() (const BlockedRange& range) const
void operator() (const Range& range) const
{
for (int i = range.begin(); i < range.end(); ++i)
for (int i = range.start; i < range.end; ++i)
{
int celly = i / gridCols_;
int cellx = i - celly * gridCols_;
@@ -270,9 +265,8 @@ public:
it->pt.x += col_range.start;
it->pt.y += row_range.start;
}
#ifdef HAVE_TBB
tbb::mutex::scoped_lock join_keypoints(*kptLock_);
#endif
cv::AutoLock join_keypoints(*kptLock_);
keypoints_.insert( keypoints_.end(), sub_keypoints.begin(), sub_keypoints.end() );
}
}
@@ -289,13 +283,9 @@ void GridAdaptedFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>&
keypoints.reserve(maxTotalKeypoints);
int maxPerCell = maxTotalKeypoints / (gridRows * gridCols);
#ifdef HAVE_TBB
tbb::mutex kptLock;
cv::parallel_for(cv::BlockedRange(0, gridRows * gridCols),
cv::Mutex kptLock;
cv::parallel_for_(cv::Range(0, gridRows * gridCols),
GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols, &kptLock));
#else
GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols)(cv::BlockedRange(0, gridRows * gridCols));
#endif
}
/*

View File

@@ -45,16 +45,16 @@ if(HAVE_CUDA)
set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
if(WITH_NVCUVID)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvid_LIBRARY})
endif()
set(cuda_link_libs ${cuda_link_libs} ${CUDA_CUDA_LIBRARY} ${CUDA_nvcuvid_LIBRARY})
if(WIN32)
find_cuda_helper_libs(nvcuvenc)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvenc_LIBRARY})
endif()
if(WIN32)
find_cuda_helper_libs(nvcuvenc)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvenc_LIBRARY})
endif()
if(WITH_FFMPEG)
set(cuda_link_libs ${cuda_link_libs} ${HIGHGUI_LIBRARIES})
if(WITH_FFMPEG)
set(cuda_link_libs ${cuda_link_libs} ${HIGHGUI_LIBRARIES})
endif()
endif()
else()
set(lib_cuda "")

View File

@@ -120,11 +120,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2RGB()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ RGB2RGB(const RGB2RGB& other_)
:unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ RGB2RGB() {}
__host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {}
};
template <> struct RGB2RGB<uchar, 4, 4, 2> : unary_function<uint, uint>
@@ -141,8 +138,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2RGB():unary_function<uint, uint>(){}
__device__ __forceinline__ RGB2RGB(const RGB2RGB& other_):unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ RGB2RGB() {}
__host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {}
};
}
@@ -203,8 +200,8 @@ namespace cv { namespace gpu { namespace device
return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);
}
__device__ __forceinline__ RGB2RGB5x5():unary_function<uchar3, ushort>(){}
__device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5& other_):unary_function<uchar3, ushort>(){}
__host__ __device__ __forceinline__ RGB2RGB5x5() {}
__host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {}
};
template<int bidx, int green_bits> struct RGB2RGB5x5<4, bidx,green_bits> : unary_function<uint, ushort>
@@ -214,8 +211,8 @@ namespace cv { namespace gpu { namespace device
return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);
}
__device__ __forceinline__ RGB2RGB5x5():unary_function<uint, ushort>(){}
__device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5& other_):unary_function<uint, ushort>(){}
__host__ __device__ __forceinline__ RGB2RGB5x5() {}
__host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {}
};
}
@@ -282,8 +279,8 @@ namespace cv { namespace gpu { namespace device
RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);
return dst;
}
__device__ __forceinline__ RGB5x52RGB():unary_function<ushort, uchar3>(){}
__device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB& other_):unary_function<ushort, uchar3>(){}
__host__ __device__ __forceinline__ RGB5x52RGB() {}
__host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {}
};
@@ -295,8 +292,8 @@ namespace cv { namespace gpu { namespace device
RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);
return dst;
}
__device__ __forceinline__ RGB5x52RGB():unary_function<ushort, uint>(){}
__device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB& other_):unary_function<ushort, uint>(){}
__host__ __device__ __forceinline__ RGB5x52RGB() {}
__host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {}
};
}
@@ -325,9 +322,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ Gray2RGB():unary_function<T, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ Gray2RGB(const Gray2RGB& other_)
: unary_function<T, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ Gray2RGB() {}
__host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {}
};
template <> struct Gray2RGB<uchar, 4> : unary_function<uchar, uint>
@@ -342,8 +338,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ Gray2RGB():unary_function<uchar, uint>(){}
__device__ __forceinline__ Gray2RGB(const Gray2RGB& other_):unary_function<uchar, uint>(){}
__host__ __device__ __forceinline__ Gray2RGB() {}
__host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {}
};
}
@@ -384,8 +380,8 @@ namespace cv { namespace gpu { namespace device
return Gray2RGB5x5Converter<green_bits>::cvt(src);
}
__device__ __forceinline__ Gray2RGB5x5():unary_function<uchar, ushort>(){}
__device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5& other_):unary_function<uchar, ushort>(){}
__host__ __device__ __forceinline__ Gray2RGB5x5() {}
__host__ __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5&) {}
};
}
@@ -426,8 +422,8 @@ namespace cv { namespace gpu { namespace device
{
return RGB5x52GrayConverter<green_bits>::cvt(src);
}
__device__ __forceinline__ RGB5x52Gray() : unary_function<ushort, uchar>(){}
__device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray& other_) : unary_function<ushort, uchar>(){}
__host__ __device__ __forceinline__ RGB5x52Gray() {}
__host__ __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray&) {}
};
}
@@ -467,9 +463,8 @@ namespace cv { namespace gpu { namespace device
{
return RGB2GrayConvert<bidx>(&src.x);
}
__device__ __forceinline__ RGB2Gray() : unary_function<typename TypeVec<T, scn>::vec_type, T>(){}
__device__ __forceinline__ RGB2Gray(const RGB2Gray& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, T>(){}
__host__ __device__ __forceinline__ RGB2Gray() {}
__host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}
};
template <int bidx> struct RGB2Gray<uchar, 4, bidx> : unary_function<uint, uchar>
@@ -478,8 +473,8 @@ namespace cv { namespace gpu { namespace device
{
return RGB2GrayConvert<bidx>(src);
}
__device__ __forceinline__ RGB2Gray() : unary_function<uint, uchar>(){}
__device__ __forceinline__ RGB2Gray(const RGB2Gray& other_) : unary_function<uint, uchar>(){}
__host__ __device__ __forceinline__ RGB2Gray() {}
__host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}
};
}
@@ -529,10 +524,8 @@ namespace cv { namespace gpu { namespace device
RGB2YUVConvert<bidx>(&src.x, dst);
return dst;
}
__device__ __forceinline__ RGB2YUV()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ RGB2YUV(const RGB2YUV& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ RGB2YUV() {}
__host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {}
};
}
@@ -609,10 +602,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ YUV2RGB()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ YUV2RGB(const YUV2RGB& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ YUV2RGB() {}
__host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}
};
template <int bidx> struct YUV2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>
@@ -621,8 +612,8 @@ namespace cv { namespace gpu { namespace device
{
return YUV2RGBConvert<bidx>(src);
}
__device__ __forceinline__ YUV2RGB() : unary_function<uint, uint>(){}
__device__ __forceinline__ YUV2RGB(const YUV2RGB& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ YUV2RGB() {}
__host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}
};
}
@@ -689,10 +680,8 @@ namespace cv { namespace gpu { namespace device
RGB2YCrCbConvert<bidx>(&src.x, dst);
return dst;
}
__device__ __forceinline__ RGB2YCrCb()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ RGB2YCrCb() {}
__host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}
};
template <int bidx> struct RGB2YCrCb<uchar, 4, 4, bidx> : unary_function<uint, uint>
@@ -702,8 +691,8 @@ namespace cv { namespace gpu { namespace device
return RGB2YCrCbConvert<bidx>(src);
}
__device__ __forceinline__ RGB2YCrCb() : unary_function<uint, uint>(){}
__device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ RGB2YCrCb() {}
__host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}
};
}
@@ -771,10 +760,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ YCrCb2RGB()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ YCrCb2RGB() {}
__host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}
};
template <int bidx> struct YCrCb2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>
@@ -783,8 +770,8 @@ namespace cv { namespace gpu { namespace device
{
return YCrCb2RGBConvert<bidx>(src);
}
__device__ __forceinline__ YCrCb2RGB() : unary_function<uint, uint>(){}
__device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ YCrCb2RGB() {}
__host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}
};
}
@@ -849,10 +836,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2XYZ()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ RGB2XYZ(const RGB2XYZ& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ RGB2XYZ() {}
__host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}
};
template <int bidx> struct RGB2XYZ<uchar, 4, 4, bidx> : unary_function<uint, uint>
@@ -861,8 +846,8 @@ namespace cv { namespace gpu { namespace device
{
return RGB2XYZConvert<bidx>(src);
}
__device__ __forceinline__ RGB2XYZ() : unary_function<uint, uint>(){}
__device__ __forceinline__ RGB2XYZ(const RGB2XYZ& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ RGB2XYZ() {}
__host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}
};
}
@@ -926,10 +911,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ XYZ2RGB()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ XYZ2RGB(const XYZ2RGB& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ XYZ2RGB() {}
__host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}
};
template <int bidx> struct XYZ2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>
@@ -938,8 +921,8 @@ namespace cv { namespace gpu { namespace device
{
return XYZ2RGBConvert<bidx>(src);
}
__device__ __forceinline__ XYZ2RGB() : unary_function<uint, uint>(){}
__device__ __forceinline__ XYZ2RGB(const XYZ2RGB& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ XYZ2RGB() {}
__host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}
};
}
@@ -1066,10 +1049,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2HSV()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ RGB2HSV(const RGB2HSV& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ RGB2HSV() {}
__host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}
};
template <int bidx, int hr> struct RGB2HSV<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
@@ -1078,8 +1059,8 @@ namespace cv { namespace gpu { namespace device
{
return RGB2HSVConvert<bidx, hr>(src);
}
__device__ __forceinline__ RGB2HSV():unary_function<uint, uint>(){}
__device__ __forceinline__ RGB2HSV(const RGB2HSV& other_):unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ RGB2HSV() {}
__host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}
};
}
@@ -1208,10 +1189,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ HSV2RGB()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ HSV2RGB(const HSV2RGB& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ HSV2RGB() {}
__host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}
};
template <int bidx, int hr> struct HSV2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
@@ -1220,8 +1199,8 @@ namespace cv { namespace gpu { namespace device
{
return HSV2RGBConvert<bidx, hr>(src);
}
__device__ __forceinline__ HSV2RGB():unary_function<uint, uint>(){}
__device__ __forceinline__ HSV2RGB(const HSV2RGB& other_):unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ HSV2RGB() {}
__host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}
};
}
@@ -1343,10 +1322,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2HLS()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ RGB2HLS(const RGB2HLS& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ RGB2HLS() {}
__host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}
};
template <int bidx, int hr> struct RGB2HLS<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
@@ -1355,8 +1332,8 @@ namespace cv { namespace gpu { namespace device
{
return RGB2HLSConvert<bidx, hr>(src);
}
__device__ __forceinline__ RGB2HLS() : unary_function<uint, uint>(){}
__device__ __forceinline__ RGB2HLS(const RGB2HLS& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ RGB2HLS() {}
__host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}
};
}
@@ -1485,10 +1462,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ HLS2RGB()
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__device__ __forceinline__ HLS2RGB(const HLS2RGB& other_)
: unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}
__host__ __device__ __forceinline__ HLS2RGB() {}
__host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}
};
template <int bidx, int hr> struct HLS2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
@@ -1497,8 +1472,8 @@ namespace cv { namespace gpu { namespace device
{
return HLS2RGBConvert<bidx, hr>(src);
}
__device__ __forceinline__ HLS2RGB() : unary_function<uint, uint>(){}
__device__ __forceinline__ HLS2RGB(const HLS2RGB& other_) : unary_function<uint, uint>(){}
__host__ __device__ __forceinline__ HLS2RGB() {}
__host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}
};
}
@@ -1651,8 +1626,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2Lab() {}
__device__ __forceinline__ RGB2Lab(const RGB2Lab& other_) {}
__host__ __device__ __forceinline__ RGB2Lab() {}
__host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
};
template <int scn, int dcn, bool srgb, int blueIdx>
struct RGB2Lab<float, scn, dcn, srgb, blueIdx>
@@ -1666,8 +1641,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2Lab() {}
__device__ __forceinline__ RGB2Lab(const RGB2Lab& other_) {}
__host__ __device__ __forceinline__ RGB2Lab() {}
__host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
};
}
@@ -1764,8 +1739,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ Lab2RGB() {}
__device__ __forceinline__ Lab2RGB(const Lab2RGB& other_) {}
__host__ __device__ __forceinline__ Lab2RGB() {}
__host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}
};
template <int scn, int dcn, bool srgb, int blueIdx>
struct Lab2RGB<float, scn, dcn, srgb, blueIdx>
@@ -1779,8 +1754,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ Lab2RGB() {}
__device__ __forceinline__ Lab2RGB(const Lab2RGB& other_) {}
__host__ __device__ __forceinline__ Lab2RGB() {}
__host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}
};
}
@@ -1863,8 +1838,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2Luv() {}
__device__ __forceinline__ RGB2Luv(const RGB2Luv& other_) {}
__host__ __device__ __forceinline__ RGB2Luv() {}
__host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}
};
template <int scn, int dcn, bool srgb, int blueIdx>
struct RGB2Luv<float, scn, dcn, srgb, blueIdx>
@@ -1878,8 +1853,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ RGB2Luv() {}
__device__ __forceinline__ RGB2Luv(const RGB2Luv& other_) {}
__host__ __device__ __forceinline__ RGB2Luv() {}
__host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}
};
}
@@ -1964,8 +1939,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ Luv2RGB() {}
__device__ __forceinline__ Luv2RGB(const Luv2RGB& other_) {}
__host__ __device__ __forceinline__ Luv2RGB() {}
__host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
};
template <int scn, int dcn, bool srgb, int blueIdx>
struct Luv2RGB<float, scn, dcn, srgb, blueIdx>
@@ -1979,8 +1954,8 @@ namespace cv { namespace gpu { namespace device
return dst;
}
__device__ __forceinline__ Luv2RGB() {}
__device__ __forceinline__ Luv2RGB(const Luv2RGB& other_) {}
__host__ __device__ __forceinline__ Luv2RGB() {}
__host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
};
}

View File

@@ -63,8 +63,8 @@ namespace cv { namespace gpu { namespace device
{
return a + b;
}
__device__ __forceinline__ plus(const plus& other):binary_function<T,T,T>(){}
__device__ __forceinline__ plus():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ plus() {}
__host__ __device__ __forceinline__ plus(const plus&) {}
};
template <typename T> struct minus : binary_function<T, T, T>
@@ -74,8 +74,8 @@ namespace cv { namespace gpu { namespace device
{
return a - b;
}
__device__ __forceinline__ minus(const minus& other):binary_function<T,T,T>(){}
__device__ __forceinline__ minus():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ minus() {}
__host__ __device__ __forceinline__ minus(const minus&) {}
};
template <typename T> struct multiplies : binary_function<T, T, T>
@@ -85,8 +85,8 @@ namespace cv { namespace gpu { namespace device
{
return a * b;
}
__device__ __forceinline__ multiplies(const multiplies& other):binary_function<T,T,T>(){}
__device__ __forceinline__ multiplies():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ multiplies() {}
__host__ __device__ __forceinline__ multiplies(const multiplies&) {}
};
template <typename T> struct divides : binary_function<T, T, T>
@@ -96,8 +96,8 @@ namespace cv { namespace gpu { namespace device
{
return a / b;
}
__device__ __forceinline__ divides(const divides& other):binary_function<T,T,T>(){}
__device__ __forceinline__ divides():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ divides() {}
__host__ __device__ __forceinline__ divides(const divides&) {}
};
template <typename T> struct modulus : binary_function<T, T, T>
@@ -107,8 +107,8 @@ namespace cv { namespace gpu { namespace device
{
return a % b;
}
__device__ __forceinline__ modulus(const modulus& other):binary_function<T,T,T>(){}
__device__ __forceinline__ modulus():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ modulus() {}
__host__ __device__ __forceinline__ modulus(const modulus&) {}
};
template <typename T> struct negate : unary_function<T, T>
@@ -117,8 +117,8 @@ namespace cv { namespace gpu { namespace device
{
return -a;
}
__device__ __forceinline__ negate(const negate& other):unary_function<T,T>(){}
__device__ __forceinline__ negate():unary_function<T,T>(){}
__host__ __device__ __forceinline__ negate() {}
__host__ __device__ __forceinline__ negate(const negate&) {}
};
// Comparison Operations
@@ -129,8 +129,8 @@ namespace cv { namespace gpu { namespace device
{
return a == b;
}
__device__ __forceinline__ equal_to(const equal_to& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ equal_to():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ equal_to() {}
__host__ __device__ __forceinline__ equal_to(const equal_to&) {}
};
template <typename T> struct not_equal_to : binary_function<T, T, bool>
@@ -140,8 +140,8 @@ namespace cv { namespace gpu { namespace device
{
return a != b;
}
__device__ __forceinline__ not_equal_to(const not_equal_to& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ not_equal_to():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ not_equal_to() {}
__host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {}
};
template <typename T> struct greater : binary_function<T, T, bool>
@@ -151,8 +151,8 @@ namespace cv { namespace gpu { namespace device
{
return a > b;
}
__device__ __forceinline__ greater(const greater& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ greater():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ greater() {}
__host__ __device__ __forceinline__ greater(const greater&) {}
};
template <typename T> struct less : binary_function<T, T, bool>
@@ -162,8 +162,8 @@ namespace cv { namespace gpu { namespace device
{
return a < b;
}
__device__ __forceinline__ less(const less& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ less():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ less() {}
__host__ __device__ __forceinline__ less(const less&) {}
};
template <typename T> struct greater_equal : binary_function<T, T, bool>
@@ -173,8 +173,8 @@ namespace cv { namespace gpu { namespace device
{
return a >= b;
}
__device__ __forceinline__ greater_equal(const greater_equal& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ greater_equal():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ greater_equal() {}
__host__ __device__ __forceinline__ greater_equal(const greater_equal&) {}
};
template <typename T> struct less_equal : binary_function<T, T, bool>
@@ -184,8 +184,8 @@ namespace cv { namespace gpu { namespace device
{
return a <= b;
}
__device__ __forceinline__ less_equal(const less_equal& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ less_equal():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ less_equal() {}
__host__ __device__ __forceinline__ less_equal(const less_equal&) {}
};
// Logical Operations
@@ -196,8 +196,8 @@ namespace cv { namespace gpu { namespace device
{
return a && b;
}
__device__ __forceinline__ logical_and(const logical_and& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ logical_and():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ logical_and() {}
__host__ __device__ __forceinline__ logical_and(const logical_and&) {}
};
template <typename T> struct logical_or : binary_function<T, T, bool>
@@ -207,8 +207,8 @@ namespace cv { namespace gpu { namespace device
{
return a || b;
}
__device__ __forceinline__ logical_or(const logical_or& other):binary_function<T,T,bool>(){}
__device__ __forceinline__ logical_or():binary_function<T,T,bool>(){}
__host__ __device__ __forceinline__ logical_or() {}
__host__ __device__ __forceinline__ logical_or(const logical_or&) {}
};
template <typename T> struct logical_not : unary_function<T, bool>
@@ -217,8 +217,8 @@ namespace cv { namespace gpu { namespace device
{
return !a;
}
__device__ __forceinline__ logical_not(const logical_not& other):unary_function<T,bool>(){}
__device__ __forceinline__ logical_not():unary_function<T,bool>(){}
__host__ __device__ __forceinline__ logical_not() {}
__host__ __device__ __forceinline__ logical_not(const logical_not&) {}
};
// Bitwise Operations
@@ -229,8 +229,8 @@ namespace cv { namespace gpu { namespace device
{
return a & b;
}
__device__ __forceinline__ bit_and(const bit_and& other):binary_function<T,T,T>(){}
__device__ __forceinline__ bit_and():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ bit_and() {}
__host__ __device__ __forceinline__ bit_and(const bit_and&) {}
};
template <typename T> struct bit_or : binary_function<T, T, T>
@@ -240,8 +240,8 @@ namespace cv { namespace gpu { namespace device
{
return a | b;
}
__device__ __forceinline__ bit_or(const bit_or& other):binary_function<T,T,T>(){}
__device__ __forceinline__ bit_or():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ bit_or() {}
__host__ __device__ __forceinline__ bit_or(const bit_or&) {}
};
template <typename T> struct bit_xor : binary_function<T, T, T>
@@ -251,8 +251,8 @@ namespace cv { namespace gpu { namespace device
{
return a ^ b;
}
__device__ __forceinline__ bit_xor(const bit_xor& other):binary_function<T,T,T>(){}
__device__ __forceinline__ bit_xor():binary_function<T,T,T>(){}
__host__ __device__ __forceinline__ bit_xor() {}
__host__ __device__ __forceinline__ bit_xor(const bit_xor&) {}
};
template <typename T> struct bit_not : unary_function<T, T>
@@ -261,8 +261,8 @@ namespace cv { namespace gpu { namespace device
{
return ~v;
}
__device__ __forceinline__ bit_not(const bit_not& other):unary_function<T,T>(){}
__device__ __forceinline__ bit_not():unary_function<T,T>(){}
__host__ __device__ __forceinline__ bit_not() {}
__host__ __device__ __forceinline__ bit_not(const bit_not&) {}
};
// Generalized Identity Operations
@@ -272,8 +272,8 @@ namespace cv { namespace gpu { namespace device
{
return x;
}
__device__ __forceinline__ identity(const identity& other):unary_function<T,T>(){}
__device__ __forceinline__ identity():unary_function<T,T>(){}
__host__ __device__ __forceinline__ identity() {}
__host__ __device__ __forceinline__ identity(const identity&) {}
};
template <typename T1, typename T2> struct project1st : binary_function<T1, T2, T1>
@@ -282,8 +282,8 @@ namespace cv { namespace gpu { namespace device
{
return lhs;
}
__device__ __forceinline__ project1st(const project1st& other):binary_function<T1,T2,T1>(){}
__device__ __forceinline__ project1st():binary_function<T1,T2,T1>(){}
__host__ __device__ __forceinline__ project1st() {}
__host__ __device__ __forceinline__ project1st(const project1st&) {}
};
template <typename T1, typename T2> struct project2nd : binary_function<T1, T2, T2>
@@ -292,8 +292,8 @@ namespace cv { namespace gpu { namespace device
{
return rhs;
}
__device__ __forceinline__ project2nd(const project2nd& other):binary_function<T1,T2,T2>(){}
__device__ __forceinline__ project2nd():binary_function<T1,T2,T2>(){}
__host__ __device__ __forceinline__ project2nd() {}
__host__ __device__ __forceinline__ project2nd(const project2nd&) {}
};
// Min/Max Operations
@@ -302,8 +302,8 @@ namespace cv { namespace gpu { namespace device
template <> struct name<type> : binary_function<type, type, type> \
{ \
__device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \
__device__ __forceinline__ name() {}\
__device__ __forceinline__ name(const name&) {}\
__host__ __device__ __forceinline__ name() {}\
__host__ __device__ __forceinline__ name(const name&) {}\
};
template <typename T> struct maximum : binary_function<T, T, T>
@@ -312,8 +312,8 @@ namespace cv { namespace gpu { namespace device
{
return max(lhs, rhs);
}
__device__ __forceinline__ maximum() {}
__device__ __forceinline__ maximum(const maximum&) {}
__host__ __device__ __forceinline__ maximum() {}
__host__ __device__ __forceinline__ maximum(const maximum&) {}
};
OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max)
@@ -332,8 +332,8 @@ namespace cv { namespace gpu { namespace device
{
return min(lhs, rhs);
}
__device__ __forceinline__ minimum() {}
__device__ __forceinline__ minimum(const minimum&) {}
__host__ __device__ __forceinline__ minimum() {}
__host__ __device__ __forceinline__ minimum(const minimum&) {}
};
OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min)
@@ -349,7 +349,6 @@ namespace cv { namespace gpu { namespace device
#undef OPENCV_GPU_IMPLEMENT_MINMAX
// Math functions
///bound=========================================
template <typename T> struct abs_func : unary_function<T, T>
{
@@ -358,8 +357,8 @@ namespace cv { namespace gpu { namespace device
return abs(x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<unsigned char> : unary_function<unsigned char, unsigned char>
{
@@ -368,8 +367,8 @@ namespace cv { namespace gpu { namespace device
return x;
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<signed char> : unary_function<signed char, signed char>
{
@@ -378,8 +377,8 @@ namespace cv { namespace gpu { namespace device
return ::abs((int)x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<char> : unary_function<char, char>
{
@@ -388,8 +387,8 @@ namespace cv { namespace gpu { namespace device
return ::abs((int)x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<unsigned short> : unary_function<unsigned short, unsigned short>
{
@@ -398,8 +397,8 @@ namespace cv { namespace gpu { namespace device
return x;
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<short> : unary_function<short, short>
{
@@ -408,8 +407,8 @@ namespace cv { namespace gpu { namespace device
return ::abs((int)x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<unsigned int> : unary_function<unsigned int, unsigned int>
{
@@ -418,8 +417,8 @@ namespace cv { namespace gpu { namespace device
return x;
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<int> : unary_function<int, int>
{
@@ -428,8 +427,8 @@ namespace cv { namespace gpu { namespace device
return ::abs(x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<float> : unary_function<float, float>
{
@@ -438,8 +437,8 @@ namespace cv { namespace gpu { namespace device
return ::fabsf(x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<double> : unary_function<double, double>
{
@@ -448,8 +447,8 @@ namespace cv { namespace gpu { namespace device
return ::fabs(x);
}
__device__ __forceinline__ abs_func() {}
__device__ __forceinline__ abs_func(const abs_func&) {}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \
@@ -459,8 +458,8 @@ namespace cv { namespace gpu { namespace device
{ \
return func ## f(v); \
} \
__device__ __forceinline__ name ## _func() {} \
__device__ __forceinline__ name ## _func(const name ## _func&) {} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
}; \
template <> struct name ## _func<double> : unary_function<double, double> \
{ \
@@ -468,8 +467,8 @@ namespace cv { namespace gpu { namespace device
{ \
return func(v); \
} \
__device__ __forceinline__ name ## _func() {} \
__device__ __forceinline__ name ## _func(const name ## _func&) {} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
#define OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(name, func) \
@@ -479,6 +478,8 @@ namespace cv { namespace gpu { namespace device
{ \
return func ## f(v1, v2); \
} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
}; \
template <> struct name ## _func<double> : binary_function<double, double, double> \
{ \
@@ -486,6 +487,8 @@ namespace cv { namespace gpu { namespace device
{ \
return func(v1, v2); \
} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
@@ -522,8 +525,8 @@ namespace cv { namespace gpu { namespace device
{
return src1 * src1 + src2 * src2;
}
__device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func& other) : binary_function<T, T, float>(){}
__device__ __forceinline__ hypot_sqr_func() : binary_function<T, T, float>(){}
__host__ __device__ __forceinline__ hypot_sqr_func() {}
__host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {}
};
// Saturate Cast Functor
@@ -533,8 +536,8 @@ namespace cv { namespace gpu { namespace device
{
return saturate_cast<D>(v);
}
__device__ __forceinline__ saturate_cast_func(const saturate_cast_func& other):unary_function<T, D>(){}
__device__ __forceinline__ saturate_cast_func():unary_function<T, D>(){}
__host__ __device__ __forceinline__ saturate_cast_func() {}
__host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {}
};
// Threshold Functors
@@ -547,10 +550,9 @@ namespace cv { namespace gpu { namespace device
return (src > thresh) * maxVal;
}
__device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)
: unary_function<T, T>(), thresh(other.thresh), maxVal(other.maxVal){}
__device__ __forceinline__ thresh_binary_func():unary_function<T, T>(){}
__host__ __device__ __forceinline__ thresh_binary_func() {}
__host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)
: thresh(other.thresh), maxVal(other.maxVal) {}
const T thresh;
const T maxVal;
@@ -565,10 +567,9 @@ namespace cv { namespace gpu { namespace device
return (src <= thresh) * maxVal;
}
__device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)
: unary_function<T, T>(), thresh(other.thresh), maxVal(other.maxVal){}
__device__ __forceinline__ thresh_binary_inv_func():unary_function<T, T>(){}
__host__ __device__ __forceinline__ thresh_binary_inv_func() {}
__host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)
: thresh(other.thresh), maxVal(other.maxVal) {}
const T thresh;
const T maxVal;
@@ -583,10 +584,9 @@ namespace cv { namespace gpu { namespace device
return minimum<T>()(src, thresh);
}
__device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)
: unary_function<T, T>(), thresh(other.thresh){}
__device__ __forceinline__ thresh_trunc_func():unary_function<T, T>(){}
__host__ __device__ __forceinline__ thresh_trunc_func() {}
__host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)
: thresh(other.thresh) {}
const T thresh;
};
@@ -599,10 +599,10 @@ namespace cv { namespace gpu { namespace device
{
return (src > thresh) * src;
}
__device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)
: unary_function<T, T>(), thresh(other.thresh){}
__device__ __forceinline__ thresh_to_zero_func():unary_function<T, T>(){}
__host__ __device__ __forceinline__ thresh_to_zero_func() {}
__host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)
: thresh(other.thresh) {}
const T thresh;
};
@@ -615,14 +615,14 @@ namespace cv { namespace gpu { namespace device
{
return (src <= thresh) * src;
}
__device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)
: unary_function<T, T>(), thresh(other.thresh){}
__device__ __forceinline__ thresh_to_zero_inv_func():unary_function<T, T>(){}
__host__ __device__ __forceinline__ thresh_to_zero_inv_func() {}
__host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)
: thresh(other.thresh) {}
const T thresh;
};
//bound!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ============>
// Function Object Adaptors
template <typename Predicate> struct unary_negate : unary_function<typename Predicate::argument_type, bool>
{
@@ -633,8 +633,8 @@ namespace cv { namespace gpu { namespace device
return !pred(x);
}
__device__ __forceinline__ unary_negate(const unary_negate& other) : unary_function<typename Predicate::argument_type, bool>(){}
__device__ __forceinline__ unary_negate() : unary_function<typename Predicate::argument_type, bool>(){}
__host__ __device__ __forceinline__ unary_negate() {}
__host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {}
const Predicate pred;
};
@@ -653,11 +653,9 @@ namespace cv { namespace gpu { namespace device
{
return !pred(x,y);
}
__device__ __forceinline__ binary_negate(const binary_negate& other)
: binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>(){}
__device__ __forceinline__ binary_negate() :
binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>(){}
__host__ __device__ __forceinline__ binary_negate() {}
__host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {}
const Predicate pred;
};
@@ -676,8 +674,8 @@ namespace cv { namespace gpu { namespace device
return op(arg1, a);
}
__device__ __forceinline__ binder1st(const binder1st& other) :
unary_function<typename Op::second_argument_type, typename Op::result_type>(){}
__host__ __device__ __forceinline__ binder1st() {}
__host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {}
const Op op;
const typename Op::first_argument_type arg1;
@@ -697,8 +695,8 @@ namespace cv { namespace gpu { namespace device
return op(a, arg2);
}
__device__ __forceinline__ binder2nd(const binder2nd& other) :
unary_function<typename Op::first_argument_type, typename Op::result_type>(), op(other.op), arg2(other.arg2){}
__host__ __device__ __forceinline__ binder2nd() {}
__host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {}
const Op op;
const typename Op::second_argument_type arg2;
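The recurring edit in functional.hpp adds __host__ to the functor constructors, presumably so the functors can also be default-constructed and copied in host code, for example when they are passed by value into a kernel launch. A minimal CUDA sketch under that assumption; plus_one and apply below are illustrative names, not part of the patch:

// Illustrative only: a functor usable on both host and device sides.
struct plus_one
{
    __device__ __forceinline__ int operator()(int x) const { return x + 1; }
    __host__ __device__ __forceinline__ plus_one() {}                 // constructible on the host too
    __host__ __device__ __forceinline__ plus_one(const plus_one&) {}
};

template <typename Op>
__global__ void apply(const int* src, int* dst, int n, Op op)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        dst[i] = op(src[i]);                                          // functor evaluated on the device
}

// Host side (sketch): the functor object is built here, so it needs a __host__ constructor.
// apply<<<(n + 255) / 256, 256>>>(d_src, d_dst, n, plus_one());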

View File

@@ -43,193 +43,80 @@
#ifndef __OPENCV_GPU_LIMITS_GPU_HPP__
#define __OPENCV_GPU_LIMITS_GPU_HPP__
#include <limits>
#include <limits.h>
#include <float.h>
#include "common.hpp"
namespace cv { namespace gpu { namespace device
{
template<class T> struct numeric_limits
{
typedef T type;
__device__ __forceinline__ static type min() { return type(); };
__device__ __forceinline__ static type max() { return type(); };
__device__ __forceinline__ static type epsilon() { return type(); }
__device__ __forceinline__ static type round_error() { return type(); }
__device__ __forceinline__ static type denorm_min() { return type(); }
__device__ __forceinline__ static type infinity() { return type(); }
__device__ __forceinline__ static type quiet_NaN() { return type(); }
__device__ __forceinline__ static type signaling_NaN() { return T(); }
static const bool is_signed;
};
template<> struct numeric_limits<bool>
{
typedef bool type;
__device__ __forceinline__ static type min() { return false; };
__device__ __forceinline__ static type max() { return true; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = false;
};
template <class T> struct numeric_limits;
template<> struct numeric_limits<char>
{
typedef char type;
__device__ __forceinline__ static type min() { return CHAR_MIN; };
__device__ __forceinline__ static type max() { return CHAR_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = (char)-1 == -1;
};
template <> struct numeric_limits<bool>
{
__device__ __forceinline__ static bool min() { return false; }
__device__ __forceinline__ static bool max() { return true; }
static const bool is_signed = false;
};
template<> struct numeric_limits<signed char>
{
typedef char type;
__device__ __forceinline__ static type min() { return SCHAR_MIN; };
__device__ __forceinline__ static type max() { return SCHAR_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = (signed char)-1 == -1;
};
template <> struct numeric_limits<signed char>
{
__device__ __forceinline__ static signed char min() { return SCHAR_MIN; }
__device__ __forceinline__ static signed char max() { return SCHAR_MAX; }
static const bool is_signed = true;
};
template<> struct numeric_limits<unsigned char>
{
typedef unsigned char type;
__device__ __forceinline__ static type min() { return 0; };
__device__ __forceinline__ static type max() { return UCHAR_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = false;
};
template <> struct numeric_limits<unsigned char>
{
__device__ __forceinline__ static unsigned char min() { return 0; }
__device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; }
static const bool is_signed = false;
};
template<> struct numeric_limits<short>
{
typedef short type;
__device__ __forceinline__ static type min() { return SHRT_MIN; };
__device__ __forceinline__ static type max() { return SHRT_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = true;
};
template <> struct numeric_limits<short>
{
__device__ __forceinline__ static short min() { return SHRT_MIN; }
__device__ __forceinline__ static short max() { return SHRT_MAX; }
static const bool is_signed = true;
};
template<> struct numeric_limits<unsigned short>
{
typedef unsigned short type;
__device__ __forceinline__ static type min() { return 0; };
__device__ __forceinline__ static type max() { return USHRT_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = false;
};
template <> struct numeric_limits<unsigned short>
{
__device__ __forceinline__ static unsigned short min() { return 0; }
__device__ __forceinline__ static unsigned short max() { return USHRT_MAX; }
static const bool is_signed = false;
};
template<> struct numeric_limits<int>
{
typedef int type;
__device__ __forceinline__ static type min() { return INT_MIN; };
__device__ __forceinline__ static type max() { return INT_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = true;
};
template <> struct numeric_limits<int>
{
__device__ __forceinline__ static int min() { return INT_MIN; }
__device__ __forceinline__ static int max() { return INT_MAX; }
static const bool is_signed = true;
};
template <> struct numeric_limits<unsigned int>
{
__device__ __forceinline__ static unsigned int min() { return 0; }
__device__ __forceinline__ static unsigned int max() { return UINT_MAX; }
static const bool is_signed = false;
};
template<> struct numeric_limits<unsigned int>
{
typedef unsigned int type;
__device__ __forceinline__ static type min() { return 0; };
__device__ __forceinline__ static type max() { return UINT_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = false;
};
template <> struct numeric_limits<float>
{
__device__ __forceinline__ static float min() { return FLT_MIN; }
__device__ __forceinline__ static float max() { return FLT_MAX; }
__device__ __forceinline__ static float epsilon() { return FLT_EPSILON; }
static const bool is_signed = true;
};
template<> struct numeric_limits<long>
{
typedef long type;
__device__ __forceinline__ static type min() { return LONG_MIN; };
__device__ __forceinline__ static type max() { return LONG_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = true;
};
template <> struct numeric_limits<double>
{
__device__ __forceinline__ static double min() { return DBL_MIN; }
__device__ __forceinline__ static double max() { return DBL_MAX; }
__device__ __forceinline__ static double epsilon() { return DBL_EPSILON; }
static const bool is_signed = true;
};
template<> struct numeric_limits<unsigned long>
{
typedef unsigned long type;
__device__ __forceinline__ static type min() { return 0; };
__device__ __forceinline__ static type max() { return ULONG_MAX; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = false;
};
template<> struct numeric_limits<float>
{
typedef float type;
__device__ __forceinline__ static type min() { return 1.175494351e-38f/*FLT_MIN*/; };
__device__ __forceinline__ static type max() { return 3.402823466e+38f/*FLT_MAX*/; };
__device__ __forceinline__ static type epsilon() { return 1.192092896e-07f/*FLT_EPSILON*/; };
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = true;
};
template<> struct numeric_limits<double>
{
typedef double type;
__device__ __forceinline__ static type min() { return 2.2250738585072014e-308/*DBL_MIN*/; };
__device__ __forceinline__ static type max() { return 1.7976931348623158e+308/*DBL_MAX*/; };
__device__ __forceinline__ static type epsilon();
__device__ __forceinline__ static type round_error();
__device__ __forceinline__ static type denorm_min();
__device__ __forceinline__ static type infinity();
__device__ __forceinline__ static type quiet_NaN();
__device__ __forceinline__ static type signaling_NaN();
static const bool is_signed = true;
};
}}} // namespace cv { namespace gpu { namespace device {
#endif // __OPENCV_GPU_LIMITS_GPU_HPP__
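The rewritten device-side numeric_limits keeps only the members the CUDA kernels actually use (min/max, plus epsilon for the floating-point types) and defines them from the standard limits.h/float.h constants. A small illustrative kernel using the trait; find_min is a made-up example, not code from this patch:

#include "opencv2/gpu/device/limits.hpp"

using namespace cv::gpu::device;

// Single-thread minimum search, just to show numeric_limits<float>::max() on the device.
__global__ void find_min(const float* data, int n, float* result)
{
    if (blockIdx.x == 0 && threadIdx.x == 0)
    {
        float best = numeric_limits<float>::max();   // FLT_MAX
        for (int i = 0; i < n; ++i)
            best = ::fminf(best, data[i]);
        *result = best;
    }
}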

View File

@@ -124,8 +124,8 @@ namespace cv { namespace gpu { namespace device
struct WithOutMask
{
__device__ __forceinline__ WithOutMask(){}
__device__ __forceinline__ WithOutMask(const WithOutMask& mask){}
__host__ __device__ __forceinline__ WithOutMask(){}
__host__ __device__ __forceinline__ WithOutMask(const WithOutMask&){}
__device__ __forceinline__ void next() const
{

File diff suppressed because it is too large

View File

@@ -72,7 +72,7 @@ PERF_TEST_P(Sz_Type_KernelSz, Filters_Blur,
TEST_CYCLE() cv::gpu::blur(d_src, dst, cv::Size(ksize, ksize));
GPU_SANITY_CHECK(dst);
GPU_SANITY_CHECK(dst, 1);
}
else
{

View File

@@ -44,4 +44,11 @@
using namespace perf;
CV_PERF_TEST_MAIN(gpu, printCudaInfo())
static const char * impls[] = {
#ifdef HAVE_CUDA
"cuda",
#endif
"plain"
};
CV_PERF_TEST_MAIN_WITH_IMPLS(gpu, impls, printCudaInfo())

View File

@@ -103,7 +103,7 @@ PERF_TEST_P(ImagePair, Video_InterpolateFrames,
TEST_CYCLE() cv::gpu::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv, 0.5f, newFrame, d_buf);
GPU_SANITY_CHECK(newFrame);
GPU_SANITY_CHECK(newFrame, 1e-4);
}
else
{
@@ -142,7 +142,7 @@ PERF_TEST_P(ImagePair, Video_CreateOpticalFlowNeedleMap,
TEST_CYCLE() cv::gpu::createOpticalFlowNeedleMap(u, v, vertex, colors);
GPU_SANITY_CHECK(vertex);
GPU_SANITY_CHECK(vertex, 1e-6);
GPU_SANITY_CHECK(colors);
}
else
@@ -219,8 +219,8 @@ PERF_TEST_P(ImagePair, Video_BroxOpticalFlow,
TEST_CYCLE() d_flow(d_frame0, d_frame1, u, v);
GPU_SANITY_CHECK(u);
GPU_SANITY_CHECK(v);
GPU_SANITY_CHECK(u, 1e-1);
GPU_SANITY_CHECK(v, 1e-1);
}
else
{

View File

@@ -151,7 +151,7 @@ namespace
}
// Computes rotation, translation pair for small subsets of the input data
class TransformHypothesesGenerator
class TransformHypothesesGenerator : public ParallelLoopBody
{
public:
TransformHypothesesGenerator(const Mat& object_, const Mat& image_, const Mat& dist_coef_,
@@ -161,7 +161,7 @@ namespace
num_points(num_points_), subset_size(subset_size_), rot_matrices(rot_matrices_),
transl_vectors(transl_vectors_) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
// Input data for generation of the current hypothesis
vector<int> subset_indices(subset_size);
@@ -173,7 +173,7 @@ namespace
Mat rot_mat(3, 3, CV_64F);
Mat transl_vec(1, 3, CV_64F);
for (int iter = range.begin(); iter < range.end(); ++iter)
for (int iter = range.start; iter < range.end; ++iter)
{
selectRandom(subset_size, num_points, subset_indices);
for (int i = 0; i < subset_size; ++i)
@@ -239,7 +239,7 @@ void cv::gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& cam
// Generate set of hypotheses using small subsets of the input data
TransformHypothesesGenerator body(object, image_normalized, empty_dist_coef, eye_camera_mat,
num_points, subset_size, rot_matrices, transl_vectors);
parallel_for(BlockedRange(0, num_iters), body);
parallel_for_(Range(0, num_iters), body);
// Compute scores (i.e. number of inliers) for each hypothesis
GpuMat d_object(object);
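TransformHypothesesGenerator is one of several bodies in this commit migrated from the TBB-style parallel_for/BlockedRange API to cv::parallel_for_ with cv::ParallelLoopBody and cv::Range (range.begin()/end() become range.start/range.end). A self-contained sketch of that pattern with a made-up body, not the actual hypothesis generator:

#include "opencv2/core/core.hpp"
#include <vector>
#include <cstdio>

// Illustrative body: squares each element of a vector in parallel stripes.
struct SquareBody : public cv::ParallelLoopBody
{
    SquareBody(const std::vector<int>& in_, std::vector<int>& out_) : in(in_), out(out_) {}

    void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
            out[i] = in[i] * in[i];
    }

    const std::vector<int>& in;
    std::vector<int>& out;
};

int main()
{
    std::vector<int> src(1000, 3), dst(1000);
    cv::parallel_for_(cv::Range(0, (int)src.size()), SquareBody(src, dst));
    std::printf("%d\n", dst[999]);   // 9
    return 0;
}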

View File

@@ -406,7 +406,7 @@ public:
GpuMat dclassified(1, 1, CV_32S);
cudaSafeCall( cudaMemcpy(dclassified.ptr(), &classified, sizeof(int), cudaMemcpyHostToDevice) );
PyrLavel level(0, 1.0f, image.size(), NxM, minObjectSize);
PyrLavel level(0, scaleFactor, image.size(), NxM, minObjectSize);
while (level.isFeasible(maxObjectSize))
{

View File

@@ -67,8 +67,8 @@ namespace cv { namespace gpu { namespace device
crot1.x * p.x + crot1.y * p.y + crot1.z * p.z + ctransl.y,
crot2.x * p.x + crot2.y * p.y + crot2.z * p.z + ctransl.z);
}
__device__ __forceinline__ TransformOp() {}
__device__ __forceinline__ TransformOp(const TransformOp&) {}
__host__ __device__ __forceinline__ TransformOp() {}
__host__ __device__ __forceinline__ TransformOp(const TransformOp&) {}
};
void call(const PtrStepSz<float3> src, const float* rot,
@@ -106,8 +106,8 @@ namespace cv { namespace gpu { namespace device
(cproj0.x * t.x + cproj0.y * t.y) / t.z + cproj0.z,
(cproj1.x * t.x + cproj1.y * t.y) / t.z + cproj1.z);
}
__device__ __forceinline__ ProjectOp() {}
__device__ __forceinline__ ProjectOp(const ProjectOp&) {}
__host__ __device__ __forceinline__ ProjectOp() {}
__host__ __device__ __forceinline__ ProjectOp(const ProjectOp&) {}
};
void call(const PtrStepSz<float3> src, const float* rot,

View File

@@ -62,8 +62,8 @@ namespace canny
return ::abs(x) + ::abs(y);
}
__device__ __forceinline__ L1() {}
__device__ __forceinline__ L1(const L1&) {}
__host__ __device__ __forceinline__ L1() {}
__host__ __device__ __forceinline__ L1(const L1&) {}
};
struct L2 : binary_function<int, int, float>
{
@@ -72,8 +72,8 @@ namespace canny
return ::sqrtf(x * x + y * y);
}
__device__ __forceinline__ L2() {}
__device__ __forceinline__ L2(const L2&) {}
__host__ __device__ __forceinline__ L2() {}
__host__ __device__ __forceinline__ L2(const L2&) {}
};
}
@@ -470,8 +470,8 @@ namespace canny
return (uchar)(-(e >> 1));
}
__device__ __forceinline__ GetEdges() {}
__device__ __forceinline__ GetEdges(const GetEdges&) {}
__host__ __device__ __forceinline__ GetEdges() {}
__host__ __device__ __forceinline__ GetEdges(const GetEdges&) {}
};
}
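For reference, the L1 and L2 functors above are the two gradient-magnitude norms used by the GPU Canny code: |dx| + |dy| versus sqrt(dx^2 + dy^2). A host-side illustration, not code from the patch:

#include <cmath>
#include <cstdlib>
#include <cstdio>

static float magL1(int dx, int dy) { return (float)(std::abs(dx) + std::abs(dy)); }
static float magL2(int dx, int dy) { return std::sqrt((float)(dx * dx + dy * dy)); }

int main()
{
    std::printf("%g %g\n", magL1(3, 4), magL2(3, 4));   // 7 and 5
    return 0;
}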

View File

@@ -153,7 +153,7 @@ namespace cv { namespace gpu { namespace device
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
I d = saturate_cast<I>(a - b);
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z;
@@ -169,7 +169,7 @@ namespace cv { namespace gpu { namespace device
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
I d = saturate_cast<I>(a - b);
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z &&
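The added saturate_cast clamps the per-channel difference before the range test, presumably so a negative difference saturates instead of wrapping around in the unsigned channel type. The CPU-side analogue, cv::saturate_cast, shows the effect; this small program is illustrative only:

#include "opencv2/core/core.hpp"
#include <cstdio>

int main()
{
    int a = 10, b = 200;
    unsigned char wrapped = (unsigned char)(a - b);                     // 66: wraps modulo 256
    unsigned char clamped = cv::saturate_cast<unsigned char>(a - b);    // 0: clamped at the bottom of the range
    std::printf("%d %d\n", wrapped, clamped);
    return 0;
}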

View File

@@ -162,8 +162,8 @@ namespace arithm
return vadd4(a, b);
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4& other) {}
__host__ __device__ __forceinline__ VAdd4() {}
__host__ __device__ __forceinline__ VAdd4(const VAdd4&) {}
};
////////////////////////////////////
@@ -175,8 +175,8 @@ namespace arithm
return vadd2(a, b);
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2& other) {}
__host__ __device__ __forceinline__ VAdd2() {}
__host__ __device__ __forceinline__ VAdd2(const VAdd2&) {}
};
////////////////////////////////////
@@ -188,8 +188,8 @@ namespace arithm
return saturate_cast<D>(a + b);
}
__device__ __forceinline__ AddMat() {}
__device__ __forceinline__ AddMat(const AddMat& other) {}
__host__ __device__ __forceinline__ AddMat() {}
__host__ __device__ __forceinline__ AddMat(const AddMat&) {}
};
}
@@ -397,8 +397,8 @@ namespace arithm
return vsub4(a, b);
}
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4& other) {}
__host__ __device__ __forceinline__ VSub4() {}
__host__ __device__ __forceinline__ VSub4(const VSub4&) {}
};
////////////////////////////////////
@@ -410,8 +410,8 @@ namespace arithm
return vsub2(a, b);
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2& other) {}
__host__ __device__ __forceinline__ VSub2() {}
__host__ __device__ __forceinline__ VSub2(const VSub2&) {}
};
////////////////////////////////////
@@ -423,8 +423,8 @@ namespace arithm
return saturate_cast<D>(a - b);
}
__device__ __forceinline__ SubMat() {}
__device__ __forceinline__ SubMat(const SubMat& other) {}
__host__ __device__ __forceinline__ SubMat() {}
__host__ __device__ __forceinline__ SubMat(const SubMat&) {}
};
}
@@ -617,8 +617,8 @@ namespace arithm
return res;
}
__device__ __forceinline__ Mul_8uc4_32f() {}
__device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f& other) {}
__host__ __device__ __forceinline__ Mul_8uc4_32f() {}
__host__ __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
};
struct Mul_16sc4_32f : binary_function<short4, float, short4>
@@ -629,8 +629,8 @@ namespace arithm
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
__device__ __forceinline__ Mul_16sc4_32f() {}
__device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f& other) {}
__host__ __device__ __forceinline__ Mul_16sc4_32f() {}
__host__ __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
};
template <typename T, typename D> struct Mul : binary_function<T, T, D>
@@ -640,8 +640,8 @@ namespace arithm
return saturate_cast<D>(a * b);
}
__device__ __forceinline__ Mul() {}
__device__ __forceinline__ Mul(const Mul& other) {}
__host__ __device__ __forceinline__ Mul() {}
__host__ __device__ __forceinline__ Mul(const Mul&) {}
};
template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
@@ -888,8 +888,8 @@ namespace arithm
return b != 0 ? saturate_cast<D>(a / b) : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, float> : binary_function<T, T, float>
{
@@ -898,8 +898,8 @@ namespace arithm
return b != 0 ? static_cast<float>(a) / b : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, double> : binary_function<T, T, double>
{
@@ -908,8 +908,8 @@ namespace arithm
return b != 0 ? static_cast<double>(a) / b : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
@@ -1196,8 +1196,8 @@ namespace arithm
return vabsdiff4(a, b);
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4& other) {}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
////////////////////////////////////
@@ -1209,8 +1209,8 @@ namespace arithm
return vabsdiff2(a, b);
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2& other) {}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
////////////////////////////////////
@@ -1235,8 +1235,8 @@ namespace arithm
return saturate_cast<T>(_abs(a - b));
}
__device__ __forceinline__ AbsDiffMat() {}
__device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
@@ -1370,8 +1370,8 @@ namespace arithm
return saturate_cast<T>(x * x);
}
__device__ __forceinline__ Sqr() {}
__device__ __forceinline__ Sqr(const Sqr& other) {}
__host__ __device__ __forceinline__ Sqr() {}
__host__ __device__ __forceinline__ Sqr(const Sqr&) {}
};
}
@@ -1466,8 +1466,8 @@ namespace arithm
return saturate_cast<T>(f(x));
}
__device__ __forceinline__ Exp() {}
__device__ __forceinline__ Exp(const Exp& other) {}
__host__ __device__ __forceinline__ Exp() {}
__host__ __device__ __forceinline__ Exp(const Exp&) {}
};
}
@@ -1507,8 +1507,8 @@ namespace arithm
return vcmpeq4(a, b);
}
__device__ __forceinline__ VCmpEq4() {}
__device__ __forceinline__ VCmpEq4(const VCmpEq4& other) {}
__host__ __device__ __forceinline__ VCmpEq4() {}
__host__ __device__ __forceinline__ VCmpEq4(const VCmpEq4&) {}
};
struct VCmpNe4 : binary_function<uint, uint, uint>
{
@@ -1517,8 +1517,8 @@ namespace arithm
return vcmpne4(a, b);
}
__device__ __forceinline__ VCmpNe4() {}
__device__ __forceinline__ VCmpNe4(const VCmpNe4& other) {}
__host__ __device__ __forceinline__ VCmpNe4() {}
__host__ __device__ __forceinline__ VCmpNe4(const VCmpNe4&) {}
};
struct VCmpLt4 : binary_function<uint, uint, uint>
{
@@ -1527,8 +1527,8 @@ namespace arithm
return vcmplt4(a, b);
}
__device__ __forceinline__ VCmpLt4() {}
__device__ __forceinline__ VCmpLt4(const VCmpLt4& other) {}
__host__ __device__ __forceinline__ VCmpLt4() {}
__host__ __device__ __forceinline__ VCmpLt4(const VCmpLt4&) {}
};
struct VCmpLe4 : binary_function<uint, uint, uint>
{
@@ -1537,8 +1537,8 @@ namespace arithm
return vcmple4(a, b);
}
__device__ __forceinline__ VCmpLe4() {}
__device__ __forceinline__ VCmpLe4(const VCmpLe4& other) {}
__host__ __device__ __forceinline__ VCmpLe4() {}
__host__ __device__ __forceinline__ VCmpLe4(const VCmpLe4&) {}
};
////////////////////////////////////
@@ -2008,8 +2008,8 @@ namespace arithm
return vmin4(a, b);
}
__device__ __forceinline__ VMin4() {}
__device__ __forceinline__ VMin4(const VMin4& other) {}
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
////////////////////////////////////
@@ -2021,8 +2021,8 @@ namespace arithm
return vmin2(a, b);
}
__device__ __forceinline__ VMin2() {}
__device__ __forceinline__ VMin2(const VMin2& other) {}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
@@ -2100,8 +2100,8 @@ namespace arithm
return vmax4(a, b);
}
__device__ __forceinline__ VMax4() {}
__device__ __forceinline__ VMax4(const VMax4& other) {}
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
////////////////////////////////////
@@ -2113,8 +2113,8 @@ namespace arithm
return vmax2(a, b);
}
__device__ __forceinline__ VMax2() {}
__device__ __forceinline__ VMax2(const VMax2& other) {}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
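VAdd4, VSub4, VAbsDiff4, VMin4 and VMax4 above wrap the packed SIMD helpers (vadd4 and friends) that operate on four 8-bit lanes stored in one 32-bit word. A host-side sketch of what the per-byte add computes, assuming vadd4 is the non-saturating, wrapping variant (the saturating forms have separate names):

#include <cstdint>
#include <cstdio>

// Per-byte add with wraparound in each 8-bit lane; no carries cross lane boundaries.
static uint32_t vadd4_host(uint32_t a, uint32_t b)
{
    uint32_t r = 0;
    for (int i = 0; i < 4; ++i)
    {
        uint32_t la = (a >> (8 * i)) & 0xFFu;
        uint32_t lb = (b >> (8 * i)) & 0xFFu;
        r |= ((la + lb) & 0xFFu) << (8 * i);
    }
    return r;
}

int main()
{
    std::printf("%08x\n", vadd4_host(0x01FF0203u, 0x01010101u));   // 02000304: the 0xFF lane wraps to 0x00
    return 0;
}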

View File

@@ -48,6 +48,7 @@
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/dynamic_smem.hpp"
@@ -811,7 +812,7 @@ namespace cv { namespace gpu { namespace device
const int ind = ::atomicAdd(r_sizes + n, 1);
if (ind < maxSize)
r_table(n, ind) = p - templCenter;
r_table(n, ind) = saturate_cast<short2>(p - templCenter);
}
void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
@@ -855,7 +856,7 @@ namespace cv { namespace gpu { namespace device
for (int j = 0; j < r_row_size; ++j)
{
short2 c = p - r_row[j];
int2 c = p - r_row[j];
c.x = __float2int_rn(c.x * idp);
c.y = __float2int_rn(c.y * idp);

View File

@@ -81,48 +81,90 @@ namespace
const ErrorEntry npp_errors [] =
{
error_entry( NPP_NOT_SUPPORTED_MODE_ERROR ),
error_entry( NPP_ROUND_MODE_NOT_SUPPORTED_ERROR ),
error_entry( NPP_RESIZE_NO_OPERATION_ERROR ),
#if defined (_MSC_VER)
error_entry( NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY ),
#endif
#if NPP_VERSION < 5500
error_entry( NPP_BAD_ARG_ERROR ),
error_entry( NPP_LUT_NUMBER_OF_LEVELS_ERROR ),
error_entry( NPP_TEXTURE_BIND_ERROR ),
error_entry( NPP_COEFF_ERROR ),
error_entry( NPP_RECT_ERROR ),
error_entry( NPP_QUAD_ERROR ),
error_entry( NPP_MEMFREE_ERR ),
error_entry( NPP_MEMSET_ERR ),
error_entry( NPP_MEM_ALLOC_ERR ),
error_entry( NPP_HISTO_NUMBER_OF_LEVELS_ERROR ),
error_entry( NPP_MIRROR_FLIP_ERR ),
error_entry( NPP_INVALID_INPUT ),
error_entry( NPP_POINTER_ERROR ),
error_entry( NPP_WARNING ),
error_entry( NPP_ODD_ROI_WARNING ),
#else
error_entry( NPP_INVALID_HOST_POINTER_ERROR ),
error_entry( NPP_INVALID_DEVICE_POINTER_ERROR ),
error_entry( NPP_LUT_PALETTE_BITSIZE_ERROR ),
error_entry( NPP_ZC_MODE_NOT_SUPPORTED_ERROR ),
error_entry( NPP_MEMFREE_ERROR ),
error_entry( NPP_MEMSET_ERROR ),
error_entry( NPP_QUALITY_INDEX_ERROR ),
error_entry( NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR ),
error_entry( NPP_CHANNEL_ORDER_ERROR ),
error_entry( NPP_ZERO_MASK_VALUE_ERROR ),
error_entry( NPP_QUADRANGLE_ERROR ),
error_entry( NPP_RECTANGLE_ERROR ),
error_entry( NPP_COEFFICIENT_ERROR ),
error_entry( NPP_NUMBER_OF_CHANNELS_ERROR ),
error_entry( NPP_COI_ERROR ),
error_entry( NPP_DIVISOR_ERROR ),
error_entry( NPP_CHANNEL_ERROR ),
error_entry( NPP_STRIDE_ERROR ),
error_entry( NPP_ANCHOR_ERROR ),
error_entry( NPP_MASK_SIZE_ERROR ),
error_entry( NPP_MIRROR_FLIP_ERROR ),
error_entry( NPP_MOMENT_00_ZERO_ERROR ),
error_entry( NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR ),
error_entry( NPP_THRESHOLD_ERROR ),
error_entry( NPP_CONTEXT_MATCH_ERROR ),
error_entry( NPP_FFT_FLAG_ERROR ),
error_entry( NPP_FFT_ORDER_ERROR ),
error_entry( NPP_SCALE_RANGE_ERROR ),
error_entry( NPP_DATA_TYPE_ERROR ),
error_entry( NPP_OUT_OFF_RANGE_ERROR ),
error_entry( NPP_DIVIDE_BY_ZERO_ERROR ),
error_entry( NPP_MEMORY_ALLOCATION_ERR ),
error_entry( NPP_RANGE_ERROR ),
error_entry( NPP_BAD_ARGUMENT_ERROR ),
error_entry( NPP_NO_MEMORY_ERROR ),
error_entry( NPP_ERROR_RESERVED ),
error_entry( NPP_NO_OPERATION_WARNING ),
error_entry( NPP_DIVIDE_BY_ZERO_WARNING ),
error_entry( NPP_WRONG_INTERSECTION_ROI_WARNING ),
#endif
error_entry( NPP_NOT_SUPPORTED_MODE_ERROR ),
error_entry( NPP_ROUND_MODE_NOT_SUPPORTED_ERROR ),
error_entry( NPP_RESIZE_NO_OPERATION_ERROR ),
error_entry( NPP_LUT_NUMBER_OF_LEVELS_ERROR ),
error_entry( NPP_TEXTURE_BIND_ERROR ),
error_entry( NPP_WRONG_INTERSECTION_ROI_ERROR ),
error_entry( NPP_NOT_EVEN_STEP_ERROR ),
error_entry( NPP_INTERPOLATION_ERROR ),
error_entry( NPP_RESIZE_FACTOR_ERROR ),
error_entry( NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR ),
error_entry( NPP_MEMFREE_ERR ),
error_entry( NPP_MEMSET_ERR ),
error_entry( NPP_MEMCPY_ERROR ),
error_entry( NPP_MEM_ALLOC_ERR ),
error_entry( NPP_HISTO_NUMBER_OF_LEVELS_ERROR ),
error_entry( NPP_MIRROR_FLIP_ERR ),
error_entry( NPP_INVALID_INPUT ),
error_entry( NPP_ALIGNMENT_ERROR ),
error_entry( NPP_STEP_ERROR ),
error_entry( NPP_SIZE_ERROR ),
error_entry( NPP_POINTER_ERROR ),
error_entry( NPP_NULL_POINTER_ERROR ),
error_entry( NPP_CUDA_KERNEL_EXECUTION_ERROR ),
error_entry( NPP_NOT_IMPLEMENTED_ERROR ),
error_entry( NPP_ERROR ),
error_entry( NPP_NO_ERROR ),
error_entry( NPP_SUCCESS ),
error_entry( NPP_WARNING ),
error_entry( NPP_WRONG_INTERSECTION_QUAD_WARNING ),
error_entry( NPP_MISALIGNED_DST_ROI_WARNING ),
error_entry( NPP_AFFINE_QUAD_INCORRECT_WARNING ),
error_entry( NPP_DOUBLE_SIZE_WARNING ),
error_entry( NPP_ODD_ROI_WARNING )
error_entry( NPP_DOUBLE_SIZE_WARNING )
};
const size_t npp_error_num = sizeof(npp_errors) / sizeof(npp_errors[0]);
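The table pairs each NPP status code with its textual name through the error_entry macro and takes its length with the usual sizeof idiom on the last line. A generic, self-contained sketch of that lookup pattern; ErrorEntry, error_entry and the demo codes here are stand-ins, since the real definitions live elsewhere in the file:

#include <cstddef>
#include <cstdio>

struct ErrorEntry { int code; const char* str; };
#define error_entry(entry) { entry, #entry }

enum { DEMO_OK = 0, DEMO_BAD_ARG = -1 };

static const ErrorEntry demo_errors[] =
{
    error_entry( DEMO_OK ),
    error_entry( DEMO_BAD_ARG )
};
static const size_t demo_error_num = sizeof(demo_errors) / sizeof(demo_errors[0]);

static const char* lookup(int code)
{
    for (size_t i = 0; i < demo_error_num; ++i)
        if (demo_errors[i].code == code)
            return demo_errors[i].str;
    return "unknown";
}

int main()
{
    std::printf("%s\n", lookup(DEMO_BAD_ARG));   // prints "DEMO_BAD_ARG"
    return 0;
}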

View File

@@ -187,10 +187,20 @@ double cv::gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType)
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());
CV_Assert(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2);
typedef NppStatus (*npp_norm_diff_func_t)(const Npp8u* pSrc1, int nSrcStep1, const Npp8u* pSrc2, int nSrcStep2,
NppiSize oSizeROI, Npp64f* pRetVal);
#if CUDA_VERSION < 5050
typedef NppStatus (*func_t)(const Npp8u* pSrc1, int nSrcStep1, const Npp8u* pSrc2, int nSrcStep2, NppiSize oSizeROI, Npp64f* pRetVal);
static const npp_norm_diff_func_t npp_norm_diff_func[] = {nppiNormDiff_Inf_8u_C1R, nppiNormDiff_L1_8u_C1R, nppiNormDiff_L2_8u_C1R};
static const func_t funcs[] = {nppiNormDiff_Inf_8u_C1R, nppiNormDiff_L1_8u_C1R, nppiNormDiff_L2_8u_C1R};
#else
typedef NppStatus (*func_t)(const Npp8u* pSrc1, int nSrcStep1, const Npp8u* pSrc2, int nSrcStep2,
NppiSize oSizeROI, Npp64f* pRetVal, Npp8u * pDeviceBuffer);
typedef NppStatus (*buf_size_func_t)(NppiSize oSizeROI, int* hpBufferSize);
static const func_t funcs[] = {nppiNormDiff_Inf_8u_C1R, nppiNormDiff_L1_8u_C1R, nppiNormDiff_L2_8u_C1R};
static const buf_size_func_t buf_size_funcs[] = {nppiNormDiffInfGetBufferHostSize_8u_C1R, nppiNormDiffL1GetBufferHostSize_8u_C1R, nppiNormDiffL2GetBufferHostSize_8u_C1R};
#endif
NppiSize sz;
sz.width = src1.cols;
@@ -202,7 +212,16 @@ double cv::gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType)
DeviceBuffer dbuf;
nppSafeCall( npp_norm_diff_func[funcIdx](src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step), sz, dbuf) );
#if CUDA_VERSION < 5050
nppSafeCall( funcs[funcIdx](src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step), sz, dbuf) );
#else
int bufSize;
buf_size_funcs[funcIdx](sz, &bufSize);
GpuMat buf(1, bufSize, CV_8UC1);
nppSafeCall( funcs[funcIdx](src1.ptr<Npp8u>(), static_cast<int>(src1.step), src2.ptr<Npp8u>(), static_cast<int>(src2.step), sz, dbuf, buf.data) );
#endif
cudaSafeCall( cudaDeviceSynchronize() );
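From CUDA 5.5 onward the nppiNormDiff_* routines take an extra device scratch buffer whose size is queried through the matching GetBufferHostSize functions, which is what the #if branches above arrange. Caller-side usage of the wrapper is unchanged; a hedged sketch of the public API being exercised (OpenCV 2.4 GPU module):

#include "opencv2/gpu/gpu.hpp"

// cv::gpu::GpuMat a, b;                          // both CV_8UC1 and the same size
// double l1   = cv::gpu::norm(a, b, cv::NORM_L1);
// double l2   = cv::gpu::norm(a, b, cv::NORM_L2);
// double linf = cv::gpu::norm(a, b, cv::NORM_INF);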

View File

@@ -130,7 +130,7 @@ typedef int Ncv32s;
typedef unsigned int Ncv32u;
typedef short Ncv16s;
typedef unsigned short Ncv16u;
typedef char Ncv8s;
typedef signed char Ncv8s;
typedef unsigned char Ncv8u;
typedef float Ncv32f;
typedef double Ncv64f;

View File

@@ -51,7 +51,7 @@ template<typename TBase> inline __host__ __device__ TBase _pixMaxVal();
template<> static inline __host__ __device__ Ncv8u _pixMaxVal<Ncv8u>() {return UCHAR_MAX;}
template<> static inline __host__ __device__ Ncv16u _pixMaxVal<Ncv16u>() {return USHRT_MAX;}
template<> static inline __host__ __device__ Ncv32u _pixMaxVal<Ncv32u>() {return UINT_MAX;}
template<> static inline __host__ __device__ Ncv8s _pixMaxVal<Ncv8s>() {return CHAR_MAX;}
template<> static inline __host__ __device__ Ncv8s _pixMaxVal<Ncv8s>() {return SCHAR_MAX;}
template<> static inline __host__ __device__ Ncv16s _pixMaxVal<Ncv16s>() {return SHRT_MAX;}
template<> static inline __host__ __device__ Ncv32s _pixMaxVal<Ncv32s>() {return INT_MAX;}
template<> static inline __host__ __device__ Ncv32f _pixMaxVal<Ncv32f>() {return FLT_MAX;}
@@ -61,7 +61,7 @@ template<typename TBase> inline __host__ __device__ TBase _pixMinVal();
template<> static inline __host__ __device__ Ncv8u _pixMinVal<Ncv8u>() {return 0;}
template<> static inline __host__ __device__ Ncv16u _pixMinVal<Ncv16u>() {return 0;}
template<> static inline __host__ __device__ Ncv32u _pixMinVal<Ncv32u>() {return 0;}
template<> static inline __host__ __device__ Ncv8s _pixMinVal<Ncv8s>() {return CHAR_MIN;}
template<> static inline __host__ __device__ Ncv8s _pixMinVal<Ncv8s>() {return SCHAR_MIN;}
template<> static inline __host__ __device__ Ncv16s _pixMinVal<Ncv16s>() {return SHRT_MIN;}
template<> static inline __host__ __device__ Ncv32s _pixMinVal<Ncv32s>() {return INT_MIN;}
template<> static inline __host__ __device__ Ncv32f _pixMinVal<Ncv32f>() {return FLT_MIN;}
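Both NCV hunks address the same portability issue: whether plain char is signed is implementation-defined, so Ncv8s is now an explicit signed char and its limits come from SCHAR_MIN/SCHAR_MAX rather than CHAR_MIN/CHAR_MAX. A small host-only illustration, not part of the patch:

#include <climits>
#include <cstdio>

int main()
{
    std::printf("char:        [%d, %d]\n", (int)CHAR_MIN,  (int)CHAR_MAX);   // may be [0,255] or [-128,127]
    std::printf("signed char: [%d, %d]\n", (int)SCHAR_MIN, (int)SCHAR_MAX);  // always [-128,127]
    return 0;
}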

View File

@@ -116,11 +116,13 @@
#define CUDART_MINIMUM_REQUIRED_VERSION 4010
#define NPP_MINIMUM_REQUIRED_VERSION 4100
#define NPP_VERSION (NPP_VERSION_MAJOR * 1000 + NPP_VERSION_MINOR * 100 + NPP_VERSION_BUILD)
#if (CUDART_VERSION < CUDART_MINIMUM_REQUIRED_VERSION)
#error "Insufficient Cuda Runtime library version, please update it."
#endif
#if (NPP_VERSION_MAJOR * 1000 + NPP_VERSION_MINOR * 100 + NPP_VERSION_BUILD < NPP_MINIMUM_REQUIRED_VERSION)
#if (NPP_VERSION < NPP_MINIMUM_REQUIRED_VERSION)
#error "Insufficient NPP version, please update it."
#endif

View File

@@ -352,7 +352,7 @@ GPU_TEST_P(Add_Scalar, WithOutMask)
cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
cv::add(mat, val, dst_gold, cv::noArray(), depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
}
}
@@ -383,7 +383,7 @@ GPU_TEST_P(Add_Scalar, WithMask)
cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
cv::add(mat, val, dst_gold, mask, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
}
}
@@ -567,7 +567,7 @@ GPU_TEST_P(Subtract_Scalar, WithOutMask)
cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
cv::subtract(mat, val, dst_gold, cv::noArray(), depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
}
}
@@ -598,7 +598,7 @@ GPU_TEST_P(Subtract_Scalar, WithMask)
cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
cv::subtract(mat, val, dst_gold, mask, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
}
}
@@ -2148,7 +2148,7 @@ GPU_TEST_P(Min, Scalar)
cv::Mat dst_gold = cv::min(src, val);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
}
}
@@ -2231,7 +2231,7 @@ GPU_TEST_P(Max, Scalar)
cv::Mat dst_gold = cv::max(src, val);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
}
}

View File

@@ -102,8 +102,8 @@ GPU_TEST_P(BroxOpticalFlow, Regression)
for (int i = 0; i < v_gold.rows; ++i)
f.read(v_gold.ptr<char>(i), v_gold.cols * sizeof(float));
EXPECT_MAT_NEAR(u_gold, u, 0);
EXPECT_MAT_NEAR(v_gold, v, 0);
EXPECT_MAT_SIMILAR(u_gold, u, 1e-3);
EXPECT_MAT_SIMILAR(v_gold, v, 1e-3);
#else
std::ofstream f(fname.c_str(), std::ios_base::binary);

View File

@@ -95,14 +95,10 @@ elseif(HAVE_QT)
endif()
include(${QT_USE_FILE})
if(QT_INCLUDE_DIR)
ocv_include_directories(${QT_INCLUDE_DIR})
endif()
QT4_ADD_RESOURCES(_RCC_OUTFILES src/window_QT.qrc)
QT4_WRAP_CPP(_MOC_OUTFILES src/window_QT.h)
list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES} ${QT_QTTEST_LIBRARY})
list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES})
list(APPEND highgui_srcs src/window_QT.cpp ${_MOC_OUTFILES} ${_RCC_OUTFILES})
ocv_check_flag_support(CXX -Wno-missing-declarations _have_flag)
if(${_have_flag})
@@ -183,7 +179,11 @@ if(HAVE_XIMEA)
if(XIMEA_LIBRARY_DIR)
link_directories(${XIMEA_LIBRARY_DIR})
endif()
list(APPEND HIGHGUI_LIBRARIES m3api)
if(CMAKE_CL_64)
list(APPEND HIGHGUI_LIBRARIES m3apiX64)
else()
list(APPEND HIGHGUI_LIBRARIES m3api)
endif()
endif(HAVE_XIMEA)
if(HAVE_FFMPEG)

View File

@@ -1,6 +1,4 @@
/*
* cap_ios.h
* For iOS video I/O
/* For iOS video I/O
* by Eduard Feicho on 29/07/12
* Copyright 2012. All rights reserved.
*
@@ -90,6 +88,12 @@
- (void)createVideoPreviewLayer;
- (void)updateOrientation;
- (void)lockFocus;
- (void)unlockFocus;
- (void)lockExposure;
- (void)unlockExposure;
- (void)lockBalance;
- (void)unlockBalance;
@end
@@ -116,6 +120,7 @@
BOOL grayscaleMode;
BOOL recordVideo;
BOOL rotateVideo;
AVAssetWriterInput* recordAssetWriterInput;
AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
AVAssetWriter* recordAssetWriter;
@@ -128,6 +133,7 @@
@property (nonatomic, assign) BOOL grayscaleMode;
@property (nonatomic, assign) BOOL recordVideo;
@property (nonatomic, assign) BOOL rotateVideo;
@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput;
@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
@property (nonatomic, retain) AVAssetWriter* recordAssetWriter;

View File

@@ -558,9 +558,11 @@ CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
/* "black box" video file writer structure */
typedef struct CvVideoWriter CvVideoWriter;
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
return CV_FOURCC_MACRO(c1, c2, c3, c4);
}
#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */
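CV_FOURCC now simply forwards to the new CV_FOURCC_MACRO, so the same bit packing is available both as an inline function and in contexts that need a constant expression. A usage sketch, illustrative only and assuming the usual highgui header is included:

// #include "opencv2/highgui/highgui.hpp"
// int fourcc = CV_FOURCC('X', 'V', 'I', 'D');                    // inline function, now expands to the macro
// static const int kXvid = CV_FOURCC_MACRO('X', 'V', 'I', 'D');  // usable where a compile-time constant is required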

View File

@@ -11,11 +11,21 @@ using std::tr1::get;
typedef perf::TestBaseWithParam<String> VideoCapture_Reading;
#if defined(HAVE_MSMF)
// MPEG2 is not supported by Media Foundation yet
// http://social.msdn.microsoft.com/Forums/en-US/mediafoundationdevelopment/thread/39a36231-8c01-40af-9af5-3c105d684429
PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::Values( "highgui/video/big_buck_bunny.avi",
"highgui/video/big_buck_bunny.mov",
"highgui/video/big_buck_bunny.mp4",
"highgui/video/big_buck_bunny.wmv" ) )
#else
PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::Values( "highgui/video/big_buck_bunny.avi",
"highgui/video/big_buck_bunny.mov",
"highgui/video/big_buck_bunny.mp4",
"highgui/video/big_buck_bunny.mpg",
"highgui/video/big_buck_bunny.wmv" ) )
#endif
{
string filename = getDataPath(GetParam());

View File

@@ -22,10 +22,16 @@ PERF_TEST_P(VideoWriter_Writing, WriteFrame,
{
string filename = getDataPath(get<0>(GetParam()));
bool isColor = get<1>(GetParam());
Mat image = imread(filename, 1);
#if defined(HAVE_MSMF) && !defined(HAVE_VFW) && !defined(HAVE_FFMPEG) // VFW has greater priority
VideoWriter writer(cv::tempfile(".wmv"), CV_FOURCC('W', 'M', 'V', '3'),
25, cv::Size(image.cols, image.rows), isColor);
#else
VideoWriter writer(cv::tempfile(".avi"), CV_FOURCC('X', 'V', 'I', 'D'),
25, cv::Size(image.cols, image.rows), isColor);
#endif
VideoWriter writer(cv::tempfile(".avi"), CV_FOURCC('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor);
TEST_CYCLE() { Mat image = imread(filename, 1); writer << image; }
TEST_CYCLE() { image = imread(filename, 1); writer << image; }
bool dummy = writer.isOpened();
SANITY_CHECK(dummy);

View File

@@ -21,6 +21,7 @@
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF) || \
defined(HAVE_VFW)
/*defined(HAVE_OPENNI) too specialized */ \
@@ -34,6 +35,7 @@
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF) || \
defined(HAVE_VFW)
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
#else

View File

@@ -117,6 +117,9 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
#ifdef HAVE_DSHOW
CV_CAP_DSHOW,
#endif
#ifdef HAVE_MSMF
CV_CAP_MSMF,
#endif
#if 1
CV_CAP_IEEE1394, // identical to CV_CAP_DC1394
#endif
@@ -196,13 +199,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
switch (domains[i])
{
#ifdef HAVE_MSMF
case CV_CAP_MSMF:
capture = cvCreateCameraCapture_MSMF (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_DSHOW
case CV_CAP_DSHOW:
capture = cvCreateCameraCapture_DShow (index);
@@ -210,7 +206,13 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif
#ifdef HAVE_MSMF
case CV_CAP_MSMF:
capture = cvCreateCameraCapture_MSMF (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_TYZX
case CV_CAP_STEREO:
capture = cvCreateCameraCapture_TYZX (index);
@@ -218,14 +220,12 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif
case CV_CAP_VFW:
#ifdef HAVE_VFW
case CV_CAP_VFW:
capture = cvCreateCameraCapture_VFW (index);
if (capture)
return capture;
#endif
#if defined HAVE_LIBV4L || defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO
capture = cvCreateCameraCapture_V4L (index);
if (capture)
@@ -358,6 +358,16 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
if (! result)
result = cvCreateFileCapture_FFMPEG_proxy (filename);
#ifdef HAVE_VFW
if (! result)
result = cvCreateFileCapture_VFW (filename);
#endif
#ifdef HAVE_MSMF
if (! result)
result = cvCreateFileCapture_MSMF (filename);
#endif
#ifdef HAVE_XINE
if (! result)
result = cvCreateFileCapture_XINE (filename);
@@ -406,6 +416,16 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
if(!result)
result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
#ifdef HAVE_VFW
if(!result)
result = cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_MSMF
if (!result)
result = cvCreateVideoWriter_MSMF(filename, fourcc, fps, frameSize, is_color);
#endif
/* #ifdef HAVE_XINE
if(!result)
result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);

View File

@@ -45,7 +45,16 @@
#include <unistd.h>
#include <stdint.h>
#include <sys/select.h>
#ifdef WIN32
// On Windows, we have no sys/select.h, but we need to pick up
// select() which is in winsock2.
#ifndef __SYS_SELECT_H__
#define __SYS_SELECT_H__ 1
#include <winsock2.h>
#endif
#else
#include <sys/select.h>
#endif /*WIN32*/
#include <dc1394/dc1394.h>
#include <stdlib.h>
#include <string.h>

View File

@@ -3195,8 +3195,10 @@ IplImage* CvCaptureCAM_DShow::retrieveFrame(int)
frame = cvCreateImage( cvSize(w,h), 8, 3 );
}
VI.getPixels( index, (uchar*)frame->imageData, false, true );
return frame;
if (VI.getPixels( index, (uchar*)frame->imageData, false, true ))
return frame;
else
return NULL;
}
double CvCaptureCAM_DShow::getProperty( int property_id )

View File

@@ -209,11 +209,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
if( result->open( filename ))
return result;
delete result;
#ifdef HAVE_VFW
return cvCreateFileCapture_VFW(filename);
#else
return 0;
#endif
}
class CvVideoWriter_FFMPEG_proxy :
@@ -263,9 +259,5 @@ CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourc
if( result->open( filename, fourcc, fps, frameSize, isColor != 0 ))
return result;
delete result;
#ifdef HAVE_VFW
return cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, isColor);
#else
return 0;
#endif
}

View File

@@ -2,6 +2,7 @@
* cap_ios_abstract_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* by Alexander Shishkov on 17/07/13
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -405,4 +406,89 @@
}
}
- (void)lockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked focus configuration %@", [error localizedDescription]);
}
}
}
- (void) unlockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocus configuration %@", [error localizedDescription]);
}
}
}
- (void)lockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked exposure configuration %@", [error localizedDescription]);
}
}
}
- (void) unlockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autoexposure configuration %@", [error localizedDescription]);
}
}
}
- (void)lockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked white balance configuration %@", [error localizedDescription]);
}
}
}
- (void) unlockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for auto white balance configuration %@", [error localizedDescription]);
}
}
}
@end

View File

@@ -2,6 +2,7 @@
* cap_ios_video_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* by Alexander Shishkov on 17/07/13
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,7 +31,6 @@
#import "opencv2/highgui/cap_ios.h"
#include "precomp.hpp"
#import <AssetsLibrary/AssetsLibrary.h>
@@ -70,6 +70,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
@synthesize videoDataOutput;
@synthesize recordVideo;
@synthesize rotateVideo;
//@synthesize videoFileOutput;
@synthesize recordAssetWriterInput;
@synthesize recordPixelBufferAdaptor;
@@ -85,6 +86,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
if (self) {
self.useAVCaptureVideoPreviewLayer = NO;
self.recordVideo = NO;
self.rotateVideo = NO;
}
return self;
}
@@ -269,13 +271,8 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
}
#pragma mark - Private Interface
- (void)createVideoDataOutput;
{
// Make a video data output
@@ -389,6 +386,38 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
[self.parentView.layer addSublayer:self.customPreviewLayer];
}
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
CGSize frameSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image));
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:NO], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:NO], kCVPixelBufferCGBitmapContextCompatibilityKey,
nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, frameSize.width,
frameSize.height, kCVPixelFormatType_32ARGB, (CFDictionaryRef) CFBridgingRetain(options),
&pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, frameSize.width,
frameSize.height, 8, 4*frameSize.width, rgbColorSpace,
kCGImageAlphaPremultipliedFirst);
CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
CGImageGetHeight(image)), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
#pragma mark - Protocol AVCaptureVideoDataOutputSampleBufferDelegate
@@ -522,7 +551,8 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
}
if (self.recordAssetWriterInput.readyForMoreMediaData) {
if (! [self.recordPixelBufferAdaptor appendPixelBuffer:imageBuffer
CVImageBufferRef pixelBuffer = [self pixelBufferFromCGImage:dstImage];
if (! [self.recordPixelBufferAdaptor appendPixelBuffer:pixelBuffer
withPresentationTime:lastSampleTime] ) {
NSLog(@"Video Writing Error");
}
@@ -543,9 +573,12 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
- (void)updateOrientation;
{
NSLog(@"rotate..");
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
[self layoutPreviewLayer];
if (self.rotateVideo == YES)
{
NSLog(@"rotate..");
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
[self layoutPreviewLayer];
}
}
@@ -583,3 +616,4 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
}
@end

File diff suppressed because it is too large

View File

@@ -613,8 +613,10 @@ bool CvVideoWriter_VFW::open( const char* filename, int _fourcc, double _fps, Cv
close();
return false;
}
return true;
}
return true;
else
return false;
}

View File

@@ -20,25 +20,24 @@ public:
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_XIAPI; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
private:
void init();
void errMsg(const char* msg, int errNum);
void resetCvImage();
int getBpp();
IplImage* frame;
HANDLE hmv;
DWORD numDevices;
XI_IMG image;
int width;
int height;
int format;
int timeout;
XI_IMG image;
};
/**********************************************************************************/
CvCapture* cvCreateCameraCapture_XIMEA( int index )
{
CvCaptureCAM_XIMEA* capture = new CvCaptureCAM_XIMEA;
CvCaptureCAM_XIMEA* capture = new CvCaptureCAM_XIMEA;
if( capture->open( index ))
return capture;
@@ -79,18 +78,19 @@ bool CvCaptureCAM_XIMEA::open( int wIndex )
// always use auto white ballance
mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, 1);
if(mvret != XI_OK) goto error;
// default image format RGB24
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_RGB24);
if(mvret != XI_OK) goto error;
int width = 0;
mvret = xiGetParamInt( hmv, XI_PRM_WIDTH, &width);
if(mvret != XI_OK) goto error;
int height = 0;
mvret = xiGetParamInt( hmv, XI_PRM_HEIGHT, &height);
if(mvret != XI_OK) goto error;
// default image format RGB24
format = XI_RGB24;
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, format);
if(mvret != XI_OK) goto error;
// allocate frame buffer for RGB24 image
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 3);
@@ -103,10 +103,10 @@ bool CvCaptureCAM_XIMEA::open( int wIndex )
errMsg("StartAcquisition XI_DEVICE failed", mvret);
goto error;
}
return true;
error:
errMsg("Open XI_DEVICE failed", mvret);
xiCloseDevice(hmv);
hmv = NULL;
return false;
@@ -116,18 +116,19 @@ error:
void CvCaptureCAM_XIMEA::close()
{
if(hmv)
{
xiStopAcquisition(hmv);
xiCloseDevice(hmv);
hmv = NULL;
}
if(frame)
cvReleaseImage(&frame);
xiStopAcquisition(hmv);
xiCloseDevice(hmv);
hmv = NULL;
}
/**********************************************************************************/
bool CvCaptureCAM_XIMEA::grabFrame()
{
memset(&image, 0, sizeof(XI_IMG));
image.size = sizeof(XI_IMG);
int mvret = xiGetImage( hmv, timeout, &image);
@@ -151,31 +152,18 @@ bool CvCaptureCAM_XIMEA::grabFrame()
IplImage* CvCaptureCAM_XIMEA::retrieveFrame(int)
{
// update cvImage after format has changed
if( (int)image.width != width || (int)image.height != height || image.frm != (XI_IMG_FORMAT)format)
{
cvReleaseImage(&frame);
switch( image.frm)
{
case XI_MONO8 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 1); break;
case XI_MONO16 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_16U, 1); break;
case XI_RGB24 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 3); break;
case XI_RGB32 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 4); break;
default :
return frame;
}
// update global image format
format = image.frm;
width = image.width;
height = image.height;
}
resetCvImage();
// copy pixel data
switch( image.frm)
{
case XI_MONO8 : memcpy( frame->imageData, image.bp, image.width*image.height); break;
case XI_MONO16 : memcpy( frame->imageData, image.bp, image.width*image.height*sizeof(WORD)); break;
case XI_RGB24 : memcpy( frame->imageData, image.bp, image.width*image.height*3); break;
case XI_RGB32 : memcpy( frame->imageData, image.bp, image.width*image.height*sizeof(DWORD)); break;
case XI_MONO8 :
case XI_RAW8 : memcpy( frame->imageData, image.bp, image.width*image.height); break;
case XI_MONO16 :
case XI_RAW16 : memcpy( frame->imageData, image.bp, image.width*image.height*sizeof(WORD)); break;
case XI_RGB24 :
case XI_RGB_PLANAR : memcpy( frame->imageData, image.bp, image.width*image.height*3); break;
case XI_RGB32 : memcpy( frame->imageData, image.bp, image.width*image.height*4); break;
default: break;
}
return frame;
@@ -183,6 +171,35 @@ IplImage* CvCaptureCAM_XIMEA::retrieveFrame(int)
/**********************************************************************************/
void CvCaptureCAM_XIMEA::resetCvImage()
{
int width = 0, height = 0, format = 0;
xiGetParamInt( hmv, XI_PRM_WIDTH, &width);
xiGetParamInt( hmv, XI_PRM_HEIGHT, &height);
xiGetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, &format);
if( (int)image.width != width || (int)image.height != height || image.frm != (XI_IMG_FORMAT)format)
{
if(frame) cvReleaseImage(&frame);
frame = NULL;
switch( image.frm)
{
case XI_MONO8 :
case XI_RAW8 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 1); break;
case XI_MONO16 :
case XI_RAW16 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_16U, 1); break;
case XI_RGB24 :
case XI_RGB_PLANAR : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 3); break;
case XI_RGB32 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 4); break;
default :
return;
}
}
cvZero(frame);
}
/**********************************************************************************/
double CvCaptureCAM_XIMEA::getProperty( int property_id )
{
if(hmv == NULL)
@@ -238,20 +255,14 @@ bool CvCaptureCAM_XIMEA::setProperty( int property_id, double value )
switch(property_id)
{
// OCV parameters
case CV_CAP_PROP_FRAME_WIDTH : mvret = xiSetParamInt( hmv, XI_PRM_WIDTH, ival);
if(mvret == XI_OK) width = ival;
break;
case CV_CAP_PROP_FRAME_HEIGHT : mvret = xiSetParamInt( hmv, XI_PRM_HEIGHT, ival);
if(mvret == XI_OK) height = ival;
break;
case CV_CAP_PROP_FRAME_WIDTH : mvret = xiSetParamInt( hmv, XI_PRM_WIDTH, ival); break;
case CV_CAP_PROP_FRAME_HEIGHT : mvret = xiSetParamInt( hmv, XI_PRM_HEIGHT, ival); break;
case CV_CAP_PROP_FPS : mvret = xiSetParamFloat( hmv, XI_PRM_FRAMERATE, fval); break;
case CV_CAP_PROP_GAIN : mvret = xiSetParamFloat( hmv, XI_PRM_GAIN, fval); break;
case CV_CAP_PROP_EXPOSURE : mvret = xiSetParamInt( hmv, XI_PRM_EXPOSURE, ival); break;
// XIMEA camera properties
case CV_CAP_PROP_XI_DOWNSAMPLING : mvret = xiSetParamInt( hmv, XI_PRM_DOWNSAMPLING, ival); break;
case CV_CAP_PROP_XI_DATA_FORMAT : mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, ival);
if(mvret == XI_OK) format = ival;
break;
case CV_CAP_PROP_XI_DATA_FORMAT : mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, ival); break;
case CV_CAP_PROP_XI_OFFSET_X : mvret = xiSetParamInt( hmv, XI_PRM_OFFSET_X, ival); break;
case CV_CAP_PROP_XI_OFFSET_Y : mvret = xiSetParamInt( hmv, XI_PRM_OFFSET_Y, ival); break;
case CV_CAP_PROP_XI_TRG_SOURCE : mvret = xiSetParamInt( hmv, XI_PRM_TRG_SOURCE, ival); break;
@@ -288,7 +299,7 @@ bool CvCaptureCAM_XIMEA::setProperty( int property_id, double value )
void CvCaptureCAM_XIMEA::errMsg(const char* msg, int errNum)
{
#if defined WIN32 || defined _WIN32
char buf[512];
char buf[512]="";
sprintf( buf, "%s : %d\n", msg, errNum);
OutputDebugString(buf);
#else
@@ -296,4 +307,22 @@ void CvCaptureCAM_XIMEA::errMsg(const char* msg, int errNum)
#endif
}
/**********************************************************************************/
int CvCaptureCAM_XIMEA::getBpp()
{
switch( image.frm)
{
case XI_MONO8 :
case XI_RAW8 : return 1;
case XI_MONO16 :
case XI_RAW16 : return 2;
case XI_RGB24 :
case XI_RGB_PLANAR : return 3;
case XI_RGB32 : return 4;
default :
return 0;
}
}
/**********************************************************************************/
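For reference, a minimal capture loop against the XIMEA backend touched above might look like the following C++ sketch (assuming an OpenCV build with WITH_XIMEA and a connected camera; the property values are illustrative only):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    // CV_CAP_XIAPI selects the XIMEA backend implemented by CvCaptureCAM_XIMEA
    cv::VideoCapture cap(CV_CAP_XIAPI);
    if (!cap.isOpened())
        return 1;

    // Illustrative only: request a smaller frame before grabbing
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    cv::Mat frame;
    for (int i = 0; i < 100; ++i)
    {
        if (!cap.read(frame) || frame.empty())
            break;
        cv::imshow("ximea", frame);
        if (cv::waitKey(1) == 27) // ESC
            break;
    }
    return 0;
}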

View File

@@ -119,6 +119,9 @@ CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_MSMF( int index );
CvCapture* cvCreateFileCapture_MSMF (const char* filename);
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_OpenNI( int index );
CvCapture* cvCreateFileCapture_OpenNI( const char* filename );
CvCapture* cvCreateCameraCapture_Android( int index );

View File

@@ -256,12 +256,17 @@ namespace
void cv::imshow( const string& winname, InputArray _img )
{
const Size size = _img.size();
#ifndef HAVE_OPENGL
Mat img = _img.getMat();
CvMat c_img = img;
cvShowImage(winname.c_str(), &c_img);
CV_Assert(size.width>0 && size.height>0);
{
Mat img = _img.getMat();
CvMat c_img = img;
cvShowImage(winname.c_str(), &c_img);
}
#else
const double useGl = getWindowProperty(winname, WND_PROP_OPENGL);
CV_Assert(size.width>0 && size.height>0);
if (useGl <= 0)
{
@@ -275,7 +280,6 @@ void cv::imshow( const string& winname, InputArray _img )
if (autoSize > 0)
{
Size size = _img.size();
resizeWindow(winname, size.width, size.height);
}

View File

@@ -38,6 +38,7 @@
//--------------------Google Code 2010 -- Yannick Verdie--------------------//
#include "precomp.hpp"
#if defined(HAVE_QT)
@@ -2473,35 +2474,33 @@ void DefaultViewPort::saveView()
if (!fileName.isEmpty()) //save the picture
{
QString extension = fileName.right(3);
// (no need anymore) create the image resized to receive the 'screenshot'
// image2Draw_qt_resized = QImage(viewport()->width(), viewport()->height(),QImage::Format_RGB888);
QPainter saveimage(&image2Draw_qt_resized);
this->render(&saveimage);
// Create a new pixmap to render the viewport into
QPixmap viewportPixmap(viewport()->size());
viewport()->render(&viewportPixmap);
// Save it..
if (QString::compare(extension, "png", Qt::CaseInsensitive) == 0)
{
image2Draw_qt_resized.save(fileName, "PNG");
viewportPixmap.save(fileName, "PNG");
return;
}
if (QString::compare(extension, "jpg", Qt::CaseInsensitive) == 0)
{
image2Draw_qt_resized.save(fileName, "JPG");
viewportPixmap.save(fileName, "JPG");
return;
}
if (QString::compare(extension, "bmp", Qt::CaseInsensitive) == 0)
{
image2Draw_qt_resized.save(fileName, "BMP");
viewportPixmap.save(fileName, "BMP");
return;
}
if (QString::compare(extension, "jpeg", Qt::CaseInsensitive) == 0)
{
image2Draw_qt_resized.save(fileName, "JPEG");
viewportPixmap.save(fileName, "JPEG");
return;
}
@@ -2651,17 +2650,16 @@ void DefaultViewPort::paintEvent(QPaintEvent* evnt)
//Now disable matrixWorld for overlay display
myPainter.setWorldMatrixEnabled(false);
//overlay pixel values if zoomed in far enough
if (param_matrixWorld.m11()*ratioX >= threshold_zoom_img_region &&
param_matrixWorld.m11()*ratioY >= threshold_zoom_img_region)
{
drawImgRegion(&myPainter);
}
//in mode zoom/panning
if (param_matrixWorld.m11() > 1)
{
if (param_matrixWorld.m11() >= threshold_zoom_img_region)
{
if (centralWidget->param_flags == CV_WINDOW_NORMAL)
startDisplayInfo("WARNING: The values displayed are the resized image's values. If you want the original image's values, use CV_WINDOW_AUTOSIZE", 1000);
drawImgRegion(&myPainter);
}
drawViewOverview(&myPainter);
}
@@ -2887,22 +2885,24 @@ void DefaultViewPort::drawStatusBar()
//accept only CV_8UC1 and CV_8UC8 image for now
void DefaultViewPort::drawImgRegion(QPainter *painter)
{
if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)
return;
qreal offsetX = param_matrixWorld.dx()/param_matrixWorld.m11();
double pixel_width = param_matrixWorld.m11()*ratioX;
double pixel_height = param_matrixWorld.m11()*ratioY;
qreal offsetX = param_matrixWorld.dx()/pixel_width;
offsetX = offsetX - floor(offsetX);
qreal offsetY = param_matrixWorld.dy()/param_matrixWorld.m11();
qreal offsetY = param_matrixWorld.dy()/pixel_height;
offsetY = offsetY - floor(offsetY);
QSize view = size();
QVarLengthArray<QLineF, 30> linesX;
for (qreal _x = offsetX*param_matrixWorld.m11(); _x < view.width(); _x += param_matrixWorld.m11() )
for (qreal _x = offsetX*pixel_width; _x < view.width(); _x += pixel_width )
linesX.append(QLineF(_x, 0, _x, view.height()));
QVarLengthArray<QLineF, 30> linesY;
for (qreal _y = offsetY*param_matrixWorld.m11(); _y < view.height(); _y += param_matrixWorld.m11() )
for (qreal _y = offsetY*pixel_height; _y < view.height(); _y += pixel_height )
linesY.append(QLineF(0, _y, view.width(), _y));
@@ -2910,27 +2910,25 @@ void DefaultViewPort::drawImgRegion(QPainter *painter)
int original_font_size = f.pointSize();
//change font size
//f.setPointSize(4+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);
f.setPixelSize(10+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);
f.setPixelSize(10+(pixel_height-threshold_zoom_img_region)/5);
painter->setFont(f);
QString val;
QRgb rgbValue;
QPointF point1;//sorry, I do not know how to name it
QPointF point2;//idem
for (int j=-1;j<height()/param_matrixWorld.m11();j++)//-1 because display the pixels top rows left colums
for (int i=-1;i<width()/param_matrixWorld.m11();i++)//-1
for (int j=-1;j<height()/pixel_height;j++)//-1 because display the pixels top rows left columns
for (int i=-1;i<width()/pixel_width;i++)//-1
{
point1.setX((i+offsetX)*param_matrixWorld.m11());
point1.setY((j+offsetY)*param_matrixWorld.m11());
// Calculate top left of the pixel's position in the viewport (screen space)
QPointF pos_in_view((i+offsetX)*pixel_width, (j+offsetY)*pixel_height);
matrixWorld_inv.map(point1.x(),point1.y(),&point2.rx(),&point2.ry());
// Calculate top left of the pixel's position in the image (image space)
QPointF pos_in_image = matrixWorld_inv.map(pos_in_view);// Top left of pixel in view
pos_in_image.rx() = pos_in_image.x()/ratioX;
pos_in_image.ry() = pos_in_image.y()/ratioY;
QPoint point_in_image(pos_in_image.x() + 0.5f,pos_in_image.y() + 0.5f);// Add 0.5 for rounding
point2.rx()= (long) (point2.x() + 0.5);
point2.ry()= (long) (point2.y() + 0.5);
if (point2.x() >= 0 && point2.y() >= 0)
rgbValue = image2Draw_qt_resized.pixel(QPoint(point2.x(),point2.y()));
QRgb rgbValue;
if (image2Draw_qt.valid(point_in_image))
rgbValue = image2Draw_qt.pixel(point_in_image);
else
rgbValue = qRgb(0,0,0);
@@ -2943,29 +2941,29 @@ void DefaultViewPort::drawImgRegion(QPainter *painter)
painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/2),
Qt::AlignCenter, val);
*/
QString val;
val = tr("%1").arg(qRed(rgbValue));
painter->setPen(QPen(Qt::red, 1));
painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/3),
painter->drawText(QRect(pos_in_view.x(),pos_in_view.y(),pixel_width,pixel_height/3),
Qt::AlignCenter, val);
val = tr("%1").arg(qGreen(rgbValue));
painter->setPen(QPen(Qt::green, 1));
painter->drawText(QRect(point1.x(),point1.y()+param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),
painter->drawText(QRect(pos_in_view.x(),pos_in_view.y()+pixel_height/3,pixel_width,pixel_height/3),
Qt::AlignCenter, val);
val = tr("%1").arg(qBlue(rgbValue));
painter->setPen(QPen(Qt::blue, 1));
painter->drawText(QRect(point1.x(),point1.y()+2*param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),
painter->drawText(QRect(pos_in_view.x(),pos_in_view.y()+2*pixel_height/3,pixel_width,pixel_height/3),
Qt::AlignCenter, val);
}
if (nbChannelOriginImage==CV_8UC1)
{
val = tr("%1").arg(qRed(rgbValue));
painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()),
QString val = tr("%1").arg(qRed(rgbValue));
painter->drawText(QRect(pos_in_view.x(),pos_in_view.y(),pixel_width,pixel_height),
Qt::AlignCenter, val);
}
}

View File

@@ -522,7 +522,6 @@ private:
CvMat* image2Draw_mat;
QImage image2Draw_qt;
QImage image2Draw_qt_resized;
int nbChannelOriginImage;
//for mouse callback

View File

@@ -47,7 +47,8 @@
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
/*defined(HAVE_OPENNI) || too specialized */ \
defined(HAVE_FFMPEG)
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF)
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
@@ -57,7 +58,8 @@
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG)
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF)
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0

View File

@@ -54,6 +54,35 @@ string fourccToString(int fourcc)
return format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
}
#ifdef HAVE_MSMF
const VideoFormat g_specific_fmt_list[] =
{
/*VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', '2', '5')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', '5', '0')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'c', ' ')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'h', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'h', 'd')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 's', 'd')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 's', 'l')),
VideoFormat("wmv", CV_FOURCC_MACRO('H', '2', '6', '3')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', '4', 'S', '2')),
VideoFormat("avi", CV_FOURCC_MACRO('M', 'J', 'P', 'G')),
VideoFormat("mp4", CV_FOURCC_MACRO('M', 'P', '4', 'S')),
VideoFormat("mp4", CV_FOURCC_MACRO('M', 'P', '4', 'V')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'P', '4', '3')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'P', 'G', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'S', 'S', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'S', 'S', '2')),*/
#if !defined(_M_ARM)
VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '2')),
#endif
VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '3')),
VideoFormat("avi", CV_FOURCC_MACRO('H', '2', '6', '4')),
//VideoFormat("wmv", CV_FOURCC_MACRO('W', 'V', 'C', '1')),
VideoFormat()
};
#else
const VideoFormat g_specific_fmt_list[] =
{
VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')),
@@ -63,17 +92,17 @@ const VideoFormat g_specific_fmt_list[] =
VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')),
VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')),
VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')),
VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')),
VideoFormat()
};
#endif
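A hedged usage sketch of what the MSMF-specific format list above exercises; assuming a Windows build with HAVE_MSMF, writing a short WMV3 clip could look like this (file name and frame content are placeholders):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    // One of the container/fourcc pairs from g_specific_fmt_list above
    cv::VideoWriter writer("test.wmv", CV_FOURCC('W', 'M', 'V', '3'),
                           24 /*fps*/, cv::Size(640, 480), true /*color*/);
    if (!writer.isOpened())
        return 1;

    cv::Mat frame(480, 640, CV_8UC3);
    for (int i = 0; i < 48; ++i)            // two seconds of synthetic frames
    {
        frame.setTo(cv::Scalar(i * 5 % 256, 128, 255 - i * 5 % 256));
        writer << frame;
    }
    return 0;                               // writer is released on destruction
}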
}
class CV_HighGuiTest : public cvtest::BaseTest
{
protected:
void ImageTest(const string& dir);
void ImageTest (const string& dir);
void VideoTest (const string& dir, const cvtest::VideoFormat& fmt);
void SpecificImageTest (const string& dir);
void SpecificVideoTest (const string& dir, const cvtest::VideoFormat& fmt);
@@ -242,19 +271,19 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt
for(;;)
{
IplImage * img = cvQueryFrame( cap );
IplImage* img = cvQueryFrame( cap );
if (!img)
break;
frames.push_back(Mat(img).clone());
if (writer == 0)
if (writer == NULL)
{
writer = cvCreateVideoWriter(tmp_name.c_str(), fmt.fourcc, 24, cvGetSize(img));
if (writer == 0)
if (writer == NULL)
{
ts->printf(ts->LOG, "can't create writer (with fourcc : %d)\n",
ts->printf(ts->LOG, "can't create writer (with fourcc : %s)\n",
cvtest::fourccToString(fmt.fourcc).c_str());
cvReleaseCapture( &cap );
ts->set_failed_test_info(ts->FAIL_MISMATCH);
@@ -290,15 +319,22 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt
double psnr = PSNR(img1, img);
if (psnr < thresDbell)
{
printf("Too low psnr = %gdb\n", psnr);
// imwrite("img.png", img);
// imwrite("img1.png", img1);
ts->printf(ts->LOG, "Too low frame %d psnr = %gdb\n", i, psnr);
ts->set_failed_test_info(ts->FAIL_MISMATCH);
//imwrite("original.png", img);
//imwrite("after_test.png", img1);
//Mat diff;
//absdiff(img, img1, diff);
//imwrite("diff.png", diff);
break;
}
}
printf("Before saved release for %s\n", tmp_name.c_str());
cvReleaseCapture( &saved );
printf("After release\n");
ts->printf(ts->LOG, "end test function : ImagesVideo \n");
}
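The PSNR threshold used in the test above follows the usual definition, PSNR = 10 * log10(255^2 / MSE). A self-contained C++ sketch of the same metric (assuming two 8-bit images of equal size and type; the helper name is illustrative) is:

#include <opencv2/core/core.hpp>
#include <cmath>

// PSNR = 10 * log10(255^2 / MSE), with MSE averaged over all pixels and channels
static double psnr8u(const cv::Mat& a, const cv::Mat& b)
{
    CV_Assert(a.size() == b.size() && a.type() == b.type());
    double l2  = cv::norm(a, b, cv::NORM_L2);                 // sqrt of summed squared diffs
    double mse = (l2 * l2) / (double)(a.total() * a.channels());
    if (mse <= 1e-10)
        return 1000.0;                                        // effectively identical images
    return 10.0 * std::log10(255.0 * 255.0 / mse);
}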

View File

@@ -116,6 +116,7 @@ If you use ``cvtColor`` with 8-bit images, the conversion will have some informa
The function can do the following transformations:
*
RGB :math:`\leftrightarrow` GRAY ( ``CV_BGR2GRAY, CV_RGB2GRAY, CV_GRAY2BGR, CV_GRAY2RGB`` )
Transformations within RGB space like adding/removing the alpha channel, reversing the channel order, conversion to/from 16-bit RGB color (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using:
.. math::
@@ -765,7 +766,7 @@ Runs the GrabCut algorithm.
* **GC_PR_BGD** defines a possible background pixel.
* **GC_PR_BGD** defines a possible foreground pixel.
* **GC_PR_FGD** defines a possible foreground pixel.
:param rect: ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when ``mode==GC_INIT_WITH_RECT`` .
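For context, a minimal grabCut call matching the documented mask values and the rect parameter described above (image path and ROI are placeholders, not part of the documentation):

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat img = cv::imread("input.jpg");       // placeholder path
    if (img.empty())
        return 1;

    cv::Mat mask, bgdModel, fgdModel;
    cv::Rect rect(50, 50, 200, 200);             // illustrative ROI around the object

    // Pixels outside 'rect' are treated as obvious background
    cv::grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv::GC_INIT_WITH_RECT);

    // Keep only pixels labelled foreground or probable foreground
    cv::Mat fg = (mask == cv::GC_FGD) | (mask == cv::GC_PR_FGD);
    cv::Mat result;
    img.copyTo(result, fg);
    cv::imwrite("segmented.png", result);
    return 0;
}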

View File

@@ -258,7 +258,8 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u,
declare.time(100);
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn);
int runs = sz.width <= 320 ? 70 : 5;
TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn);
SANITY_CHECK(dst, 1);
}
@@ -334,7 +335,8 @@ PERF_TEST_P(Size_CvtMode3, cvtColorRGB2YUV420p,
declare.time(100);
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn);
int runs = (sz.width <= 640) ? 10 : 1;
TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn);
SANITY_CHECK(dst, 1);
}

View File

@@ -19,7 +19,8 @@ PERF_TEST_P(Size_MatType, erode, TYPICAL_MATS_MORPH)
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() erode(src, dst, noArray());
int runs = (sz.width <= 320) ? 15 : 1;
TEST_CYCLE_MULTIRUN(runs) erode(src, dst, noArray());
SANITY_CHECK(dst);
}

View File

@@ -63,7 +63,8 @@ PERF_TEST_P( TestRemap, Remap,
declare.in(src, WARMUP_RNG).out(dst).time(20);
TEST_CYCLE() remap(src, dst, map1, map2, inter_type);
int runs = (sz.width <= 640) ? 3 : 1;
TEST_CYCLE_MULTIRUN(runs) remap(src, dst, map1, map2, inter_type);
SANITY_CHECK(dst);
}

View File

@@ -85,7 +85,8 @@ PERF_TEST_P(MatInfo_Size_Scale, ResizeAreaFast,
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() resize(src, dst, dst.size(), 0, 0, INTER_AREA);
int runs = 15;
TEST_CYCLE_MULTIRUN(runs) resize(src, dst, dst.size(), 0, 0, INTER_AREA);
//difference equal to 1 is allowed because of different possible rounding modes: round-to-nearest vs bankers' rounding
SANITY_CHECK(dst, 1);

View File

@@ -32,7 +32,7 @@ PERF_TEST_P(Size_MatType_ThreshType, threshold,
declare.in(src, WARMUP_RNG).out(dst);
int runs = (sz.width <= 640) ? 8 : 1;
int runs = (sz.width <= 640) ? 40 : 1;
TEST_CYCLE_MULTIRUN(runs) threshold(src, dst, thresh, maxval, threshType);
SANITY_CHECK(dst);
@@ -51,7 +51,8 @@ PERF_TEST_P(Size_Only, threshold_otsu, testing::Values(TYPICAL_MAT_SIZES))
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() threshold(src, dst, 0, maxval, THRESH_BINARY|THRESH_OTSU);
int runs = 15;
TEST_CYCLE_MULTIRUN(runs) threshold(src, dst, 0, maxval, THRESH_BINARY|THRESH_OTSU);
SANITY_CHECK(dst);
}

View File

@@ -0,0 +1,334 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the copyright holders or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
// ----------------------------------------------------------------------
// CLAHE
namespace
{
class CLAHE_CalcLut_Body : public cv::ParallelLoopBody
{
public:
CLAHE_CalcLut_Body(const cv::Mat& src, cv::Mat& lut, cv::Size tileSize, int tilesX, int tilesY, int clipLimit, float lutScale) :
src_(src), lut_(lut), tileSize_(tileSize), tilesX_(tilesX), tilesY_(tilesY), clipLimit_(clipLimit), lutScale_(lutScale)
{
}
void operator ()(const cv::Range& range) const;
private:
cv::Mat src_;
mutable cv::Mat lut_;
cv::Size tileSize_;
int tilesX_;
int tilesY_;
int clipLimit_;
float lutScale_;
};
void CLAHE_CalcLut_Body::operator ()(const cv::Range& range) const
{
const int histSize = 256;
uchar* tileLut = lut_.ptr(range.start);
const size_t lut_step = lut_.step;
for (int k = range.start; k < range.end; ++k, tileLut += lut_step)
{
const int ty = k / tilesX_;
const int tx = k % tilesX_;
// retrieve tile submatrix
cv::Rect tileROI;
tileROI.x = tx * tileSize_.width;
tileROI.y = ty * tileSize_.height;
tileROI.width = tileSize_.width;
tileROI.height = tileSize_.height;
const cv::Mat tile = src_(tileROI);
// calc histogram
int tileHist[histSize] = {0, };
int height = tileROI.height;
const size_t sstep = tile.step;
for (const uchar* ptr = tile.ptr<uchar>(0); height--; ptr += sstep)
{
int x = 0;
for (; x <= tileROI.width - 4; x += 4)
{
int t0 = ptr[x], t1 = ptr[x+1];
tileHist[t0]++; tileHist[t1]++;
t0 = ptr[x+2]; t1 = ptr[x+3];
tileHist[t0]++; tileHist[t1]++;
}
for (; x < tileROI.width; ++x)
tileHist[ptr[x]]++;
}
// clip histogram
if (clipLimit_ > 0)
{
// how many pixels were clipped
int clipped = 0;
for (int i = 0; i < histSize; ++i)
{
if (tileHist[i] > clipLimit_)
{
clipped += tileHist[i] - clipLimit_;
tileHist[i] = clipLimit_;
}
}
// redistribute clipped pixels
int redistBatch = clipped / histSize;
int residual = clipped - redistBatch * histSize;
for (int i = 0; i < histSize; ++i)
tileHist[i] += redistBatch;
for (int i = 0; i < residual; ++i)
tileHist[i]++;
}
// calc Lut
int sum = 0;
for (int i = 0; i < histSize; ++i)
{
sum += tileHist[i];
tileLut[i] = cv::saturate_cast<uchar>(sum * lutScale_);
}
}
}
class CLAHE_Interpolation_Body : public cv::ParallelLoopBody
{
public:
CLAHE_Interpolation_Body(const cv::Mat& src, cv::Mat& dst, const cv::Mat& lut, cv::Size tileSize, int tilesX, int tilesY) :
src_(src), dst_(dst), lut_(lut), tileSize_(tileSize), tilesX_(tilesX), tilesY_(tilesY)
{
}
void operator ()(const cv::Range& range) const;
private:
cv::Mat src_;
mutable cv::Mat dst_;
cv::Mat lut_;
cv::Size tileSize_;
int tilesX_;
int tilesY_;
};
void CLAHE_Interpolation_Body::operator ()(const cv::Range& range) const
{
const size_t lut_step = lut_.step;
for (int y = range.start; y < range.end; ++y)
{
const uchar* srcRow = src_.ptr<uchar>(y);
uchar* dstRow = dst_.ptr<uchar>(y);
const float tyf = (static_cast<float>(y) / tileSize_.height) - 0.5f;
int ty1 = cvFloor(tyf);
int ty2 = ty1 + 1;
const float ya = tyf - ty1;
ty1 = std::max(ty1, 0);
ty2 = std::min(ty2, tilesY_ - 1);
const uchar* lutPlane1 = lut_.ptr(ty1 * tilesX_);
const uchar* lutPlane2 = lut_.ptr(ty2 * tilesX_);
for (int x = 0; x < src_.cols; ++x)
{
const float txf = (static_cast<float>(x) / tileSize_.width) - 0.5f;
int tx1 = cvFloor(txf);
int tx2 = tx1 + 1;
const float xa = txf - tx1;
tx1 = std::max(tx1, 0);
tx2 = std::min(tx2, tilesX_ - 1);
const int srcVal = srcRow[x];
const size_t ind1 = tx1 * lut_step + srcVal;
const size_t ind2 = tx2 * lut_step + srcVal;
float res = 0;
res += lutPlane1[ind1] * ((1.0f - xa) * (1.0f - ya));
res += lutPlane1[ind2] * ((xa) * (1.0f - ya));
res += lutPlane2[ind1] * ((1.0f - xa) * (ya));
res += lutPlane2[ind2] * ((xa) * (ya));
dstRow[x] = cv::saturate_cast<uchar>(res);
}
}
}
class CLAHE_Impl : public cv::CLAHE
{
public:
CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
cv::AlgorithmInfo* info() const;
void apply(cv::InputArray src, cv::OutputArray dst);
void setClipLimit(double clipLimit);
double getClipLimit() const;
void setTilesGridSize(cv::Size tileGridSize);
cv::Size getTilesGridSize() const;
void collectGarbage();
private:
double clipLimit_;
int tilesX_;
int tilesY_;
cv::Mat srcExt_;
cv::Mat lut_;
};
CLAHE_Impl::CLAHE_Impl(double clipLimit, int tilesX, int tilesY) :
clipLimit_(clipLimit), tilesX_(tilesX), tilesY_(tilesY)
{
}
CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE",
obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
obj.info()->addParam(obj, "tilesX", obj.tilesX_);
obj.info()->addParam(obj, "tilesY", obj.tilesY_))
void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
{
cv::Mat src = _src.getMat();
CV_Assert( src.type() == CV_8UC1 );
_dst.create( src.size(), src.type() );
cv::Mat dst = _dst.getMat();
const int histSize = 256;
lut_.create(tilesX_ * tilesY_, histSize, CV_8UC1);
cv::Size tileSize;
cv::Mat srcForLut;
if (src.cols % tilesX_ == 0 && src.rows % tilesY_ == 0)
{
tileSize = cv::Size(src.cols / tilesX_, src.rows / tilesY_);
srcForLut = src;
}
else
{
cv::copyMakeBorder(src, srcExt_, 0, tilesY_ - (src.rows % tilesY_), 0, tilesX_ - (src.cols % tilesX_), cv::BORDER_REFLECT_101);
tileSize = cv::Size(srcExt_.cols / tilesX_, srcExt_.rows / tilesY_);
srcForLut = srcExt_;
}
const int tileSizeTotal = tileSize.area();
const float lutScale = static_cast<float>(histSize - 1) / tileSizeTotal;
int clipLimit = 0;
if (clipLimit_ > 0.0)
{
clipLimit = static_cast<int>(clipLimit_ * tileSizeTotal / histSize);
clipLimit = std::max(clipLimit, 1);
}
CLAHE_CalcLut_Body calcLutBody(srcForLut, lut_, tileSize, tilesX_, tilesY_, clipLimit, lutScale);
cv::parallel_for_(cv::Range(0, tilesX_ * tilesY_), calcLutBody);
CLAHE_Interpolation_Body interpolationBody(src, dst, lut_, tileSize, tilesX_, tilesY_);
cv::parallel_for_(cv::Range(0, src.rows), interpolationBody);
}
void CLAHE_Impl::setClipLimit(double clipLimit)
{
clipLimit_ = clipLimit;
}
double CLAHE_Impl::getClipLimit() const
{
return clipLimit_;
}
void CLAHE_Impl::setTilesGridSize(cv::Size tileGridSize)
{
tilesX_ = tileGridSize.width;
tilesY_ = tileGridSize.height;
}
cv::Size CLAHE_Impl::getTilesGridSize() const
{
return cv::Size(tilesX_, tilesY_);
}
void CLAHE_Impl::collectGarbage()
{
srcExt_.release();
lut_.release();
}
}
cv::Ptr<cv::CLAHE> cv::createCLAHE(double clipLimit, cv::Size tileGridSize)
{
return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
}
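A minimal usage sketch of the CLAHE interface defined above (the file name is a placeholder; the clip limit and grid size mirror the defaults):

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat gray = cv::imread("input.png", CV_LOAD_IMAGE_GRAYSCALE); // placeholder path
    if (gray.empty())
        return 1;

    // createCLAHE returns the CLAHE_Impl defined above
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(40.0, cv::Size(8, 8));
    cv::Mat enhanced;
    clahe->apply(gray, enhanced);

    cv::imwrite("clahe.png", enhanced);
    return 0;
}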

View File

@@ -2755,7 +2755,7 @@ const int ITUR_BT_601_CGV = -385875;
const int ITUR_BT_601_CBV = -74448;
template<int bIdx, int uIdx>
struct YUV420sp2RGB888Invoker
struct YUV420sp2RGB888Invoker : ParallelLoopBody
{
Mat* dst;
const uchar* my1, *muv;
@@ -2764,10 +2764,10 @@ struct YUV420sp2RGB888Invoker
YUV420sp2RGB888Invoker(Mat* _dst, int _stride, const uchar* _y1, const uchar* _uv)
: dst(_dst), my1(_y1), muv(_uv), width(_dst->cols), stride(_stride) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
int rangeBegin = range.begin() * 2;
int rangeEnd = range.end() * 2;
int rangeBegin = range.start * 2;
int rangeEnd = range.end * 2;
//R = 1.164(Y - 16) + 1.596(V - 128)
//G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128)
@@ -2824,7 +2824,7 @@ struct YUV420sp2RGB888Invoker
};
template<int bIdx, int uIdx>
struct YUV420sp2RGBA8888Invoker
struct YUV420sp2RGBA8888Invoker : ParallelLoopBody
{
Mat* dst;
const uchar* my1, *muv;
@@ -2833,10 +2833,10 @@ struct YUV420sp2RGBA8888Invoker
YUV420sp2RGBA8888Invoker(Mat* _dst, int _stride, const uchar* _y1, const uchar* _uv)
: dst(_dst), my1(_y1), muv(_uv), width(_dst->cols), stride(_stride) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
int rangeBegin = range.begin() * 2;
int rangeEnd = range.end() * 2;
int rangeBegin = range.start * 2;
int rangeEnd = range.end * 2;
//R = 1.164(Y - 16) + 1.596(V - 128)
//G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128)
@@ -2897,7 +2897,7 @@ struct YUV420sp2RGBA8888Invoker
};
template<int bIdx>
struct YUV420p2RGB888Invoker
struct YUV420p2RGB888Invoker : ParallelLoopBody
{
Mat* dst;
const uchar* my1, *mu, *mv;
@@ -2907,19 +2907,19 @@ struct YUV420p2RGB888Invoker
YUV420p2RGB888Invoker(Mat* _dst, int _stride, const uchar* _y1, const uchar* _u, const uchar* _v, int _ustepIdx, int _vstepIdx)
: dst(_dst), my1(_y1), mu(_u), mv(_v), width(_dst->cols), stride(_stride), ustepIdx(_ustepIdx), vstepIdx(_vstepIdx) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
const int rangeBegin = range.begin() * 2;
const int rangeEnd = range.end() * 2;
const int rangeBegin = range.start * 2;
const int rangeEnd = range.end * 2;
size_t uvsteps[2] = {width/2, stride - width/2};
int usIdx = ustepIdx, vsIdx = vstepIdx;
const uchar* y1 = my1 + rangeBegin * stride;
const uchar* u1 = mu + (range.begin() / 2) * stride;
const uchar* v1 = mv + (range.begin() / 2) * stride;
const uchar* u1 = mu + (range.start / 2) * stride;
const uchar* v1 = mv + (range.start / 2) * stride;
if(range.begin() % 2 == 1)
if(range.start % 2 == 1)
{
u1 += uvsteps[(usIdx++) & 1];
v1 += uvsteps[(vsIdx++) & 1];
@@ -2965,7 +2965,7 @@ struct YUV420p2RGB888Invoker
};
template<int bIdx>
struct YUV420p2RGBA8888Invoker
struct YUV420p2RGBA8888Invoker : ParallelLoopBody
{
Mat* dst;
const uchar* my1, *mu, *mv;
@@ -2975,19 +2975,19 @@ struct YUV420p2RGBA8888Invoker
YUV420p2RGBA8888Invoker(Mat* _dst, int _stride, const uchar* _y1, const uchar* _u, const uchar* _v, int _ustepIdx, int _vstepIdx)
: dst(_dst), my1(_y1), mu(_u), mv(_v), width(_dst->cols), stride(_stride), ustepIdx(_ustepIdx), vstepIdx(_vstepIdx) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
int rangeBegin = range.begin() * 2;
int rangeEnd = range.end() * 2;
int rangeBegin = range.start * 2;
int rangeEnd = range.end * 2;
size_t uvsteps[2] = {width/2, stride - width/2};
int usIdx = ustepIdx, vsIdx = vstepIdx;
const uchar* y1 = my1 + rangeBegin * stride;
const uchar* u1 = mu + (range.begin() / 2) * stride;
const uchar* v1 = mv + (range.begin() / 2) * stride;
const uchar* u1 = mu + (range.start / 2) * stride;
const uchar* v1 = mv + (range.start / 2) * stride;
if(range.begin() % 2 == 1)
if(range.start % 2 == 1)
{
u1 += uvsteps[(usIdx++) & 1];
v1 += uvsteps[(vsIdx++) & 1];
@@ -3042,48 +3042,40 @@ template<int bIdx, int uIdx>
inline void cvtYUV420sp2RGB(Mat& _dst, int _stride, const uchar* _y1, const uchar* _uv)
{
YUV420sp2RGB888Invoker<bIdx, uIdx> converter(&_dst, _stride, _y1, _uv);
#ifdef HAVE_TBB
if (_dst.total() >= MIN_SIZE_FOR_PARALLEL_YUV420_CONVERSION)
parallel_for(BlockedRange(0, _dst.rows/2), converter);
parallel_for_(Range(0, _dst.rows/2), converter);
else
#endif
converter(BlockedRange(0, _dst.rows/2));
converter(Range(0, _dst.rows/2));
}
template<int bIdx, int uIdx>
inline void cvtYUV420sp2RGBA(Mat& _dst, int _stride, const uchar* _y1, const uchar* _uv)
{
YUV420sp2RGBA8888Invoker<bIdx, uIdx> converter(&_dst, _stride, _y1, _uv);
#ifdef HAVE_TBB
if (_dst.total() >= MIN_SIZE_FOR_PARALLEL_YUV420_CONVERSION)
parallel_for(BlockedRange(0, _dst.rows/2), converter);
parallel_for_(Range(0, _dst.rows/2), converter);
else
#endif
converter(BlockedRange(0, _dst.rows/2));
converter(Range(0, _dst.rows/2));
}
template<int bIdx>
inline void cvtYUV420p2RGB(Mat& _dst, int _stride, const uchar* _y1, const uchar* _u, const uchar* _v, int ustepIdx, int vstepIdx)
{
YUV420p2RGB888Invoker<bIdx> converter(&_dst, _stride, _y1, _u, _v, ustepIdx, vstepIdx);
#ifdef HAVE_TBB
if (_dst.total() >= MIN_SIZE_FOR_PARALLEL_YUV420_CONVERSION)
parallel_for(BlockedRange(0, _dst.rows/2), converter);
parallel_for_(Range(0, _dst.rows/2), converter);
else
#endif
converter(BlockedRange(0, _dst.rows/2));
converter(Range(0, _dst.rows/2));
}
template<int bIdx>
inline void cvtYUV420p2RGBA(Mat& _dst, int _stride, const uchar* _y1, const uchar* _u, const uchar* _v, int ustepIdx, int vstepIdx)
{
YUV420p2RGBA8888Invoker<bIdx> converter(&_dst, _stride, _y1, _u, _v, ustepIdx, vstepIdx);
#ifdef HAVE_TBB
if (_dst.total() >= MIN_SIZE_FOR_PARALLEL_YUV420_CONVERSION)
parallel_for(BlockedRange(0, _dst.rows/2), converter);
parallel_for_(Range(0, _dst.rows/2), converter);
else
#endif
converter(BlockedRange(0, _dst.rows/2));
converter(Range(0, _dst.rows/2));
}
///////////////////////////////////// RGB -> YUV420p /////////////////////////////////////
@@ -3167,7 +3159,7 @@ static void cvtRGBtoYUV420p(const Mat& src, Mat& dst)
///////////////////////////////////// YUV422 -> RGB /////////////////////////////////////
template<int bIdx, int uIdx, int yIdx>
struct YUV422toRGB888Invoker
struct YUV422toRGB888Invoker : ParallelLoopBody
{
Mat* dst;
const uchar* src;
@@ -3176,10 +3168,10 @@ struct YUV422toRGB888Invoker
YUV422toRGB888Invoker(Mat* _dst, int _stride, const uchar* _yuv)
: dst(_dst), src(_yuv), width(_dst->cols), stride(_stride) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
int rangeBegin = range.begin();
int rangeEnd = range.end();
int rangeBegin = range.start;
int rangeEnd = range.end;
const int uidx = 1 - yIdx + uIdx * 2;
const int vidx = (2 + uidx) % 4;
@@ -3213,7 +3205,7 @@ struct YUV422toRGB888Invoker
};
template<int bIdx, int uIdx, int yIdx>
struct YUV422toRGBA8888Invoker
struct YUV422toRGBA8888Invoker : ParallelLoopBody
{
Mat* dst;
const uchar* src;
@@ -3222,10 +3214,10 @@ struct YUV422toRGBA8888Invoker
YUV422toRGBA8888Invoker(Mat* _dst, int _stride, const uchar* _yuv)
: dst(_dst), src(_yuv), width(_dst->cols), stride(_stride) {}
void operator()(const BlockedRange& range) const
void operator()(const Range& range) const
{
int rangeBegin = range.begin();
int rangeEnd = range.end();
int rangeBegin = range.start;
int rangeEnd = range.end;
const int uidx = 1 - yIdx + uIdx * 2;
const int vidx = (2 + uidx) % 4;
@@ -3266,24 +3258,20 @@ template<int bIdx, int uIdx, int yIdx>
inline void cvtYUV422toRGB(Mat& _dst, int _stride, const uchar* _yuv)
{
YUV422toRGB888Invoker<bIdx, uIdx, yIdx> converter(&_dst, _stride, _yuv);
#ifdef HAVE_TBB
if (_dst.total() >= MIN_SIZE_FOR_PARALLEL_YUV422_CONVERSION)
parallel_for(BlockedRange(0, _dst.rows), converter);
parallel_for_(Range(0, _dst.rows), converter);
else
#endif
converter(BlockedRange(0, _dst.rows));
converter(Range(0, _dst.rows));
}
template<int bIdx, int uIdx, int yIdx>
inline void cvtYUV422toRGBA(Mat& _dst, int _stride, const uchar* _yuv)
{
YUV422toRGBA8888Invoker<bIdx, uIdx, yIdx> converter(&_dst, _stride, _yuv);
#ifdef HAVE_TBB
if (_dst.total() >= MIN_SIZE_FOR_PARALLEL_YUV422_CONVERSION)
parallel_for(BlockedRange(0, _dst.rows), converter);
parallel_for_(Range(0, _dst.rows), converter);
else
#endif
converter(BlockedRange(0, _dst.rows));
converter(Range(0, _dst.rows));
}
/////////////////////////// RGBA <-> mRGBA (alpha premultiplied) //////////////
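The pattern behind these conversions, shown in isolation: a body derived from cv::ParallelLoopBody processes cv::Range slices and is dispatched through cv::parallel_for_. This is an illustrative example, not code from the module:

#include <opencv2/core/core.hpp>

// Doubles every row of 'm' in parallel; stands in for the converter bodies above
class ScaleRows : public cv::ParallelLoopBody
{
public:
    explicit ScaleRows(cv::Mat& m) : m_(m) {}

    void operator()(const cv::Range& range) const
    {
        for (int y = range.start; y < range.end; ++y)    // Range replaces BlockedRange
        {
            uchar* row = m_.ptr<uchar>(y);
            for (int x = 0; x < m_.cols * m_.channels(); ++x)
                row[x] = cv::saturate_cast<uchar>(row[x] * 2);
        }
    }

private:
    cv::Mat& m_;
};

void scaleImage(cv::Mat& img)
{
    ScaleRows body(img);
    cv::parallel_for_(cv::Range(0, img.rows), body);      // replaces cv::parallel_for
}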

View File

@@ -443,7 +443,7 @@ icvGetDistanceTransformMask( int maskType, float *metrics )
namespace cv
{
struct DTColumnInvoker
struct DTColumnInvoker : ParallelLoopBody
{
DTColumnInvoker( const CvMat* _src, CvMat* _dst, const int* _sat_tab, const float* _sqr_tab)
{
@@ -453,9 +453,9 @@ struct DTColumnInvoker
sqr_tab = _sqr_tab;
}
void operator()( const BlockedRange& range ) const
void operator()( const Range& range ) const
{
int i, i1 = range.begin(), i2 = range.end();
int i, i1 = range.start, i2 = range.end;
int m = src->rows;
size_t sstep = src->step, dstep = dst->step/sizeof(float);
AutoBuffer<int> _d(m);
@@ -490,7 +490,7 @@ struct DTColumnInvoker
};
struct DTRowInvoker
struct DTRowInvoker : ParallelLoopBody
{
DTRowInvoker( CvMat* _dst, const float* _sqr_tab, const float* _inv_tab )
{
@@ -499,10 +499,10 @@ struct DTRowInvoker
inv_tab = _inv_tab;
}
void operator()( const BlockedRange& range ) const
void operator()( const Range& range ) const
{
const float inf = 1e15f;
int i, i1 = range.begin(), i2 = range.end();
int i, i1 = range.start, i2 = range.end;
int n = dst->cols;
AutoBuffer<uchar> _buf((n+2)*2*sizeof(float) + (n+2)*sizeof(int));
float* f = (float*)(uchar*)_buf;
@@ -586,7 +586,7 @@ icvTrueDistTrans( const CvMat* src, CvMat* dst )
for( ; i <= m*3; i++ )
sat_tab[i] = i - shift;
cv::parallel_for(cv::BlockedRange(0, n), cv::DTColumnInvoker(src, dst, sat_tab, sqr_tab));
cv::parallel_for_(cv::Range(0, n), cv::DTColumnInvoker(src, dst, sat_tab, sqr_tab));
// stage 2: compute modified distance transform for each row
float* inv_tab = sqr_tab + n;
@@ -598,7 +598,7 @@ icvTrueDistTrans( const CvMat* src, CvMat* dst )
sqr_tab[i] = (float)(i*i);
}
cv::parallel_for(cv::BlockedRange(0, m), cv::DTRowInvoker(dst, sqr_tab, inv_tab));
cv::parallel_for_(cv::Range(0, m), cv::DTRowInvoker(dst, sqr_tab, inv_tab));
}

View File

@@ -2986,29 +2986,23 @@ cvCalcProbDensity( const CvHistogram* hist, const CvHistogram* hist_mask,
}
}
class EqualizeHistCalcHist_Invoker
class EqualizeHistCalcHist_Invoker : public cv::ParallelLoopBody
{
public:
enum {HIST_SZ = 256};
#ifdef HAVE_TBB
typedef tbb::mutex* MutextPtr;
#else
typedef void* MutextPtr;
#endif
EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, MutextPtr histogramLock)
EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, cv::Mutex* histogramLock)
: src_(src), globalHistogram_(histogram), histogramLock_(histogramLock)
{ }
void operator()( const cv::BlockedRange& rowRange ) const
void operator()( const cv::Range& rowRange ) const
{
int localHistogram[HIST_SZ] = {0, };
const size_t sstep = src_.step;
int width = src_.cols;
int height = rowRange.end() - rowRange.begin();
int height = rowRange.end - rowRange.start;
if (src_.isContinuous())
{
@@ -3016,7 +3010,7 @@ public:
height = 1;
}
for (const uchar* ptr = src_.ptr<uchar>(rowRange.begin()); height--; ptr += sstep)
for (const uchar* ptr = src_.ptr<uchar>(rowRange.start); height--; ptr += sstep)
{
int x = 0;
for (; x <= width - 4; x += 4)
@@ -3031,9 +3025,7 @@ public:
localHistogram[ptr[x]]++;
}
#ifdef HAVE_TBB
tbb::mutex::scoped_lock lock(*histogramLock_);
#endif
cv::AutoLock lock(*histogramLock_);
for( int i = 0; i < HIST_SZ; i++ )
globalHistogram_[i] += localHistogram[i];
@@ -3041,12 +3033,7 @@ public:
static bool isWorthParallel( const cv::Mat& src )
{
#ifdef HAVE_TBB
return ( src.total() >= 640*480 );
#else
(void)src;
return false;
#endif
}
private:
@@ -3054,10 +3041,10 @@ private:
cv::Mat& src_;
int* globalHistogram_;
MutextPtr histogramLock_;
cv::Mutex* histogramLock_;
};
class EqualizeHistLut_Invoker
class EqualizeHistLut_Invoker : public cv::ParallelLoopBody
{
public:
EqualizeHistLut_Invoker( cv::Mat& src, cv::Mat& dst, int* lut )
@@ -3066,13 +3053,13 @@ public:
lut_(lut)
{ }
void operator()( const cv::BlockedRange& rowRange ) const
void operator()( const cv::Range& rowRange ) const
{
const size_t sstep = src_.step;
const size_t dstep = dst_.step;
int width = src_.cols;
int height = rowRange.end() - rowRange.begin();
int height = rowRange.end - rowRange.start;
int* lut = lut_;
if (src_.isContinuous() && dst_.isContinuous())
@@ -3081,8 +3068,8 @@ public:
height = 1;
}
const uchar* sptr = src_.ptr<uchar>(rowRange.begin());
uchar* dptr = dst_.ptr<uchar>(rowRange.begin());
const uchar* sptr = src_.ptr<uchar>(rowRange.start);
uchar* dptr = dst_.ptr<uchar>(rowRange.start);
for (; height--; sptr += sstep, dptr += dstep)
{
@@ -3111,12 +3098,7 @@ public:
static bool isWorthParallel( const cv::Mat& src )
{
#ifdef HAVE_TBB
return ( src.total() >= 640*480 );
#else
(void)src;
return false;
#endif
}
private:
@@ -3143,23 +3125,18 @@ void cv::equalizeHist( InputArray _src, OutputArray _dst )
if(src.empty())
return;
#ifdef HAVE_TBB
tbb::mutex histogramLockInstance;
EqualizeHistCalcHist_Invoker::MutextPtr histogramLock = &histogramLockInstance;
#else
EqualizeHistCalcHist_Invoker::MutextPtr histogramLock = 0;
#endif
Mutex histogramLockInstance;
const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ;
int hist[hist_sz] = {0,};
int lut[hist_sz];
EqualizeHistCalcHist_Invoker calcBody(src, hist, histogramLock);
EqualizeHistCalcHist_Invoker calcBody(src, hist, &histogramLockInstance);
EqualizeHistLut_Invoker lutBody(src, dst, lut);
cv::BlockedRange heightRange(0, src.rows);
cv::Range heightRange(0, src.rows);
if(EqualizeHistCalcHist_Invoker::isWorthParallel(src))
parallel_for(heightRange, calcBody);
parallel_for_(heightRange, calcBody);
else
calcBody(heightRange);
@@ -3183,303 +3160,11 @@ void cv::equalizeHist( InputArray _src, OutputArray _dst )
}
if(EqualizeHistLut_Invoker::isWorthParallel(src))
parallel_for(heightRange, lutBody);
parallel_for_(heightRange, lutBody);
else
lutBody(heightRange);
}
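The locking idiom adopted above, reduced to its essentials: each worker accumulates into a thread-local buffer and merges it into shared state under a cv::AutoLock. This is a sketch only; the class and variable names are illustrative:

#include <opencv2/core/core.hpp>

class HistogramMerger : public cv::ParallelLoopBody
{
public:
    HistogramMerger(const cv::Mat& src, int* globalHist, cv::Mutex* lock)
        : src_(src), globalHist_(globalHist), lock_(lock) {}

    void operator()(const cv::Range& rows) const
    {
        int local[256] = {0};
        for (int y = rows.start; y < rows.end; ++y)
        {
            const uchar* p = src_.ptr<uchar>(y);
            for (int x = 0; x < src_.cols; ++x)
                local[p[x]]++;
        }
        cv::AutoLock guard(*lock_);              // scoped lock, released on destruction
        for (int i = 0; i < 256; ++i)
            globalHist_[i] += local[i];
    }

private:
    const cv::Mat& src_;
    int* globalHist_;
    cv::Mutex* lock_;
};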
// ----------------------------------------------------------------------
// CLAHE
namespace
{
class CLAHE_CalcLut_Body : public cv::ParallelLoopBody
{
public:
CLAHE_CalcLut_Body(const cv::Mat& src, cv::Mat& lut, cv::Size tileSize, int tilesX, int tilesY, int clipLimit, float lutScale) :
src_(src), lut_(lut), tileSize_(tileSize), tilesX_(tilesX), tilesY_(tilesY), clipLimit_(clipLimit), lutScale_(lutScale)
{
}
void operator ()(const cv::Range& range) const;
private:
cv::Mat src_;
mutable cv::Mat lut_;
cv::Size tileSize_;
int tilesX_;
int tilesY_;
int clipLimit_;
float lutScale_;
};
void CLAHE_CalcLut_Body::operator ()(const cv::Range& range) const
{
const int histSize = 256;
uchar* tileLut = lut_.ptr(range.start);
const size_t lut_step = lut_.step;
for (int k = range.start; k < range.end; ++k, tileLut += lut_step)
{
const int ty = k / tilesX_;
const int tx = k % tilesX_;
// retrieve tile submatrix
cv::Rect tileROI;
tileROI.x = tx * tileSize_.width;
tileROI.y = ty * tileSize_.height;
tileROI.width = tileSize_.width;
tileROI.height = tileSize_.height;
const cv::Mat tile = src_(tileROI);
// calc histogram
int tileHist[histSize] = {0, };
int height = tileROI.height;
const size_t sstep = tile.step;
for (const uchar* ptr = tile.ptr<uchar>(0); height--; ptr += sstep)
{
int x = 0;
for (; x <= tileROI.width - 4; x += 4)
{
int t0 = ptr[x], t1 = ptr[x+1];
tileHist[t0]++; tileHist[t1]++;
t0 = ptr[x+2]; t1 = ptr[x+3];
tileHist[t0]++; tileHist[t1]++;
}
for (; x < tileROI.width; ++x)
tileHist[ptr[x]]++;
}
// clip histogram
if (clipLimit_ > 0)
{
// how many pixels were clipped
int clipped = 0;
for (int i = 0; i < histSize; ++i)
{
if (tileHist[i] > clipLimit_)
{
clipped += tileHist[i] - clipLimit_;
tileHist[i] = clipLimit_;
}
}
// redistribute clipped pixels
int redistBatch = clipped / histSize;
int residual = clipped - redistBatch * histSize;
for (int i = 0; i < histSize; ++i)
tileHist[i] += redistBatch;
for (int i = 0; i < residual; ++i)
tileHist[i]++;
}
// calc Lut
int sum = 0;
for (int i = 0; i < histSize; ++i)
{
sum += tileHist[i];
tileLut[i] = cv::saturate_cast<uchar>(sum * lutScale_);
}
}
}
class CLAHE_Interpolation_Body : public cv::ParallelLoopBody
{
public:
CLAHE_Interpolation_Body(const cv::Mat& src, cv::Mat& dst, const cv::Mat& lut, cv::Size tileSize, int tilesX, int tilesY) :
src_(src), dst_(dst), lut_(lut), tileSize_(tileSize), tilesX_(tilesX), tilesY_(tilesY)
{
}
void operator ()(const cv::Range& range) const;
private:
cv::Mat src_;
mutable cv::Mat dst_;
cv::Mat lut_;
cv::Size tileSize_;
int tilesX_;
int tilesY_;
};
void CLAHE_Interpolation_Body::operator ()(const cv::Range& range) const
{
const size_t lut_step = lut_.step;
for (int y = range.start; y < range.end; ++y)
{
const uchar* srcRow = src_.ptr<uchar>(y);
uchar* dstRow = dst_.ptr<uchar>(y);
const float tyf = (static_cast<float>(y) / tileSize_.height) - 0.5f;
int ty1 = cvFloor(tyf);
int ty2 = ty1 + 1;
const float ya = tyf - ty1;
ty1 = std::max(ty1, 0);
ty2 = std::min(ty2, tilesY_ - 1);
const uchar* lutPlane1 = lut_.ptr(ty1 * tilesX_);
const uchar* lutPlane2 = lut_.ptr(ty2 * tilesX_);
for (int x = 0; x < src_.cols; ++x)
{
const float txf = (static_cast<float>(x) / tileSize_.width) - 0.5f;
int tx1 = cvFloor(txf);
int tx2 = tx1 + 1;
const float xa = txf - tx1;
tx1 = std::max(tx1, 0);
tx2 = std::min(tx2, tilesX_ - 1);
const int srcVal = srcRow[x];
const size_t ind1 = tx1 * lut_step + srcVal;
const size_t ind2 = tx2 * lut_step + srcVal;
float res = 0;
res += lutPlane1[ind1] * ((1.0f - xa) * (1.0f - ya));
res += lutPlane1[ind2] * ((xa) * (1.0f - ya));
res += lutPlane2[ind1] * ((1.0f - xa) * (ya));
res += lutPlane2[ind2] * ((xa) * (ya));
dstRow[x] = cv::saturate_cast<uchar>(res);
}
}
}
class CLAHE_Impl : public cv::CLAHE
{
public:
CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
cv::AlgorithmInfo* info() const;
void apply(cv::InputArray src, cv::OutputArray dst);
void setClipLimit(double clipLimit);
double getClipLimit() const;
void setTilesGridSize(cv::Size tileGridSize);
cv::Size getTilesGridSize() const;
void collectGarbage();
private:
double clipLimit_;
int tilesX_;
int tilesY_;
cv::Mat srcExt_;
cv::Mat lut_;
};
CLAHE_Impl::CLAHE_Impl(double clipLimit, int tilesX, int tilesY) :
clipLimit_(clipLimit), tilesX_(tilesX), tilesY_(tilesY)
{
}
CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE",
obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
obj.info()->addParam(obj, "tilesX", obj.tilesX_);
obj.info()->addParam(obj, "tilesY", obj.tilesY_))
void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
{
cv::Mat src = _src.getMat();
CV_Assert( src.type() == CV_8UC1 );
_dst.create( src.size(), src.type() );
cv::Mat dst = _dst.getMat();
const int histSize = 256;
lut_.create(tilesX_ * tilesY_, histSize, CV_8UC1);
cv::Size tileSize;
cv::Mat srcForLut;
if (src.cols % tilesX_ == 0 && src.rows % tilesY_ == 0)
{
tileSize = cv::Size(src.cols / tilesX_, src.rows / tilesY_);
srcForLut = src;
}
else
{
cv::copyMakeBorder(src, srcExt_, 0, tilesY_ - (src.rows % tilesY_), 0, tilesX_ - (src.cols % tilesX_), cv::BORDER_REFLECT_101);
tileSize = cv::Size(srcExt_.cols / tilesX_, srcExt_.rows / tilesY_);
srcForLut = srcExt_;
}
const int tileSizeTotal = tileSize.area();
const float lutScale = static_cast<float>(histSize - 1) / tileSizeTotal;
int clipLimit = 0;
if (clipLimit_ > 0.0)
{
clipLimit = static_cast<int>(clipLimit_ * tileSizeTotal / histSize);
clipLimit = std::max(clipLimit, 1);
}
CLAHE_CalcLut_Body calcLutBody(srcForLut, lut_, tileSize, tilesX_, tilesY_, clipLimit, lutScale);
cv::parallel_for_(cv::Range(0, tilesX_ * tilesY_), calcLutBody);
CLAHE_Interpolation_Body interpolationBody(src, dst, lut_, tileSize, tilesX_, tilesY_);
cv::parallel_for_(cv::Range(0, src.rows), interpolationBody);
}
void CLAHE_Impl::setClipLimit(double clipLimit)
{
clipLimit_ = clipLimit;
}
double CLAHE_Impl::getClipLimit() const
{
return clipLimit_;
}
void CLAHE_Impl::setTilesGridSize(cv::Size tileGridSize)
{
tilesX_ = tileGridSize.width;
tilesY_ = tileGridSize.height;
}
cv::Size CLAHE_Impl::getTilesGridSize() const
{
return cv::Size(tilesX_, tilesY_);
}
void CLAHE_Impl::collectGarbage()
{
srcExt_.release();
lut_.release();
}
}
cv::Ptr<cv::CLAHE> cv::createCLAHE(double clipLimit, cv::Size tileGridSize)
{
return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
}
// ----------------------------------------------------------------------
/* Implementation of RTTI and Generic Functions for CvHistogram */

View File

@@ -1081,7 +1081,7 @@ cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor)
namespace cv
{
class MorphologyRunner
class MorphologyRunner : public ParallelLoopBody
{
public:
MorphologyRunner(Mat _src, Mat _dst, int _nStripes, int _iterations,
@@ -1102,14 +1102,14 @@ public:
columnBorderType = _columnBorderType;
}
void operator () ( const BlockedRange& range ) const
void operator () ( const Range& range ) const
{
int row0 = min(cvRound(range.begin() * src.rows / nStripes), src.rows);
int row1 = min(cvRound(range.end() * src.rows / nStripes), src.rows);
int row0 = min(cvRound(range.start * src.rows / nStripes), src.rows);
int row1 = min(cvRound(range.end * src.rows / nStripes), src.rows);
/*if(0)
printf("Size = (%d, %d), range[%d,%d), row0 = %d, row1 = %d\n",
src.rows, src.cols, range.begin(), range.end(), row0, row1);*/
src.rows, src.cols, range.start, range.end, row0, row1);*/
Mat srcStripe = src.rowRange(row0, row1);
Mat dstStripe = dst.rowRange(row0, row1);
@@ -1173,15 +1173,15 @@ static void morphOp( int op, InputArray _src, OutputArray _dst,
}
int nStripes = 1;
#if defined HAVE_TBB && defined HAVE_TEGRA_OPTIMIZATION
#if defined HAVE_TEGRA_OPTIMIZATION
if (src.data != dst.data && iterations == 1 && //NOTE: threads are not used for inplace processing
(borderType & BORDER_ISOLATED) == 0 && //TODO: check border types
src.rows >= 64 ) //NOTE: just heuristics
nStripes = 4;
#endif
parallel_for(BlockedRange(0, nStripes),
MorphologyRunner(src, dst, nStripes, iterations, op, kernel, anchor, borderType, borderType, borderValue));
parallel_for_(Range(0, nStripes),
MorphologyRunner(src, dst, nStripes, iterations, op, kernel, anchor, borderType, borderType, borderValue));
//Ptr<FilterEngine> f = createMorphologyFilter(op, src.type(),
// kernel, anchor, borderType, borderType, borderValue );

View File

@@ -585,4 +585,18 @@ public class Calib3dTest extends OpenCVTestCase {
public void testValidateDisparityMatMatIntIntInt() {
fail("Not yet implemented");
}
public void testComputeCorrespondEpilines()
{
Mat fundamental = new Mat(3, 3, CvType.CV_64F);
fundamental.put(0, 0, 0, -0.577, 0.288, 0.577, 0, 0.288, -0.288, -0.288, 0);
MatOfPoint2f left = new MatOfPoint2f();
left.alloc(1);
left.put(0, 0, 2, 3); //add(new Point(x, y));
Mat lines = new Mat();
Mat truth = new Mat(1, 1, CvType.CV_32FC3);
truth.put(0, 0, -0.70735186, 0.70686162, -0.70588124);
Calib3d.computeCorrespondEpilines(left, 1, fundamental, lines);
assertMatEqual(truth, lines, EPS);
}
}
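The same call from C++, for comparison with the new Java test above; the fundamental matrix and the point are the test's own values:

#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

int main()
{
    // Fundamental matrix used by testComputeCorrespondEpilines()
    cv::Mat F = (cv::Mat_<double>(3, 3) <<
                  0,     -0.577,  0.288,
                  0.577,  0,      0.288,
                 -0.288, -0.288,  0);

    std::vector<cv::Point2f> left(1, cv::Point2f(2.f, 3.f));
    std::vector<cv::Vec3f> lines;

    // whichImage == 1: points lie in the first image, lines are epilines in the second
    cv::computeCorrespondEpilines(left, 1, F, lines);
    // lines[0] is approximately (-0.707, 0.707, -0.706), matching the Java truth value
    return 0;
}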

View File

@@ -81,6 +81,14 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
styledAttrs.recycle();
}
/**
* Sets the camera index
* @param cameraIndex new camera index
*/
public void setCameraIndex(int cameraIndex) {
this.mCameraIndex = cameraIndex;
}
public interface CvCameraViewListener {
/**
* This method is invoked when camera preview has started. After this method is invoked

View File

@@ -14,7 +14,7 @@ public class MatOfByte extends Mat {
protected MatOfByte(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfByte extends Mat {
public MatOfByte(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfDouble extends Mat {
protected MatOfDouble(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfDouble extends Mat {
public MatOfDouble(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfFloat extends Mat {
protected MatOfFloat(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfFloat extends Mat {
public MatOfFloat(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfFloat4 extends Mat {
protected MatOfFloat4(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfFloat4 extends Mat {
public MatOfFloat4(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfFloat6 extends Mat {
protected MatOfFloat6(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfFloat6 extends Mat {
public MatOfFloat6(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -15,7 +15,7 @@ public class MatOfInt extends Mat {
protected MatOfInt(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -26,7 +26,7 @@ public class MatOfInt extends Mat {
public MatOfInt(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -15,7 +15,7 @@ public class MatOfInt4 extends Mat {
protected MatOfInt4(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -26,7 +26,7 @@ public class MatOfInt4 extends Mat {
public MatOfInt4(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -16,7 +16,7 @@ public class MatOfKeyPoint extends Mat {
protected MatOfKeyPoint(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -27,7 +27,7 @@ public class MatOfKeyPoint extends Mat {
public MatOfKeyPoint(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfPoint extends Mat {
protected MatOfPoint(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfPoint extends Mat {
public MatOfPoint(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfPoint2f extends Mat {
protected MatOfPoint2f(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfPoint2f extends Mat {
public MatOfPoint2f(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfPoint3 extends Mat {
protected MatOfPoint3(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfPoint3 extends Mat {
public MatOfPoint3(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -14,7 +14,7 @@ public class MatOfPoint3f extends Mat {
protected MatOfPoint3f(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -25,7 +25,7 @@ public class MatOfPoint3f extends Mat {
public MatOfPoint3f(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -15,7 +15,7 @@ public class MatOfRect extends Mat {
protected MatOfRect(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}
@@ -26,7 +26,7 @@ public class MatOfRect extends Mat {
public MatOfRect(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
if( !empty() && checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incomatible Mat");
//FIXME: do we need release() here?
}

View File

@@ -40,10 +40,6 @@
#include "precomp.hpp"
#ifdef HAVE_TBB
#include <tbb/tbb.h>
#endif
CvANN_MLP_TrainParams::CvANN_MLP_TrainParams()
{
term_crit = cvTermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.01 );
@@ -255,7 +251,7 @@ void CvANN_MLP::create( const CvMat* _layer_sizes, int _activ_func,
buf_sz += (l_dst[0] + l_dst[l_count-1]*2)*2;
CV_CALL( wbuf = cvCreateMat( 1, buf_sz, CV_64F ));
CV_CALL( weights = (double**)cvAlloc( (l_count+1)*sizeof(weights[0]) ));
CV_CALL( weights = (double**)cvAlloc( (l_count+2)*sizeof(weights[0]) ));
weights[0] = wbuf->data.db;
weights[1] = weights[0] + l_dst[0]*2;
@@ -1022,7 +1018,7 @@ int CvANN_MLP::train_backprop( CvVectors x0, CvVectors u, const double* sw )
return iter;
}
struct rprop_loop {
struct rprop_loop : cv::ParallelLoopBody {
rprop_loop(const CvANN_MLP* _point, double**& _weights, int& _count, int& _ivcount, CvVectors* _x0,
int& _l_count, CvMat*& _layer_sizes, int& _ovcount, int& _max_count,
CvVectors* _u, const double*& _sw, double& _inv_count, CvMat*& _dEdw, int& _dcount0, double* _E, int _buf_sz)
@@ -1063,7 +1059,7 @@ struct rprop_loop {
int buf_sz;
void operator()( const cv::BlockedRange& range ) const
void operator()( const cv::Range& range ) const
{
double* buf_ptr;
double** x = 0;
@@ -1084,7 +1080,7 @@ struct rprop_loop {
buf_ptr += (df[i] - x[i])*2;
}
for(int si = range.begin(); si < range.end(); si++ )
for(int si = range.start; si < range.end; si++ )
{
if (si % dcount0 != 0) continue;
int n1, n2, k;
@@ -1170,36 +1166,33 @@ struct rprop_loop {
}
// backward pass, update dEdw
#ifdef HAVE_TBB
static tbb::spin_mutex mutex;
tbb::spin_mutex::scoped_lock lock;
#endif
static cv::Mutex mutex;
for(int i = l_count-1; i > 0; i-- )
{
n1 = layer_sizes->data.i[i-1]; n2 = layer_sizes->data.i[i];
cvInitMatHeader( &_df, dcount, n2, CV_64F, df[i] );
cvMul( grad1, &_df, grad1 );
#ifdef HAVE_TBB
lock.acquire(mutex);
#endif
cvInitMatHeader( &_dEdw, n1, n2, CV_64F, dEdw->data.db+(weights[i]-weights[0]) );
cvInitMatHeader( x1, dcount, n1, CV_64F, x[i-1] );
cvGEMM( x1, grad1, 1, &_dEdw, 1, &_dEdw, CV_GEMM_A_T );
// update bias part of dEdw
for( k = 0; k < dcount; k++ )
{
double* dst = _dEdw.data.db + n1*n2;
const double* src = grad1->data.db + k*n2;
for(int j = 0; j < n2; j++ )
dst[j] += src[j];
{
cv::AutoLock lock(mutex);
cvInitMatHeader( &_dEdw, n1, n2, CV_64F, dEdw->data.db+(weights[i]-weights[0]) );
cvInitMatHeader( x1, dcount, n1, CV_64F, x[i-1] );
cvGEMM( x1, grad1, 1, &_dEdw, 1, &_dEdw, CV_GEMM_A_T );
// update bias part of dEdw
for( k = 0; k < dcount; k++ )
{
double* dst = _dEdw.data.db + n1*n2;
const double* src = grad1->data.db + k*n2;
for(int j = 0; j < n2; j++ )
dst[j] += src[j];
}
if (i > 1)
cvInitMatHeader( &_w, n1, n2, CV_64F, weights[i] );
}
if (i > 1)
cvInitMatHeader( &_w, n1, n2, CV_64F, weights[i] );
#ifdef HAVE_TBB
lock.release();
#endif
cvInitMatHeader( grad2, dcount, n1, CV_64F, grad2->data.db );
if( i > 1 )
cvGEMM( grad1, &_w, 1, 0, 0, grad2, CV_GEMM_B_T );
@@ -1297,7 +1290,7 @@ int CvANN_MLP::train_rprop( CvVectors x0, CvVectors u, const double* sw )
double E = 0;
// first, iterate through all the samples and compute dEdw
cv::parallel_for(cv::BlockedRange(0, count),
cv::parallel_for_(cv::Range(0, count),
rprop_loop(this, weights, count, ivcount, &x0, l_count, layer_sizes,
ovcount, max_count, &u, sw, inv_count, dEdw, dcount0, &E, buf_sz)
);
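This hunk also drops the raw tbb::spin_mutex with explicit acquire()/release() in favour of a cv::Mutex guarded by the RAII helper cv::AutoLock, which compiles and behaves the same whether or not TBB is available. A reduced, hypothetical sketch of that locking pattern inside a ParallelLoopBody (AccumulateBody and its members are illustrative, not code from this module):

#include <vector>
#include "opencv2/core/core.hpp"

struct AccumulateBody : cv::ParallelLoopBody
{
    AccumulateBody(const std::vector<double>& values, double* sum)
        : values_(values), sum_(sum) {}

    void operator()(const cv::Range& range) const
    {
        double local = 0;                       // accumulate privately per chunk
        for (int i = range.start; i < range.end; ++i)
            local += values_[i];

        cv::AutoLock lock(mutex_);              // scoped lock; released automatically
        *sum_ += local;
    }

    const std::vector<double>& values_;
    double* sum_;
    static cv::Mutex mutex_;
};

cv::Mutex AccumulateBody::mutex_;

// usage:
//   double sum = 0;
//   cv::parallel_for_(cv::Range(0, (int)values.size()), AccumulateBody(values, &sum));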

View File

@@ -900,7 +900,7 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
}
class Tree_predictor
class Tree_predictor : public cv::ParallelLoopBody
{
private:
pCvSeq* weak;
@@ -910,9 +910,7 @@ private:
const CvMat* missing;
const float shrinkage;
#ifdef HAVE_TBB
static tbb::spin_mutex SumMutex;
#endif
static cv::Mutex SumMutex;
public:
@@ -931,14 +929,11 @@ public:
Tree_predictor& operator=( const Tree_predictor& )
{ return *this; }
virtual void operator()(const cv::BlockedRange& range) const
virtual void operator()(const cv::Range& range) const
{
#ifdef HAVE_TBB
tbb::spin_mutex::scoped_lock lock;
#endif
CvSeqReader reader;
int begin = range.begin();
int end = range.end();
int begin = range.start;
int end = range.end;
int weak_count = end - begin;
CvDTree* tree;
@@ -956,13 +951,11 @@ public:
tmp_sum += shrinkage*(float)(tree->predict(sample, missing)->value);
}
}
#ifdef HAVE_TBB
lock.acquire(SumMutex);
sum[i] += tmp_sum;
lock.release();
#else
sum[i] += tmp_sum;
#endif
{
cv::AutoLock lock(SumMutex);
sum[i] += tmp_sum;
}
}
} // Tree_predictor::operator()
@@ -970,11 +963,7 @@ public:
}; // class Tree_predictor
#ifdef HAVE_TBB
tbb::spin_mutex Tree_predictor::SumMutex;
#endif
cv::Mutex Tree_predictor::SumMutex;
float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing,
@@ -992,12 +981,7 @@ float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing,
Tree_predictor predictor = Tree_predictor(weak_seq, class_count,
params.shrinkage, _sample, _missing, sum);
//#ifdef HAVE_TBB
// tbb::parallel_for(cv::BlockedRange(begin, end), predictor,
// tbb::auto_partitioner());
//#else
cv::parallel_for(cv::BlockedRange(begin, end), predictor);
//#endif
cv::parallel_for_(cv::Range(begin, end), predictor);
for (int i=0; i<class_count; ++i)
sum[i] = sum[i] /** params.shrinkage*/ + base_value;
@@ -1228,7 +1212,7 @@ void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node )
//===========================================================================
class Sample_predictor
class Sample_predictor : public cv::ParallelLoopBody
{
private:
const CvGBTrees* gbt;
@@ -1258,10 +1242,10 @@ public:
{}
virtual void operator()(const cv::BlockedRange& range) const
virtual void operator()(const cv::Range& range) const
{
int begin = range.begin();
int end = range.end();
int begin = range.start;
int end = range.end;
CvMat x;
CvMat miss;
@@ -1317,11 +1301,7 @@ CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp )
Sample_predictor predictor = Sample_predictor(this, pred_resp, _data->get_values(),
_data->get_missing(), _sample_idx);
//#ifdef HAVE_TBB
// tbb::parallel_for(cv::BlockedRange(0,n), predictor, tbb::auto_partitioner());
//#else
cv::parallel_for(cv::BlockedRange(0,n), predictor);
//#endif
cv::parallel_for_(cv::Range(0,n), predictor);
int* sidx = _sample_idx ? _sample_idx->data.i : 0;
int r_step = CV_IS_MAT_CONT(response->type) ?

View File

@@ -306,7 +306,7 @@ float CvKNearest::write_results( int k, int k1, int start, int end,
return result;
}
struct P1 {
struct P1 : cv::ParallelLoopBody {
P1(const CvKNearest* _pointer, int _buf_sz, int _k, const CvMat* __samples, const float** __neighbors,
int _k1, CvMat* __results, CvMat* __neighbor_responses, CvMat* __dist, float* _result)
{
@@ -333,10 +333,10 @@ struct P1 {
float* result;
int buf_sz;
void operator()( const cv::BlockedRange& range ) const
void operator()( const cv::Range& range ) const
{
cv::AutoBuffer<float> buf(buf_sz);
for(int i = range.begin(); i < range.end(); i += 1 )
for(int i = range.start; i < range.end; i += 1 )
{
float* neighbor_responses = &buf[0];
float* dist = neighbor_responses + 1*k;
@@ -410,8 +410,8 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
int k1 = get_sample_count();
k1 = MIN( k1, k );
cv::parallel_for(cv::BlockedRange(0, count), P1(this, buf_sz, k, _samples, _neighbors, k1,
_results, _neighbor_responses, _dist, &result)
cv::parallel_for_(cv::Range(0, count), P1(this, buf_sz, k, _samples, _neighbors, k1,
_results, _neighbor_responses, _dist, &result)
);
return result;

View File

@@ -277,7 +277,7 @@ bool CvNormalBayesClassifier::train( const CvMat* _train_data, const CvMat* _res
return result;
}
struct predict_body {
struct predict_body : cv::ParallelLoopBody {
predict_body(CvMat* _c, CvMat** _cov_rotate_mats, CvMat** _inv_eigen_values, CvMat** _avg,
const CvMat* _samples, const int* _vidx, CvMat* _cls_labels,
CvMat* _results, float* _value, int _var_count1
@@ -307,7 +307,7 @@ struct predict_body {
float* value;
int var_count1;
void operator()( const cv::BlockedRange& range ) const
void operator()( const cv::Range& range ) const
{
int cls = -1;
@@ -324,7 +324,7 @@ struct predict_body {
cv::AutoBuffer<double> buffer(nclasses + var_count1);
CvMat diff = cvMat( 1, var_count1, CV_64FC1, &buffer[0] );
for(int k = range.begin(); k < range.end(); k += 1 )
for(int k = range.start; k < range.end; k += 1 )
{
int ival;
double opt = FLT_MAX;
@@ -397,9 +397,9 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
const int* vidx = var_idx ? var_idx->data.i : 0;
cv::parallel_for(cv::BlockedRange(0, samples->rows), predict_body(c, cov_rotate_mats, inv_eigen_values, avg, samples,
vidx, cls_labels, results, &value, var_count
));
cv::parallel_for_(cv::Range(0, samples->rows),
predict_body(c, cov_rotate_mats, inv_eigen_values, avg, samples,
vidx, cls_labels, results, &value, var_count));
return value;
}

View File

@@ -2143,7 +2143,7 @@ float CvSVM::predict( const CvMat* sample, bool returnDFVal ) const
return result;
}
struct predict_body_svm {
struct predict_body_svm : ParallelLoopBody {
predict_body_svm(const CvSVM* _pointer, float* _result, const CvMat* _samples, CvMat* _results)
{
pointer = _pointer;
@@ -2157,9 +2157,9 @@ struct predict_body_svm {
const CvMat* samples;
CvMat* results;
void operator()( const cv::BlockedRange& range ) const
void operator()( const cv::Range& range ) const
{
for(int i = range.begin(); i < range.end(); i++ )
for(int i = range.start; i < range.end; i++ )
{
CvMat sample;
cvGetRow( samples, &sample, i );
@@ -2175,7 +2175,7 @@ struct predict_body_svm {
float CvSVM::predict(const CvMat* samples, CV_OUT CvMat* results) const
{
float result = 0;
cv::parallel_for(cv::BlockedRange(0, samples->rows),
cv::parallel_for_(cv::Range(0, samples->rows),
predict_body_svm(this, &result, samples, results)
);
return result;

View File

@@ -1,79 +0,0 @@
Background Subtraction
======================
.. highlight:: cpp
gpu::VIBE_GPU
-------------
.. ocv:class:: gpu::VIBE_GPU
Class used for background/foreground segmentation. ::
class VIBE_GPU
{
public:
explicit VIBE_GPU(unsigned long rngSeed = 1234567);
void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null());
void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null());
void release();
...
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements the algorithm described in [VIBE2011]_.
gpu::VIBE_GPU::VIBE_GPU
-----------------------
The constructor.
.. ocv:function:: gpu::VIBE_GPU::VIBE_GPU(unsigned long rngSeed = 1234567)
:param rngSeed: Value used to initiate a random sequence.
Default constructor sets all parameters to default values.
gpu::VIBE_GPU::initialize
-------------------------
Initializes the background model and allocates all inner buffers.
.. ocv:function:: void gpu::VIBE_GPU::initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null())
:param firstFrame: First frame from video sequence.
:param stream: Stream for the asynchronous version.
gpu::VIBE_GPU::operator()
-------------------------
Updates the background model and returns the foreground mask
.. ocv:function:: void gpu::VIBE_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null())
:param frame: Next video frame.
:param fgmask: The output foreground mask as an 8-bit binary image.
:param stream: Stream for the asynchronous version.
gpu::VIBE_GPU::release
----------------------
Releases all inner buffers' memory.
.. ocv:function:: void gpu::VIBE_GPU::release()
.. [VIBE2011] O. Barnich and M. Van Droogenbroeck. *ViBe: A universal background subtraction algorithm for video sequences*. IEEE Transactions on Image Processing, 20(6):1709-1724, June 2011

View File

@@ -8,4 +8,3 @@ The module contains algorithms that may be patented in some countries or have so
:maxdepth: 2
feature_detection
background_subtraction

View File

@@ -125,41 +125,6 @@ public:
GpuMat maxPosBuffer;
};
/*!
* The class implements the following algorithm:
* "ViBe: A universal background subtraction algorithm for video sequences"
* O. Barnich and M. Van Droogenbroeck
* IEEE Transactions on Image Processing, 20(6):1709-1724, June 2011
*/
class CV_EXPORTS VIBE_GPU
{
public:
//! the default constructor
explicit VIBE_GPU(unsigned long rngSeed = 1234567);
//! re-initialization method
void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null());
//! the update operator
void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null());
//! releases all inner buffers
void release();
int nbSamples; // number of samples per pixel
int reqMatches; // #_min
int radius; // R
int subsamplingFactor; // amount of random subsampling
private:
Size frameSize_;
unsigned long rngSeed_;
GpuMat randStates_;
GpuMat samples_;
};
} // namespace gpu
} // namespace cv

View File

@@ -50,18 +50,6 @@ using namespace std;
using namespace testing;
using namespace perf;
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////////////////////
// SURF
@@ -108,75 +96,4 @@ PERF_TEST_P(Image, GPU_SURF,
}
}
//////////////////////////////////////////////////////
// VIBE
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn, string, int);
PERF_TEST_P(Video_Cn, GPU_VIBE,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat foreground;
vibe(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
vibe(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
FAIL_NO_CPU();
}
}
#endif
#endif

View File

@@ -1,4 +1,11 @@
#include "perf_precomp.hpp"
#include "opencv2/ts/gpu_perf.hpp"
CV_PERF_TEST_MAIN(nonfree, perf::printCudaInfo())
static const char * impls[] = {
#ifdef HAVE_CUDA
"cuda",
#endif
"plain"
};
CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo())
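For context, CV_PERF_TEST_MAIN_WITH_IMPLS lets a perf binary advertise which implementation variants it can exercise, so the reports can separate CUDA runs from plain CPU runs. A hedged sketch of the same pattern for some other hypothetical CUDA-accelerated module (the module name mymodule is illustrative):

#include "perf_precomp.hpp"
#include "opencv2/ts/gpu_perf.hpp"

// implementations this binary can report: "cuda" only when built with CUDA support
static const char* impls[] = {
#ifdef HAVE_CUDA
    "cuda",
#endif
    "plain"
};

CV_PERF_TEST_MAIN_WITH_IMPLS(mymodule, impls, perf::printCudaInfo())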

Some files were not shown because too many files have changed in this diff