The first draft of the transparent API and the new UMat class; more files.

Vadim Pisarevsky 2013-10-22 14:05:15 +04:00
parent 278fb617d2
commit d8c8339bec
17 changed files with 1318 additions and 362 deletions

View File

@ -158,6 +158,9 @@ enum { REDUCE_SUM = 0,
//! swaps two matrices
CV_EXPORTS void swap(Mat& a, Mat& b);
//! swaps two umatrices
CV_EXPORTS void swap( UMat& a, UMat& b );
//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
CV_EXPORTS_W int borderInterpolate(int p, int len, int borderType);
@ -439,7 +442,7 @@ CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, M
//! computes the covariance matrix of a set of samples
CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar,
OutputArray mean, int flags, int ctype = CV_64F);
InputOutputArray mean, int flags, int ctype = CV_64F);
CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,
OutputArray eigenvectors, int maxComponents = 0);
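The hunk above changes the mean parameter of calcCovarMatrix from OutputArray to InputOutputArray, so a caller can both receive the computed mean and supply a precomputed one together with COVAR_USE_AVG. A minimal sketch of the two call patterns, assuming the OpenCV 3.x flag names (COVAR_NORMAL, COVAR_ROWS, COVAR_USE_AVG):

#include <opencv2/core.hpp>

int main()
{
    // 10 samples with 3 features each, one sample per row
    cv::Mat samples(10, 3, CV_64F);
    cv::randu(samples, 0.0, 1.0);

    // mean is computed by the function and returned through the in/out argument
    cv::Mat covar, mean;
    cv::calcCovarMatrix(samples, covar, mean,
                        cv::COVAR_NORMAL | cv::COVAR_ROWS);

    // with COVAR_USE_AVG the caller supplies the mean, which is why the
    // parameter now has to be readable as well as writable
    cv::Mat covar2;
    cv::calcCovarMatrix(samples, covar2, mean,
                        cv::COVAR_NORMAL | cv::COVAR_ROWS | cv::COVAR_USE_AVG);
    return 0;
}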

View File

@ -472,6 +472,9 @@ class CV_EXPORTS RNG;
class CV_EXPORTS Mat;
class CV_EXPORTS MatExpr;
class CV_EXPORTS UMat;
class CV_EXPORTS UMatExpr;
class CV_EXPORTS SparseMat;
typedef Mat MatND;

View File

@ -55,6 +55,9 @@
namespace cv
{
enum { ACCESS_READ=1<<24, ACCESS_WRITE=1<<25,
ACCESS_RW=3<<24, ACCESS_MASK=ACCESS_RW, ACCESS_FAST=1<<26 };
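These access flags are the hints passed to the new getMat()/getUMat() accessors declared later in this commit, telling the runtime whether a host or device copy may be written and therefore which side becomes obsolete. A hedged sketch of both directions; the behaviour of this draft may differ from later releases:

#include <opencv2/core.hpp>

int main()
{
    // host -> device view: ACCESS_READ promises the UMat side is read-only
    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(0));
    cv::UMat u = host.getUMat(cv::ACCESS_READ);
    CV_Assert(!u.empty());

    // device -> host view: read/write mapping of an independent UMat
    cv::UMat dev(480, 640, CV_8UC1, cv::Scalar(0));
    cv::Mat mapped = dev.getMat(cv::ACCESS_RW);
    mapped.setTo(cv::Scalar(255));
    return 0;
}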
//////////////////////// Input/Output Array Arguments /////////////////////////////////
/*!
@ -67,7 +70,7 @@ public:
KIND_SHIFT = 16,
FIXED_TYPE = 0x8000 << KIND_SHIFT,
FIXED_SIZE = 0x4000 << KIND_SHIFT,
KIND_MASK = ~(FIXED_TYPE|FIXED_SIZE) - (1 << KIND_SHIFT) + 1,
KIND_MASK = 31 << KIND_SHIFT,
NONE = 0 << KIND_SHIFT,
MAT = 1 << KIND_SHIFT,
@ -79,10 +82,14 @@ public:
OPENGL_BUFFER = 7 << KIND_SHIFT,
CUDA_MEM = 8 << KIND_SHIFT,
GPU_MAT = 9 << KIND_SHIFT,
OCL_MAT =10 << KIND_SHIFT
OCL_MAT =10 << KIND_SHIFT,
UMAT =OCL_MAT,
STD_VECTOR_UMAT =11 << KIND_SHIFT,
UEXPR =12 << KIND_SHIFT
};
_InputArray();
_InputArray(int _flags, void* _obj);
_InputArray(const Mat& m);
_InputArray(const MatExpr& expr);
_InputArray(const std::vector<Mat>& vec);
@ -97,11 +104,16 @@ public:
_InputArray(const ogl::Buffer& buf);
_InputArray(const cuda::CudaMem& cuda_mem);
template<typename _Tp> _InputArray(const cudev::GpuMat_<_Tp>& m);
_InputArray(const UMat& um);
_InputArray(const std::vector<UMat>& umv);
_InputArray(const UMatExpr& uexpr);
virtual Mat getMat(int i=-1) const;
virtual Mat getMat(int idx=-1) const;
virtual UMat getUMat(int idx=-1) const;
virtual void getMatVector(std::vector<Mat>& mv) const;
virtual cuda::GpuMat getGpuMat() const;
virtual ogl::Buffer getOGlBuffer() const;
void* getObj() const;
virtual int kind() const;
virtual Size size(int i=-1) const;
@ -113,9 +125,13 @@ public:
virtual ~_InputArray();
protected:
int flags;
void* obj;
Size sz;
void init(int _flags, const void* _obj);
void init(int _flags, const void* _obj, Size _sz);
};
@ -140,6 +156,7 @@ public:
};
_OutputArray();
_OutputArray(int _flags, void* _obj);
_OutputArray(Mat& m);
_OutputArray(std::vector<Mat>& vec);
_OutputArray(cuda::GpuMat& d_mat);
@ -152,6 +169,8 @@ public:
template<typename _Tp> _OutputArray(Mat_<_Tp>& m);
template<typename _Tp> _OutputArray(_Tp* vec, int n);
template<typename _Tp, int m, int n> _OutputArray(Matx<_Tp, m, n>& matx);
_OutputArray(UMat& m);
_OutputArray(std::vector<UMat>& vec);
_OutputArray(const Mat& m);
_OutputArray(const std::vector<Mat>& vec);
@ -165,6 +184,8 @@ public:
template<typename _Tp> _OutputArray(const Mat_<_Tp>& m);
template<typename _Tp> _OutputArray(const _Tp* vec, int n);
template<typename _Tp, int m, int n> _OutputArray(const Matx<_Tp, m, n>& matx);
_OutputArray(const UMat& m);
_OutputArray(const std::vector<UMat>& vec);
virtual bool fixedSize() const;
virtual bool fixedType() const;
@ -178,23 +199,58 @@ public:
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void release() const;
virtual void clear() const;
};
virtual ~_OutputArray();
class CV_EXPORTS _InputOutputArray : public _OutputArray
{
public:
_InputOutputArray();
_InputOutputArray(int _flags, void* _obj);
_InputOutputArray(Mat& m);
_InputOutputArray(std::vector<Mat>& vec);
_InputOutputArray(cuda::GpuMat& d_mat);
_InputOutputArray(ogl::Buffer& buf);
_InputOutputArray(cuda::CudaMem& cuda_mem);
template<typename _Tp> _InputOutputArray(cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(std::vector<_Tp>& vec);
template<typename _Tp> _InputOutputArray(std::vector<std::vector<_Tp> >& vec);
template<typename _Tp> _InputOutputArray(std::vector<Mat_<_Tp> >& vec);
template<typename _Tp> _InputOutputArray(Mat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(_Tp* vec, int n);
template<typename _Tp, int m, int n> _InputOutputArray(Matx<_Tp, m, n>& matx);
_InputOutputArray(UMat& m);
_InputOutputArray(std::vector<UMat>& vec);
_InputOutputArray(const Mat& m);
_InputOutputArray(const std::vector<Mat>& vec);
_InputOutputArray(const cuda::GpuMat& d_mat);
_InputOutputArray(const ogl::Buffer& buf);
_InputOutputArray(const cuda::CudaMem& cuda_mem);
template<typename _Tp> _InputOutputArray(const cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(const std::vector<_Tp>& vec);
template<typename _Tp> _InputOutputArray(const std::vector<std::vector<_Tp> >& vec);
template<typename _Tp> _InputOutputArray(const std::vector<Mat_<_Tp> >& vec);
template<typename _Tp> _InputOutputArray(const Mat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(const _Tp* vec, int n);
template<typename _Tp, int m, int n> _InputOutputArray(const Matx<_Tp, m, n>& matx);
_InputOutputArray(const UMat& m);
_InputOutputArray(const std::vector<UMat>& vec);
};
typedef const _InputArray& InputArray;
typedef InputArray InputArrayOfArrays;
typedef const _OutputArray& OutputArray;
typedef OutputArray OutputArrayOfArrays;
typedef OutputArray InputOutputArray;
typedef OutputArray InputOutputArrayOfArrays;
CV_EXPORTS OutputArray noArray();
typedef const _InputOutputArray& InputOutputArray;
typedef InputOutputArray InputOutputArrayOfArrays;
CV_EXPORTS InputOutputArray noArray();
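noArray() now returns an _InputOutputArray reference, which binds to InputArray, OutputArray and InputOutputArray parameters alike, so it can still serve as the default for optional arguments. A sketch of a function written against the new typedefs; addInPlace and its mask handling are purely illustrative, not part of this commit:

#include <opencv2/core.hpp>

// hypothetical helper: adds `delta` to every element of `buf` in place,
// optionally restricted by a mask; the default mask comes from noArray()
static void addInPlace(cv::InputOutputArray buf, double delta,
                       cv::InputArray mask = cv::noArray())
{
    cv::Mat m = buf.getMat();                 // read-write view of the caller's data
    cv::add(m, cv::Scalar::all(delta), m, mask);
}

int main()
{
    cv::Mat img(4, 4, CV_32F, cv::Scalar(1));
    addInPlace(img, 0.5);                     // no mask
    return 0;
}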
/////////////////////////////////// MatAllocator //////////////////////////////////////
struct CV_EXPORTS UMatData;
/*!
Custom array allocator
@ -204,11 +260,27 @@ class CV_EXPORTS MatAllocator
public:
MatAllocator() {}
virtual ~MatAllocator() {}
virtual void allocate(int dims, const int* sizes, int type, int*& refcount,
uchar*& datastart, uchar*& data, size_t* step) = 0;
virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0;
};
// let's comment it off for now to detect and fix all the uses of allocator
//virtual void allocate(int dims, const int* sizes, int type, int*& refcount,
// uchar*& datastart, uchar*& data, size_t* step) = 0;
//virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0;
virtual UMatData* allocate(int dims, const int* sizes,
int type, size_t* step) const = 0;
virtual bool allocate(UMatData* data, int accessflags) const = 0;
virtual void deallocate(UMatData* data) const = 0;
virtual void map(UMatData* data, int accessflags) const = 0;
virtual void unmap(UMatData* data) const = 0;
virtual void download(UMatData* data, void* dst, int dims, const size_t sz[],
const size_t srcofs[], const size_t srcstep[],
const size_t dststep[]) const = 0;
virtual void upload(UMatData* data, const void* src, int dims, const size_t sz[],
const size_t dstofs[], const size_t dststep[],
const size_t srcstep[]) const = 0;
virtual void copy(UMatData* srcdata, UMatData* dstdata, int dims, const size_t sz[],
const size_t srcofs[], const size_t srcstep[],
const size_t dstofs[], const size_t dststep[], bool sync) const = 0;
};
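MatAllocator is reworked into a factory for UMatData blocks plus a set of transfer hooks (map/unmap, upload/download, copy), which is what lets a device allocator sit behind the same Mat/UMat code paths. A rough illustration of driving this interface directly through the standard host allocator; the buffer shape is invented for the example and the signatures match this draft only (later releases changed them):

#include <opencv2/core.hpp>
#include <vector>

int main()
{
    // exercise the new interface through the standard (host) allocator
    const cv::MatAllocator* a = cv::Mat::getStdAllocator();

    int sizes[] = { 480, 640 };
    size_t step[2];

    // allocate a 480x640 CV_8UC1 buffer; the allocator fills in step[]
    cv::UMatData* u = a->allocate(2, sizes, CV_8UC1, step);

    // "download" the (still uninitialized) contents into a tightly packed host buffer
    std::vector<unsigned char> hostBuf(480 * 640);
    size_t sz[]      = { 480, 640 };
    size_t dststep[] = { 640, 1 };
    a->download(u, &hostBuf[0], 2, sz, /*srcofs=*/0, step, dststep);

    a->deallocate(u);
    return 0;
}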
//////////////////////////////// MatCommaInitializer //////////////////////////////////
@ -240,11 +312,80 @@ protected:
};
/////////////////////////////////////// Mat ///////////////////////////////////////////
/*!
// note that umatdata might be allocated together
// with the matrix data, not as a separate object.
// therefore, it does not have constructor or destructor;
// it should be explicitly initialized using init().
struct CV_EXPORTS UMatData
{
enum { COPY_ON_MAP=1, HOST_COPY_OBSOLETE=2,
DEVICE_COPY_OBSOLETE=4, TEMP_UMAT=8, TEMP_COPIED_UMAT=24 };
UMatData(const MatAllocator* allocator);
// provide atomic access to the structure
void lock();
void unlock();
bool hostCopyObsolete() const;
bool deviceCopyObsolete() const;
bool copyOnMap() const;
bool tempUMat() const;
bool tempCopiedUMat() const;
void markHostCopyObsolete(bool flag);
void markDeviceCopyObsolete(bool flag);
const MatAllocator* prevAllocator;
const MatAllocator* currAllocator;
int urefcount;
int refcount;
uchar* data;
uchar* origdata;
size_t size;
int flags;
void* handle;
};
struct CV_EXPORTS UMatDataAutoLock
{
UMatDataAutoLock(UMatData* u);
~UMatDataAutoLock();
UMatData* u;
};
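UMatDataAutoLock is a scope guard around UMatData::lock()/unlock(), so flag updates stay atomic even if an exception is thrown. A small usage sketch; markDeviceDirty is a hypothetical helper:

#include <opencv2/core.hpp>

void markDeviceDirty(cv::UMatData* u)
{
    cv::UMatDataAutoLock guard(u);     // lock() in the constructor
    u->markHostCopyObsolete(false);
    u->markDeviceCopyObsolete(true);
}                                      // unlock() in the destructor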
struct CV_EXPORTS MatSize
{
MatSize(int* _p);
Size operator()() const;
const int& operator[](int i) const;
int& operator[](int i);
operator const int*() const;
bool operator == (const MatSize& sz) const;
bool operator != (const MatSize& sz) const;
int* p;
};
struct CV_EXPORTS MatStep
{
MatStep();
MatStep(size_t s);
const size_t& operator[](int i) const;
size_t& operator[](int i);
operator size_t() const;
MatStep& operator = (size_t s);
size_t* p;
size_t buf[2];
protected:
MatStep& operator = (const MatStep&);
};
/*!
The n-dimensional matrix class.
The class represents an n-dimensional dense numerical array that can act as
@ -497,14 +638,6 @@ public:
//! builds matrix from comma initializer
template<typename _Tp> explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer);
// //! converts old-style CvMat to the new matrix; the data is not copied by default
// Mat(const CvMat* m, bool copyData=false);
// //! converts old-style CvMatND to the new matrix; the data is not copied by default
// Mat(const CvMatND* m, bool copyData=false);
// //! converts old-style IplImage to the new matrix; the data is not copied by default
// Mat(const IplImage* img, bool copyData=false);
//Mat(const void* img, bool copyData=false);
//! download data from GpuMat
explicit Mat(const cuda::GpuMat& m);
@ -514,6 +647,9 @@ public:
Mat& operator = (const Mat& m);
Mat& operator = (const MatExpr& expr);
//! retrieve UMat from Mat
UMat getUMat(int accessFlags) const;
//! returns a new matrix header for the specified row
Mat row(int y) const;
//! returns a new matrix header for the specified column
@ -748,37 +884,14 @@ public:
//! custom allocator
MatAllocator* allocator;
//! and the standard allocator
static MatAllocator* getStdAllocator();
struct CV_EXPORTS MSize
{
MSize(int* _p);
Size operator()() const;
const int& operator[](int i) const;
int& operator[](int i);
operator const int*() const;
bool operator == (const MSize& sz) const;
bool operator != (const MSize& sz) const;
//! interaction with UMat
UMatData* u;
int* p;
};
struct CV_EXPORTS MStep
{
MStep();
MStep(size_t s);
const size_t& operator[](int i) const;
size_t& operator[](int i);
operator size_t() const;
MStep& operator = (size_t s);
size_t* p;
size_t buf[2];
protected:
MStep& operator = (const MStep&);
};
MSize size;
MStep step;
MatSize size;
MatStep step;
protected:
};
@ -1001,6 +1114,205 @@ typedef Mat_<Vec3d> Mat3d;
typedef Mat_<Vec4d> Mat4d;
class CV_EXPORTS UMatExpr;
class CV_EXPORTS UMat
{
public:
//! default constructor
UMat();
//! constructs 2D matrix of the specified size and type
// (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
UMat(int rows, int cols, int type);
UMat(Size size, int type);
//! constructs 2D matrix and fills it with the specified value _s.
UMat(int rows, int cols, int type, const Scalar& s);
UMat(Size size, int type, const Scalar& s);
//! constructs n-dimensional matrix
UMat(int ndims, const int* sizes, int type);
UMat(int ndims, const int* sizes, int type, const Scalar& s);
//! copy constructor
UMat(const UMat& m);
//! creates a matrix header for a part of the bigger matrix
UMat(const UMat& m, const Range& rowRange, const Range& colRange=Range::all());
UMat(const UMat& m, const Rect& roi);
UMat(const UMat& m, const Range* ranges);
//! builds matrix from std::vector with or without copying the data
template<typename _Tp> explicit UMat(const std::vector<_Tp>& vec, bool copyData=false);
//! builds matrix from cv::Vec; the data is copied by default
template<typename _Tp, int n> explicit UMat(const Vec<_Tp, n>& vec, bool copyData=true);
//! builds matrix from cv::Matx; the data is copied by default
template<typename _Tp, int m, int n> explicit UMat(const Matx<_Tp, m, n>& mtx, bool copyData=true);
//! builds matrix from a 2D point
template<typename _Tp> explicit UMat(const Point_<_Tp>& pt, bool copyData=true);
//! builds matrix from a 3D point
template<typename _Tp> explicit UMat(const Point3_<_Tp>& pt, bool copyData=true);
//! builds matrix from comma initializer
template<typename _Tp> explicit UMat(const MatCommaInitializer_<_Tp>& commaInitializer);
//! destructor - calls release()
~UMat();
//! assignment operators
UMat& operator = (const UMat& m);
UMat& operator = (const UMatExpr& expr);
Mat getMat(int flags) const;
//! returns a new matrix header for the specified row
UMat row(int y) const;
//! returns a new matrix header for the specified column
UMat col(int x) const;
//! ... for the specified row span
UMat rowRange(int startrow, int endrow) const;
UMat rowRange(const Range& r) const;
//! ... for the specified column span
UMat colRange(int startcol, int endcol) const;
UMat colRange(const Range& r) const;
//! ... for the specified diagonal
// (d=0 - the main diagonal,
// >0 - a diagonal from the lower half,
// <0 - a diagonal from the upper half)
UMat diag(int d=0) const;
//! constructs a square diagonal matrix whose main diagonal is the vector "d"
static UMat diag(const UMat& d);
//! returns deep copy of the matrix, i.e. the data is copied
UMat clone() const;
//! copies the matrix content to "m".
// It calls m.create(this->size(), this->type()).
void copyTo( OutputArray m ) const;
//! copies those matrix elements to "m" that are marked with non-zero mask elements.
void copyTo( OutputArray m, InputArray mask ) const;
//! converts matrix to another datatype with optional scaling. See cvConvertScale.
void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const;
void assignTo( UMat& m, int type=-1 ) const;
//! sets every matrix element to s
UMat& operator = (const Scalar& s);
//! sets some of the matrix elements to s, according to the mask
UMat& setTo(InputArray value, InputArray mask=noArray());
//! creates alternative matrix header for the same data, with different
// number of channels and/or different number of rows. see cvReshape.
UMat reshape(int cn, int rows=0) const;
UMat reshape(int cn, int newndims, const int* newsz) const;
//! matrix transposition by means of matrix expressions
UMatExpr t() const;
//! matrix inversion by means of matrix expressions
UMatExpr inv(int method=DECOMP_LU) const;
//! per-element matrix multiplication by means of matrix expressions
UMatExpr mul(InputArray m, double scale=1) const;
//! computes cross-product of 2 3D vectors
UMat cross(InputArray m) const;
//! computes dot-product
double dot(InputArray m) const;
//! Matlab-style matrix initialization
static UMatExpr zeros(int rows, int cols, int type);
static UMatExpr zeros(Size size, int type);
static UMatExpr zeros(int ndims, const int* sz, int type);
static UMatExpr ones(int rows, int cols, int type);
static UMatExpr ones(Size size, int type);
static UMatExpr ones(int ndims, const int* sz, int type);
static UMatExpr eye(int rows, int cols, int type);
static UMatExpr eye(Size size, int type);
//! allocates new matrix data unless the matrix already has specified size and type.
// previous data is unreferenced if needed.
void create(int rows, int cols, int type);
void create(Size size, int type);
void create(int ndims, const int* sizes, int type);
//! increases the reference counter; use with care to avoid memleaks
void addref();
//! decreases reference counter;
// deallocates the data when reference counter reaches 0.
void release();
//! deallocates the matrix data
void deallocate();
//! internal use function; properly re-allocates _size, _step arrays
void copySize(const UMat& m);
//! locates matrix header within a parent matrix. See below
void locateROI( Size& wholeSize, Point& ofs ) const;
//! moves/resizes the current matrix ROI inside the parent matrix.
UMat& adjustROI( int dtop, int dbottom, int dleft, int dright );
//! extracts a rectangular sub-matrix
// (this is a generalized form of row, rowRange etc.)
UMat operator()( Range rowRange, Range colRange ) const;
UMat operator()( const Rect& roi ) const;
UMat operator()( const Range* ranges ) const;
//! returns true iff the matrix data is continuous
// (i.e. when there are no gaps between successive rows).
// similar to CV_IS_MAT_CONT(cvmat->type)
bool isContinuous() const;
//! returns true if the matrix is a submatrix of another matrix
bool isSubmatrix() const;
//! returns element size in bytes,
// similar to CV_ELEM_SIZE(cvmat->type)
size_t elemSize() const;
//! returns the size of element channel in bytes.
size_t elemSize1() const;
//! returns element type, similar to CV_MAT_TYPE(cvmat->type)
int type() const;
//! returns element type, similar to CV_MAT_DEPTH(cvmat->type)
int depth() const;
//! returns element type, similar to CV_MAT_CN(cvmat->type)
int channels() const;
//! returns step/elemSize1()
size_t step1(int i=0) const;
//! returns true if matrix data is NULL
bool empty() const;
//! returns the total number of matrix elements
size_t total() const;
//! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise
int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const;
void* handle(int accessFlags) const;
void ndoffset(size_t* ofs) const;
enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG };
enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 };
/*! includes several bit-fields:
- the magic signature
- continuity flag
- depth
- number of channels
*/
int flags;
//! the matrix dimensionality, >= 2
int dims;
//! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions
int rows, cols;
//! custom allocator
MatAllocator* allocator;
//! and the standard allocator
static MatAllocator* getStdAllocator();
// black-box container of UMat data
UMatData* u;
// offset of the submatrix (or 0)
size_t offset;
MatSize size;
MatStep step;
protected:
};
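Taken together, UMat is meant to be used almost exactly like Mat, with explicit access flags only where host code needs a raw view of the data. A hedged end-to-end sketch using only members declared above; the exact behaviour of this draft may differ from later releases:

#include <opencv2/core.hpp>

int main()
{
    // create and fill a UMat just like a Mat
    cv::UMat a(4, 4, CV_32FC1, cv::Scalar(1));

    cv::UMat b;
    a.convertTo(b, CV_32F, 2.0);              // b = 2*a, allocated on demand

    cv::UMat firstRow = b.row(0);             // header-only view, as with Mat
    (void)firstRow;

    // explicit transfer to a host-side Mat for per-element access
    cv::Mat hb = b.getMat(cv::ACCESS_READ);
    CV_Assert(hb.at<float>(0, 0) == 2.0f);
    return 0;
}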
/////////////////////////// multi-dimensional sparse matrix //////////////////////////

View File

@ -52,119 +52,236 @@ namespace cv
//////////////////////// Input/Output Arrays ////////////////////////
inline void _InputArray::init(int _flags, const void* _obj)
{ flags = _flags; obj = (void*)_obj; }
inline void _InputArray::init(int _flags, const void* _obj, Size _sz)
{ flags = _flags; obj = (void*)_obj; sz = _sz; }
inline void* _InputArray::getObj() const { return obj; }
inline _InputArray::_InputArray() { init(0, 0); }
inline _InputArray::_InputArray(int _flags, void* _obj) { init(_flags, _obj); }
inline _InputArray::_InputArray(const Mat& m) { init(MAT+ACCESS_READ, &m); }
inline _InputArray::_InputArray(const std::vector<Mat>& vec) { init(STD_VECTOR_MAT+ACCESS_READ, &vec); }
inline _InputArray::_InputArray(const UMat& m) { init(UMAT+ACCESS_READ, &m); }
inline _InputArray::_InputArray(const std::vector<UMat>& vec) { init(STD_VECTOR_UMAT+ACCESS_READ, &vec); }
template<typename _Tp> inline
_InputArray::_InputArray(const std::vector<_Tp>& vec)
: flags(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec)
{}
{ init(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type + ACCESS_READ, &vec); }
template<typename _Tp> inline
_InputArray::_InputArray(const std::vector<std::vector<_Tp> >& vec)
: flags(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec)
{}
{ init(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_READ, &vec); }
template<typename _Tp> inline
_InputArray::_InputArray(const std::vector<Mat_<_Tp> >& vec)
: flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec)
{}
{ init(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_READ, &vec); }
template<typename _Tp, int m, int n> inline
_InputArray::_InputArray(const Matx<_Tp, m, n>& mtx)
: flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m)
{}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_READ, &mtx, Size(n, m)); }
template<typename _Tp> inline
_InputArray::_InputArray(const _Tp* vec, int n)
: flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)vec), sz(n, 1)
{}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_READ, vec, Size(n, 1)); }
template<typename _Tp> inline
_InputArray::_InputArray(const Mat_<_Tp>& m)
: flags(FIXED_TYPE + MAT + DataType<_Tp>::type), obj((void*)&m)
{}
{ init(FIXED_TYPE + MAT + DataType<_Tp>::type + ACCESS_READ, &m); }
inline _InputArray::_InputArray(const double& val)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F + ACCESS_READ, &val, Size(1,1)); }
inline _InputArray::_InputArray(const MatExpr& expr)
{ init(FIXED_TYPE + FIXED_SIZE + EXPR + ACCESS_READ, &expr); }
inline _InputArray::_InputArray(const cuda::GpuMat& d_mat)
{ init(GPU_MAT + ACCESS_READ, &d_mat); }
inline _InputArray::_InputArray(const ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_READ, &buf); }
inline _InputArray::_InputArray(const cuda::CudaMem& cuda_mem)
{ init(CUDA_MEM + ACCESS_READ, &cuda_mem); }
inline _InputArray::~_InputArray() {}
////////////////////////////////////////////////////////////////////////////////////////
inline _OutputArray::_OutputArray() { init(ACCESS_WRITE, 0); }
inline _OutputArray::_OutputArray(int _flags, void* _obj) { init(_flags|ACCESS_WRITE, _obj); }
inline _OutputArray::_OutputArray(Mat& m) { init(MAT+ACCESS_WRITE, &m); }
inline _OutputArray::_OutputArray(std::vector<Mat>& vec) { init(STD_VECTOR_MAT+ACCESS_WRITE, &vec); }
inline _OutputArray::_OutputArray(UMat& m) { init(UMAT+ACCESS_WRITE, &m); }
inline _OutputArray::_OutputArray(std::vector<UMat>& vec) { init(STD_VECTOR_UMAT+ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(std::vector<_Tp>& vec)
: _InputArray(vec)
{}
{ init(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(std::vector<std::vector<_Tp> >& vec)
: _InputArray(vec)
{}
{ init(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(std::vector<Mat_<_Tp> >& vec)
: _InputArray(vec)
{}
{ init(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(Mat_<_Tp>& m)
: _InputArray(m)
{}
{ init(FIXED_TYPE + MAT + DataType<_Tp>::type + ACCESS_WRITE, &m); }
template<typename _Tp, int m, int n> inline
_OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx)
: _InputArray(mtx)
{}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, &mtx, Size(n, m)); }
template<typename _Tp> inline
_OutputArray::_OutputArray(_Tp* vec, int n)
: _InputArray(vec, n)
{}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, vec, Size(n, 1)); }
template<typename _Tp> inline
_OutputArray::_OutputArray(const std::vector<_Tp>& vec)
: _InputArray(vec)
{
flags |= FIXED_SIZE;
}
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(const std::vector<std::vector<_Tp> >& vec)
: _InputArray(vec)
{
flags |= FIXED_SIZE;
}
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(const std::vector<Mat_<_Tp> >& vec)
: _InputArray(vec)
{
flags |= FIXED_SIZE;
}
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_WRITE, &vec); }
template<typename _Tp> inline
_OutputArray::_OutputArray(const Mat_<_Tp>& m)
: _InputArray(m)
{
flags |= FIXED_SIZE;
}
{ init(FIXED_TYPE + FIXED_SIZE + MAT + DataType<_Tp>::type + ACCESS_WRITE, &m); }
template<typename _Tp, int m, int n> inline
_OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx)
: _InputArray(mtx)
{}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, &mtx, Size(n, m)); }
template<typename _Tp> inline
_OutputArray::_OutputArray(const _Tp* vec, int n)
: _InputArray(vec, n)
{}
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, vec, Size(n, 1)); }
inline _OutputArray::_OutputArray(cuda::GpuMat& d_mat)
{ init(GPU_MAT + ACCESS_WRITE, &d_mat); }
inline _OutputArray::_OutputArray(ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_WRITE, &buf); }
//////////////////////////////// Mat ////////////////////////////////
inline _OutputArray::_OutputArray(cuda::CudaMem& cuda_mem)
{ init(CUDA_MEM + ACCESS_WRITE, &cuda_mem); }
inline _OutputArray::_OutputArray(const Mat& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_WRITE, &m); }
inline _OutputArray::_OutputArray(const std::vector<Mat>& vec)
{ init(FIXED_SIZE + STD_VECTOR_MAT + ACCESS_WRITE, &vec); }
inline _OutputArray::_OutputArray(const cuda::GpuMat& d_mat)
{ init(FIXED_TYPE + FIXED_SIZE + GPU_MAT + ACCESS_WRITE, &d_mat); }
inline _OutputArray::_OutputArray(const ogl::Buffer& buf)
{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_WRITE, &buf); }
inline _OutputArray::_OutputArray(const cuda::CudaMem& cuda_mem)
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_MEM + ACCESS_WRITE, &cuda_mem); }
///////////////////////////////////////////////////////////////////////////////////////////
inline _InputOutputArray::_InputOutputArray() { init(ACCESS_RW, 0); }
inline _InputOutputArray::_InputOutputArray(int _flags, void* _obj) { init(_flags|ACCESS_RW, _obj); }
inline _InputOutputArray::_InputOutputArray(Mat& m) { init(MAT+ACCESS_RW, &m); }
inline _InputOutputArray::_InputOutputArray(std::vector<Mat>& vec) { init(STD_VECTOR_MAT+ACCESS_RW, &vec); }
inline _InputOutputArray::_InputOutputArray(UMat& m) { init(UMAT+ACCESS_RW, &m); }
inline _InputOutputArray::_InputOutputArray(std::vector<UMat>& vec) { init(STD_VECTOR_UMAT+ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(std::vector<_Tp>& vec)
{ init(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(std::vector<std::vector<_Tp> >& vec)
{ init(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(std::vector<Mat_<_Tp> >& vec)
{ init(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(Mat_<_Tp>& m)
{ init(FIXED_TYPE + MAT + DataType<_Tp>::type + ACCESS_RW, &m); }
template<typename _Tp, int m, int n> inline
_InputOutputArray::_InputOutputArray(Matx<_Tp, m, n>& mtx)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, &mtx, Size(n, m)); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(_Tp* vec, int n)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, vec, Size(n, 1)); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(const std::vector<_Tp>& vec)
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(const std::vector<std::vector<_Tp> >& vec)
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(const std::vector<Mat_<_Tp> >& vec)
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_RW, &vec); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(const Mat_<_Tp>& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + DataType<_Tp>::type + ACCESS_RW, &m); }
template<typename _Tp, int m, int n> inline
_InputOutputArray::_InputOutputArray(const Matx<_Tp, m, n>& mtx)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, &mtx, Size(n, m)); }
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(const _Tp* vec, int n)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, vec, Size(n, 1)); }
inline _InputOutputArray::_InputOutputArray(cuda::GpuMat& d_mat)
{ init(GPU_MAT + ACCESS_RW, &d_mat); }
inline _InputOutputArray::_InputOutputArray(ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_RW, &buf); }
inline _InputOutputArray::_InputOutputArray(cuda::CudaMem& cuda_mem)
{ init(CUDA_MEM + ACCESS_RW, &cuda_mem); }
inline _InputOutputArray::_InputOutputArray(const Mat& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_RW, &m); }
inline _InputOutputArray::_InputOutputArray(const std::vector<Mat>& vec)
{ init(FIXED_SIZE + STD_VECTOR_MAT + ACCESS_RW, &vec); }
inline _InputOutputArray::_InputOutputArray(const cuda::GpuMat& d_mat)
{ init(FIXED_TYPE + FIXED_SIZE + GPU_MAT + ACCESS_RW, &d_mat); }
inline _InputOutputArray::_InputOutputArray(const ogl::Buffer& buf)
{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_RW, &buf); }
inline _InputOutputArray::_InputOutputArray(const cuda::CudaMem& cuda_mem)
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_MEM + ACCESS_RW, &cuda_mem); }
//////////////////////////////////////////// Mat //////////////////////////////////////////
inline
Mat::Mat()
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{}
inline
Mat::Mat(int _rows, int _cols, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{
create(_rows, _cols, _type);
}
@ -172,7 +289,7 @@ Mat::Mat(int _rows, int _cols, int _type)
inline
Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{
create(_rows, _cols, _type);
*this = _s;
@ -181,7 +298,7 @@ Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s)
inline
Mat::Mat(Size _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{
create( _sz.height, _sz.width, _type );
}
@ -189,7 +306,7 @@ Mat::Mat(Size _sz, int _type)
inline
Mat::Mat(Size _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{
create(_sz.height, _sz.width, _type);
*this = _s;
@ -198,7 +315,7 @@ Mat::Mat(Size _sz, int _type, const Scalar& _s)
inline
Mat::Mat(int _dims, const int* _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{
create(_dims, _sz, _type);
}
@ -206,7 +323,7 @@ Mat::Mat(int _dims, const int* _sz, int _type)
inline
Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
datalimit(0), allocator(0), u(0), size(&rows)
{
create(_dims, _sz, _type);
*this = _s;
@ -216,7 +333,7 @@ inline
Mat::Mat(const Mat& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), refcount(m.refcount),
datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
size(&rows)
u(m.u), size(&rows)
{
if( refcount )
CV_XADD(refcount, 1);
@ -235,7 +352,7 @@ inline
Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step)
: flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols),
data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), datalimit(0),
allocator(0), size(&rows)
allocator(0), u(0), size(&rows)
{
size_t esz = CV_ELEM_SIZE(_type);
size_t minstep = cols * esz;
@ -260,7 +377,7 @@ inline
Mat::Mat(Size _sz, int _type, void* _data, size_t _step)
: flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width),
data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), datalimit(0),
allocator(0), size(&rows)
allocator(0), u(0), size(&rows)
{
size_t esz = CV_ELEM_SIZE(_type);
size_t minstep = cols*esz;
@ -284,7 +401,7 @@ Mat::Mat(Size _sz, int _type, void* _data, size_t _step)
template<typename _Tp> inline
Mat::Mat(const std::vector<_Tp>& vec, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),
cols(1), data(0), refcount(0), datastart(0), dataend(0), allocator(0), size(&rows)
cols(1), data(0), refcount(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)
{
if(vec.empty())
return;
@ -301,7 +418,7 @@ Mat::Mat(const std::vector<_Tp>& vec, bool copyData)
template<typename _Tp, int n> inline
Mat::Mat(const Vec<_Tp, n>& vec, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(n), cols(1), data(0),
refcount(0), datastart(0), dataend(0), allocator(0), size(&rows)
refcount(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)
{
if( !copyData )
{
@ -317,7 +434,7 @@ Mat::Mat(const Vec<_Tp, n>& vec, bool copyData)
template<typename _Tp, int m, int n> inline
Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(m), cols(n), data(0),
refcount(0), datastart(0), dataend(0), allocator(0), size(&rows)
refcount(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)
{
if( !copyData )
{
@ -333,7 +450,7 @@ Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData)
template<typename _Tp> inline
Mat::Mat(const Point_<_Tp>& pt, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(2), cols(1), data(0),
refcount(0), datastart(0), dataend(0), allocator(0), size(&rows)
refcount(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)
{
if( !copyData )
{
@ -352,7 +469,7 @@ Mat::Mat(const Point_<_Tp>& pt, bool copyData)
template<typename _Tp> inline
Mat::Mat(const Point3_<_Tp>& pt, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(3), cols(1), data(0),
refcount(0), datastart(0), dataend(0), allocator(0), size(&rows)
refcount(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)
{
if( !copyData )
{
@ -372,7 +489,7 @@ Mat::Mat(const Point3_<_Tp>& pt, bool copyData)
template<typename _Tp> inline
Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(0), rows(0), cols(0), data(0),
refcount(0), datastart(0), dataend(0), allocator(0), size(&rows)
refcount(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)
{
*this = commaInitializer.operator Mat_<_Tp>();
}
@ -410,6 +527,7 @@ Mat& Mat::operator = (const Mat& m)
datalimit = m.datalimit;
refcount = m.refcount;
allocator = m.allocator;
u = m.u;
}
return *this;
}
@ -497,6 +615,7 @@ inline void Mat::release()
data = datastart = dataend = datalimit = 0;
size.p[0] = 0;
refcount = 0;
u = 0;
}
inline
@ -913,41 +1032,39 @@ void Mat::push_back(const Mat_<_Tp>& m)
push_back((const Mat&)m);
}
///////////////////////////// Mat::MSize ////////////////////////////
///////////////////////////// MatSize ////////////////////////////
inline
Mat::MSize::MSize(int* _p)
MatSize::MatSize(int* _p)
: p(_p) {}
inline
Size Mat::MSize::operator()() const
Size MatSize::operator()() const
{
CV_DbgAssert(p[-1] <= 2);
return Size(p[1], p[0]);
}
inline
const int& Mat::MSize::operator[](int i) const
const int& MatSize::operator[](int i) const
{
return p[i];
}
inline
int& Mat::MSize::operator[](int i)
int& MatSize::operator[](int i)
{
return p[i];
}
inline
Mat::MSize::operator const int*() const
MatSize::operator const int*() const
{
return p;
}
inline
bool Mat::MSize::operator == (const MSize& sz) const
bool MatSize::operator == (const MatSize& sz) const
{
int d = p[-1];
int dsz = sz.p[-1];
@ -963,46 +1080,46 @@ bool Mat::MSize::operator == (const MSize& sz) const
}
inline
bool Mat::MSize::operator != (const MSize& sz) const
bool MatSize::operator != (const MatSize& sz) const
{
return !(*this == sz);
}
///////////////////////////// Mat::MStep ////////////////////////////
///////////////////////////// MatStep ////////////////////////////
inline
Mat::MStep::MStep()
MatStep::MatStep()
{
p = buf; p[0] = p[1] = 0;
}
inline
Mat::MStep::MStep(size_t s)
MatStep::MatStep(size_t s)
{
p = buf; p[0] = s; p[1] = 0;
}
inline
const size_t& Mat::MStep::operator[](int i) const
const size_t& MatStep::operator[](int i) const
{
return p[i];
}
inline
size_t& Mat::MStep::operator[](int i)
size_t& MatStep::operator[](int i)
{
return p[i];
}
inline Mat::MStep::operator size_t() const
inline MatStep::operator size_t() const
{
CV_DbgAssert( p == buf );
return buf[0];
}
inline Mat::MStep& Mat::MStep::operator = (size_t s)
inline MatStep& MatStep::operator = (size_t s)
{
CV_DbgAssert( p == buf );
buf[0] = s;
@ -1438,43 +1555,6 @@ MatIterator_<_Tp> Mat_<_Tp>::end()
}
/*template<typename T1, typename T2, typename Op> inline
void process( const Mat_<T1>& m1, Mat_<T2>& m2, Op op )
{
int y, x, rows = m1.rows, cols = m1.cols;
CV_DbgAssert( m1.size() == m2.size() );
for( y = 0; y < rows; y++ )
{
const T1* src = m1[y];
T2* dst = m2[y];
for( x = 0; x < cols; x++ )
dst[x] = op(src[x]);
}
}
template<typename T1, typename T2, typename T3, typename Op> inline
void process( const Mat_<T1>& m1, const Mat_<T2>& m2, Mat_<T3>& m3, Op op )
{
int y, x, rows = m1.rows, cols = m1.cols;
CV_DbgAssert( m1.size() == m2.size() );
for( y = 0; y < rows; y++ )
{
const T1* src1 = m1[y];
const T2* src2 = m2[y];
T3* dst = m3[y];
for( x = 0; x < cols; x++ )
dst[x] = op( src1[x], src2[x] );
}
}*/
///////////////////////////// SparseMat /////////////////////////////
inline
@ -2956,6 +3036,320 @@ const Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b)
return a;
}
//////////////////////////////// UMat ////////////////////////////////
inline
UMat::UMat()
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{}
inline
UMat::UMat(int _rows, int _cols, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{
create(_rows, _cols, _type);
}
inline
UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{
create(_rows, _cols, _type);
*this = _s;
}
inline
UMat::UMat(Size _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{
create( _sz.height, _sz.width, _type );
}
inline
UMat::UMat(Size _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{
create(_sz.height, _sz.width, _type);
*this = _s;
}
inline
UMat::UMat(int _dims, const int* _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{
create(_dims, _sz, _type);
}
inline
UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), u(0), offset(0), size(&rows)
{
create(_dims, _sz, _type);
*this = _s;
}
inline
UMat::UMat(const UMat& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
u(m.u), offset(m.offset), size(&rows)
{
if( u )
CV_XADD(&(u->urefcount), 1);
if( m.dims <= 2 )
{
step[0] = m.step[0]; step[1] = m.step[1];
}
else
{
dims = 0;
copySize(m);
}
}
template<typename _Tp> inline
UMat::UMat(const std::vector<_Tp>& vec, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),
cols(1), allocator(0), u(0), offset(0), size(&rows)
{
if(vec.empty())
return;
if( !copyData )
{
// !!!TODO!!!
CV_Error(Error::StsNotImplemented, "");
}
else
Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this);
}
inline
UMat::~UMat()
{
release();
if( step.p != step.buf )
fastFree(step.p);
}
inline
UMat& UMat::operator = (const UMat& m)
{
if( this != &m )
{
if( m.u )
CV_XADD(&(m.u->urefcount), 1);
release();
flags = m.flags;
if( dims <= 2 && m.dims <= 2 )
{
dims = m.dims;
rows = m.rows;
cols = m.cols;
step[0] = m.step[0];
step[1] = m.step[1];
}
else
copySize(m);
allocator = m.allocator;
u = m.u;
offset = m.offset;
}
return *this;
}
inline
UMat UMat::row(int y) const
{
return UMat(*this, Range(y, y + 1), Range::all());
}
inline
UMat UMat::col(int x) const
{
return UMat(*this, Range::all(), Range(x, x + 1));
}
inline
UMat UMat::rowRange(int startrow, int endrow) const
{
return UMat(*this, Range(startrow, endrow), Range::all());
}
inline
UMat UMat::rowRange(const Range& r) const
{
return UMat(*this, r, Range::all());
}
inline
UMat UMat::colRange(int startcol, int endcol) const
{
return UMat(*this, Range::all(), Range(startcol, endcol));
}
inline
UMat UMat::colRange(const Range& r) const
{
return UMat(*this, Range::all(), r);
}
inline
UMat UMat::clone() const
{
UMat m;
copyTo(m);
return m;
}
inline
void UMat::assignTo( UMat& m, int _type ) const
{
if( _type < 0 )
m = *this;
else
convertTo(m, _type);
}
inline
void UMat::create(int _rows, int _cols, int _type)
{
_type &= TYPE_MASK;
if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u )
return;
int sz[] = {_rows, _cols};
create(2, sz, _type);
}
inline
void UMat::create(Size _sz, int _type)
{
create(_sz.height, _sz.width, _type);
}
inline
void UMat::addref()
{
if( u )
CV_XADD(&(u->urefcount), 1);
}
inline void UMat::release()
{
if( u && CV_XADD(&(u->urefcount), -1) == 1 )
deallocate();
size.p[0] = 0;
u = 0;
}
inline
UMat UMat::operator()( Range _rowRange, Range _colRange ) const
{
return UMat(*this, _rowRange, _colRange);
}
inline
UMat UMat::operator()( const Rect& roi ) const
{
return UMat(*this, roi);
}
inline
UMat UMat::operator()(const Range* ranges) const
{
return UMat(*this, ranges);
}
inline
bool UMat::isContinuous() const
{
return (flags & CONTINUOUS_FLAG) != 0;
}
inline
bool UMat::isSubmatrix() const
{
return (flags & SUBMATRIX_FLAG) != 0;
}
inline
size_t UMat::elemSize() const
{
return dims > 0 ? step.p[dims - 1] : 0;
}
inline
size_t UMat::elemSize1() const
{
return CV_ELEM_SIZE1(flags);
}
inline
int UMat::type() const
{
return CV_MAT_TYPE(flags);
}
inline
int UMat::depth() const
{
return CV_MAT_DEPTH(flags);
}
inline
int UMat::channels() const
{
return CV_MAT_CN(flags);
}
inline
size_t UMat::step1(int i) const
{
return step.p[i] / elemSize1();
}
inline
bool UMat::empty() const
{
return u == 0 || total() == 0;
}
inline
size_t UMat::total() const
{
if( dims <= 2 )
return (size_t)rows * cols;
size_t p = 1;
for( int i = 0; i < dims; i++ )
p *= size[i];
return p;
}
inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; }
inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; }
inline bool UMatData::copyOnMap() const { return (flags & COPY_ON_MAP) != 0; }
inline bool UMatData::tempUMat() const { return (flags & TEMP_UMAT) != 0; }
inline bool UMatData::tempCopiedUMat() const { return (flags & TEMP_COPIED_UMAT) == TEMP_COPIED_UMAT; }
inline void UMatData::markHostCopyObsolete(bool flag)
{
if(flag)
flags |= HOST_COPY_OBSOLETE;
else
flags &= ~HOST_COPY_OBSOLETE;
}
inline void UMatData::markDeviceCopyObsolete(bool flag)
{
if(flag)
flags |= DEVICE_COPY_OBSOLETE;
else
flags &= ~DEVICE_COPY_OBSOLETE;
}
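These two flags encode a simple coherency protocol: whichever side was written last marks the other copy obsolete, and the next map/getMat is expected to synchronize before clearing the flag. A hypothetical allocator-side map step illustrating the idea; downloadToHost is a made-up stand-in for a real device-to-host transfer, not something defined in this commit:

#include <opencv2/core.hpp>

// stand-in for an allocator's real device-to-host transfer; a real
// implementation would copy the device buffer into u->data here
static void downloadToHost(cv::UMatData* /*u*/)
{
}

void mapForHostAccess(cv::UMatData* u, int accessFlags)
{
    cv::UMatDataAutoLock guard(u);            // keep the flag updates atomic

    if (u->hostCopyObsolete())
    {
        downloadToHost(u);                    // bring the host copy up to date
        u->markHostCopyObsolete(false);
    }
    if (accessFlags & cv::ACCESS_WRITE)
        u->markDeviceCopyObsolete(true);      // the host is about to modify the data
}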
inline UMatDataAutoLock::UMatDataAutoLock(UMatData* _u) : u(_u) { u->lock(); }
inline UMatDataAutoLock::~UMatDataAutoLock() { u->unlock(); }
} //cv
#endif

View File

@ -271,7 +271,7 @@ void cv::split(InputArray _m, OutputArrayOfArrays _mv)
_mv.release();
return;
}
CV_Assert( !_mv.fixedType() || CV_MAT_TYPE(_mv.flags) == m.depth() );
CV_Assert( !_mv.fixedType() || _mv.type() == m.depth() );
_mv.create(m.channels(), 1, m.depth());
Mat* dst = &_mv.getMatRef(0);
split(m, dst);

View File

@ -1610,7 +1610,7 @@ MatExpr Mat::mul(InputArray m, double scale) const
MatExpr e;
if(m.kind() == _InputArray::EXPR)
{
const MatExpr& me = *(const MatExpr*)m.obj;
const MatExpr& me = *(const MatExpr*)m.getObj();
me.op->multiply(MatExpr(*this), me, e, scale);
}
else

View File

@ -48,6 +48,154 @@
namespace cv {
class StdMatAllocator : public MatAllocator
{
public:
UMatData* allocate(int dims, const int* sizes, int type, size_t* step) const
{
size_t total = CV_ELEM_SIZE(type);
for( int i = dims-1; i >= 0; i-- )
{
if( step )
step[i] = total;
total *= sizes[i];
}
uchar* data = (uchar*)fastMalloc(total);
UMatData* u = new UMatData(this);
u->data = u->origdata = data;
u->size = total;
u->refcount = 1;
return u;
}
bool allocate(UMatData* u, int accessFlags) const
{
if(!u) return false;
if(u->handle != 0)
return true;
return UMat::getStdAllocator()->allocate(u, accessFlags);
}
void deallocate(UMatData* u) const
{
if(u)
fastFree(u->origdata);
delete u;
}
void map(UMatData*, int) const
{
}
void unmap(UMatData* u) const
{
if(u->urefcount == 0)
deallocate(u);
}
void download(UMatData* u, void* dstptr,
int dims, const size_t sz[],
const size_t srcofs[], const size_t srcstep[],
const size_t dststep[]) const
{
if(!u)
return;
int isz[CV_MAX_DIM];
uchar* srcptr = u->data;
for( int i = 0; i < dims; i++ )
{
CV_Assert( sz[i] <= (size_t)INT_MAX );
if( sz[i] == 0 )
return;
if( srcofs )
srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
isz[i] = (int)sz[i];
}
Mat src(dims, isz, CV_8U, srcptr, srcstep);
Mat dst(dims, isz, CV_8U, dstptr, dststep);
const Mat* arrays[] = { &src, &dst };
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs, 2);
size_t j, planesz = it.size;
for( j = 0; j < it.nplanes; j++, ++it )
memcpy(ptrs[1], ptrs[0], planesz);
}
void upload(UMatData* u, const void* srcptr, int dims, const size_t sz[],
const size_t dstofs[], const size_t dststep[],
const size_t srcstep[]) const
{
if(!u)
return;
int isz[CV_MAX_DIM];
uchar* dstptr = u->data;
for( int i = 0; i < dims; i++ )
{
CV_Assert( sz[i] <= (size_t)INT_MAX );
if( sz[i] == 0 )
return;
if( dstofs )
dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
isz[i] = (int)sz[i];
}
Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep);
Mat dst(dims, isz, CV_8U, dstptr, dststep);
const Mat* arrays[] = { &src, &dst };
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs, 2);
size_t j, planesz = it.size;
for( j = 0; j < it.nplanes; j++, ++it )
memcpy(ptrs[1], ptrs[0], planesz);
}
void copy(UMatData* usrc, UMatData* udst, int dims, const size_t sz[],
const size_t srcofs[], const size_t srcstep[],
const size_t dstofs[], const size_t dststep[], bool) const
{
if(!usrc || !udst)
return;
int isz[CV_MAX_DIM];
uchar* srcptr = usrc->data;
uchar* dstptr = udst->data;
for( int i = 0; i < dims; i++ )
{
CV_Assert( sz[i] <= (size_t)INT_MAX );
if( sz[i] == 0 )
return;
if( srcofs )
srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
if( dstofs )
dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
isz[i] = (int)sz[i];
}
Mat src(dims, isz, CV_8U, srcptr, srcstep);
Mat dst(dims, isz, CV_8U, dstptr, dststep);
const Mat* arrays[] = { &src, &dst };
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs, 2);
size_t j, planesz = it.size;
for( j = 0; j < it.nplanes; j++, ++it )
memcpy(ptrs[1], ptrs[0], planesz);
}
};
MatAllocator* Mat::getStdAllocator()
{
static StdMatAllocator allocator;
return &allocator;
}
void swap( Mat& a, Mat& b )
{
std::swap(a.flags, b.flags);
@ -60,6 +208,7 @@ void swap( Mat& a, Mat& b )
std::swap(a.dataend, b.dataend);
std::swap(a.datalimit, b.datalimit);
std::swap(a.allocator, b.allocator);
std::swap(a.u, b.u);
std::swap(a.size.p, b.size.p);
std::swap(a.step.p, b.step.p);
@ -161,6 +310,8 @@ static void finalizeHdr(Mat& m)
int d = m.dims;
if( d > 2 )
m.rows = m.cols = -1;
if(m.u)
m.data = m.datastart = m.u->data;
if( m.data )
{
m.datalimit = m.datastart + m.size[0]*m.step[0];
@ -203,36 +354,25 @@ void Mat::create(int d, const int* _sizes, int _type)
if( total() > 0 )
{
MatAllocator *a = allocator, *a0 = getStdAllocator();
#ifdef HAVE_TGPU
if( !allocator || allocator == tegra::getAllocator() ) allocator = tegra::getAllocator(d, _sizes, _type);
if( !a || a == tegra::getAllocator() )
a = tegra::getAllocator(d, _sizes, _type);
#endif
if( !allocator )
if(!a)
a = a0;
try
{
size_t totalsize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
data = datastart = (uchar*)fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(data + totalsize);
*refcount = 1;
u = a->allocate(dims, size, _type, step.p);
CV_Assert(u != 0);
}
else
catch(...)
{
#ifdef HAVE_TGPU
try
{
allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
}catch(...)
{
allocator = 0;
size_t totalSize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
data = datastart = (uchar*)fastMalloc(totalSize + (int)sizeof(*refcount));
refcount = (int*)(data + totalSize);
*refcount = 1;
}
#else
allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
#endif
if(a != a0)
u = a0->allocate(dims, size, _type, step.p);
CV_Assert(u != 0);
}
CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
}
finalizeHdr(*this);
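Mat::create() now always allocates through a MatAllocator: a Tegra or user-supplied allocator is tried first, the standard allocator is the fallback, and finalizeHdr() wires the resulting UMatData block into the Mat header. A hedged check of that wiring from the caller's side:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat m;
    m.create(480, 640, CV_8UC3);      // allocation is routed through a MatAllocator

    // finalizeHdr() hooks the UMatData block into the Mat header
    CV_Assert(m.u != 0);
    CV_Assert(m.data == m.u->data);

    // a user-written MatAllocator subclass (not shown here) could be assigned
    // to m.allocator before create() to take over allocation for this matrix
    return 0;
}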
@ -250,16 +390,10 @@ void Mat::copySize(const Mat& m)
void Mat::deallocate()
{
if( allocator )
allocator->deallocate(refcount, datastart, data);
else
{
CV_DbgAssert(refcount != 0);
fastFree(datastart);
}
if(u)
(u->currAllocator ? u->currAllocator : allocator ? allocator : getStdAllocator())->unmap(u);
}
Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0),
datalimit(0), allocator(0), size(&rows)
@ -938,20 +1072,10 @@ void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
Input/Output Array
\*************************************************************************************************/
_InputArray::_InputArray() : flags(0), obj(0) {}
_InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
_InputArray::_InputArray(const std::vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
_InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
_InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {}
_InputArray::_InputArray(const cuda::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
_InputArray::_InputArray(const ogl::Buffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const cuda::CudaMem& cuda_mem) : flags(CUDA_MEM), obj((void*)&cuda_mem) {}
_InputArray::~_InputArray() {}
Mat _InputArray::getMat(int i) const
{
int k = kind();
int accessFlags = flags & ACCESS_MASK;
if( k == MAT )
{
@ -961,6 +1085,14 @@ Mat _InputArray::getMat(int i) const
return m->row(i);
}
if( k == UMAT )
{
const UMat* m = (const UMat*)obj;
if( i < 0 )
return m->getMat(accessFlags);
return m->getMat(accessFlags).row(i);
}
if( k == EXPR )
{
CV_Assert( i < 0 );
@ -995,11 +1127,6 @@ Mat _InputArray::getMat(int i) const
return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
if( k == STD_VECTOR_MAT )
{
const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
@ -1008,6 +1135,14 @@ Mat _InputArray::getMat(int i) const
return v[i];
}
if( k == STD_VECTOR_UMAT )
{
const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
CV_Assert( 0 <= i && i < (int)v.size() );
return v[i].getMat(accessFlags);
}
if( k == OPENGL_BUFFER )
{
CV_Assert( i < 0 );
@ -1022,8 +1157,7 @@ Mat _InputArray::getMat(int i) const
return Mat();
}
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
if( k == CUDA_MEM )
{
CV_Assert( i < 0 );
@ -1031,12 +1165,49 @@ Mat _InputArray::getMat(int i) const
return cuda_mem->createMatHeader();
}
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
return Mat();
}
UMat _InputArray::getUMat(int i) const
{
int k = kind();
int accessFlags = flags & ACCESS_MASK;
if( k == UMAT )
{
const UMat* m = (const UMat*)obj;
if( i < 0 )
return *m;
return m->row(i);
}
if( k == STD_VECTOR_UMAT )
{
const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
CV_Assert( 0 <= i && i < (int)v.size() );
return v[i];
}
if( k == MAT )
{
const Mat* m = (const Mat*)obj;
if( i < 0 )
return m->getUMat(accessFlags);
return m->row(i).getUMat(accessFlags);
}
return getMat(i).getUMat(accessFlags);
}
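getMat()/getUMat() are the dispatch points that make the transparent API work: a function written against the proxy classes can pull out whichever representation it prefers, regardless of what the caller passed in. A hedged sketch of such a function; scaleBy is illustrative and not part of OpenCV:

#include <opencv2/core.hpp>

// illustrative only: multiply src by `factor` into dst, accepting either
// Mat or UMat arguments through the proxy classes
static void scaleBy(cv::InputArray src, cv::OutputArray dst, double factor)
{
    dst.create(src.size(), src.type());

    if (src.kind() == cv::_InputArray::UMAT)
    {
        // stay in UMat form so a device-aware backend could keep the data there
        cv::UMat usrc = src.getUMat();
        cv::UMat udst = dst.getUMat();
        usrc.convertTo(udst, -1, factor);
    }
    else
    {
        cv::Mat msrc = src.getMat();
        cv::Mat mdst = dst.getMat();
        msrc.convertTo(mdst, -1, factor);
    }
}

int main()
{
    cv::Mat a(3, 3, CV_32F, cv::Scalar(1)), b;
    cv::UMat ua(3, 3, CV_32F, cv::Scalar(1)), ub;
    scaleBy(a, b, 2.0);       // Mat path
    scaleBy(ua, ub, 2.0);     // UMat path
    return 0;
}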
void _InputArray::getMatVector(std::vector<Mat>& mv) const
{
int k = kind();
int accessFlags = flags & ACCESS_MASK;
if( k == MAT )
{
@ -1105,19 +1276,18 @@ void _InputArray::getMatVector(std::vector<Mat>& mv) const
return;
}
if( k == OCL_MAT )
if( k == STD_VECTOR_UMAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
size_t i, n = v.size();
mv.resize(n);
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
mv.resize(v.size());
std::copy(v.begin(), v.end(), mv.begin());
for( i = 0; i < n; i++ )
mv[i] = v[i].getMat(accessFlags);
return;
}
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
}
cuda::GpuMat _InputArray::getGpuMat() const
@ -1180,6 +1350,12 @@ Size _InputArray::size(int i) const
return ((const MatExpr*)obj)->size();
}
if( k == UMAT )
{
CV_Assert( i < 0 );
return ((const UMat*)obj)->size();
}
if( k == MATX )
{
CV_Assert( i < 0 );
@ -1258,6 +1434,12 @@ size_t _InputArray::total(int i) const
return ((const Mat*)obj)->total();
}
if( k == UMAT )
{
CV_Assert( i < 0 );
return ((const UMat*)obj)->total();
}
if( k == STD_VECTOR_MAT )
{
const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
@ -1278,6 +1460,9 @@ int _InputArray::type(int i) const
if( k == MAT )
return ((const Mat*)obj)->type();
if( k == UMAT )
return ((const UMat*)obj)->type();
if( k == EXPR )
return ((const MatExpr*)obj)->type();
@ -1323,6 +1508,9 @@ bool _InputArray::empty() const
if( k == MAT )
return ((const Mat*)obj)->empty();
if( k == UMAT )
return ((const UMat*)obj)->empty();
if( k == EXPR )
return false;
@ -1367,21 +1555,6 @@ bool _InputArray::empty() const
}
_OutputArray::_OutputArray() {}
_OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
_OutputArray::_OutputArray(std::vector<Mat>& vec) : _InputArray(vec) {}
_OutputArray::_OutputArray(cuda::GpuMat& d_mat) : _InputArray(d_mat) {}
_OutputArray::_OutputArray(ogl::Buffer& buf) : _InputArray(buf) {}
_OutputArray::_OutputArray(cuda::CudaMem& cuda_mem) : _InputArray(cuda_mem) {}
_OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const std::vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
_OutputArray::_OutputArray(const cuda::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const ogl::Buffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const cuda::CudaMem& cuda_mem) : _InputArray(cuda_mem) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::~_OutputArray() {}
bool _OutputArray::fixedSize() const
{
return (flags & FIXED_SIZE) == FIXED_SIZE;
@ -1402,6 +1575,13 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
((Mat*)obj)->create(_sz, mtype);
return;
}
if( k == UMAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((UMat*)obj)->size.operator()() == _sz);
CV_Assert(!fixedType() || ((UMat*)obj)->type() == mtype);
((UMat*)obj)->create(_sz, mtype);
return;
}
if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == _sz);
@ -1437,6 +1617,13 @@ void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransp
((Mat*)obj)->create(rows, cols, mtype);
return;
}
if( k == UMAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((UMat*)obj)->size.operator()() == Size(cols, rows));
CV_Assert(!fixedType() || ((UMat*)obj)->type() == mtype);
((UMat*)obj)->create(rows, cols, mtype);
return;
}
if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == Size(cols, rows));
@ -1462,7 +1649,8 @@ void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransp
create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
}
void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
void _OutputArray::create(int dims, const int* sizes, int mtype, int i,
bool allowTransposed, int fixedDepthMask) const
{
int k = kind();
mtype = CV_MAT_TYPE(mtype);
@ -1501,6 +1689,40 @@ void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool all
return;
}
if( k == UMAT )
{
CV_Assert( i < 0 );
UMat& m = *(UMat*)obj;
if( allowTransposed )
{
if( !m.isContinuous() )
{
CV_Assert(!fixedType() && !fixedSize());
m.release();
}
if( dims == 2 && m.dims == 2 && !m.empty() &&
m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
return;
}
if(fixedType())
{
if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
mtype = m.type();
else
CV_Assert(CV_MAT_TYPE(mtype) == m.type());
}
if(fixedSize())
{
CV_Assert(m.dims == dims);
for(int j = 0; j < dims; ++j)
CV_Assert(m.size[j] == sizes[j]);
}
m.create(dims, sizes, mtype);
return;
}
if( k == MATX )
{
CV_Assert( i < 0 );
@ -1593,19 +1815,13 @@ void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool all
return;
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
if( k == NONE )
{
CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
return;
}
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
if( k == STD_VECTOR_MAT )
{
std::vector<Mat>& v = *(std::vector<Mat>*)obj;
@ -1662,6 +1878,8 @@ void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool all
m.create(dims, sizes, mtype);
return;
}
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
}
void _OutputArray::release() const
@ -1709,16 +1927,12 @@ void _OutputArray::release() const
return;
}
if( k == OCL_MAT )
{
CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
}
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
if( k == STD_VECTOR_MAT )
{
((std::vector<Mat>*)obj)->clear();
return;
}
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
}
void _OutputArray::clear() const
@ -1778,8 +1992,8 @@ cuda::CudaMem& _OutputArray::getCudaMemRef() const
return *(cuda::CudaMem*)obj;
}
static _OutputArray _none;
OutputArray noArray() { return _none; }
static _InputOutputArray _none;
InputOutputArray noArray() { return _none; }
}
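A minimal usage sketch of the transparent dispatch the new UMAT branches are meant to enable, assuming the UMat class and the _InputArray/_OutputArray overloads declared in mat.hpp; allocateLike is just an illustrative helper, not part of the patch. The same function body serves Mat and UMat callers, and kind() decides which create() branch runs.

#include <opencv2/core.hpp>

// Allocates dst with the same geometry as src; depending on the wrapper's
// kind() this hits the MAT branch or the new UMAT branch of _OutputArray::create().
static void allocateLike(cv::InputArray src, cv::OutputArray dst)
{
    dst.create(src.size(), src.type());
}

int main()
{
    cv::Mat  src(4, 4, CV_8UC1, cv::Scalar::all(0));
    cv::Mat  m;
    cv::UMat um;
    allocateLike(src, m);    // m  is allocated as a 4x4 CV_8UC1 Mat
    allocateLike(src, um);   // um is allocated as a 4x4 CV_8UC1 UMat
    CV_Assert(!m.empty() && !um.empty());
    return 0;
}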

View File

@ -50,6 +50,7 @@
#include "opencv2/core/private.hpp"
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/core/ocl.hpp"
#include <assert.h>
#include <ctype.h>
@ -215,6 +216,19 @@ inline bool checkScalar(const Mat& sc, int atype, int sckind, int akind)
void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize );
struct TLSData
{
TLSData();
RNG rng;
int device;
ocl::Queue oclQueue;
int useOpenCL; // 1 - use, 0 - do not use, -1 - auto/not initialized
static TLSData* get();
};
namespace ocl { MatAllocator* getOpenCLAllocator(); }
}
#endif /*_CXCORE_INTERNAL_H_*/
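A minimal sketch of how the per-thread block is intended to be used from other OpenCV translation units that include precomp.hpp (the struct is internal, not exported); the useOpenCLForThisThread helper and its enable-by-default policy are hypothetical, only the fields and the -1 = not-initialized convention come from the header.

// some_module.cpp, after #include "precomp.hpp"
namespace cv {

static bool useOpenCLForThisThread()
{
    TLSData* data = TLSData::get();   // created lazily, one instance per thread
    if( data->useOpenCL < 0 )         // -1: not decided yet for this thread
        data->useOpenCL = 1;          // hypothetical default: enable OpenCL
    return data->useOpenCL > 0;
}

}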

View File

@ -727,85 +727,11 @@ void RNG::fill( InputOutputArray _mat, int disttype,
}
}
#ifdef WIN32
#ifdef HAVE_WINRT
// using C++11 thread attribute for local thread data
__declspec( thread ) RNG* rng = NULL;
void deleteThreadRNGData()
{
if (rng)
delete rng;
}
RNG& theRNG()
cv::RNG& cv::theRNG()
{
if (!rng)
{
rng = new RNG;
}
return *rng;
}
#else
#ifdef WINCE
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif
static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES;
void deleteThreadRNGData()
{
if( tlsRNGKey != TLS_OUT_OF_INDEXES )
delete (RNG*)TlsGetValue( tlsRNGKey );
}
RNG& theRNG()
{
if( tlsRNGKey == TLS_OUT_OF_INDEXES )
{
tlsRNGKey = TlsAlloc();
CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
}
RNG* rng = (RNG*)TlsGetValue( tlsRNGKey );
if( !rng )
{
rng = new RNG;
TlsSetValue( tlsRNGKey, rng );
}
return *rng;
}
#endif //HAVE_WINRT
#else
static pthread_key_t tlsRNGKey = 0;
static pthread_once_t tlsRNGKeyOnce = PTHREAD_ONCE_INIT;
static void deleteRNG(void* data)
{
delete (RNG*)data;
}
static void makeRNGKey()
{
int errcode = pthread_key_create(&tlsRNGKey, deleteRNG);
CV_Assert(errcode == 0);
}
RNG& theRNG()
{
pthread_once(&tlsRNGKeyOnce, makeRNGKey);
RNG* rng = (RNG*)pthread_getspecific(tlsRNGKey);
if( !rng )
{
rng = new RNG;
pthread_setspecific(tlsRNGKey, rng);
}
return *rng;
}
#endif
return TLSData::get()->rng;
}
void cv::randu(InputOutputArray dst, InputArray low, InputArray high)
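With theRNG() reduced to a lookup of the thread-local block, the generator state is strictly per thread; a minimal sketch of the (unchanged) public behaviour:

#include <opencv2/core.hpp>

int main()
{
    cv::theRNG().state = 12345;                    // reseeds only the calling thread
    double x = cv::theRNG().uniform(0.0, 1.0);     // drawn from that thread's generator
    return x < 1.0 ? 0 : 1;
}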

View File

@ -830,4 +830,92 @@ bool Mutex::trylock() { return impl->trylock(); }
}
//////////////////////////////// thread-local storage ////////////////////////////////
namespace cv
{
TLSData::TLSData()
{
device = 0;
useOpenCL = -1;
}
#ifdef WIN32
#ifdef HAVE_WINRT
// WinRT: use compiler-level thread-local storage (__declspec(thread)) instead of the Tls* API
static __declspec( thread ) TLSData* g_tlsdata = NULL;
static void deleteThreadRNGData()
{
if (g_tlsdata)
delete g_tlsdata;
}
TLSData* TLSData::get()
{
if (!g_tlsdata)
{
g_tlsdata = new TLSData;
}
return g_tlsdata;
}
#else
#ifdef WINCE
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif
static DWORD tlsKey = TLS_OUT_OF_INDEXES;
void deleteThreadData()
{
if( tlsKey != TLS_OUT_OF_INDEXES )
delete (TLSData*)TlsGetValue( tlsKey );
}
TLSData* TLSData::get()
{
if( tlsKey == TLS_OUT_OF_INDEXES )
{
tlsKey = TlsAlloc();
CV_Assert(tlsKey != TLS_OUT_OF_INDEXES);
}
TLSData* d = (TLSData*)TlsGetValue( tlsKey );
if( !d )
{
d = new TLSData;
TlsSetValue( tlsKey, d );
}
return d;
}
#endif //HAVE_WINRT
#else
static pthread_key_t tlsKey = 0;
static pthread_once_t tlsKeyOnce = PTHREAD_ONCE_INIT;
static void deleteTLSData(void* data)
{
delete (TLSData*)data;
}
static void makeKey()
{
int errcode = pthread_key_create(&tlsKey, deleteTLSData);
CV_Assert(errcode == 0);
}
TLSData* TLSData::get()
{
pthread_once(&tlsKeyOnce, makeKey);
TLSData* d = (TLSData*)pthread_getspecific(tlsKey);
if( !d )
{
d = new TLSData;
pthread_setspecific(tlsKey, d);
}
return d;
}
#endif
}
/* End of file. */

View File

@ -9,7 +9,7 @@ using std::tr1::get;
typedef tr1::tuple<Size, MatType> Size_Source_t;
typedef TestBaseWithParam<Size_Source_t> Size_Source;
typedef TestBaseWithParam<Size> MatSize;
typedef TestBaseWithParam<Size> TestMatSize;
static const float rangeHight = 256.0f;
static const float rangeLow = 0.0f;
@ -99,7 +99,7 @@ PERF_TEST_P(Size_Source, calcHist3d,
SANITY_CHECK(hist);
}
PERF_TEST_P(MatSize, equalizeHist,
PERF_TEST_P(TestMatSize, equalizeHist,
testing::Values(TYPICAL_MAT_SIZES)
)
{

View File

@ -102,7 +102,8 @@ float
CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
{
Mat prbs0 = cvarrToMat(_probs), prbs = prbs0, sample = cvarrToMat(_sample);
int cls = static_cast<int>(emObj.predict(sample, _probs ? _OutputArray(prbs) : cv::noArray())[1]);
int cls = static_cast<int>(emObj.predict(sample, _probs ? _OutputArray(prbs) :
(OutputArray)cv::noArray())[1]);
if(_probs)
{
if( prbs.data != prbs0.data )
@ -208,13 +209,16 @@ bool CvEM::train( const Mat& _samples, const Mat& _sample_idx,
bool isOk = false;
if( _params.start_step == EM::START_AUTO_STEP )
isOk = emObj.train(_samples,
logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
logLikelihoods, _labels ? _OutputArray(*_labels) :
(OutputArray)cv::noArray(), probs);
else if( _params.start_step == EM::START_E_STEP )
isOk = emObj.trainE(_samples, means, covshdrs, weights,
logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
logLikelihoods, _labels ? _OutputArray(*_labels) :
(OutputArray)cv::noArray(), probs);
else if( _params.start_step == EM::START_M_STEP )
isOk = emObj.trainM(_samples, prbs,
logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
logLikelihoods, _labels ? _OutputArray(*_labels) :
(OutputArray)cv::noArray(), probs);
else
CV_Error(CV_StsBadArg, "Bad start type of EM algorithm");
@ -230,7 +234,9 @@ bool CvEM::train( const Mat& _samples, const Mat& _sample_idx,
float
CvEM::predict( const Mat& _sample, Mat* _probs ) const
{
return static_cast<float>(emObj.predict(_sample, _probs ? _OutputArray(*_probs) : cv::noArray())[1]);
return static_cast<float>(emObj.predict(_sample, _probs ?
_OutputArray(*_probs) :
(OutputArray)cv::noArray())[1]);
}
int CvEM::getNClusters() const
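The explicit (OutputArray) conversions above are needed because noArray() now returns InputOutputArray (see the matrix.cpp change earlier in this diff), so the two arms of each conditional no longer share a type. A minimal sketch of the pattern; takeOptionalOutput is a hypothetical stand-in for any function accepting an OutputArray.

#include <opencv2/core.hpp>

static void takeOptionalOutput(cv::OutputArray out)    // hypothetical callee
{
    if( out.needed() )
        out.create(3, 3, CV_32F);
}

static void caller(cv::Mat* probs)
{
    // Without the cast the arms have types _OutputArray and _InputOutputArray
    // and the conditional expression cannot deduce a common type.
    takeOptionalOutput(probs ? cv::_OutputArray(*probs)
                             : (cv::OutputArray)cv::noArray());
}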

View File

@ -82,7 +82,7 @@ cvExtractSURF( const CvArr* _img, const CvArr* _mask,
surf->set("upright", params.upright != 0);
surf->set("extended", params.extended != 0);
surf->operator()(img, mask, kpt, _descriptors ? _OutputArray(descr) : noArray(),
surf->operator()(img, mask, kpt, _descriptors ? _OutputArray(descr) : (OutputArray)noArray(),
useProvidedKeyPts != 0);
if( _keypoints )

View File

@ -154,30 +154,24 @@ void cv::ocl::oclMat::upload(const Mat &m)
cv::ocl::oclMat::operator cv::_InputArray()
{
_InputArray newInputArray;
newInputArray.flags = cv::_InputArray::OCL_MAT;
newInputArray.obj = reinterpret_cast<void *>(this);
return newInputArray;
return _InputArray(cv::_InputArray::OCL_MAT, this);
}
cv::ocl::oclMat::operator cv::_OutputArray()
{
_OutputArray newOutputArray;
newOutputArray.flags = cv::_InputArray::OCL_MAT;
newOutputArray.obj = reinterpret_cast<void *>(this);
return newOutputArray;
return _OutputArray(cv::_InputArray::OCL_MAT, this);
}
cv::ocl::oclMat& cv::ocl::getOclMatRef(InputArray src)
{
CV_Assert(src.flags & cv::_InputArray::OCL_MAT);
return *reinterpret_cast<oclMat*>(src.obj);
CV_Assert(src.kind() == cv::_InputArray::OCL_MAT);
return *(oclMat*)src.getObj();
}
cv::ocl::oclMat& cv::ocl::getOclMatRef(OutputArray src)
{
CV_Assert(src.flags & cv::_InputArray::OCL_MAT);
return *reinterpret_cast<oclMat*>(src.obj);
CV_Assert(src.kind() == cv::_InputArray::OCL_MAT);
return *(oclMat*)src.getObj();
}
void cv::ocl::oclMat::download(cv::Mat &m) const
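A minimal sketch of the round trip these helpers implement on top of the generic _InputArray(int, void*) constructor and getObj(), assuming an OpenCL device is available at run time: the oclMat is wrapped as an array of kind OCL_MAT and recovered by reference, with no copy or download.

#include <opencv2/core.hpp>
#include <opencv2/ocl.hpp>

int main()
{
    cv::Mat host = cv::Mat::ones(4, 4, CV_8UC1);
    cv::ocl::oclMat d_img(host);                        // uploads to the device

    cv::InputArray arr = d_img;                         // kind() == _InputArray::OCL_MAT
    cv::ocl::oclMat& same = cv::ocl::getOclMatRef(arr); // same device buffer as d_img
    CV_Assert(&same == &d_img);
    return 0;
}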

View File

@ -163,7 +163,9 @@ namespace
void Farneback::impl(const Mat& input0, const Mat& input1, OutputArray dst)
{
calcOpticalFlowFarneback(input0, input1, dst, pyrScale_, numLevels_, winSize_, numIters_, polyN_, polySigma_, flags_);
calcOpticalFlowFarneback(input0, input1, (InputOutputArray)dst, pyrScale_,
numLevels_, winSize_, numIters_,
polyN_, polySigma_, flags_);
}
}
@ -325,7 +327,7 @@ namespace
alg_->set("iterations", iterations_);
alg_->set("useInitialFlow", useInitialFlow_);
alg_->calc(input0, input1, dst);
alg_->calc(input0, input1, (InputOutputArray)dst);
}
void DualTVL1::collectGarbage()

View File

@ -352,7 +352,7 @@ cvCalcOpticalFlowPyrLK( const void* arrA, const void* arrB,
if( error )
err = cv::Mat(count, 1, CV_32F, (void*)error);
cv::calcOpticalFlowPyrLK( A, B, ptA, ptB, st,
error ? cv::_OutputArray(err) : cv::noArray(),
error ? cv::_OutputArray(err) : (cv::_OutputArray)cv::noArray(),
winSize, level, criteria, flags);
}

View File

@ -564,7 +564,7 @@ FarnebackUpdateFlow_GaussianBlur( const Mat& _R0, const Mat& _R1,
}
void cv::calcOpticalFlowFarneback( InputArray _prev0, InputArray _next0,
OutputArray _flow0, double pyr_scale, int levels, int winsize,
InputOutputArray _flow0, double pyr_scale, int levels, int winsize,
int iterations, int poly_n, double poly_sigma, int flags )
{
Mat prev0 = _prev0.getMat(), next0 = _next0.getMat();
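The flow parameter becomes InputOutputArray because, with OPTFLOW_USE_INITIAL_FLOW, the function reads the caller-supplied field before refining it; that is also why the superres and C-API call sites earlier in this diff add casts at their calls. A minimal calling sketch with synthetic frames:

#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>

int main()
{
    cv::Mat prev(240, 320, CV_8UC1), next(240, 320, CV_8UC1);
    cv::randu(prev, cv::Scalar::all(0), cv::Scalar::all(256));
    cv::randu(next, cv::Scalar::all(0), cv::Scalar::all(256));

    cv::Mat flow(prev.size(), CV_32FC2, cv::Scalar::all(0));    // initial guess, read on input
    cv::calcOpticalFlowFarneback(prev, next, flow,
                                 0.5, 3, 15, 3, 5, 1.2,
                                 cv::OPTFLOW_USE_INITIAL_FLOW); // refined in place
    return 0;
}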