rename CudaMem -> HostMem to better reflect its purpose

Vladislav Vinogradov 2014-12-19 13:46:28 +03:00
parent 9210d8e542
commit 53862687d5
20 changed files with 173 additions and 155 deletions
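
For reference, here is a minimal sketch of the renamed class in use (a hypothetical example, assuming OpenCV is built with CUDA support; cv::cuda::HostMem keeps the Mat-like interface that cv::cuda::CudaMem had):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

// Stage a frame in page-locked (pinned) host memory and upload it asynchronously.
void uploadPinned(const cv::Mat& frame, cv::cuda::GpuMat& d_frame, cv::cuda::Stream& stream)
{
    cv::cuda::HostMem pinned(frame.rows, frame.cols, frame.type(),
                             cv::cuda::HostMem::PAGE_LOCKED);
    cv::Mat header = pinned.createMatHeader(); // Mat view over the pinned buffer, no copy
    frame.copyTo(header);                      // stage the data in pinned memory
    d_frame.upload(pinned, stream);            // HostMem is accepted as InputArray
}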


@ -705,7 +705,7 @@ namespace ogl
namespace cuda
{
class CV_EXPORTS GpuMat;
class CV_EXPORTS CudaMem;
class CV_EXPORTS HostMem;
class CV_EXPORTS Stream;
class CV_EXPORTS Event;
}


@ -56,7 +56,9 @@ namespace cv { namespace cuda {
//! @addtogroup cuda_struct
//! @{
//////////////////////////////// GpuMat ///////////////////////////////
//===================================================================================
// GpuMat
//===================================================================================
/** @brief Base storage class for GPU memory with reference counting.
@ -318,7 +320,9 @@ CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr
CV_EXPORTS void setBufferPoolUsage(bool on);
CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
//////////////////////////////// CudaMem ////////////////////////////////
//===================================================================================
// HostMem
//===================================================================================
/** @brief Class with reference counting that wraps special memory type allocation functions from CUDA.
@ -335,43 +339,43 @@ Its interface is also Mat-like but with additional memory type parameters.
@note Allocation size of such memory types is usually limited. For more details, see *CUDA 2.2
Pinned Memory APIs* document or *CUDA C Programming Guide*.
*/
class CV_EXPORTS CudaMem
class CV_EXPORTS HostMem
{
public:
enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };
explicit CudaMem(AllocType alloc_type = PAGE_LOCKED);
explicit HostMem(AllocType alloc_type = PAGE_LOCKED);
CudaMem(const CudaMem& m);
HostMem(const HostMem& m);
CudaMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED);
CudaMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED);
HostMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED);
HostMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED);
//! creates from host memory, copying the data
explicit CudaMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED);
explicit HostMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED);
~CudaMem();
~HostMem();
CudaMem& operator =(const CudaMem& m);
HostMem& operator =(const HostMem& m);
//! swaps with other smart pointer
void swap(CudaMem& b);
void swap(HostMem& b);
//! returns deep copy of the matrix, i.e. the data is copied
CudaMem clone() const;
HostMem clone() const;
//! allocates new matrix data unless the matrix already has specified size and type.
void create(int rows, int cols, int type);
void create(Size size, int type);
//! creates alternative CudaMem header for the same data, with different
//! creates alternative HostMem header for the same data, with different
//! number of channels and/or different number of rows
CudaMem reshape(int cn, int rows = 0) const;
HostMem reshape(int cn, int rows = 0) const;
//! decrements the reference counter and releases the memory if needed.
void release();
//! returns matrix header with disabled reference counting for CudaMem data.
//! returns matrix header with disabled reference counting for HostMem data.
Mat createMatHeader() const;
/** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting
@ -420,7 +424,9 @@ CV_EXPORTS void registerPageLocked(Mat& m);
*/
CV_EXPORTS void unregisterPageLocked(Mat& m);
///////////////////////////////// Stream //////////////////////////////////
//===================================================================================
// Stream
//===================================================================================
/** @brief This class encapsulates a queue of asynchronous calls.
@ -514,7 +520,9 @@ private:
//! @} cuda_struct
//////////////////////////////// Initialization & Info ////////////////////////
//===================================================================================
// Initialization & Info
//===================================================================================
//! @addtogroup cuda_init
//! @{


@ -50,7 +50,9 @@
namespace cv { namespace cuda {
//////////////////////////////// GpuMat ///////////////////////////////
//===================================================================================
// GpuMat
//===================================================================================
inline
GpuMat::GpuMat(Allocator* allocator_)
@ -374,16 +376,18 @@ void swap(GpuMat& a, GpuMat& b)
a.swap(b);
}
//////////////////////////////// CudaMem ////////////////////////////////
//===================================================================================
// HostMem
//===================================================================================
inline
CudaMem::CudaMem(AllocType alloc_type_)
HostMem::HostMem(AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}
inline
CudaMem::CudaMem(const CudaMem& m)
HostMem::HostMem(const HostMem& m)
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
if( refcount )
@ -391,7 +395,7 @@ CudaMem::CudaMem(const CudaMem& m)
}
inline
CudaMem::CudaMem(int rows_, int cols_, int type_, AllocType alloc_type_)
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
if (rows_ > 0 && cols_ > 0)
@ -399,7 +403,7 @@ CudaMem::CudaMem(int rows_, int cols_, int type_, AllocType alloc_type_)
}
inline
CudaMem::CudaMem(Size size_, int type_, AllocType alloc_type_)
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
if (size_.height > 0 && size_.width > 0)
@ -407,24 +411,24 @@ CudaMem::CudaMem(Size size_, int type_, AllocType alloc_type_)
}
inline
CudaMem::CudaMem(InputArray arr, AllocType alloc_type_)
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
arr.getMat().copyTo(*this);
}
inline
CudaMem::~CudaMem()
HostMem::~HostMem()
{
release();
}
inline
CudaMem& CudaMem::operator =(const CudaMem& m)
HostMem& HostMem::operator =(const HostMem& m)
{
if (this != &m)
{
CudaMem temp(m);
HostMem temp(m);
swap(temp);
}
@ -432,7 +436,7 @@ CudaMem& CudaMem::operator =(const CudaMem& m)
}
inline
void CudaMem::swap(CudaMem& b)
void HostMem::swap(HostMem& b)
{
std::swap(flags, b.flags);
std::swap(rows, b.rows);
@ -446,86 +450,88 @@ void CudaMem::swap(CudaMem& b)
}
inline
CudaMem CudaMem::clone() const
HostMem HostMem::clone() const
{
CudaMem m(size(), type(), alloc_type);
HostMem m(size(), type(), alloc_type);
createMatHeader().copyTo(m);
return m;
}
inline
void CudaMem::create(Size size_, int type_)
void HostMem::create(Size size_, int type_)
{
create(size_.height, size_.width, type_);
}
inline
Mat CudaMem::createMatHeader() const
Mat HostMem::createMatHeader() const
{
return Mat(size(), type(), data, step);
}
inline
bool CudaMem::isContinuous() const
bool HostMem::isContinuous() const
{
return (flags & Mat::CONTINUOUS_FLAG) != 0;
}
inline
size_t CudaMem::elemSize() const
size_t HostMem::elemSize() const
{
return CV_ELEM_SIZE(flags);
}
inline
size_t CudaMem::elemSize1() const
size_t HostMem::elemSize1() const
{
return CV_ELEM_SIZE1(flags);
}
inline
int CudaMem::type() const
int HostMem::type() const
{
return CV_MAT_TYPE(flags);
}
inline
int CudaMem::depth() const
int HostMem::depth() const
{
return CV_MAT_DEPTH(flags);
}
inline
int CudaMem::channels() const
int HostMem::channels() const
{
return CV_MAT_CN(flags);
}
inline
size_t CudaMem::step1() const
size_t HostMem::step1() const
{
return step / elemSize1();
}
inline
Size CudaMem::size() const
Size HostMem::size() const
{
return Size(cols, rows);
}
inline
bool CudaMem::empty() const
bool HostMem::empty() const
{
return data == 0;
}
static inline
void swap(CudaMem& a, CudaMem& b)
void swap(HostMem& a, HostMem& b)
{
a.swap(b);
}
//////////////////////////////// Stream ///////////////////////////////
//===================================================================================
// Stream
//===================================================================================
inline
Stream::Stream(const Ptr<Impl>& impl)
@ -533,7 +539,9 @@ Stream::Stream(const Ptr<Impl>& impl)
{
}
//////////////////////////////// Initialization & Info ////////////////////////
//===================================================================================
// Initialization & Info
//===================================================================================
inline
bool TargetArchs::has(int major, int minor)
@ -592,7 +600,9 @@ bool DeviceInfo::supports(FeatureSet feature_set) const
}} // namespace cv { namespace cuda {
//////////////////////////////// Mat ////////////////////////////////
//===================================================================================
// Mat
//===================================================================================
namespace cv {


@ -160,8 +160,8 @@ public:
STD_VECTOR_MAT = 5 << KIND_SHIFT,
EXPR = 6 << KIND_SHIFT,
OPENGL_BUFFER = 7 << KIND_SHIFT,
CUDA_MEM = 8 << KIND_SHIFT,
GPU_MAT = 9 << KIND_SHIFT,
CUDA_HOST_MEM = 8 << KIND_SHIFT,
CUDA_GPU_MAT = 9 << KIND_SHIFT,
UMAT =10 << KIND_SHIFT,
STD_VECTOR_UMAT =11 << KIND_SHIFT
};
@ -180,7 +180,7 @@ public:
_InputArray(const double& val);
_InputArray(const cuda::GpuMat& d_mat);
_InputArray(const ogl::Buffer& buf);
_InputArray(const cuda::CudaMem& cuda_mem);
_InputArray(const cuda::HostMem& cuda_mem);
template<typename _Tp> _InputArray(const cudev::GpuMat_<_Tp>& m);
_InputArray(const UMat& um);
_InputArray(const std::vector<UMat>& umv);
@ -277,7 +277,7 @@ public:
_OutputArray(std::vector<Mat>& vec);
_OutputArray(cuda::GpuMat& d_mat);
_OutputArray(ogl::Buffer& buf);
_OutputArray(cuda::CudaMem& cuda_mem);
_OutputArray(cuda::HostMem& cuda_mem);
template<typename _Tp> _OutputArray(cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _OutputArray(std::vector<_Tp>& vec);
template<typename _Tp> _OutputArray(std::vector<std::vector<_Tp> >& vec);
@ -292,7 +292,7 @@ public:
_OutputArray(const std::vector<Mat>& vec);
_OutputArray(const cuda::GpuMat& d_mat);
_OutputArray(const ogl::Buffer& buf);
_OutputArray(const cuda::CudaMem& cuda_mem);
_OutputArray(const cuda::HostMem& cuda_mem);
template<typename _Tp> _OutputArray(const cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _OutputArray(const std::vector<_Tp>& vec);
template<typename _Tp> _OutputArray(const std::vector<std::vector<_Tp> >& vec);
@ -310,7 +310,7 @@ public:
virtual UMat& getUMatRef(int i=-1) const;
virtual cuda::GpuMat& getGpuMatRef() const;
virtual ogl::Buffer& getOGlBufferRef() const;
virtual cuda::CudaMem& getCudaMemRef() const;
virtual cuda::HostMem& getHostMemRef() const;
virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
@ -333,7 +333,7 @@ public:
_InputOutputArray(std::vector<Mat>& vec);
_InputOutputArray(cuda::GpuMat& d_mat);
_InputOutputArray(ogl::Buffer& buf);
_InputOutputArray(cuda::CudaMem& cuda_mem);
_InputOutputArray(cuda::HostMem& cuda_mem);
template<typename _Tp> _InputOutputArray(cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(std::vector<_Tp>& vec);
template<typename _Tp> _InputOutputArray(std::vector<std::vector<_Tp> >& vec);
@ -348,7 +348,7 @@ public:
_InputOutputArray(const std::vector<Mat>& vec);
_InputOutputArray(const cuda::GpuMat& d_mat);
_InputOutputArray(const ogl::Buffer& buf);
_InputOutputArray(const cuda::CudaMem& cuda_mem);
_InputOutputArray(const cuda::HostMem& cuda_mem);
template<typename _Tp> _InputOutputArray(const cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _InputOutputArray(const std::vector<_Tp>& vec);
template<typename _Tp> _InputOutputArray(const std::vector<std::vector<_Tp> >& vec);
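
The renamed kind constants are what downstream modules switch on; a hedged sketch of the usual dispatch (the helper name is illustrative, not part of this commit):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

// Return a GpuMat view of 'arr', uploading through 'buf' only when the data is not
// already on the device. Mirrors the dispatch used in modules such as superres.
cv::cuda::GpuMat getInputGpuMat(cv::InputArray arr, cv::cuda::GpuMat& buf)
{
    if (arr.kind() == cv::_InputArray::CUDA_GPU_MAT)
        return arr.getGpuMat();   // already device memory, no copy
    if (arr.kind() == cv::_InputArray::CUDA_HOST_MEM)
        return arr.getGpuMat();   // mapped header (requires HostMem::SHARED allocation)
    buf.upload(arr);              // host containers: explicit upload
    return buf;
}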


@ -100,13 +100,13 @@ inline _InputArray::_InputArray(const MatExpr& expr)
{ init(FIXED_TYPE + FIXED_SIZE + EXPR + ACCESS_READ, &expr); }
inline _InputArray::_InputArray(const cuda::GpuMat& d_mat)
{ init(GPU_MAT + ACCESS_READ, &d_mat); }
{ init(CUDA_GPU_MAT + ACCESS_READ, &d_mat); }
inline _InputArray::_InputArray(const ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_READ, &buf); }
inline _InputArray::_InputArray(const cuda::CudaMem& cuda_mem)
{ init(CUDA_MEM + ACCESS_READ, &cuda_mem); }
inline _InputArray::_InputArray(const cuda::HostMem& cuda_mem)
{ init(CUDA_HOST_MEM + ACCESS_READ, &cuda_mem); }
inline _InputArray::~_InputArray() {}
@ -174,13 +174,13 @@ _OutputArray::_OutputArray(const _Tp* vec, int n)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, vec, Size(n, 1)); }
inline _OutputArray::_OutputArray(cuda::GpuMat& d_mat)
{ init(GPU_MAT + ACCESS_WRITE, &d_mat); }
{ init(CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); }
inline _OutputArray::_OutputArray(ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_WRITE, &buf); }
inline _OutputArray::_OutputArray(cuda::CudaMem& cuda_mem)
{ init(CUDA_MEM + ACCESS_WRITE, &cuda_mem); }
inline _OutputArray::_OutputArray(cuda::HostMem& cuda_mem)
{ init(CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); }
inline _OutputArray::_OutputArray(const Mat& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_WRITE, &m); }
@ -195,13 +195,13 @@ inline _OutputArray::_OutputArray(const std::vector<UMat>& vec)
{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_WRITE, &vec); }
inline _OutputArray::_OutputArray(const cuda::GpuMat& d_mat)
{ init(FIXED_TYPE + FIXED_SIZE + GPU_MAT + ACCESS_WRITE, &d_mat); }
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); }
inline _OutputArray::_OutputArray(const ogl::Buffer& buf)
{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_WRITE, &buf); }
inline _OutputArray::_OutputArray(const cuda::CudaMem& cuda_mem)
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_MEM + ACCESS_WRITE, &cuda_mem); }
inline _OutputArray::_OutputArray(const cuda::HostMem& cuda_mem)
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); }
///////////////////////////////////////////////////////////////////////////////////////////
@ -261,13 +261,13 @@ _InputOutputArray::_InputOutputArray(const _Tp* vec, int n)
{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, vec, Size(n, 1)); }
inline _InputOutputArray::_InputOutputArray(cuda::GpuMat& d_mat)
{ init(GPU_MAT + ACCESS_RW, &d_mat); }
{ init(CUDA_GPU_MAT + ACCESS_RW, &d_mat); }
inline _InputOutputArray::_InputOutputArray(ogl::Buffer& buf)
{ init(OPENGL_BUFFER + ACCESS_RW, &buf); }
inline _InputOutputArray::_InputOutputArray(cuda::CudaMem& cuda_mem)
{ init(CUDA_MEM + ACCESS_RW, &cuda_mem); }
inline _InputOutputArray::_InputOutputArray(cuda::HostMem& cuda_mem)
{ init(CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); }
inline _InputOutputArray::_InputOutputArray(const Mat& m)
{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_RW, &m); }
@ -282,13 +282,13 @@ inline _InputOutputArray::_InputOutputArray(const std::vector<UMat>& vec)
{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_RW, &vec); }
inline _InputOutputArray::_InputOutputArray(const cuda::GpuMat& d_mat)
{ init(FIXED_TYPE + FIXED_SIZE + GPU_MAT + ACCESS_RW, &d_mat); }
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_RW, &d_mat); }
inline _InputOutputArray::_InputOutputArray(const ogl::Buffer& buf)
{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_RW, &buf); }
inline _InputOutputArray::_InputOutputArray(const cuda::CudaMem& cuda_mem)
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_MEM + ACCESS_RW, &cuda_mem); }
inline _InputOutputArray::_InputOutputArray(const cuda::HostMem& cuda_mem)
{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); }
//////////////////////////////////////////// Mat //////////////////////////////////////////


@ -275,12 +275,12 @@ void cv::cuda::createContinuous(int rows, int cols, int type, OutputArray arr)
::createContinuousImpl(rows, cols, type, arr.getMatRef());
break;
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
::createContinuousImpl(rows, cols, type, arr.getGpuMatRef());
break;
case _InputArray::CUDA_MEM:
::createContinuousImpl(rows, cols, type, arr.getCudaMemRef());
case _InputArray::CUDA_HOST_MEM:
::createContinuousImpl(rows, cols, type, arr.getHostMemRef());
break;
default:
@ -329,12 +329,12 @@ void cv::cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
::ensureSizeIsEnoughImpl(rows, cols, type, arr.getMatRef());
break;
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
::ensureSizeIsEnoughImpl(rows, cols, type, arr.getGpuMatRef());
break;
case _InputArray::CUDA_MEM:
::ensureSizeIsEnoughImpl(rows, cols, type, arr.getCudaMemRef());
case _InputArray::CUDA_HOST_MEM:
::ensureSizeIsEnoughImpl(rows, cols, type, arr.getHostMemRef());
break;
default:
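
These helpers accept Mat, GpuMat, or HostMem through the same OutputArray parameter; a small sketch of why the CUDA_HOST_MEM branch matters (buffer names are illustrative):

#include <opencv2/core/cuda.hpp>

void allocateBuffers()
{
    cv::cuda::GpuMat d_buf;
    cv::cuda::HostMem h_buf(cv::cuda::HostMem::PAGE_LOCKED);

    // Same call, different backing storage: the kind() switch shown above routes
    // each call to the GpuMat or HostMem implementation respectively.
    cv::cuda::createContinuous(480, 640, CV_8UC3, d_buf);
    cv::cuda::createContinuous(480, 640, CV_8UC3, h_buf);
}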


@ -59,7 +59,7 @@ namespace
}
#endif
void cv::cuda::CudaMem::create(int rows_, int cols_, int type_)
void cv::cuda::HostMem::create(int rows_, int cols_, int type_)
{
#ifndef HAVE_CUDA
(void) rows_;
@ -123,9 +123,9 @@ void cv::cuda::CudaMem::create(int rows_, int cols_, int type_)
#endif
}
CudaMem cv::cuda::CudaMem::reshape(int new_cn, int new_rows) const
HostMem cv::cuda::HostMem::reshape(int new_cn, int new_rows) const
{
CudaMem hdr = *this;
HostMem hdr = *this;
int cn = channels();
if (new_cn == 0)
@ -166,7 +166,7 @@ CudaMem cv::cuda::CudaMem::reshape(int new_cn, int new_rows) const
return hdr;
}
void cv::cuda::CudaMem::release()
void cv::cuda::HostMem::release()
{
#ifdef HAVE_CUDA
if (refcount && CV_XADD(refcount, -1) == 1)
@ -181,7 +181,7 @@ void cv::cuda::CudaMem::release()
#endif
}
GpuMat cv::cuda::CudaMem::createGpuMatHeader() const
GpuMat cv::cuda::HostMem::createGpuMatHeader() const
{
#ifndef HAVE_CUDA
throw_no_cuda();


@ -1187,18 +1187,18 @@ Mat _InputArray::getMat(int i) const
return Mat();
}
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object");
return Mat();
}
if( k == CUDA_MEM )
if( k == CUDA_HOST_MEM )
{
CV_Assert( i < 0 );
const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
return cuda_mem->createMatHeader();
}
@ -1391,15 +1391,15 @@ cuda::GpuMat _InputArray::getGpuMat() const
{
int k = kind();
if (k == GPU_MAT)
if (k == CUDA_GPU_MAT)
{
const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
return *d_mat;
}
if (k == CUDA_MEM)
if (k == CUDA_HOST_MEM)
{
const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
return cuda_mem->createGpuMatHeader();
}
@ -1412,7 +1412,7 @@ cuda::GpuMat _InputArray::getGpuMat() const
if (k == NONE)
return cuda::GpuMat();
CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::CudaMem");
CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::HostMem");
return cuda::GpuMat();
}
@ -1520,18 +1520,18 @@ Size _InputArray::size(int i) const
return buf->size();
}
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
return d_mat->size();
}
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
CV_Assert( k == CUDA_HOST_MEM );
//if( k == CUDA_HOST_MEM )
{
CV_Assert( i < 0 );
const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
return cuda_mem->size();
}
}
@ -1700,14 +1700,14 @@ int _InputArray::dims(int i) const
return 2;
}
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
return 2;
}
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
CV_Assert( k == CUDA_HOST_MEM );
//if( k == CUDA_HOST_MEM )
{
CV_Assert( i < 0 );
return 2;
@ -1799,12 +1799,12 @@ int _InputArray::type(int i) const
if( k == OPENGL_BUFFER )
return ((const ogl::Buffer*)obj)->type();
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
return ((const cuda::GpuMat*)obj)->type();
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
return ((const cuda::CudaMem*)obj)->type();
CV_Assert( k == CUDA_HOST_MEM );
//if( k == CUDA_HOST_MEM )
return ((const cuda::HostMem*)obj)->type();
}
int _InputArray::depth(int i) const
@ -1863,12 +1863,12 @@ bool _InputArray::empty() const
if( k == OPENGL_BUFFER )
return ((const ogl::Buffer*)obj)->empty();
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
return ((const cuda::GpuMat*)obj)->empty();
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
return ((const cuda::CudaMem*)obj)->empty();
CV_Assert( k == CUDA_HOST_MEM );
//if( k == CUDA_HOST_MEM )
return ((const cuda::HostMem*)obj)->empty();
}
bool _InputArray::isContinuous(int i) const
@ -1970,7 +1970,7 @@ size_t _InputArray::offset(int i) const
return vv[i].offset;
}
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
const cuda::GpuMat * const m = ((const cuda::GpuMat*)obj);
@ -2016,7 +2016,7 @@ size_t _InputArray::step(int i) const
return vv[i].step;
}
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
{
CV_Assert( i < 0 );
return ((const cuda::GpuMat*)obj)->step;
@ -2095,7 +2095,7 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
((UMat*)obj)->create(_sz, mtype);
return;
}
if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
@ -2109,11 +2109,11 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
((ogl::Buffer*)obj)->create(_sz, mtype);
return;
}
if( k == CUDA_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::CudaMem*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((cuda::CudaMem*)obj)->type() == mtype);
((cuda::CudaMem*)obj)->create(_sz, mtype);
CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
((cuda::HostMem*)obj)->create(_sz, mtype);
return;
}
int sizes[] = {_sz.height, _sz.width};
@ -2137,7 +2137,7 @@ void _OutputArray::create(int _rows, int _cols, int mtype, int i, bool allowTran
((UMat*)obj)->create(_rows, _cols, mtype);
return;
}
if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == Size(_cols, _rows));
CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
@ -2151,11 +2151,11 @@ void _OutputArray::create(int _rows, int _cols, int mtype, int i, bool allowTran
((ogl::Buffer*)obj)->create(_rows, _cols, mtype);
return;
}
if( k == CUDA_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((cuda::CudaMem*)obj)->size() == Size(_cols, _rows));
CV_Assert(!fixedType() || ((cuda::CudaMem*)obj)->type() == mtype);
((cuda::CudaMem*)obj)->create(_rows, _cols, mtype);
CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == Size(_cols, _rows));
CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
((cuda::HostMem*)obj)->create(_rows, _cols, mtype);
return;
}
int sizes[] = {_rows, _cols};
@ -2479,15 +2479,15 @@ void _OutputArray::release() const
return;
}
if( k == GPU_MAT )
if( k == CUDA_GPU_MAT )
{
((cuda::GpuMat*)obj)->release();
return;
}
if( k == CUDA_MEM )
if( k == CUDA_HOST_MEM )
{
((cuda::CudaMem*)obj)->release();
((cuda::HostMem*)obj)->release();
return;
}
@ -2583,7 +2583,7 @@ UMat& _OutputArray::getUMatRef(int i) const
cuda::GpuMat& _OutputArray::getGpuMatRef() const
{
int k = kind();
CV_Assert( k == GPU_MAT );
CV_Assert( k == CUDA_GPU_MAT );
return *(cuda::GpuMat*)obj;
}
@ -2594,11 +2594,11 @@ ogl::Buffer& _OutputArray::getOGlBufferRef() const
return *(ogl::Buffer*)obj;
}
cuda::CudaMem& _OutputArray::getCudaMemRef() const
cuda::HostMem& _OutputArray::getHostMemRef() const
{
int k = kind();
CV_Assert( k == CUDA_MEM );
return *(cuda::CudaMem*)obj;
CV_Assert( k == CUDA_HOST_MEM );
return *(cuda::HostMem*)obj;
}
void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
@ -2614,10 +2614,10 @@ void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
}
else if( k == UMAT )
((UMat*)obj)->setTo(arr, mask);
else if( k == GPU_MAT )
else if( k == CUDA_GPU_MAT )
{
Mat value = arr.getMat();
CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::GPU_MAT) );
CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::CUDA_GPU_MAT) );
((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>(value.ptr<double>())), mask);
}
else


@ -509,7 +509,7 @@ cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_
switch (kind)
{
case _InputArray::OPENGL_BUFFER:
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
copyFrom(arr, target, autoRelease);
break;
@ -594,7 +594,7 @@ void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
break;
}
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -657,7 +657,7 @@ void cv::ogl::Buffer::copyTo(OutputArray arr) const
break;
}
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -1018,7 +1018,7 @@ cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols
break;
}
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -1132,7 +1132,7 @@ void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
break;
}
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -1184,7 +1184,7 @@ void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) c
break;
}
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
{
#ifndef HAVE_CUDA
throw_no_cuda();


@ -52,10 +52,10 @@ using namespace cvtest;
struct Async : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::cuda::CudaMem src;
cv::cuda::HostMem src;
cv::cuda::GpuMat d_src;
cv::cuda::CudaMem dst;
cv::cuda::HostMem dst;
cv::cuda::GpuMat d_dst;
virtual void SetUp()
@ -63,7 +63,7 @@ struct Async : testing::TestWithParam<cv::cuda::DeviceInfo>
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
src = cv::cuda::CudaMem(cv::cuda::CudaMem::PAGE_LOCKED);
src = cv::cuda::HostMem(cv::cuda::HostMem::PAGE_LOCKED);
cv::Mat m = randomMat(cv::Size(128, 128), CV_8UC1);
m.copyTo(src);
@ -76,8 +76,8 @@ void checkMemSet(int status, void* userData)
Async* test = reinterpret_cast<Async*>(userData);
cv::cuda::CudaMem src = test->src;
cv::cuda::CudaMem dst = test->dst;
cv::cuda::HostMem src = test->src;
cv::cuda::HostMem dst = test->dst;
cv::Mat dst_gold = cv::Mat::zeros(src.size(), src.type());
@ -105,8 +105,8 @@ void checkConvert(int status, void* userData)
Async* test = reinterpret_cast<Async*>(userData);
cv::cuda::CudaMem src = test->src;
cv::cuda::CudaMem dst = test->dst;
cv::cuda::HostMem src = test->src;
cv::cuda::HostMem dst = test->dst;
cv::Mat dst_gold;
src.createMatHeader().convertTo(dst_gold, CV_32S);
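
The Async fixture above follows the usual pinned-memory streaming pattern; a rough, self-contained sketch of the flow it exercises (assuming the OpenCV 3.x stream API; the callback here is a stand-in, not the test's own):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

static void onStreamDone(int /*status*/, void* /*userData*/)
{
    // Runs on a driver thread once all work queued on the stream has finished.
}

void asyncConvert(const cv::Mat& input)
{
    cv::cuda::HostMem src(cv::cuda::HostMem::PAGE_LOCKED);
    cv::cuda::HostMem dst(cv::cuda::HostMem::PAGE_LOCKED);
    cv::cuda::GpuMat d_src, d_dst;
    cv::cuda::Stream stream;

    input.copyTo(src);                        // allocates the pinned source buffer
    d_src.upload(src, stream);                // async host-to-device copy
    d_src.convertTo(d_dst, CV_32S, stream);   // async conversion on the same stream
    d_dst.download(dst, stream);              // async device-to-host copy into pinned memory
    stream.enqueueHostCallback(onStreamDone, 0);
    stream.waitForCompletion();               // or keep working and rely on the callback
}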


@ -74,7 +74,7 @@ namespace
LookUpTableImpl::LookUpTableImpl(InputArray _lut)
{
if (_lut.kind() == _InputArray::GPU_MAT)
if (_lut.kind() == _InputArray::CUDA_GPU_MAT)
{
d_lut = _lut.getGpuMat();
}


@ -467,14 +467,14 @@ void cv::cuda::evenLevels(OutputArray _levels, int nLevels, int lowerLevel, int
_levels.create(1, nLevels, CV_32SC1);
Mat host_levels;
if (kind == _InputArray::GPU_MAT)
if (kind == _InputArray::CUDA_GPU_MAT)
host_levels.create(1, nLevels, CV_32SC1);
else
host_levels = _levels.getMat();
nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
if (kind == _InputArray::GPU_MAT)
if (kind == _InputArray::CUDA_GPU_MAT)
_levels.getGpuMatRef().upload(host_levels);
}


@ -341,7 +341,7 @@ namespace cv {
template<typename _Tp>
__host__ _InputArray::_InputArray(const cudev::GpuMat_<_Tp>& m)
: flags(FIXED_TYPE + GPU_MAT + DataType<_Tp>::type), obj((void*)&m)
: flags(FIXED_TYPE + CUDA_GPU_MAT + DataType<_Tp>::type), obj((void*)&m)
{}
template<typename _Tp>


@ -297,7 +297,7 @@ void cv::imshow( const String& winname, InputArray _img )
cv::ogl::Texture2D& tex = ownWndTexs[winname];
if (_img.kind() == _InputArray::GPU_MAT)
if (_img.kind() == _InputArray::CUDA_GPU_MAT)
{
cv::ogl::Buffer& buf = ownWndBufs[winname];
buf.copyFrom(_img);


@ -514,7 +514,7 @@ namespace
++outPos_;
const GpuMat& curOutput = at(outPos_, outputs_);
if (_output.kind() == _InputArray::GPU_MAT)
if (_output.kind() == _InputArray::CUDA_GPU_MAT)
curOutput.convertTo(_output.getGpuMatRef(), CV_8U);
else
{


@ -116,7 +116,7 @@ namespace
{
if (_frame.kind() == _InputArray::MAT)
vc_ >> _frame.getMatRef();
else if(_frame.kind() == _InputArray::GPU_MAT)
else if(_frame.kind() == _InputArray::CUDA_GPU_MAT)
{
vc_ >> frame_;
arrCopy(frame_, _frame);
@ -226,7 +226,7 @@ namespace
void VideoFrameSource_CUDA::nextFrame(OutputArray _frame)
{
if (_frame.kind() == _InputArray::GPU_MAT)
if (_frame.kind() == _InputArray::CUDA_GPU_MAT)
{
bool res = reader_->nextFrame(_frame.getGpuMatRef());
if (!res)


@ -49,7 +49,7 @@ Mat cv::superres::arrGetMat(InputArray arr, Mat& buf)
{
switch (arr.kind())
{
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
@ -66,7 +66,7 @@ UMat cv::superres::arrGetUMat(InputArray arr, UMat& buf)
{
switch (arr.kind())
{
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
@ -83,7 +83,7 @@ GpuMat cv::superres::arrGetGpuMat(InputArray arr, GpuMat& buf)
{
switch (arr.kind())
{
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
return arr.getGpuMat();
case _InputArray::OPENGL_BUFFER:
@ -184,7 +184,7 @@ namespace
switch (src.kind())
{
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
#ifdef HAVE_OPENCV_CUDAIMGPROC
cuda::cvtColor(src.getGpuMat(), dst.getGpuMatRef(), code, cn);
#else
@ -218,7 +218,7 @@ namespace
switch (src.kind())
{
case _InputArray::GPU_MAT:
case _InputArray::CUDA_GPU_MAT:
src.getGpuMat().convertTo(dst.getGpuMatRef(), depth, scale);
break;


@ -458,7 +458,7 @@ namespace
GpuMat input0 = convertToType(frame0, work_type_, buf_[2], buf_[3]);
GpuMat input1 = convertToType(frame1, work_type_, buf_[4], buf_[5]);
if (_flow2.needed() && _flow1.kind() == _InputArray::GPU_MAT && _flow2.kind() == _InputArray::GPU_MAT)
if (_flow2.needed() && _flow1.kind() == _InputArray::CUDA_GPU_MAT && _flow2.kind() == _InputArray::CUDA_GPU_MAT)
{
impl(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
return;


@ -278,7 +278,7 @@ namespace cvtest
Mat getMat(InputArray arr)
{
if (arr.kind() == _InputArray::GPU_MAT)
if (arr.kind() == _InputArray::CUDA_GPU_MAT)
{
Mat m;
arr.getGpuMat().download(m);


@ -278,7 +278,7 @@ public:
StereoMultiGpuStream();
~StereoMultiGpuStream();
void compute(const CudaMem& leftFrame, const CudaMem& rightFrame, CudaMem& disparity);
void compute(const HostMem& leftFrame, const HostMem& rightFrame, HostMem& disparity);
private:
GpuMat d_leftFrames[2];
@ -316,7 +316,7 @@ StereoMultiGpuStream::~StereoMultiGpuStream()
streams[1].release();
}
void StereoMultiGpuStream::compute(const CudaMem& leftFrame, const CudaMem& rightFrame, CudaMem& disparity)
void StereoMultiGpuStream::compute(const HostMem& leftFrame, const HostMem& rightFrame, HostMem& disparity)
{
disparity.create(leftFrame.size(), CV_8UC1);
@ -403,7 +403,7 @@ int main(int argc, char** argv)
cout << endl;
Mat leftFrame, rightFrame;
CudaMem leftGrayFrame, rightGrayFrame;
HostMem leftGrayFrame, rightGrayFrame;
StereoSingleGpu gpu0Alg(0);
StereoSingleGpu gpu1Alg(1);
@ -413,7 +413,7 @@ int main(int argc, char** argv)
Mat disparityGpu0;
Mat disparityGpu1;
Mat disparityMultiThread;
CudaMem disparityMultiStream;
HostMem disparityMultiStream;
Mat disparityGpu0Show;
Mat disparityGpu1Show;