fixed bugs in page locked memory allocation
avoid extra gpu memory allocation in BP and CSBP
@@ -68,7 +68,7 @@ namespace cv

     //////////////////////////////// GpuMat ////////////////////////////////
     class Stream;
-    class MatPL;
+    class CudaMem;

     //! Smart pointer for GPU memory with reference counting. Its interface is mostly similar with cv::Mat.
     class CV_EXPORTS GpuMat
@@ -111,12 +111,16 @@ namespace cv

         //! pefroms blocking upload data to GpuMat. .
         void upload(const cv::Mat& m);
-        void upload(const MatPL& m, Stream& stream);

-        //! Downloads data from device to host memory. Blocking calls.
+        //! upload async
+        void upload(const CudaMem& m, Stream& stream);
+
+        //! downloads data from device to host memory. Blocking calls.
         operator Mat() const;
         void download(cv::Mat& m) const;
-        void download(MatPL& m, Stream& stream) const;
+
+        //! download async
+        void download(CudaMem& m, Stream& stream) const;

         //! returns a new GpuMatrix header for the specified row
         GpuMat row(int y) const;
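The hunk above splits GpuMat transfers into blocking overloads taking cv::Mat and asynchronous overloads taking the renamed CudaMem plus a Stream. A minimal sketch of both paths, not part of the commit; the header path and image names are assumptions for illustration:

// Illustrative sketch only; assumes a CUDA-capable build of the gpu module.
#include <opencv2/gpu/gpu.hpp>

void upload_paths(const cv::Mat& host_img)
{
    // Blocking upload from ordinary (pageable) host memory.
    cv::gpu::GpuMat d_img;
    d_img.upload(host_img);

    // Asynchronous upload: the source must be page-locked (CudaMem), and the
    // call only enqueues the copy on the given stream.
    cv::gpu::CudaMem pinned(host_img, cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);
    cv::gpu::GpuMat d_async(pinned.size(), pinned.type());
    cv::gpu::Stream stream;
    d_async.upload(pinned, stream);  // returns immediately
    stream.waitForCompletion();      // synchronize before using d_async
}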
@@ -223,52 +227,50 @@ namespace cv
         uchar* dataend;
     };

-    //////////////////////////////// MatPL ////////////////////////////////
-    // MatPL is limited cv::Mat with page locked memory allocation.
+    //////////////////////////////// CudaMem ////////////////////////////////
+    // CudaMem is limited cv::Mat with page locked memory allocation.
     // Page locked memory is only needed for async and faster coping to GPU.
     // It is convertable to cv::Mat header without reference counting
     // so you can use it with other opencv functions.

-    class CV_EXPORTS MatPL
+    class CV_EXPORTS CudaMem
     {
     public:
+        enum { ALLOC_PAGE_LOCKED = 1, ALLOC_ZEROCOPY = 2, ALLOC_WRITE_COMBINED = 4 };

-        //Supported. Now behaviour is like ALLOC_DEFAULT.
-        enum { ALLOC_PAGE_LOCKED = 0, ALLOC_ZEROCOPY = 1, ALLOC_WRITE_COMBINED = 4 };
-
-        MatPL();
-        MatPL(const MatPL& m);
-
-        MatPL(int _rows, int _cols, int _type, int type_alloc = ALLOC_PAGE_LOCKED);
-        MatPL(Size _size, int _type, int type_alloc = ALLOC_PAGE_LOCKED);
+        CudaMem();
+        CudaMem(const CudaMem& m);
+
+        CudaMem(int _rows, int _cols, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);
+        CudaMem(Size _size, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);

         //! creates from cv::Mat with coping data
-        explicit MatPL(const Mat& m, int type_alloc = ALLOC_PAGE_LOCKED);
+        explicit CudaMem(const Mat& m, int _alloc_type = ALLOC_PAGE_LOCKED);

-        ~MatPL();
+        ~CudaMem();

-        MatPL& operator = (const MatPL& m);
+        CudaMem& operator = (const CudaMem& m);

         //! returns deep copy of the matrix, i.e. the data is copied
-        MatPL clone() const;
+        CudaMem clone() const;

         //! allocates new matrix data unless the matrix already has specified size and type.
-        void create(int _rows, int _cols, int _type, int type_alloc = ALLOC_PAGE_LOCKED);
-        void create(Size _size, int _type, int type_alloc = ALLOC_PAGE_LOCKED);
+        void create(int _rows, int _cols, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);
+        void create(Size _size, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);

         //! decrements reference counter and released memory if needed.
         void release();

-        //! returns matrix header with disabled reference counting for MatPL data.
+        //! returns matrix header with disabled reference counting for CudaMem data.
         Mat createMatHeader() const;
         operator Mat() const;

         operator GpuMat() const;

+        //returns if host memory can be mapperd to gpu address space;
         static bool can_device_map_to_host();

         // Please see cv::Mat for descriptions
         bool isContinuous() const;
         size_t elemSize() const;
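As the comments above say, CudaMem is a limited cv::Mat backed by page-locked memory that converts to a cv::Mat header without reference counting. A hedged sketch of that interaction, using only members declared above (buffer size is illustrative):

// Illustrative sketch only; not part of the commit.
#include <opencv2/gpu/gpu.hpp>

void fill_page_locked()
{
    cv::gpu::CudaMem page_locked(480, 640, CV_8UC1, cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);

    // createMatHeader()/operator Mat() expose the same page-locked buffer as a
    // cv::Mat (no copy, no reference counting), so ordinary OpenCV code can
    // fill it before it is used as the source of an asynchronous upload.
    cv::Mat header = page_locked.createMatHeader();
    header.setTo(cv::Scalar::all(0));
}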
@@ -314,13 +316,13 @@ namespace cv
         void waitForCompletion();

         //! downloads asynchronously.
-        // Warning! cv::Mat must point to page locked memory (i.e. to MatPL data or to its subMat)
-        void enqueueDownload(const GpuMat& src, MatPL& dst);
+        // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its subMat)
+        void enqueueDownload(const GpuMat& src, CudaMem& dst);
         void enqueueDownload(const GpuMat& src, Mat& dst);

         //! uploads asynchronously.
-        // Warning! cv::Mat must point to page locked memory (i.e. to MatPL data or to its ROI)
-        void enqueueUpload(const MatPL& src, GpuMat& dst);
+        // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its ROI)
+        void enqueueUpload(const CudaMem& src, GpuMat& dst);
         void enqueueUpload(const Mat& src, GpuMat& dst);

         void enqueueCopy(const GpuMat& src, GpuMat& dst);
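A hedged sketch of how the Stream members above can be chained so transfers overlap with host work; the buffer sizes and the device-to-device copy standing in for real GPU work are illustrative:

// Illustrative sketch only; not part of the commit.
#include <opencv2/gpu/gpu.hpp>

void async_pipeline()
{
    cv::gpu::CudaMem h_src(1080, 1920, CV_8UC1);   // page-locked staging buffers
    cv::gpu::CudaMem h_dst(1080, 1920, CV_8UC1);
    cv::gpu::GpuMat  d_src(1080, 1920, CV_8UC1);
    cv::gpu::GpuMat  d_dst(1080, 1920, CV_8UC1);

    cv::gpu::Stream stream;
    stream.enqueueUpload(h_src, d_src);    // async H->D (page-locked source)
    stream.enqueueCopy(d_src, d_dst);      // async D->D, stands in for GPU work
    stream.enqueueDownload(d_dst, h_dst);  // async D->H into page-locked memory

    // ...independent CPU work can run here...

    stream.waitForCompletion();            // h_dst is valid only after this
}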
@@ -339,43 +339,43 @@ static inline void swap( GpuMat& a, GpuMat& b ) { a.swap(b); }


 ///////////////////////////////////////////////////////////////////////
-//////////////////////////////// MatPL ////////////////////////////////
+//////////////////////////////// CudaMem ////////////////////////////////
 ///////////////////////////////////////////////////////////////////////

-inline MatPL::MatPL() : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) {}
-inline MatPL::MatPL(int _rows, int _cols, int _type, int type_alloc) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+inline CudaMem::CudaMem() : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0) {}
+inline CudaMem::CudaMem(int _rows, int _cols, int _type, int _alloc_type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0)
 {
     if( _rows > 0 && _cols > 0 )
-        create( _rows, _cols, _type , type_alloc);
+        create( _rows, _cols, _type, _alloc_type);
 }

-inline MatPL::MatPL(Size _size, int _type, int type_alloc) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+inline CudaMem::CudaMem(Size _size, int _type, int _alloc_type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0)
 {
     if( _size.height > 0 && _size.width > 0 )
-        create( _size.height, _size.width, _type, type_alloc );
+        create( _size.height, _size.width, _type, _alloc_type);
 }

-inline MatPL::MatPL(const MatPL& m) : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(0), dataend(0)
+inline CudaMem::CudaMem(const CudaMem& m) : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
 {
     if( refcount )
         CV_XADD(refcount, 1);
 }

-inline MatPL::MatPL(const Mat& m, int type_alloc) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+inline CudaMem::CudaMem(const Mat& m, int _alloc_type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0)
 {
     if( m.rows > 0 && m.cols > 0 )
-        create( m.size(), m.type() , type_alloc);
+        create( m.size(), m.type(), _alloc_type);

     Mat tmp = createMatHeader();
     m.copyTo(tmp);
 }

-inline MatPL::~MatPL()
+inline CudaMem::~CudaMem()
 {
     release();
 }

-inline MatPL& MatPL::operator = (const MatPL& m)
+inline CudaMem& CudaMem::operator = (const CudaMem& m)
 {
     if( this != &m )
     {
@@ -393,31 +393,31 @@ inline MatPL& MatPL::operator = (const MatPL& m)
     return *this;
 }

-inline MatPL MatPL::clone() const
+inline CudaMem CudaMem::clone() const
 {
-    MatPL m(size(), type());
+    CudaMem m(size(), type(), alloc_type);
     Mat to = m;
     Mat from = *this;
     from.copyTo(to);
     return m;
 }

-inline void MatPL::create(Size _size, int _type, int type_alloc) { create(_size.height, _size.width, _type, type_alloc); }
-//CCP void MatPL::create(int _rows, int _cols, int _type);
-//CPP void MatPL::release();
+inline void CudaMem::create(Size _size, int _type, int _alloc_type) { create(_size.height, _size.width, _type, _alloc_type); }
+//CCP void CudaMem::create(int _rows, int _cols, int _type, int _alloc_type);
+//CPP void CudaMem::release();

-inline Mat MatPL::createMatHeader() const { return Mat(size(), type(), data); }
-inline MatPL::operator Mat() const { return createMatHeader(); }
+inline Mat CudaMem::createMatHeader() const { return Mat(size(), type(), data); }
+inline CudaMem::operator Mat() const { return createMatHeader(); }

-inline bool MatPL::isContinuous() const { return (flags & Mat::CONTINUOUS_FLAG) != 0; }
-inline size_t MatPL::elemSize() const { return CV_ELEM_SIZE(flags); }
-inline size_t MatPL::elemSize1() const { return CV_ELEM_SIZE1(flags); }
-inline int MatPL::type() const { return CV_MAT_TYPE(flags); }
-inline int MatPL::depth() const { return CV_MAT_DEPTH(flags); }
-inline int MatPL::channels() const { return CV_MAT_CN(flags); }
-inline size_t MatPL::step1() const { return step/elemSize1(); }
-inline Size MatPL::size() const { return Size(cols, rows); }
-inline bool MatPL::empty() const { return data == 0; }
+inline bool CudaMem::isContinuous() const { return (flags & Mat::CONTINUOUS_FLAG) != 0; }
+inline size_t CudaMem::elemSize() const { return CV_ELEM_SIZE(flags); }
+inline size_t CudaMem::elemSize1() const { return CV_ELEM_SIZE1(flags); }
+inline int CudaMem::type() const { return CV_MAT_TYPE(flags); }
+inline int CudaMem::depth() const { return CV_MAT_DEPTH(flags); }
+inline int CudaMem::channels() const { return CV_MAT_CN(flags); }
+inline size_t CudaMem::step1() const { return step/elemSize1(); }
+inline Size CudaMem::size() const { return Size(cols, rows); }
+inline bool CudaMem::empty() const { return data == 0; }


 } /* end of namespace gpu */
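Besides the rename, the inline changes above carry two of the page-locked-memory fixes: the copy constructor now propagates datastart, dataend and alloc_type, and clone() creates the copy with the source's alloc_type instead of the default. A hedged illustration of why the clone() fix matters:

// Illustrative sketch only; not part of the commit.
// Requires a device that can map host memory (ALLOC_ZEROCOPY).
#include <opencv2/gpu/gpu.hpp>

void clone_keeps_alloc_type()
{
    cv::gpu::CudaMem zc(240, 320, CV_32FC1, cv::gpu::CudaMem::ALLOC_ZEROCOPY);

    // Previously the clone was created with the default ALLOC_PAGE_LOCKED,
    // so converting it to a GpuMat (which requires ALLOC_ZEROCOPY) failed.
    cv::gpu::CudaMem zc_copy = zc.clone();   // now also ALLOC_ZEROCOPY
    cv::gpu::GpuMat  mapped  = zc_copy;      // zero-copy device view of the clone
}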
@@ -234,7 +234,7 @@ namespace
         if (disp.empty())
             disp.create(rows, cols, CV_16S);

-        out = ((disp.type() == CV_16S) ? disp : GpuMat(rows, cols, CV_16S));
+        out = ((disp.type() == CV_16S) ? disp : (out.create(rows, cols, CV_16S), out));
         out = zero;

         bp::output(rthis.msg_type, u, d, l, r, datas.front(), disp, stream);
@@ -251,7 +251,7 @@ static void csbp_operator(StereoConstantSpaceBP& rthis, GpuMat u[2], GpuMat d[2]
     if (disp.empty())
         disp.create(rows, cols, CV_16S);

-    out = ((disp.type() == CV_16S) ? disp : GpuMat(rows, cols, CV_16S));
+    out = ((disp.type() == CV_16S) ? disp : (out.create(rows, cols, CV_16S), out));
     out = zero;

     csbp::compute_disp(u[cur_idx].ptr<T>(), d[cur_idx].ptr<T>(), l[cur_idx].ptr<T>(), r[cur_idx].ptr<T>(),
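In both the BP and CSBP hunks above, the old expression built a temporary GpuMat(rows, cols, CV_16S) whenever the caller's disparity map had a different type, allocating fresh GPU memory on every call. The replacement uses the comma operator so that out.create(...) is called instead; since create() is a no-op when the matrix already has the requested size and type, the existing buffer is reused. A hedged, self-contained sketch of the pattern with illustrative names:

// Illustrative sketch of the create-or-reuse pattern; not part of the commit.
#include <opencv2/gpu/gpu.hpp>

void select_output(cv::gpu::GpuMat& disp, cv::gpu::GpuMat& out, int rows, int cols)
{
    // If disp already has the required type, write straight into it.
    // Otherwise reuse 'out': create() only reallocates when size or type differ,
    // so calling this repeatedly does not allocate new GPU memory each time.
    out = (disp.type() == CV_16S) ? disp : (out.create(rows, cols, CV_16S), out);
}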
@@ -57,8 +57,8 @@ Stream& cv::gpu::Stream::operator=(const Stream& /*stream*/) { throw_nogpu(); re
 bool cv::gpu::Stream::queryIfComplete() { throw_nogpu(); return true; }
 void cv::gpu::Stream::waitForCompletion() { throw_nogpu(); }
 void cv::gpu::Stream::enqueueDownload(const GpuMat& /*src*/, Mat& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueDownload(const GpuMat& /*src*/, MatPL& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueUpload(const MatPL& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueDownload(const GpuMat& /*src*/, CudaMem& /*dst*/) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueUpload(const CudaMem& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
 void cv::gpu::Stream::enqueueUpload(const Mat& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
 void cv::gpu::Stream::enqueueCopy(const GpuMat& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
 void cv::gpu::Stream::enqueueMemSet(const GpuMat& /*src*/, Scalar /*val*/) { throw_nogpu(); }
@@ -150,9 +150,9 @@ void cv::gpu::Stream::enqueueDownload(const GpuMat& src, Mat& dst)
     CV_Assert(src.cols == dst.cols && src.rows == dst.rows && src.type() == dst.type() )
     devcopy(src, dst, impl->stream, cudaMemcpyDeviceToHost);
 }
-void cv::gpu::Stream::enqueueDownload(const GpuMat& src, MatPL& dst) { devcopy(src, dst, impl->stream, cudaMemcpyDeviceToHost); }
+void cv::gpu::Stream::enqueueDownload(const GpuMat& src, CudaMem& dst) { devcopy(src, dst, impl->stream, cudaMemcpyDeviceToHost); }

-void cv::gpu::Stream::enqueueUpload(const MatPL& src, GpuMat& dst){ devcopy(src, dst, impl->stream, cudaMemcpyHostToDevice); }
+void cv::gpu::Stream::enqueueUpload(const CudaMem& src, GpuMat& dst){ devcopy(src, dst, impl->stream, cudaMemcpyHostToDevice); }
 void cv::gpu::Stream::enqueueUpload(const Mat& src, GpuMat& dst) { devcopy(src, dst, impl->stream, cudaMemcpyHostToDevice); }
 void cv::gpu::Stream::enqueueCopy(const GpuMat& src, GpuMat& dst) { devcopy(src, dst, impl->stream, cudaMemcpyDeviceToDevice); }

@@ -67,9 +67,9 @@ namespace cv
         void GpuMat::create(int /*_rows*/, int /*_cols*/, int /*_type*/) { throw_nogpu(); }
         void GpuMat::release() { throw_nogpu(); }

-        void MatPL::create(int /*_rows*/, int /*_cols*/, int /*_type*/, int /*type_alloc*/) { throw_nogpu(); }
-        bool MatPL::can_device_map_to_host() { throw_nogpu(); return false; }
-        void MatPL::release() { throw_nogpu(); }
+        void CudaMem::create(int /*_rows*/, int /*_cols*/, int /*_type*/, int /*type_alloc*/) { throw_nogpu(); }
+        bool CudaMem::can_device_map_to_host() { throw_nogpu(); return false; }
+        void CudaMem::release() { throw_nogpu(); }
     }

 }
@@ -83,7 +83,7 @@ void cv::gpu::GpuMat::upload(const Mat& m)
     cudaSafeCall( cudaMemcpy2D(data, step, m.data, m.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
 }

-void cv::gpu::GpuMat::upload(const MatPL& m, Stream& stream)
+void cv::gpu::GpuMat::upload(const CudaMem& m, Stream& stream)
 {
     CV_DbgAssert(!m.empty());
     stream.enqueueUpload(m, *this);
@@ -96,7 +96,7 @@ void cv::gpu::GpuMat::download(cv::Mat& m) const
     cudaSafeCall( cudaMemcpy2D(m.data, m.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
 }

-void cv::gpu::GpuMat::download(MatPL& m, Stream& stream) const
+void cv::gpu::GpuMat::download(CudaMem& m, Stream& stream) const
 {
     CV_DbgAssert(!m.empty());
     stream.enqueueDownload(*this, m);
@@ -210,15 +210,6 @@ GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
     return hdr;
 }

-bool cv::gpu::MatPL::can_device_map_to_host()
-{
-    cudaDeviceProp prop;
-    cudaGetDeviceProperties(&prop, 0);
-
-    return (prop.canMapHostMemory != 0) ? true : false;
-}
-
-
 void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
 {
     _type &= TYPE_MASK;
@@ -266,12 +257,21 @@ void cv::gpu::GpuMat::release()


 ///////////////////////////////////////////////////////////////////////
-//////////////////////////////// MatPL ////////////////////////////////
+//////////////////////////////// CudaMem //////////////////////////////
 ///////////////////////////////////////////////////////////////////////

-void cv::gpu::MatPL::create(int _rows, int _cols, int _type, int type_alloc)
+bool cv::gpu::CudaMem::can_device_map_to_host()
 {
-    alloc_type = type_alloc;
+    cudaDeviceProp prop;
+    cudaGetDeviceProperties(&prop, 0);
+    return (prop.canMapHostMemory != 0) ? true : false;
+}
+
+void cv::gpu::CudaMem::create(int _rows, int _cols, int _type, int _alloc_type)
+{
+    if (_alloc_type == ALLOC_ZEROCOPY && !can_device_map_to_host())
+        cv::gpu::error("ZeroCopy is not supported by current device", __FILE__, __LINE__);
+
     _type &= TYPE_MASK;
     if( rows == _rows && cols == _cols && type() == _type && data )
         return;
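create() now rejects ALLOC_ZEROCOPY up front when the device cannot map host memory, rather than failing deep inside the allocation switch. A hedged sketch of a caller using the same capability check to pick an allocation type (the helper name is illustrative):

// Illustrative sketch only; not part of the commit.
#include <opencv2/gpu/gpu.hpp>

cv::gpu::CudaMem make_host_buffer(int rows, int cols, int type)
{
    // Fall back to plain pinned memory when zero-copy mapping is unavailable.
    int alloc = cv::gpu::CudaMem::can_device_map_to_host()
                    ? cv::gpu::CudaMem::ALLOC_ZEROCOPY
                    : cv::gpu::CudaMem::ALLOC_PAGE_LOCKED;

    return cv::gpu::CudaMem(rows, cols, type, alloc);
}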
@@ -279,7 +279,7 @@ void cv::gpu::MatPL::create(int _rows, int _cols, int _type, int type_alloc)
     release();
     CV_DbgAssert( _rows >= 0 && _cols >= 0 );
     if( _rows > 0 && _cols > 0 )
     {
         flags = Mat::MAGIC_VAL + Mat::CONTINUOUS_FLAG + _type;
         rows = _rows;
         cols = _cols;
@@ -291,24 +291,15 @@ void cv::gpu::MatPL::create(int _rows, int _cols, int _type, int type_alloc)
         size_t datasize = alignSize(nettosize, (int)sizeof(*refcount));

         //datastart = data = (uchar*)fastMalloc(datasize + sizeof(*refcount));
+        alloc_type = _alloc_type;
         void *ptr;

-        switch (type_alloc)
+        switch (alloc_type)
         {
         case ALLOC_PAGE_LOCKED: cudaSafeCall( cudaHostAlloc( &ptr, datasize, cudaHostAllocDefault) ); break;
-        case ALLOC_ZEROCOPY:
-            if (can_device_map_to_host() == true)
-            {
-                cudaSafeCall( cudaHostAlloc( &ptr, datasize, cudaHostAllocMapped) );
-            }
-            else
-                cv::gpu::error("ZeroCopy is not supported by current device", __FILE__, __LINE__);
-            break;
+        case ALLOC_ZEROCOPY: cudaSafeCall( cudaHostAlloc( &ptr, datasize, cudaHostAllocMapped) ); break;

         case ALLOC_WRITE_COMBINED: cudaSafeCall( cudaHostAlloc( &ptr, datasize, cudaHostAllocWriteCombined) ); break;
-        default:
-            cv::gpu::error("Invalid alloc type", __FILE__, __LINE__);
+        default: cv::gpu::error("Invalid alloc type", __FILE__, __LINE__);
         }

         datastart = data = (uchar*)ptr;
@@ -319,20 +310,22 @@ void cv::gpu::MatPL::create(int _rows, int _cols, int _type, int type_alloc)
     }
 }

-inline MatPL::operator GpuMat() const
+inline CudaMem::operator GpuMat() const
 {
+    GpuMat res;
     if (alloc_type == ALLOC_ZEROCOPY)
     {
-        void ** pdev;
-        cudaHostGetDevicePointer( pdev, this->data, 0 );
-        GpuMat m(this->rows, this->cols, this->type(), *pdev, this->step);
-        return m;
+        void *pdev;
+        cudaSafeCall( cudaHostGetDevicePointer( &pdev, data, 0 ) );
+        res = GpuMat(rows, cols, type(), pdev, step);
     }
     else
-        cv::gpu::error("", __FILE__, __LINE__);
+        cv::gpu::error("Zero-copy is not supported or memory was allocated without zero-copy flag", __FILE__, __LINE__);

+    return res;
 }

-void cv::gpu::MatPL::release()
+void cv::gpu::CudaMem::release()
 {
     if( refcount && CV_XADD(refcount, -1) == 1 )
     {
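With the conversion operator fixed above (a valid pointer is now passed to cudaHostGetDevicePointer and the result is returned by value instead of falling off the error branch), a zero-copy CudaMem can be viewed as a GpuMat that aliases the mapped host buffer. A hedged usage sketch, assuming a device for which can_device_map_to_host() returns true:

// Illustrative sketch only; not part of the commit.
#include <opencv2/gpu/gpu.hpp>

void zero_copy_views()
{
    cv::gpu::CudaMem zc(256, 256, CV_8UC1, cv::gpu::CudaMem::ALLOC_ZEROCOPY);

    cv::Mat         h_view = zc;   // host-side view of the mapped buffer
    cv::gpu::GpuMat d_view = zc;   // device-side view of the same memory, no copy

    // GPU work that writes through d_view lands directly in the host-visible
    // buffer; synchronize the stream it runs on (Stream::waitForCompletion)
    // before reading h_view on the CPU.
}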