Move OpenGL interop code to cv::ogl namespace

Fixed issues: #2737 #2848
Author: Vladislav Vinogradov
Date: 2013-03-06 15:18:44 +04:00
Committed by: Andrey Kamaev
parent 6569a58518
commit ecb2ebfba4
31 changed files with 2261 additions and 944 deletions
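For orientation, a minimal before/after sketch of the rename this commit performs (the header path is an assumption and an OpenGL-enabled build is required; the Gl-prefixed classes simply move into the cv::ogl namespace and lose the prefix):

#include <opencv2/core/opengl_interop.hpp>

void upload(const cv::Mat& img)
{
    // Before this commit:
    //   cv::GlBuffer    buf(img, cv::GlBuffer::ARRAY_BUFFER);
    //   cv::GlTexture2D tex(img);
    // After it, the same wrappers live in cv::ogl:
    cv::ogl::Buffer    buf(img, cv::ogl::Buffer::ARRAY_BUFFER);
    cv::ogl::Texture2D tex(img);
    (void) buf; (void) tex;
}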

View File

@@ -84,10 +84,11 @@ class Mat;
class SparseMat;
typedef Mat MatND;
class GlBuffer;
class GlTexture2D;
class GlArrays;
class GlCamera;
namespace ogl {
class Buffer;
class Texture2D;
class Arrays;
}
namespace gpu {
class GpuMat;
@@ -1303,7 +1304,7 @@ public:
STD_VECTOR_MAT = 5 << KIND_SHIFT,
EXPR = 6 << KIND_SHIFT,
OPENGL_BUFFER = 7 << KIND_SHIFT,
OPENGL_TEXTURE2D = 8 << KIND_SHIFT,
OPENGL_TEXTURE = 8 << KIND_SHIFT,
GPU_MAT = 9 << KIND_SHIFT
};
_InputArray();
@@ -1319,15 +1320,15 @@ public:
template<typename _Tp, int m, int n> _InputArray(const Matx<_Tp, m, n>& matx);
_InputArray(const Scalar& s);
_InputArray(const double& val);
_InputArray(const GlBuffer& buf);
_InputArray(const GlTexture2D& tex);
_InputArray(const gpu::GpuMat& d_mat);
_InputArray(const ogl::Buffer& buf);
_InputArray(const ogl::Texture2D& tex);
virtual Mat getMat(int i=-1) const;
virtual void getMatVector(std::vector<Mat>& mv) const;
virtual GlBuffer getGlBuffer() const;
virtual GlTexture2D getGlTexture2D() const;
virtual gpu::GpuMat getGpuMat() const;
virtual ogl::Buffer getOGlBuffer() const;
virtual ogl::Texture2D getOGlTexture2D() const;
virtual int kind() const;
virtual Size size(int i=-1) const;
@@ -1377,8 +1378,8 @@ public:
template<typename _Tp, int m, int n> _OutputArray(Matx<_Tp, m, n>& matx);
template<typename _Tp> _OutputArray(_Tp* vec, int n);
_OutputArray(gpu::GpuMat& d_mat);
_OutputArray(GlBuffer& buf);
_OutputArray(GlTexture2D& tex);
_OutputArray(ogl::Buffer& buf);
_OutputArray(ogl::Texture2D& tex);
_OutputArray(const Mat& m);
template<typename _Tp> _OutputArray(const std::vector<_Tp>& vec);
@@ -1389,16 +1390,16 @@ public:
template<typename _Tp, int m, int n> _OutputArray(const Matx<_Tp, m, n>& matx);
template<typename _Tp> _OutputArray(const _Tp* vec, int n);
_OutputArray(const gpu::GpuMat& d_mat);
_OutputArray(const GlBuffer& buf);
_OutputArray(const GlTexture2D& tex);
_OutputArray(const ogl::Buffer& buf);
_OutputArray(const ogl::Texture2D& tex);
virtual bool fixedSize() const;
virtual bool fixedType() const;
virtual bool needed() const;
virtual Mat& getMatRef(int i=-1) const;
virtual gpu::GpuMat& getGpuMatRef() const;
virtual GlBuffer& getGlBufferRef() const;
virtual GlTexture2D& getGlTexture2DRef() const;
virtual ogl::Buffer& getOGlBufferRef() const;
virtual ogl::Texture2D& getOGlTexture2DRef() const;
virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
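As a hedged illustration of what the proxy changes above enable (the helper below is illustrative and not part of this diff, and an OpenGL-enabled build is assumed), generic code can now detect and unwrap the cv::ogl types through InputArray:

#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>

// Illustrative helper: report the size of whatever the InputArray wraps.
cv::Size anySize(cv::InputArray arr)
{
    if (arr.kind() == cv::_InputArray::OPENGL_BUFFER)
        return arr.getOGlBuffer().size();              // renamed accessor
    if (arr.kind() == cv::_InputArray::OPENGL_TEXTURE)  // constant renamed from OPENGL_TEXTURE2D
        return arr.getOGlTexture2D().size();
    return arr.size();
}

void demo(const cv::Mat& img)
{
    cv::ogl::Buffer buf(img);            // ogl::Buffer converts implicitly to InputArray
    CV_Assert( anySize(buf) == img.size() );
}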

View File

@@ -143,7 +143,6 @@ namespace cv { namespace gpu
int multi_processor_count_;
int majorVersion_;
int minorVersion_;
size_t sharedMemPerBlock_;
};
CV_EXPORTS void printCudaDeviceInfo(int device);

View File

@@ -750,4 +750,14 @@ typedef struct CvBigFuncTable
(tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \
(tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
namespace cv { namespace ogl {
CV_EXPORTS bool checkError(const char* file, const int line, const char* func = "");
}}
#if defined(__GNUC__)
#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__, __func__)) )
#else
#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__)) )
#endif
#endif // __OPENCV_CORE_INTERNAL_HPP__
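The GL error-check helper moves along with it; inside OpenCV's own OpenGL code paths the usage pattern is unchanged (a sketch only: internal.hpp is a private header, and the surrounding GL call is borrowed from elsewhere in this commit):

// Inside an OpenCV source file that includes opencv2/core/internal.hpp:
static void bindForRead(unsigned int bufId)
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId);  // raw GL call through OpenCV's loader
    CV_CheckGlError();  // CV_DbgAssert( cv::ogl::checkError(__FILE__, __LINE__, __func__) ) on GCC
}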

View File

@@ -47,20 +47,12 @@
#include "opencv2/core/core.hpp"
namespace cv {
CV_EXPORTS bool checkGlError(const char* file, const int line, const char* func = "");
#if defined(__GNUC__)
#define CV_CheckGlError() CV_DbgAssert( (cv::checkGlError(__FILE__, __LINE__, __func__)) )
#else
#define CV_CheckGlError() CV_DbgAssert( (cv::checkGlError(__FILE__, __LINE__)) )
#endif
namespace cv { namespace ogl {
/////////////////// OpenGL Objects ///////////////////
//! Smart pointer for OpenGL buffer memory with reference counting.
class CV_EXPORTS GlBuffer
class CV_EXPORTS Buffer
{
public:
enum Target
@@ -79,18 +71,18 @@ public:
};
//! create empty buffer
GlBuffer();
Buffer();
//! create buffer from existed buffer id
GlBuffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false);
GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false);
Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false);
Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false);
//! create buffer
GlBuffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
GlBuffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
//! copy from host/device memory
explicit GlBuffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);
explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);
//! create buffer
void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
@@ -109,7 +101,7 @@ public:
void copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const;
//! create copy of current buffer
GlBuffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const;
Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const;
//! bind buffer for specified target
void bind(Target target) const;
@@ -147,10 +139,8 @@ private:
int type_;
};
template <> CV_EXPORTS void Ptr<GlBuffer::Impl>::delete_obj();
//! Smart pointer for OpenGL 2D texture memory with reference counting.
class CV_EXPORTS GlTexture2D
class CV_EXPORTS Texture2D
{
public:
enum Format
@@ -162,18 +152,18 @@ public:
};
//! create empty texture
GlTexture2D();
Texture2D();
//! create texture from existed texture id
GlTexture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false);
GlTexture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false);
Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false);
Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false);
//! create texture
GlTexture2D(int arows, int acols, Format aformat, bool autoRelease = false);
GlTexture2D(Size asize, Format aformat, bool autoRelease = false);
Texture2D(int arows, int acols, Format aformat, bool autoRelease = false);
Texture2D(Size asize, Format aformat, bool autoRelease = false);
//! copy from host/device memory
explicit GlTexture2D(InputArray arr, bool autoRelease = false);
explicit Texture2D(InputArray arr, bool autoRelease = false);
//! create texture
void create(int arows, int acols, Format aformat, bool autoRelease = false);
@@ -212,13 +202,11 @@ private:
Format format_;
};
template <> CV_EXPORTS void Ptr<GlTexture2D::Impl>::delete_obj();
//! OpenGL Arrays
class CV_EXPORTS GlArrays
class CV_EXPORTS Arrays
{
public:
GlArrays();
Arrays();
void setVertexArray(InputArray vertex);
void resetVertexArray();
@@ -243,46 +231,53 @@ public:
private:
int size_;
GlBuffer vertex_;
GlBuffer color_;
GlBuffer normal_;
GlBuffer texCoord_;
Buffer vertex_;
Buffer color_;
Buffer normal_;
Buffer texCoord_;
};
/////////////////// Render Functions ///////////////////
//! render texture rectangle in window
CV_EXPORTS void render(const GlTexture2D& tex,
CV_EXPORTS void render(const Texture2D& tex,
Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0),
Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));
//! render mode
namespace RenderMode {
enum {
POINTS = 0x0000,
LINES = 0x0001,
LINE_LOOP = 0x0002,
LINE_STRIP = 0x0003,
TRIANGLES = 0x0004,
TRIANGLE_STRIP = 0x0005,
TRIANGLE_FAN = 0x0006,
QUADS = 0x0007,
QUAD_STRIP = 0x0008,
POLYGON = 0x0009
};
}
enum {
POINTS = 0x0000,
LINES = 0x0001,
LINE_LOOP = 0x0002,
LINE_STRIP = 0x0003,
TRIANGLES = 0x0004,
TRIANGLE_STRIP = 0x0005,
TRIANGLE_FAN = 0x0006,
QUADS = 0x0007,
QUAD_STRIP = 0x0008,
POLYGON = 0x0009
};
//! render OpenGL arrays
CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255));
CV_EXPORTS void render(const GlArrays& arr, InputArray indices, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255));
CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255));
CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255));
}} // namespace cv::ogl
namespace cv { namespace gpu {
//! set a CUDA device to use OpenGL interoperability
CV_EXPORTS void setGlDevice(int device = 0);
}}
namespace cv {
template <> CV_EXPORTS void Ptr<cv::ogl::Buffer::Impl>::delete_obj();
template <> CV_EXPORTS void Ptr<cv::ogl::Texture2D::Impl>::delete_obj();
namespace gpu {
//! set a CUDA device to use OpenGL interoperability
CV_EXPORTS void setGlDevice(int device = 0);
}
} // namespace cv
#endif // __cplusplus
#endif // __OPENCV_OPENGL_INTEROP_HPP__
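Taken together, the renamed public API is used like this (a usage sketch; window setup and the draw callback via highgui are assumptions outside this diff, and an OpenGL-enabled build is required):

#include <opencv2/core/opengl_interop.hpp>

void drawFrame(const cv::Mat& img, const cv::Mat& vertices)
{
    // Texture from host memory, stretched over the whole window.
    cv::ogl::Texture2D tex(img);
    cv::ogl::render(tex);                          // default rectangles cover [0,1] x [0,1]

    // Vertex data through the renamed Arrays wrapper; the render-mode
    // constants now live directly in cv::ogl instead of cv::RenderMode.
    cv::ogl::Arrays arr;
    arr.setVertexArray(vertices);
    cv::ogl::render(arr, cv::ogl::TRIANGLES, cv::Scalar::all(255));
}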

View File

@@ -60,8 +60,111 @@
#endif
#endif
using namespace cv;
using namespace cv::gpu;
#ifndef HAVE_CUDA
#define throw_nogpu CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support")
#else // HAVE_CUDA
namespace
{
#if defined(__GNUC__)
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__)
#endif
inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
{
if (cudaSuccess != err)
cv::gpu::error(cudaGetErrorString(err), file, line, func);
}
inline void ___nppSafeCall(int err, const char *file, const int line, const char *func = "")
{
if (err < 0)
{
std::ostringstream msg;
msg << "NPP API Call Error: " << err;
cv::gpu::error(msg.str().c_str(), file, line, func);
}
}
}
#endif // HAVE_CUDA
//////////////////////////////// Initialization & Info ////////////////////////
#ifndef HAVE_CUDA
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu; }
int cv::gpu::getDevice() { throw_nogpu; return 0; }
void cv::gpu::resetDevice() { throw_nogpu; }
bool cv::gpu::deviceSupports(FeatureSet) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::builtWith(FeatureSet) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::has(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasPtx(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasBin(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrGreater(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int, int) { throw_nogpu; return false; }
size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const { throw_nogpu; return 0; }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu; }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu; return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu; return 0; }
bool cv::gpu::DeviceInfo::supports(FeatureSet) const { throw_nogpu; return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu; return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu; }
void cv::gpu::printCudaDeviceInfo(int) { throw_nogpu; }
void cv::gpu::printShortCudaDeviceInfo(int) { throw_nogpu; }
#else // HAVE_CUDA
int cv::gpu::getCudaEnabledDeviceCount()
{
int count;
cudaError_t error = cudaGetDeviceCount( &count );
if (error == cudaErrorInsufficientDriver)
return -1;
if (error == cudaErrorNoDevice)
return 0;
cudaSafeCall( error );
return count;
}
void cv::gpu::setDevice(int device)
{
cudaSafeCall( cudaSetDevice( device ) );
}
int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
void cv::gpu::resetDevice()
{
cudaSafeCall( cudaDeviceReset() );
}
namespace
{
class CudaArch
@@ -69,7 +172,7 @@ namespace
public:
CudaArch();
bool builtWith(cv::gpu::FeatureSet feature_set) const;
bool builtWith(FeatureSet feature_set) const;
bool hasPtx(int major, int minor) const;
bool hasBin(int major, int minor) const;
bool hasEqualOrLessPtx(int major, int minor) const;
@@ -88,14 +191,12 @@ namespace
CudaArch::CudaArch()
{
#ifdef HAVE_CUDA
fromStr(CUDA_ARCH_BIN, bin);
fromStr(CUDA_ARCH_PTX, ptx);
fromStr(CUDA_ARCH_FEATURES, features);
#endif
}
bool CudaArch::builtWith(cv::gpu::FeatureSet feature_set) const
bool CudaArch::builtWith(FeatureSet feature_set) const
{
return !features.empty() && (features.back() >= feature_set);
}
@@ -145,12 +246,7 @@ namespace
bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
{
#if defined (HAVE_CUDA)
return cudaArch.builtWith(feature_set);
#else
(void)feature_set;
return false;
#endif
}
bool cv::gpu::TargetArchs::has(int major, int minor)
@@ -160,35 +256,17 @@ bool cv::gpu::TargetArchs::has(int major, int minor)
bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return cudaArch.hasPtx(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return cudaArch.hasBin(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return cudaArch.hasEqualOrLessPtx(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
@@ -198,24 +276,12 @@ bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return cudaArch.hasEqualOrGreaterPtx(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return cudaArch.hasEqualOrGreaterBin(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
}
bool cv::gpu::deviceSupports(FeatureSet feature_set)
@@ -243,108 +309,84 @@ bool cv::gpu::deviceSupports(FeatureSet feature_set)
return TargetArchs::builtWith(feature_set) && (version >= feature_set);
}
#if !defined (HAVE_CUDA)
#define throw_nogpu CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support")
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu; }
int cv::gpu::getDevice() { throw_nogpu; return 0; }
void cv::gpu::resetDevice() { throw_nogpu; }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu; return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu; return 0; }
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet) const { throw_nogpu; return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu; return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu; }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu; }
void cv::gpu::printCudaDeviceInfo(int) { throw_nogpu; }
void cv::gpu::printShortCudaDeviceInfo(int) { throw_nogpu; }
#undef throw_nogpu
#else // HAVE_CUDA
namespace
{
#if defined(__GNUC__)
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__)
#endif
inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
class DeviceProps
{
if (cudaSuccess != err)
cv::gpu::error(cudaGetErrorString(err), file, line, func);
public:
DeviceProps();
~DeviceProps();
cudaDeviceProp* get(int devID);
private:
std::vector<cudaDeviceProp*> props_;
};
DeviceProps::DeviceProps()
{
props_.resize(10, 0);
}
inline void ___nppSafeCall(int err, const char *file, const int line, const char *func = "")
DeviceProps::~DeviceProps()
{
if (err < 0)
for (size_t i = 0; i < props_.size(); ++i)
{
std::ostringstream msg;
msg << "NPP API Call Error: " << err;
cv::gpu::error(msg.str().c_str(), file, line, func);
if (props_[i])
delete props_[i];
}
props_.clear();
}
cudaDeviceProp* DeviceProps::get(int devID)
{
if (devID >= (int) props_.size())
props_.resize(devID + 5, 0);
if (!props_[devID])
{
props_[devID] = new cudaDeviceProp;
cudaSafeCall( cudaGetDeviceProperties(props_[devID], devID) );
}
return props_[devID];
}
DeviceProps deviceProps;
}
int cv::gpu::getCudaEnabledDeviceCount()
size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const
{
int count;
cudaError_t error = cudaGetDeviceCount( &count );
if (error == cudaErrorInsufficientDriver)
return -1;
if (error == cudaErrorNoDevice)
return 0;
cudaSafeCall(error);
return count;
return deviceProps.get(device_id_)->sharedMemPerBlock;
}
void cv::gpu::setDevice(int device)
void cv::gpu::DeviceInfo::queryMemory(size_t& _totalMemory, size_t& _freeMemory) const
{
cudaSafeCall( cudaSetDevice( device ) );
}
int prevDeviceID = getDevice();
if (prevDeviceID != device_id_)
setDevice(device_id_);
int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
cudaSafeCall( cudaMemGetInfo(&_freeMemory, &_totalMemory) );
void cv::gpu::resetDevice()
{
cudaSafeCall( cudaDeviceReset() );
if (prevDeviceID != device_id_)
setDevice(prevDeviceID);
}
size_t cv::gpu::DeviceInfo::freeMemory() const
{
size_t free_memory, total_memory;
queryMemory(free_memory, total_memory);
return free_memory;
size_t _totalMemory, _freeMemory;
queryMemory(_totalMemory, _freeMemory);
return _freeMemory;
}
size_t cv::gpu::DeviceInfo::totalMemory() const
{
size_t free_memory, total_memory;
queryMemory(free_memory, total_memory);
return total_memory;
size_t _totalMemory, _freeMemory;
queryMemory(_totalMemory, _freeMemory);
return _totalMemory;
}
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet feature_set) const
bool cv::gpu::DeviceInfo::supports(FeatureSet feature_set) const
{
int version = majorVersion() * 10 + minorVersion();
return version >= feature_set;
@@ -366,27 +408,12 @@ bool cv::gpu::DeviceInfo::isCompatible() const
void cv::gpu::DeviceInfo::query()
{
cudaDeviceProp prop;
cudaSafeCall(cudaGetDeviceProperties(&prop, device_id_));
name_ = prop.name;
multi_processor_count_ = prop.multiProcessorCount;
majorVersion_ = prop.major;
minorVersion_ = prop.minor;
sharedMemPerBlock_ = prop.sharedMemPerBlock;
}
const cudaDeviceProp* prop = deviceProps.get(device_id_);
size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const {return sharedMemPerBlock_;}
void cv::gpu::DeviceInfo::queryMemory(size_t& free_memory, size_t& total_memory) const
{
int prev_device_id = getDevice();
if (prev_device_id != device_id_)
setDevice(device_id_);
cudaSafeCall(cudaMemGetInfo(&free_memory, &total_memory));
if (prev_device_id != device_id_)
setDevice(prev_device_id);
name_ = prop->name;
multi_processor_count_ = prop->multiProcessorCount;
majorVersion_ = prop->major;
minorVersion_ = prop->minor;
}
namespace
@@ -642,7 +669,7 @@ cv::gpu::GpuMat::GpuMat(const Mat& m) :
upload(m);
}
cv::gpu::GpuMat& cv::gpu::GpuMat::operator = (const cv::gpu::GpuMat& m)
GpuMat& cv::gpu::GpuMat::operator = (const GpuMat& m)
{
if (this != &m)
{
@@ -689,7 +716,7 @@ void cv::gpu::GpuMat::locateROI(Size& wholeSize, Point& ofs) const
wholeSize.width = std::max(static_cast<int>((delta2 - step * (wholeSize.height - 1)) / esz), ofs.x + cols);
}
cv::gpu::GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, int dright)
GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, int dright)
{
Size wholeSize;
Point ofs;
@@ -715,7 +742,7 @@ cv::gpu::GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, in
return *this;
}
cv::gpu::GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
{
GpuMat hdr = *this;
@@ -758,7 +785,7 @@ cv::gpu::GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
return hdr;
}
cv::Mat::Mat(const cv::gpu::GpuMat& m) : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows)
cv::Mat::Mat(const GpuMat& m) : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows)
{
m.download(*this);
}
@@ -800,7 +827,7 @@ void cv::gpu::ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m)
}
}
cv::gpu::GpuMat cv::gpu::allocMatFromBuf(int rows, int cols, int type, GpuMat &mat)
GpuMat cv::gpu::allocMatFromBuf(int rows, int cols, int type, GpuMat &mat)
{
if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols)
return mat(Rect(0, 0, cols, rows));
@@ -814,41 +841,41 @@ namespace
public:
virtual ~GpuFuncTable() {}
virtual void copy(const cv::Mat& src, cv::gpu::GpuMat& dst) const = 0;
virtual void copy(const cv::gpu::GpuMat& src, cv::Mat& dst) const = 0;
virtual void copy(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const = 0;
virtual void copy(const Mat& src, GpuMat& dst) const = 0;
virtual void copy(const GpuMat& src, Mat& dst) const = 0;
virtual void copy(const GpuMat& src, GpuMat& dst) const = 0;
virtual void copyWithMask(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask) const = 0;
virtual void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask) const = 0;
virtual void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const = 0;
virtual void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, double alpha, double beta) const = 0;
virtual void convert(const GpuMat& src, GpuMat& dst) const = 0;
virtual void convert(const GpuMat& src, GpuMat& dst, double alpha, double beta) const = 0;
virtual void setTo(cv::gpu::GpuMat& m, cv::Scalar s, const cv::gpu::GpuMat& mask) const = 0;
virtual void setTo(GpuMat& m, Scalar s, const GpuMat& mask) const = 0;
virtual void mallocPitch(void** devPtr, size_t* step, size_t width, size_t height) const = 0;
virtual void free(void* devPtr) const = 0;
};
}
#if !defined HAVE_CUDA || defined(CUDA_DISABLER_)
#ifndef HAVE_CUDA
namespace
{
class EmptyFuncTable : public GpuFuncTable
{
public:
void copy(const cv::Mat&, cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void copy(const cv::gpu::GpuMat&, cv::Mat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void copy(const cv::gpu::GpuMat&, cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void copy(const Mat&, GpuMat&) const { throw_nogpu; }
void copy(const GpuMat&, Mat&) const { throw_nogpu; }
void copy(const GpuMat&, GpuMat&) const { throw_nogpu; }
void copyWithMask(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, const cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void copyWithMask(const GpuMat&, GpuMat&, const GpuMat&) const { throw_nogpu; }
void convert(const cv::gpu::GpuMat&, cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void convert(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, double, double) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void convert(const GpuMat&, GpuMat&) const { throw_nogpu; }
void convert(const GpuMat&, GpuMat&, double, double) const { throw_nogpu; }
void setTo(cv::gpu::GpuMat&, cv::Scalar, const cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void setTo(GpuMat&, Scalar, const GpuMat&) const { throw_nogpu; }
void mallocPitch(void**, size_t*, size_t, size_t) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); }
void mallocPitch(void**, size_t*, size_t, size_t) const { throw_nogpu; }
void free(void*) const {}
};
@@ -876,15 +903,15 @@ namespace cv { namespace gpu { namespace device
namespace
{
template <typename T> void kernelSetCaller(cv::gpu::GpuMat& src, cv::Scalar s, cudaStream_t stream)
template <typename T> void kernelSetCaller(GpuMat& src, Scalar s, cudaStream_t stream)
{
cv::Scalar_<T> sf = s;
Scalar_<T> sf = s;
cv::gpu::device::set_to_gpu(src, sf.val, src.channels(), stream);
}
template <typename T> void kernelSetCaller(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask, cudaStream_t stream)
template <typename T> void kernelSetCaller(GpuMat& src, Scalar s, const GpuMat& mask, cudaStream_t stream)
{
cv::Scalar_<T> sf = s;
Scalar_<T> sf = s;
cv::gpu::device::set_to_gpu(src, sf.val, mask, src.channels(), stream);
}
}
@@ -992,7 +1019,7 @@ namespace
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t;
static void call(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst)
static void call(const GpuMat& src, GpuMat& dst)
{
NppiSize sz;
sz.width = src.cols;
@@ -1007,7 +1034,7 @@ namespace
{
typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t;
static void call(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst)
static void call(const GpuMat& src, GpuMat& dst)
{
NppiSize sz;
sz.width = src.cols;
@@ -1047,13 +1074,13 @@ namespace
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s)
static void call(GpuMat& src, Scalar s)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
cv::Scalar_<src_t> nppS = s;
Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS.val, src.ptr<src_t>(), static_cast<int>(src.step), sz) );
@@ -1064,13 +1091,13 @@ namespace
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s)
static void call(GpuMat& src, Scalar s)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
cv::Scalar_<src_t> nppS = s;
Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS[0], src.ptr<src_t>(), static_cast<int>(src.step), sz) );
@@ -1095,13 +1122,13 @@ namespace
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask)
static void call(GpuMat& src, Scalar s, const GpuMat& mask)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
cv::Scalar_<src_t> nppS = s;
Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS.val, src.ptr<src_t>(), static_cast<int>(src.step), sz, mask.ptr<Npp8u>(), static_cast<int>(mask.step)) );
@@ -1112,13 +1139,13 @@ namespace
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask)
static void call(GpuMat& src, Scalar s, const GpuMat& mask)
{
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
cv::Scalar_<src_t> nppS = s;
Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS[0], src.ptr<src_t>(), static_cast<int>(src.step), sz, mask.ptr<Npp8u>(), static_cast<int>(mask.step)) );
@@ -1140,7 +1167,7 @@ namespace
{
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask, cudaStream_t /*stream*/)
static void call(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t /*stream*/)
{
NppiSize sz;
sz.width = src.cols;
@@ -1163,20 +1190,20 @@ namespace
class CudaFuncTable : public GpuFuncTable
{
public:
void copy(const cv::Mat& src, cv::gpu::GpuMat& dst) const
void copy(const Mat& src, GpuMat& dst) const
{
cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyHostToDevice) );
}
void copy(const cv::gpu::GpuMat& src, cv::Mat& dst) const
void copy(const GpuMat& src, Mat& dst) const
{
cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost) );
}
void copy(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const
void copy(const GpuMat& src, GpuMat& dst) const
{
cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToDevice) );
}
void copyWithMask(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask) const
void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask) const
{
CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(src.size() == dst.size() && src.type() == dst.type());
@@ -1184,11 +1211,11 @@ namespace
if (src.depth() == CV_64F)
{
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE))
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
typedef void (*func_t)(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask, cudaStream_t stream);
typedef void (*func_t)(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream);
static const func_t funcs[7][4] =
{
/* 8U */ {NppCopyMasked<CV_8U , nppiCopy_8u_C1MR >::call, cv::gpu::copyWithMask, NppCopyMasked<CV_8U , nppiCopy_8u_C3MR >::call, NppCopyMasked<CV_8U , nppiCopy_8u_C4MR >::call},
@@ -1205,9 +1232,9 @@ namespace
func(src, dst, mask, 0);
}
void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const
void convert(const GpuMat& src, GpuMat& dst) const
{
typedef void (*func_t)(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst);
typedef void (*func_t)(const GpuMat& src, GpuMat& dst);
static const func_t funcs[7][7][4] =
{
{
@@ -1281,7 +1308,7 @@ namespace
if (src.depth() == CV_64F || dst.depth() == CV_64F)
{
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE))
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
@@ -1298,21 +1325,21 @@ namespace
func(src, dst);
}
void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, double alpha, double beta) const
void convert(const GpuMat& src, GpuMat& dst, double alpha, double beta) const
{
CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(dst.depth() <= CV_64F);
if (src.depth() == CV_64F || dst.depth() == CV_64F)
{
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE))
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
cv::gpu::convertTo(src, dst, alpha, beta);
}
void setTo(cv::gpu::GpuMat& m, cv::Scalar s, const cv::gpu::GpuMat& mask) const
void setTo(GpuMat& m, Scalar s, const GpuMat& mask) const
{
if (mask.empty())
{
@@ -1328,13 +1355,13 @@ namespace
if (cn == 1 || (cn == 2 && s[0] == s[1]) || (cn == 3 && s[0] == s[1] && s[0] == s[2]) || (cn == 4 && s[0] == s[1] && s[0] == s[2] && s[0] == s[3]))
{
int val = cv::saturate_cast<uchar>(s[0]);
int val = saturate_cast<uchar>(s[0]);
cudaSafeCall( cudaMemset2D(m.data, m.step, val, m.cols * m.elemSize(), m.rows) );
return;
}
}
typedef void (*func_t)(cv::gpu::GpuMat& src, cv::Scalar s);
typedef void (*func_t)(GpuMat& src, Scalar s);
static const func_t funcs[7][4] =
{
{NppSet<CV_8U , 1, nppiSet_8u_C1R >::call, cv::gpu::setTo , cv::gpu::setTo , NppSet<CV_8U , 4, nppiSet_8u_C4R >::call},
@@ -1350,7 +1377,7 @@ namespace
if (m.depth() == CV_64F)
{
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE))
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
@@ -1358,7 +1385,7 @@ namespace
}
else
{
typedef void (*func_t)(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask);
typedef void (*func_t)(GpuMat& src, Scalar s, const GpuMat& mask);
static const func_t funcs[7][4] =
{
{NppSetMask<CV_8U , 1, nppiSet_8u_C1MR >::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_8U , 4, nppiSet_8u_C4MR >::call},
@@ -1374,7 +1401,7 @@ namespace
if (m.depth() == CV_64F)
{
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE))
if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
}
@@ -1402,7 +1429,7 @@ namespace
#endif // HAVE_CUDA
void cv::gpu::GpuMat::upload(const cv::Mat& m)
void cv::gpu::GpuMat::upload(const Mat& m)
{
CV_DbgAssert(!m.empty());
@@ -1411,7 +1438,7 @@ void cv::gpu::GpuMat::upload(const cv::Mat& m)
gpuFuncTable()->copy(m, *this);
}
void cv::gpu::GpuMat::download(cv::Mat& m) const
void cv::gpu::GpuMat::download(Mat& m) const
{
CV_DbgAssert(!empty());
@@ -1420,7 +1447,7 @@ void cv::gpu::GpuMat::download(cv::Mat& m) const
gpuFuncTable()->copy(*this, m);
}
void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& m) const
void cv::gpu::GpuMat::copyTo(GpuMat& m) const
{
CV_DbgAssert(!empty());
@@ -1429,7 +1456,7 @@ void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& m) const
gpuFuncTable()->copy(*this, m);
}
void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& mat, const cv::gpu::GpuMat& mask) const
void cv::gpu::GpuMat::copyTo(GpuMat& mat, const GpuMat& mask) const
{
if (mask.empty())
copyTo(mat);
@@ -1441,7 +1468,7 @@ void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& mat, const cv::gpu::GpuMat& mask)
}
}
void cv::gpu::GpuMat::convertTo(cv::gpu::GpuMat& dst, int rtype, double alpha, double beta) const
void cv::gpu::GpuMat::convertTo(GpuMat& dst, int rtype, double alpha, double beta) const
{
bool noScale = fabs(alpha - 1) < std::numeric_limits<double>::epsilon() && fabs(beta) < std::numeric_limits<double>::epsilon();
@@ -1458,8 +1485,8 @@ void cv::gpu::GpuMat::convertTo(cv::gpu::GpuMat& dst, int rtype, double alpha, d
return;
}
cv::gpu::GpuMat temp;
const cv::gpu::GpuMat* psrc = this;
GpuMat temp;
const GpuMat* psrc = this;
if (sdepth != ddepth && psrc == &dst)
{
temp = *this;
@@ -1474,7 +1501,7 @@ void cv::gpu::GpuMat::convertTo(cv::gpu::GpuMat& dst, int rtype, double alpha, d
gpuFuncTable()->convert(*psrc, dst, alpha, beta);
}
cv::gpu::GpuMat& cv::gpu::GpuMat::setTo(cv::Scalar s, const cv::gpu::GpuMat& mask)
GpuMat& cv::gpu::GpuMat::setTo(Scalar s, const GpuMat& mask)
{
CV_Assert(mask.empty() || mask.type() == CV_8UC1);
CV_DbgAssert(!empty());
@@ -1498,7 +1525,7 @@ void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
if (_rows > 0 && _cols > 0)
{
flags = cv::Mat::MAGIC_VAL + _type;
flags = Mat::MAGIC_VAL + _type;
rows = _rows;
cols = _cols;
@@ -1512,7 +1539,7 @@ void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
step = esz * cols;
if (esz * cols == step)
flags |= cv::Mat::CONTINUOUS_FLAG;
flags |= Mat::CONTINUOUS_FLAG;
int64 _nettosize = static_cast<int64>(step) * rows;
size_t nettosize = static_cast<size_t>(_nettosize);
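On the gpu side, device properties are now looked up lazily through a cached cudaDeviceProp instead of being copied into every DeviceInfo at query() time; the public API is unchanged, so existing query code keeps working (a sketch, assuming a CUDA-enabled build):

#include <iostream>
#include <opencv2/gpu/gpu.hpp>

void listDevices()
{
    int n = cv::gpu::getCudaEnabledDeviceCount();  // 0 without CUDA devices, -1 on driver mismatch
    for (int i = 0; i < n; ++i)
    {
        cv::gpu::DeviceInfo info(i);
        std::cout << info.name() << ": "
                  << info.totalMemory() / (1024 * 1024) << " MB total, "
                  << info.sharedMemPerBlock() << " bytes shared memory per block"
                  << (info.isCompatible() ? "" : " (not compatible with this build)")
                  << std::endl;
    }
}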

View File

@@ -934,9 +934,9 @@ _InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
_InputArray::_InputArray(const std::vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
_InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
_InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {}
_InputArray::_InputArray(const GlBuffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const GlTexture2D &tex) : flags(OPENGL_TEXTURE2D), obj((void*)&tex) {}
_InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
_InputArray::_InputArray(const ogl::Buffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const ogl::Texture2D& tex) : flags(OPENGL_TEXTURE), obj((void*)&tex) {}
Mat _InputArray::getMat(int i) const
{
@@ -1076,40 +1076,34 @@ void _InputArray::getMatVector(std::vector<Mat>& mv) const
}
}
GlBuffer _InputArray::getGlBuffer() const
{
int k = kind();
CV_Assert(k == OPENGL_BUFFER);
//if( k == OPENGL_BUFFER )
{
const GlBuffer* buf = (const GlBuffer*)obj;
return *buf;
}
}
GlTexture2D _InputArray::getGlTexture2D() const
{
int k = kind();
CV_Assert(k == OPENGL_TEXTURE2D);
//if( k == OPENGL_TEXTURE )
{
const GlTexture2D* tex = (const GlTexture2D*)obj;
return *tex;
}
}
gpu::GpuMat _InputArray::getGpuMat() const
{
int k = kind();
CV_Assert(k == GPU_MAT);
//if( k == GPU_MAT )
{
const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
return *d_mat;
}
const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
return *d_mat;
}
ogl::Buffer _InputArray::getOGlBuffer() const
{
int k = kind();
CV_Assert(k == OPENGL_BUFFER);
const ogl::Buffer* gl_buf = (const ogl::Buffer*)obj;
return *gl_buf;
}
ogl::Texture2D _InputArray::getOGlTexture2D() const
{
int k = kind();
CV_Assert(k == OPENGL_TEXTURE);
const ogl::Texture2D* gl_tex = (const ogl::Texture2D*)obj;
return *gl_tex;
}
int _InputArray::kind() const
@@ -1176,14 +1170,14 @@ Size _InputArray::size(int i) const
if( k == OPENGL_BUFFER )
{
CV_Assert( i < 0 );
const GlBuffer* buf = (const GlBuffer*)obj;
const ogl::Buffer* buf = (const ogl::Buffer*)obj;
return buf->size();
}
if( k == OPENGL_TEXTURE2D )
if( k == OPENGL_TEXTURE )
{
CV_Assert( i < 0 );
const GlTexture2D* tex = (const GlTexture2D*)obj;
const ogl::Texture2D* tex = (const ogl::Texture2D*)obj;
return tex->size();
}
@@ -1244,7 +1238,7 @@ int _InputArray::type(int i) const
}
if( k == OPENGL_BUFFER )
return ((const GlBuffer*)obj)->type();
return ((const ogl::Buffer*)obj)->type();
CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT )
@@ -1296,10 +1290,10 @@ bool _InputArray::empty() const
}
if( k == OPENGL_BUFFER )
return ((const GlBuffer*)obj)->empty();
return ((const ogl::Buffer*)obj)->empty();
if( k == OPENGL_TEXTURE2D )
return ((const GlTexture2D*)obj)->empty();
if( k == OPENGL_TEXTURE )
return ((const ogl::Texture2D*)obj)->empty();
CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT )
@@ -1312,14 +1306,14 @@ _OutputArray::~_OutputArray() {}
_OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
_OutputArray::_OutputArray(std::vector<Mat>& vec) : _InputArray(vec) {}
_OutputArray::_OutputArray(gpu::GpuMat& d_mat) : _InputArray(d_mat) {}
_OutputArray::_OutputArray(GlBuffer& buf) : _InputArray(buf) {}
_OutputArray::_OutputArray(GlTexture2D& tex) : _InputArray(tex) {}
_OutputArray::_OutputArray(ogl::Buffer& buf) : _InputArray(buf) {}
_OutputArray::_OutputArray(ogl::Texture2D& tex) : _InputArray(tex) {}
_OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const std::vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
_OutputArray::_OutputArray(const gpu::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const GlBuffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const GlTexture2D& tex) : _InputArray(tex) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const ogl::Buffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const ogl::Texture2D& tex) : _InputArray(tex) {flags |= FIXED_SIZE|FIXED_TYPE;}
bool _OutputArray::fixedSize() const
@@ -1351,9 +1345,9 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
}
if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((GlBuffer*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((GlBuffer*)obj)->type() == mtype);
((GlBuffer*)obj)->create(_sz, mtype);
CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
((ogl::Buffer*)obj)->create(_sz, mtype);
return;
}
int sizes[] = {_sz.height, _sz.width};
@@ -1379,9 +1373,9 @@ void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransp
}
if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((GlBuffer*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((GlBuffer*)obj)->type() == mtype);
((GlBuffer*)obj)->create(rows, cols, mtype);
CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
((ogl::Buffer*)obj)->create(rows, cols, mtype);
return;
}
int sizes[] = {rows, cols};
@@ -1605,13 +1599,13 @@ void _OutputArray::release() const
if( k == OPENGL_BUFFER )
{
((GlBuffer*)obj)->release();
((ogl::Buffer*)obj)->release();
return;
}
if( k == OPENGL_TEXTURE2D )
if( k == OPENGL_TEXTURE )
{
((GlTexture2D*)obj)->release();
((ogl::Texture2D*)obj)->release();
return;
}
@@ -1680,18 +1674,18 @@ gpu::GpuMat& _OutputArray::getGpuMatRef() const
return *(gpu::GpuMat*)obj;
}
GlBuffer& _OutputArray::getGlBufferRef() const
ogl::Buffer& _OutputArray::getOGlBufferRef() const
{
int k = kind();
CV_Assert( k == OPENGL_BUFFER );
return *(GlBuffer*)obj;
return *(ogl::Buffer*)obj;
}
GlTexture2D& _OutputArray::getGlTexture2DRef() const
ogl::Texture2D& _OutputArray::getOGlTexture2DRef() const
{
int k = kind();
CV_Assert( k == OPENGL_TEXTURE2D );
return *(GlTexture2D*)obj;
CV_Assert( k == OPENGL_TEXTURE );
return *(ogl::Texture2D*)obj;
}
static _OutputArray _none;
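A hedged sketch of the output side of these changes: a generic routine can allocate into, and write to, an ogl::Buffer through the proxy via the renamed accessors (the helper name is illustrative; an OpenGL-enabled build is assumed):

#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>

// Illustrative: copy host data into whatever the OutputArray wraps,
// including the OPENGL_BUFFER kind handled above.
void copyToAny(cv::InputArray src, cv::OutputArray dst)
{
    cv::Mat m = src.getMat();
    dst.create(m.size(), m.type());          // dispatches to ogl::Buffer::create for OPENGL_BUFFER
    if (dst.kind() == cv::_InputArray::OPENGL_BUFFER)
        dst.getOGlBufferRef().copyFrom(m);   // renamed from getGlBufferRef
    else
        m.copyTo(dst);
}

void demo(const cv::Mat& img)
{
    cv::ogl::Buffer buf;
    copyToAny(img, buf);                     // buf now holds a copy of img in a GL buffer object
}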

View File

@@ -53,6 +53,9 @@
#endif
#endif
using namespace cv;
using namespace cv::gpu;
namespace
{
#ifndef HAVE_OPENGL
@@ -80,7 +83,7 @@ namespace
#endif
}
bool cv::checkGlError(const char* file, const int line, const char* func)
bool cv::ogl::checkError(const char* file, const int line, const char* func)
{
#ifndef HAVE_OPENGL
(void) file;
@@ -307,17 +310,17 @@ namespace
#endif
////////////////////////////////////////////////////////////////////////
// GlBuffer
// ogl::Buffer
#ifndef HAVE_OPENGL
class cv::GlBuffer::Impl
class cv::ogl::Buffer::Impl
{
};
#else
class cv::GlBuffer::Impl
class cv::ogl::Buffer::Impl
{
public:
static const Ptr<Impl>& empty();
@@ -359,21 +362,21 @@ private:
#endif
};
const cv::Ptr<cv::GlBuffer::Impl>& cv::GlBuffer::Impl::empty()
const Ptr<cv::ogl::Buffer::Impl>& cv::ogl::Buffer::Impl::empty()
{
static Ptr<Impl> p(new Impl);
return p;
}
cv::GlBuffer::Impl::Impl() : bufId_(0), autoRelease_(true)
cv::ogl::Buffer::Impl::Impl() : bufId_(0), autoRelease_(true)
{
}
cv::GlBuffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
cv::ogl::Buffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{
}
cv::GlBuffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
cv::ogl::Buffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{
gl::GenBuffers(1, &bufId_);
CV_CheckGlError();
@@ -390,19 +393,19 @@ cv::GlBuffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, boo
CV_CheckGlError();
}
cv::GlBuffer::Impl::~Impl()
cv::ogl::Buffer::Impl::~Impl()
{
if (autoRelease_ && bufId_)
gl::DeleteBuffers(1, &bufId_);
}
void cv::GlBuffer::Impl::bind(GLenum target) const
void cv::ogl::Buffer::Impl::bind(GLenum target) const
{
gl::BindBuffer(target, bufId_);
CV_CheckGlError();
}
void cv::GlBuffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
void cv::ogl::Buffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{
gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
CV_CheckGlError();
@@ -414,7 +417,7 @@ void cv::GlBuffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
CV_CheckGlError();
}
void cv::GlBuffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
void cv::ogl::Buffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{
gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
CV_CheckGlError();
@@ -423,7 +426,7 @@ void cv::GlBuffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
CV_CheckGlError();
}
void cv::GlBuffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
void cv::ogl::Buffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{
gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
CV_CheckGlError();
@@ -432,7 +435,7 @@ void cv::GlBuffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
CV_CheckGlError();
}
void* cv::GlBuffer::Impl::mapHost(GLenum access)
void* cv::ogl::Buffer::Impl::mapHost(GLenum access)
{
gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
CV_CheckGlError();
@@ -443,31 +446,31 @@ void* cv::GlBuffer::Impl::mapHost(GLenum access)
return data;
}
void cv::GlBuffer::Impl::unmapHost()
void cv::ogl::Buffer::Impl::unmapHost()
{
gl::UnmapBuffer(gl::COPY_READ_BUFFER);
}
#ifdef HAVE_CUDA
void cv::GlBuffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
void cv::ogl::Buffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
cudaResource_.registerBuffer(bufId_);
cudaResource_.copyFrom(src, spitch, width, height, stream);
}
void cv::GlBuffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
void cv::ogl::Buffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{
cudaResource_.registerBuffer(bufId_);
cudaResource_.copyTo(dst, dpitch, width, height, stream);
}
void* cv::GlBuffer::Impl::mapDevice(cudaStream_t stream)
void* cv::ogl::Buffer::Impl::mapDevice(cudaStream_t stream)
{
cudaResource_.registerBuffer(bufId_);
return cudaResource_.map(stream);
}
void cv::GlBuffer::Impl::unmapDevice(cudaStream_t stream)
void cv::ogl::Buffer::Impl::unmapDevice(cudaStream_t stream)
{
cudaResource_.unmap(stream);
}
@@ -475,7 +478,7 @@ void cv::GlBuffer::Impl::unmapHost()
#endif // HAVE_OPENGL
cv::GlBuffer::GlBuffer() : rows_(0), cols_(0), type_(0)
cv::ogl::Buffer::Buffer() : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -484,7 +487,7 @@ cv::GlBuffer::GlBuffer() : rows_(0), cols_(0), type_(0)
#endif
}
cv::GlBuffer::GlBuffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
cv::ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
(void) arows;
@@ -501,7 +504,7 @@ cv::GlBuffer::GlBuffer(int arows, int acols, int atype, unsigned int abufId, boo
#endif
}
cv::GlBuffer::GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
cv::ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
(void) asize;
@@ -517,17 +520,17 @@ cv::GlBuffer::GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRele
#endif
}
cv::GlBuffer::GlBuffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
cv::ogl::Buffer::Buffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
create(arows, acols, atype, target, autoRelease);
}
cv::GlBuffer::GlBuffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
cv::ogl::Buffer::Buffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
create(asize, atype, target, autoRelease);
}
cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -545,7 +548,7 @@ cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(
break;
}
case _InputArray::OPENGL_TEXTURE2D:
case _InputArray::OPENGL_TEXTURE:
{
copyFrom(arr, target, autoRelease);
break;
@@ -572,7 +575,7 @@ cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(
#endif
}
void cv::GlBuffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
void cv::ogl::Buffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arows;
@@ -593,7 +596,7 @@ void cv::GlBuffer::create(int arows, int acols, int atype, Target target, bool a
#endif
}
void cv::GlBuffer::release()
void cv::ogl::Buffer::release()
{
#ifdef HAVE_OPENGL
if (*impl_.refcount == 1)
@@ -605,7 +608,7 @@ void cv::GlBuffer::release()
#endif
}
void cv::GlBuffer::setAutoRelease(bool flag)
void cv::ogl::Buffer::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
(void) flag;
@@ -615,7 +618,7 @@ void cv::GlBuffer::setAutoRelease(bool flag)
#endif
}
void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -625,9 +628,9 @@ void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
#else
const int kind = arr.kind();
if (kind == _InputArray::OPENGL_TEXTURE2D)
if (kind == _InputArray::OPENGL_TEXTURE)
{
GlTexture2D tex = arr.getGlTexture2D();
ogl::Texture2D tex = arr.getOGlTexture2D();
tex.copyTo(*this);
setAutoRelease(autoRelease);
return;
@@ -641,7 +644,7 @@ void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer buf = arr.getGlBuffer();
ogl::Buffer buf = arr.getOGlBuffer();
impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
break;
}
@@ -668,7 +671,7 @@ void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
#endif
}
void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) const
void cv::ogl::Buffer::copyTo(OutputArray arr, Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -682,13 +685,13 @@ void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) cons
{
case _InputArray::OPENGL_BUFFER:
{
arr.getGlBufferRef().copyFrom(*this, target, autoRelease);
arr.getOGlBufferRef().copyFrom(*this, target, autoRelease);
break;
}
case _InputArray::OPENGL_TEXTURE2D:
case _InputArray::OPENGL_TEXTURE:
{
arr.getGlTexture2DRef().copyFrom(*this, autoRelease);
arr.getOGlTexture2DRef().copyFrom(*this, autoRelease);
break;
}
@@ -716,21 +719,21 @@ void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) cons
#endif
}
cv::GlBuffer cv::GlBuffer::clone(Target target, bool autoRelease) const
cv::ogl::Buffer cv::ogl::Buffer::clone(Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
(void) target;
(void) autoRelease;
throw_nogl();
return GlBuffer();
return cv::ogl::Buffer();
#else
GlBuffer buf;
ogl::Buffer buf;
buf.copyFrom(*this, target, autoRelease);
return buf;
#endif
}
void cv::GlBuffer::bind(Target target) const
void cv::ogl::Buffer::bind(Target target) const
{
#ifndef HAVE_OPENGL
(void) target;
@@ -740,7 +743,7 @@ void cv::GlBuffer::bind(Target target) const
#endif
}
void cv::GlBuffer::unbind(Target target)
void cv::ogl::Buffer::unbind(Target target)
{
#ifndef HAVE_OPENGL
(void) target;
@@ -751,18 +754,18 @@ void cv::GlBuffer::unbind(Target target)
#endif
}
cv::Mat cv::GlBuffer::mapHost(Access access)
Mat cv::ogl::Buffer::mapHost(Access access)
{
#ifndef HAVE_OPENGL
(void) access;
throw_nogl();
return cv::Mat();
return Mat();
#else
return cv::Mat(rows_, cols_, type_, impl_->mapHost(access));
return Mat(rows_, cols_, type_, impl_->mapHost(access));
#endif
}
void cv::GlBuffer::unmapHost()
void cv::ogl::Buffer::unmapHost()
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -771,22 +774,22 @@ void cv::GlBuffer::unmapHost()
#endif
}
cv::gpu::GpuMat cv::GlBuffer::mapDevice()
GpuMat cv::ogl::Buffer::mapDevice()
{
#ifndef HAVE_OPENGL
throw_nogl();
return cv::gpu::GpuMat();
return GpuMat();
#else
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
return cv::gpu::GpuMat();
return GpuMat();
#else
return cv::gpu::GpuMat(rows_, cols_, type_, impl_->mapDevice());
return GpuMat(rows_, cols_, type_, impl_->mapDevice());
#endif
#endif
}
void cv::GlBuffer::unmapDevice()
void cv::ogl::Buffer::unmapDevice()
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -799,7 +802,7 @@ void cv::GlBuffer::unmapDevice()
#endif
}
unsigned int cv::GlBuffer::bufId() const
unsigned int cv::ogl::Buffer::bufId() const
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -809,23 +812,23 @@ unsigned int cv::GlBuffer::bufId() const
#endif
}
template <> void cv::Ptr<cv::GlBuffer::Impl>::delete_obj()
template <> void cv::Ptr<cv::ogl::Buffer::Impl>::delete_obj()
{
if (obj) delete obj;
}
//////////////////////////////////////////////////////////////////////////////////////////
// GlTexture2D
// ogl::Texture2D
#ifndef HAVE_OPENGL
class cv::GlTexture2D::Impl
class cv::ogl::Texture2D::Impl
{
};
#else
class cv::GlTexture2D::Impl
class cv::ogl::Texture2D::Impl
{
public:
static const Ptr<Impl> empty();
@@ -850,21 +853,21 @@ private:
bool autoRelease_;
};
const cv::Ptr<cv::GlTexture2D::Impl> cv::GlTexture2D::Impl::empty()
const Ptr<cv::ogl::Texture2D::Impl> cv::ogl::Texture2D::Impl::empty()
{
static Ptr<Impl> p(new Impl);
return p;
}
cv::GlTexture2D::Impl::Impl() : texId_(0), autoRelease_(true)
cv::ogl::Texture2D::Impl::Impl() : texId_(0), autoRelease_(true)
{
}
cv::GlTexture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
cv::ogl::Texture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{
}
cv::GlTexture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
cv::ogl::Texture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{
gl::GenTextures(1, &texId_);
CV_CheckGlError();
@@ -884,13 +887,13 @@ cv::GlTexture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height,
CV_CheckGlError();
}
cv::GlTexture2D::Impl::~Impl()
cv::ogl::Texture2D::Impl::~Impl()
{
if (autoRelease_ && texId_)
gl::DeleteTextures(1, &texId_);
}
void cv::GlTexture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
void cv::ogl::Texture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
@@ -905,7 +908,7 @@ void cv::GlTexture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum forma
CV_CheckGlError();
}
void cv::GlTexture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
void cv::ogl::Texture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
@@ -917,7 +920,7 @@ void cv::GlTexture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) c
CV_CheckGlError();
}
void cv::GlTexture2D::Impl::bind() const
void cv::ogl::Texture2D::Impl::bind() const
{
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
@@ -925,7 +928,7 @@ void cv::GlTexture2D::Impl::bind() const
#endif // HAVE_OPENGL
cv::GlTexture2D::GlTexture2D() : rows_(0), cols_(0), format_(NONE)
cv::ogl::Texture2D::Texture2D() : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -934,7 +937,7 @@ cv::GlTexture2D::GlTexture2D() : rows_(0), cols_(0), format_(NONE)
#endif
}
cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
(void) arows;
@@ -951,7 +954,7 @@ cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, unsigned int
#endif
}
cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
(void) asize;
@@ -967,17 +970,17 @@ cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, unsigned int atexId, bo
#endif
}
cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
create(arows, acols, aformat, autoRelease);
}
cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
create(asize, aformat, autoRelease);
}
cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -1008,10 +1011,10 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer buf = arr.getGlBuffer();
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer buf = arr.getOGlBuffer();
buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
break;
}
@@ -1021,10 +1024,10 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
throw_nocuda();
#else
GpuMat dmat = arr.getGpuMat();
GlBuffer buf(dmat, GlBuffer::PIXEL_UNPACK_BUFFER);
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif
break;
@@ -1034,7 +1037,7 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
{
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease);
break;
}
@@ -1046,7 +1049,7 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
#endif
}
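For reference, a minimal upload sketch for the renamed constructors (illustration only, not part of the patch). It assumes a current OpenGL context, and the variable names are made up.

// Upload host and device data into a cv::ogl::Texture2D via the renamed API.
static void uploadSketch()
{
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar::all(0));     // placeholder host image

    cv::ogl::Texture2D texFromMat(img);                      // direct host upload

    cv::ogl::Buffer pbo(img, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER);
    cv::ogl::Texture2D texFromBuffer(pbo);                   // upload through a pixel-unpack buffer

    cv::gpu::GpuMat d_img(img);                              // requires a CUDA-enabled build
    cv::ogl::Texture2D texFromGpuMat(d_img);                 // device upload via a temporary ogl::Buffer
}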
void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRelease)
void cv::ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arows;
@@ -1057,7 +1060,7 @@ void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRele
#else
if (rows_ != arows || cols_ != acols || format_ != aformat)
{
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease);
rows_ = arows;
cols_ = acols;
@@ -1066,7 +1069,7 @@ void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRele
#endif
}
void cv::GlTexture2D::release()
void cv::ogl::Texture2D::release()
{
#ifdef HAVE_OPENGL
if (*impl_.refcount == 1)
@@ -1078,7 +1081,7 @@ void cv::GlTexture2D::release()
#endif
}
void cv::GlTexture2D::setAutoRelease(bool flag)
void cv::ogl::Texture2D::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
(void) flag;
@@ -1088,7 +1091,7 @@ void cv::GlTexture2D::setAutoRelease(bool flag)
#endif
}
void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -1121,10 +1124,10 @@ void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer buf = arr.getGlBuffer();
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer buf = arr.getOGlBuffer();
buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
break;
}
@@ -1134,10 +1137,10 @@ void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
throw_nocuda();
#else
GpuMat dmat = arr.getGpuMat();
GlBuffer buf(dmat, GlBuffer::PIXEL_UNPACK_BUFFER);
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif
break;
@@ -1147,14 +1150,14 @@ void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
{
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
}
}
#endif
}
void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -1171,11 +1174,11 @@ void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) cons
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer& buf = arr.getGlBufferRef();
buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), GlBuffer::PIXEL_PACK_BUFFER, autoRelease);
buf.bind(GlBuffer::PIXEL_PACK_BUFFER);
ogl::Buffer& buf = arr.getOGlBufferRef();
buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease);
buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
break;
}
@@ -1184,10 +1187,10 @@ void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) cons
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
GlBuffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), GlBuffer::PIXEL_PACK_BUFFER);
buf.bind(GlBuffer::PIXEL_PACK_BUFFER);
ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER);
buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
buf.copyTo(arr);
#endif
@@ -1199,14 +1202,14 @@ void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) cons
arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
}
}
#endif
}
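A hedged round-trip sketch for the copyFrom/copyTo paths above (not from the commit); it again assumes a current OpenGL context and uses only entry points that appear in this file.

// Host -> texture -> ogl::Buffer -> host round trip with the renamed API.
static void roundTripSketch()
{
    cv::Mat src(256, 256, CV_8UC3, cv::Scalar(0, 128, 255));

    cv::ogl::Texture2D tex;
    tex.copyFrom(src);                     // upload into the texture

    cv::ogl::Buffer readback;
    tex.copyTo(readback, CV_8U);           // download through a PIXEL_PACK buffer

    cv::Mat dst;
    readback.copyTo(dst);                  // copy the buffer contents back to host memory
}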
void cv::GlTexture2D::bind() const
void cv::ogl::Texture2D::bind() const
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -1215,7 +1218,7 @@ void cv::GlTexture2D::bind() const
#endif
}
unsigned int cv::GlTexture2D::texId() const
unsigned int cv::ogl::Texture2D::texId() const
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -1225,19 +1228,19 @@ unsigned int cv::GlTexture2D::texId() const
#endif
}
template <> void cv::Ptr<cv::GlTexture2D::Impl>::delete_obj()
template <> void cv::Ptr<cv::ogl::Texture2D::Impl>::delete_obj()
{
if (obj) delete obj;
}
////////////////////////////////////////////////////////////////////////
// GlArrays
// ogl::Arrays
cv::GlArrays::GlArrays() : size_(0)
cv::ogl::Arrays::Arrays() : size_(0)
{
}
void cv::GlArrays::setVertexArray(InputArray vertex)
void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{
const int cn = vertex.channels();
const int depth = vertex.depth();
@@ -1246,37 +1249,37 @@ void cv::GlArrays::setVertexArray(InputArray vertex)
CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (vertex.kind() == _InputArray::OPENGL_BUFFER)
vertex_ = vertex.getGlBuffer();
vertex_ = vertex.getOGlBuffer();
else
vertex_.copyFrom(vertex);
size_ = vertex_.size().area();
}
void cv::GlArrays::resetVertexArray()
void cv::ogl::Arrays::resetVertexArray()
{
vertex_.release();
size_ = 0;
}
void cv::GlArrays::setColorArray(InputArray color)
void cv::ogl::Arrays::setColorArray(InputArray color)
{
const int cn = color.channels();
CV_Assert( cn == 3 || cn == 4 );
if (color.kind() == _InputArray::OPENGL_BUFFER)
color_ = color.getGlBuffer();
color_ = color.getOGlBuffer();
else
color_.copyFrom(color);
}
void cv::GlArrays::resetColorArray()
void cv::ogl::Arrays::resetColorArray()
{
color_.release();
}
void cv::GlArrays::setNormalArray(InputArray normal)
void cv::ogl::Arrays::setNormalArray(InputArray normal)
{
const int cn = normal.channels();
const int depth = normal.depth();
@@ -1285,17 +1288,17 @@ void cv::GlArrays::setNormalArray(InputArray normal)
CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (normal.kind() == _InputArray::OPENGL_BUFFER)
normal_ = normal.getGlBuffer();
normal_ = normal.getOGlBuffer();
else
normal_.copyFrom(normal);
}
void cv::GlArrays::resetNormalArray()
void cv::ogl::Arrays::resetNormalArray()
{
normal_.release();
}
void cv::GlArrays::setTexCoordArray(InputArray texCoord)
void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
const int cn = texCoord.channels();
const int depth = texCoord.depth();
@@ -1304,17 +1307,17 @@ void cv::GlArrays::setTexCoordArray(InputArray texCoord)
CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
texCoord_ = texCoord.getGlBuffer();
texCoord_ = texCoord.getOGlBuffer();
else
texCoord_.copyFrom(texCoord);
}
void cv::GlArrays::resetTexCoordArray()
void cv::ogl::Arrays::resetTexCoordArray()
{
texCoord_.release();
}
void cv::GlArrays::release()
void cv::ogl::Arrays::release()
{
resetVertexArray();
resetColorArray();
@@ -1322,7 +1325,7 @@ void cv::GlArrays::release()
resetTexCoordArray();
}
void cv::GlArrays::setAutoRelease(bool flag)
void cv::ogl::Arrays::setAutoRelease(bool flag)
{
vertex_.setAutoRelease(flag);
color_.setAutoRelease(flag);
@@ -1330,7 +1333,7 @@ void cv::GlArrays::setAutoRelease(bool flag)
texCoord_.setAutoRelease(flag);
}
void cv::GlArrays::bind() const
void cv::ogl::Arrays::bind() const
{
#ifndef HAVE_OPENGL
throw_nogl();
@@ -1349,7 +1352,7 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError();
texCoord_.bind(GlBuffer::ARRAY_BUFFER);
texCoord_.bind(ogl::Buffer::ARRAY_BUFFER);
gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
CV_CheckGlError();
@@ -1365,7 +1368,7 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::NORMAL_ARRAY);
CV_CheckGlError();
normal_.bind(GlBuffer::ARRAY_BUFFER);
normal_.bind(ogl::Buffer::ARRAY_BUFFER);
gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
CV_CheckGlError();
@@ -1381,7 +1384,7 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::COLOR_ARRAY);
CV_CheckGlError();
color_.bind(GlBuffer::ARRAY_BUFFER);
color_.bind(ogl::Buffer::ARRAY_BUFFER);
const int cn = color_.channels();
@@ -1399,20 +1402,20 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::VERTEX_ARRAY);
CV_CheckGlError();
vertex_.bind(GlBuffer::ARRAY_BUFFER);
vertex_.bind(ogl::Buffer::ARRAY_BUFFER);
gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
CV_CheckGlError();
}
GlBuffer::unbind(GlBuffer::ARRAY_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
#endif
}
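Below is a small sketch of the renamed cv::ogl::Arrays setters feeding ogl::render (illustration only). The render-mode constant cv::ogl::TRIANGLES is assumed to be the new-namespace counterpart of the old cv::RenderMode values, and a current OpenGL context is assumed as well.

// Build a minimal vertex/color pair and draw it with the renamed API.
static void arraysSketch()
{
    cv::Mat vertices(1, 3, CV_32FC3);
    vertices.at<cv::Vec3f>(0, 0) = cv::Vec3f(0.0f, 0.0f, 0.0f);
    vertices.at<cv::Vec3f>(0, 1) = cv::Vec3f(1.0f, 0.0f, 0.0f);
    vertices.at<cv::Vec3f>(0, 2) = cv::Vec3f(0.0f, 1.0f, 0.0f);

    cv::Mat colors(1, 3, CV_8UC3, cv::Scalar(0, 255, 0));

    cv::ogl::Arrays arr;
    arr.setVertexArray(vertices);          // stored in an internal cv::ogl::Buffer
    arr.setColorArray(colors);

    cv::ogl::render(arr, cv::ogl::TRIANGLES, cv::Scalar::all(255));
}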
////////////////////////////////////////////////////////////////////////
// Rendering
void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
void cv::ogl::render(const ogl::Texture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{
#ifndef HAVE_OPENGL
(void) tex;
@@ -1460,7 +1463,7 @@ void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> tex
texRect.x + texRect.width, texRect.y
};
GlBuffer::unbind(GlBuffer::ARRAY_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError();
@@ -1478,13 +1481,13 @@ void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> tex
gl::VertexPointer(3, gl::FLOAT, 0, vertex);
CV_CheckGlError();
gl::DrawArrays(cv::RenderMode::QUADS, 0, 4);
gl::DrawArrays(gl::QUADS, 0, 4);
CV_CheckGlError();
}
#endif
}
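Finally, a hedged end-to-end sketch of the texture render path (not part of this commit). The highgui pieces — cv::namedWindow with WINDOW_OPENGL, cv::setOpenGlDrawCallback, cv::updateWindow — and the header names are assumptions, not something this patch touches.

#include <opencv2/core/opengl.hpp>         // assumed header name after the move
#include <opencv2/highgui/highgui.hpp>     // assumed; provides the OpenGL window helpers

// Draw a cv::ogl::Texture2D inside an OpenGL-enabled highgui window.
static void onDraw(void* userdata)
{
    cv::ogl::Texture2D* tex = static_cast<cv::ogl::Texture2D*>(userdata);
    // Fill the whole window with the whole texture.
    cv::ogl::render(*tex, cv::Rect_<double>(0.0, 0.0, 1.0, 1.0),
                          cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
}

int main()
{
    cv::namedWindow("ogl", cv::WINDOW_OPENGL);     // creates the GL context
    cv::ogl::Texture2D tex(cv::Mat(480, 640, CV_8UC3, cv::Scalar(0, 128, 255)));
    cv::setOpenGlDrawCallback("ogl", onDraw, &tex);
    cv::updateWindow("ogl");                       // forces a redraw through onDraw
    cv::waitKey();
    return 0;
}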
void cv::render(const GlArrays& arr, int mode, Scalar color)
void cv::ogl::render(const ogl::Arrays& arr, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -1503,7 +1506,7 @@ void cv::render(const GlArrays& arr, int mode, Scalar color)
#endif
}
void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
(void) arr;
@@ -1524,7 +1527,7 @@ void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
{
case _InputArray::OPENGL_BUFFER :
{
GlBuffer buf = indices.getGlBuffer();
ogl::Buffer buf = indices.getOGlBuffer();
const int depth = buf.depth();
@@ -1539,11 +1542,11 @@ void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
else
type = gl::UNSIGNED_INT;
buf.bind(GlBuffer::ELEMENT_ARRAY_BUFFER);
buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);
gl::DrawElements(mode, buf.size().area(), type, 0);
GlBuffer::unbind(GlBuffer::ELEMENT_ARRAY_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);
break;
}
@@ -1566,7 +1569,7 @@ void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
else
type = gl::UNSIGNED_INT;
GlBuffer::unbind(GlBuffer::ELEMENT_ARRAY_BUFFER);
ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);
gl::DrawElements(mode, mat.size().area(), type, mat.data);
}