Merge pull request #625 from jet47/gpu-fixes

Andrey Kamaev 2013-03-12 18:14:23 +04:00
commit 9f3ce0dd97
31 changed files with 2261 additions and 944 deletions

View File

@ -13,7 +13,7 @@ if(CMAKE_COMPILER_IS_GNUCXX AND NOT APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL "Cl
   return()
 endif()
 
-find_package(CUDA 4.2)
+find_package(CUDA 4.2 QUIET)
 
 if(CUDA_FOUND)
   set(HAVE_CUDA 1)

View File

@ -230,7 +230,7 @@ foreach(__opttype OPT DBG)
     endif()
   endif()
 
-  list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY} ${CUDA_nvcuvid_LIBRARY} ${CUDA_nvcuvenc_LIBRARY})
+  list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
 
   if(OpenCV_USE_CUBLAS)
     list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUBLAS_LIBRARIES})

View File

@ -84,10 +84,11 @@ class Mat;
 class SparseMat;
 typedef Mat MatND;
 
-class GlBuffer;
-class GlTexture2D;
-class GlArrays;
-class GlCamera;
+namespace ogl {
+    class Buffer;
+    class Texture2D;
+    class Arrays;
+}
 
 namespace gpu {
     class GpuMat;
@ -1305,7 +1306,7 @@ public:
         STD_VECTOR_MAT = 5 << KIND_SHIFT,
         EXPR = 6 << KIND_SHIFT,
         OPENGL_BUFFER = 7 << KIND_SHIFT,
-        OPENGL_TEXTURE2D = 8 << KIND_SHIFT,
+        OPENGL_TEXTURE = 8 << KIND_SHIFT,
         GPU_MAT = 9 << KIND_SHIFT
     };
     _InputArray();
@ -1321,15 +1322,15 @@ public:
     template<typename _Tp, int m, int n> _InputArray(const Matx<_Tp, m, n>& matx);
     _InputArray(const Scalar& s);
     _InputArray(const double& val);
-    _InputArray(const GlBuffer& buf);
-    _InputArray(const GlTexture2D& tex);
     _InputArray(const gpu::GpuMat& d_mat);
+    _InputArray(const ogl::Buffer& buf);
+    _InputArray(const ogl::Texture2D& tex);
 
     virtual Mat getMat(int i=-1) const;
     virtual void getMatVector(std::vector<Mat>& mv) const;
-    virtual GlBuffer getGlBuffer() const;
-    virtual GlTexture2D getGlTexture2D() const;
     virtual gpu::GpuMat getGpuMat() const;
+    virtual ogl::Buffer getOGlBuffer() const;
+    virtual ogl::Texture2D getOGlTexture2D() const;
 
     virtual int kind() const;
     virtual Size size(int i=-1) const;
@ -1379,8 +1380,8 @@ public:
     template<typename _Tp, int m, int n> _OutputArray(Matx<_Tp, m, n>& matx);
     template<typename _Tp> _OutputArray(_Tp* vec, int n);
     _OutputArray(gpu::GpuMat& d_mat);
-    _OutputArray(GlBuffer& buf);
-    _OutputArray(GlTexture2D& tex);
+    _OutputArray(ogl::Buffer& buf);
+    _OutputArray(ogl::Texture2D& tex);
 
     _OutputArray(const Mat& m);
     template<typename _Tp> _OutputArray(const std::vector<_Tp>& vec);
@ -1391,16 +1392,16 @@ public:
     template<typename _Tp, int m, int n> _OutputArray(const Matx<_Tp, m, n>& matx);
     template<typename _Tp> _OutputArray(const _Tp* vec, int n);
     _OutputArray(const gpu::GpuMat& d_mat);
-    _OutputArray(const GlBuffer& buf);
-    _OutputArray(const GlTexture2D& tex);
+    _OutputArray(const ogl::Buffer& buf);
+    _OutputArray(const ogl::Texture2D& tex);
 
     virtual bool fixedSize() const;
     virtual bool fixedType() const;
     virtual bool needed() const;
     virtual Mat& getMatRef(int i=-1) const;
     virtual gpu::GpuMat& getGpuMatRef() const;
-    virtual GlBuffer& getGlBufferRef() const;
-    virtual GlTexture2D& getGlTexture2DRef() const;
+    virtual ogl::Buffer& getOGlBufferRef() const;
+    virtual ogl::Texture2D& getOGlTexture2DRef() const;
     virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
     virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
     virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
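For reference (not part of the diff): with the overloads above, an ogl::Buffer can be passed anywhere a cv::InputArray is accepted. A minimal sketch, assuming OpenCV is built with OpenGL support and a GL context is current; the helper function is hypothetical:

#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>

// Hypothetical helper: works uniformly for Mat, gpu::GpuMat and now ogl::Buffer inputs.
static cv::Size inputSize(cv::InputArray arr)
{
    if (arr.kind() == cv::_InputArray::OPENGL_BUFFER)
        return arr.getOGlBuffer().size();   // accessor introduced by this change
    return arr.size();
}

int main()
{
    cv::Mat host(480, 640, CV_8UC3, cv::Scalar::all(0));
    cv::ogl::Buffer buf(host);              // uploads the host data into a GL buffer
    return inputSize(buf).area() > 0 ? 0 : 1;
}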

View File

@ -143,7 +143,6 @@ namespace cv { namespace gpu
         int multi_processor_count_;
         int majorVersion_;
         int minorVersion_;
-        size_t sharedMemPerBlock_;
     };
 
     CV_EXPORTS void printCudaDeviceInfo(int device);
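The sharedMemPerBlock_ member disappears from DeviceInfo because the value is now read on demand from a cached cudaDeviceProp (see the DeviceProps helper added later in this commit). The public API is unchanged; a minimal sketch, assuming a CUDA-enabled build and at least one device:

#include <opencv2/core/gpumat.hpp>
#include <iostream>

int main()
{
    if (cv::gpu::getCudaEnabledDeviceCount() < 1)
        return 0;   // no device (or no CUDA support): the DeviceInfo calls below would throw

    cv::gpu::DeviceInfo info(0);
    std::cout << info.name() << ": "
              << info.sharedMemPerBlock() << " bytes of shared memory per block, "
              << info.freeMemory() << " of " << info.totalMemory() << " bytes free\n";
    return 0;
}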

View File

@ -750,4 +750,14 @@ typedef struct CvBigFuncTable
     (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \
     (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
 
+namespace cv { namespace ogl {
+CV_EXPORTS bool checkError(const char* file, const int line, const char* func = "");
+}}
+
+#if defined(__GNUC__)
+#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__, __func__)) )
+#else
+#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__)) )
+#endif
+
 #endif // __OPENCV_CORE_INTERNAL_HPP__
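The GL error check is renamed from cv::checkGlError to cv::ogl::checkError and moves into this internal header (it is removed from the public opengl_interop.hpp below). A hedged sketch of how the macro is used inside the library's own GL code; the surrounding function is hypothetical and the assert fires only in debug builds:

#include <opencv2/core/internal.hpp>

static void afterSomeRawGlCalls()
{
    // ... issue OpenGL calls here ...
    CV_CheckGlError();   // expands to CV_DbgAssert(cv::ogl::checkError(__FILE__, __LINE__[, __func__]))
}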

View File

@ -47,20 +47,12 @@
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
namespace cv { namespace cv { namespace ogl {
CV_EXPORTS bool checkGlError(const char* file, const int line, const char* func = "");
#if defined(__GNUC__)
#define CV_CheckGlError() CV_DbgAssert( (cv::checkGlError(__FILE__, __LINE__, __func__)) )
#else
#define CV_CheckGlError() CV_DbgAssert( (cv::checkGlError(__FILE__, __LINE__)) )
#endif
/////////////////// OpenGL Objects /////////////////// /////////////////// OpenGL Objects ///////////////////
//! Smart pointer for OpenGL buffer memory with reference counting. //! Smart pointer for OpenGL buffer memory with reference counting.
class CV_EXPORTS GlBuffer class CV_EXPORTS Buffer
{ {
public: public:
enum Target enum Target
@ -79,18 +71,18 @@ public:
}; };
//! create empty buffer //! create empty buffer
GlBuffer(); Buffer();
//! create buffer from existed buffer id //! create buffer from existed buffer id
GlBuffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false); Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false);
GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false); Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false);
//! create buffer //! create buffer
GlBuffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
GlBuffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
//! copy from host/device memory //! copy from host/device memory
explicit GlBuffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);
//! create buffer //! create buffer
void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
@ -109,7 +101,7 @@ public:
void copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const; void copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const;
//! create copy of current buffer //! create copy of current buffer
GlBuffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const; Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const;
//! bind buffer for specified target //! bind buffer for specified target
void bind(Target target) const; void bind(Target target) const;
@ -147,10 +139,8 @@ private:
int type_; int type_;
}; };
template <> CV_EXPORTS void Ptr<GlBuffer::Impl>::delete_obj();
//! Smart pointer for OpenGL 2D texture memory with reference counting. //! Smart pointer for OpenGL 2D texture memory with reference counting.
class CV_EXPORTS GlTexture2D class CV_EXPORTS Texture2D
{ {
public: public:
enum Format enum Format
@ -162,18 +152,18 @@ public:
}; };
//! create empty texture //! create empty texture
GlTexture2D(); Texture2D();
//! create texture from existed texture id //! create texture from existed texture id
GlTexture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false); Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false);
GlTexture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false); Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false);
//! create texture //! create texture
GlTexture2D(int arows, int acols, Format aformat, bool autoRelease = false); Texture2D(int arows, int acols, Format aformat, bool autoRelease = false);
GlTexture2D(Size asize, Format aformat, bool autoRelease = false); Texture2D(Size asize, Format aformat, bool autoRelease = false);
//! copy from host/device memory //! copy from host/device memory
explicit GlTexture2D(InputArray arr, bool autoRelease = false); explicit Texture2D(InputArray arr, bool autoRelease = false);
//! create texture //! create texture
void create(int arows, int acols, Format aformat, bool autoRelease = false); void create(int arows, int acols, Format aformat, bool autoRelease = false);
@ -212,13 +202,11 @@ private:
Format format_; Format format_;
}; };
template <> CV_EXPORTS void Ptr<GlTexture2D::Impl>::delete_obj();
//! OpenGL Arrays //! OpenGL Arrays
class CV_EXPORTS GlArrays class CV_EXPORTS Arrays
{ {
public: public:
GlArrays(); Arrays();
void setVertexArray(InputArray vertex); void setVertexArray(InputArray vertex);
void resetVertexArray(); void resetVertexArray();
@ -243,46 +231,53 @@ public:
private: private:
int size_; int size_;
GlBuffer vertex_; Buffer vertex_;
GlBuffer color_; Buffer color_;
GlBuffer normal_; Buffer normal_;
GlBuffer texCoord_; Buffer texCoord_;
}; };
/////////////////// Render Functions /////////////////// /////////////////// Render Functions ///////////////////
//! render texture rectangle in window //! render texture rectangle in window
CV_EXPORTS void render(const GlTexture2D& tex, CV_EXPORTS void render(const Texture2D& tex,
Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0), Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0),
Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0)); Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));
//! render mode //! render mode
namespace RenderMode { enum {
enum { POINTS = 0x0000,
POINTS = 0x0000, LINES = 0x0001,
LINES = 0x0001, LINE_LOOP = 0x0002,
LINE_LOOP = 0x0002, LINE_STRIP = 0x0003,
LINE_STRIP = 0x0003, TRIANGLES = 0x0004,
TRIANGLES = 0x0004, TRIANGLE_STRIP = 0x0005,
TRIANGLE_STRIP = 0x0005, TRIANGLE_FAN = 0x0006,
TRIANGLE_FAN = 0x0006, QUADS = 0x0007,
QUADS = 0x0007, QUAD_STRIP = 0x0008,
QUAD_STRIP = 0x0008, POLYGON = 0x0009
POLYGON = 0x0009 };
};
}
//! render OpenGL arrays //! render OpenGL arrays
CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255)); CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255));
CV_EXPORTS void render(const GlArrays& arr, InputArray indices, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255)); CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255));
}} // namespace cv::gl
namespace cv { namespace gpu {
//! set a CUDA device to use OpenGL interoperability
CV_EXPORTS void setGlDevice(int device = 0);
}}
namespace cv {
template <> CV_EXPORTS void Ptr<cv::ogl::Buffer::Impl>::delete_obj();
template <> CV_EXPORTS void Ptr<cv::ogl::Texture2D::Impl>::delete_obj();
namespace gpu {
//! set a CUDA device to use OpenGL interoperability
CV_EXPORTS void setGlDevice(int device = 0);
} }
} // namespace cv
#endif // __cplusplus #endif // __cplusplus
#endif // __OPENCV_OPENGL_INTEROP_HPP__ #endif // __OPENCV_OPENGL_INTEROP_HPP__
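For reference (not part of the diff): the former cv::GlBuffer / cv::GlTexture2D / cv::GlArrays now live in cv::ogl as Buffer / Texture2D / Arrays, and the render-mode constants move from the RenderMode namespace directly into cv::ogl. A hedged usage sketch, assuming OpenCV and highgui are built with OpenGL support; "input.png" is only a placeholder:

#include <opencv2/core/opengl_interop.hpp>
#include <opencv2/highgui/highgui.hpp>

static cv::ogl::Texture2D tex;   // formerly cv::GlTexture2D

static void onDraw(void*)
{
    cv::ogl::render(tex);        // draw the texture over the whole window
}

int main()
{
    cv::Mat img = cv::imread("input.png");
    if (img.empty())
        return 1;

    cv::namedWindow("gl", cv::WINDOW_OPENGL);   // creates a GL context and makes it current
    tex.copyFrom(img);                          // upload host data into the GL texture
    cv::setOpenGlDrawCallback("gl", onDraw);
    cv::updateWindow("gl");
    cv::waitKey();
    return 0;
}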

View File

@ -60,8 +60,111 @@
#endif #endif
#endif #endif
using namespace cv;
using namespace cv::gpu;
#ifndef HAVE_CUDA
#define throw_nogpu CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support")
#else // HAVE_CUDA
namespace
{
#if defined(__GNUC__)
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__)
#endif
inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
{
if (cudaSuccess != err)
cv::gpu::error(cudaGetErrorString(err), file, line, func);
}
inline void ___nppSafeCall(int err, const char *file, const int line, const char *func = "")
{
if (err < 0)
{
std::ostringstream msg;
msg << "NPP API Call Error: " << err;
cv::gpu::error(msg.str().c_str(), file, line, func);
}
}
}
#endif // HAVE_CUDA
//////////////////////////////// Initialization & Info //////////////////////// //////////////////////////////// Initialization & Info ////////////////////////
#ifndef HAVE_CUDA
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu; }
int cv::gpu::getDevice() { throw_nogpu; return 0; }
void cv::gpu::resetDevice() { throw_nogpu; }
bool cv::gpu::deviceSupports(FeatureSet) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::builtWith(FeatureSet) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::has(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasPtx(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasBin(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrGreater(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int, int) { throw_nogpu; return false; }
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int, int) { throw_nogpu; return false; }
size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const { throw_nogpu; return 0; }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu; }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu; return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu; return 0; }
bool cv::gpu::DeviceInfo::supports(FeatureSet) const { throw_nogpu; return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu; return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu; }
void cv::gpu::printCudaDeviceInfo(int) { throw_nogpu; }
void cv::gpu::printShortCudaDeviceInfo(int) { throw_nogpu; }
#else // HAVE_CUDA
int cv::gpu::getCudaEnabledDeviceCount()
{
int count;
cudaError_t error = cudaGetDeviceCount( &count );
if (error == cudaErrorInsufficientDriver)
return -1;
if (error == cudaErrorNoDevice)
return 0;
cudaSafeCall( error );
return count;
}
void cv::gpu::setDevice(int device)
{
cudaSafeCall( cudaSetDevice( device ) );
}
int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
void cv::gpu::resetDevice()
{
cudaSafeCall( cudaDeviceReset() );
}
namespace namespace
{ {
class CudaArch class CudaArch
@ -69,7 +172,7 @@ namespace
public: public:
CudaArch(); CudaArch();
bool builtWith(cv::gpu::FeatureSet feature_set) const; bool builtWith(FeatureSet feature_set) const;
bool hasPtx(int major, int minor) const; bool hasPtx(int major, int minor) const;
bool hasBin(int major, int minor) const; bool hasBin(int major, int minor) const;
bool hasEqualOrLessPtx(int major, int minor) const; bool hasEqualOrLessPtx(int major, int minor) const;
@ -88,14 +191,12 @@ namespace
CudaArch::CudaArch() CudaArch::CudaArch()
{ {
#ifdef HAVE_CUDA
fromStr(CUDA_ARCH_BIN, bin); fromStr(CUDA_ARCH_BIN, bin);
fromStr(CUDA_ARCH_PTX, ptx); fromStr(CUDA_ARCH_PTX, ptx);
fromStr(CUDA_ARCH_FEATURES, features); fromStr(CUDA_ARCH_FEATURES, features);
#endif
} }
bool CudaArch::builtWith(cv::gpu::FeatureSet feature_set) const bool CudaArch::builtWith(FeatureSet feature_set) const
{ {
return !features.empty() && (features.back() >= feature_set); return !features.empty() && (features.back() >= feature_set);
} }
@ -145,12 +246,7 @@ namespace
bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set) bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
{ {
#if defined (HAVE_CUDA)
return cudaArch.builtWith(feature_set); return cudaArch.builtWith(feature_set);
#else
(void)feature_set;
return false;
#endif
} }
bool cv::gpu::TargetArchs::has(int major, int minor) bool cv::gpu::TargetArchs::has(int major, int minor)
@ -160,35 +256,17 @@ bool cv::gpu::TargetArchs::has(int major, int minor)
bool cv::gpu::TargetArchs::hasPtx(int major, int minor) bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
{ {
#if defined (HAVE_CUDA)
return cudaArch.hasPtx(major, minor); return cudaArch.hasPtx(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
} }
bool cv::gpu::TargetArchs::hasBin(int major, int minor) bool cv::gpu::TargetArchs::hasBin(int major, int minor)
{ {
#if defined (HAVE_CUDA)
return cudaArch.hasBin(major, minor); return cudaArch.hasBin(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
} }
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor) bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
{ {
#if defined (HAVE_CUDA)
return cudaArch.hasEqualOrLessPtx(major, minor); return cudaArch.hasEqualOrLessPtx(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
} }
bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor) bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
@ -198,24 +276,12 @@ bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor) bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
{ {
#if defined (HAVE_CUDA)
return cudaArch.hasEqualOrGreaterPtx(major, minor); return cudaArch.hasEqualOrGreaterPtx(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
} }
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor) bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
{ {
#if defined (HAVE_CUDA)
return cudaArch.hasEqualOrGreaterBin(major, minor); return cudaArch.hasEqualOrGreaterBin(major, minor);
#else
(void)major;
(void)minor;
return false;
#endif
} }
bool cv::gpu::deviceSupports(FeatureSet feature_set) bool cv::gpu::deviceSupports(FeatureSet feature_set)
@ -243,108 +309,84 @@ bool cv::gpu::deviceSupports(FeatureSet feature_set)
return TargetArchs::builtWith(feature_set) && (version >= feature_set); return TargetArchs::builtWith(feature_set) && (version >= feature_set);
} }
#if !defined (HAVE_CUDA)
#define throw_nogpu CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support")
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu; }
int cv::gpu::getDevice() { throw_nogpu; return 0; }
void cv::gpu::resetDevice() { throw_nogpu; }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu; return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu; return 0; }
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet) const { throw_nogpu; return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu; return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu; }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu; }
void cv::gpu::printCudaDeviceInfo(int) { throw_nogpu; }
void cv::gpu::printShortCudaDeviceInfo(int) { throw_nogpu; }
#undef throw_nogpu
#else // HAVE_CUDA
namespace namespace
{ {
#if defined(__GNUC__) class DeviceProps
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#define nppSafeCall(expr) ___nppSafeCall(expr, __FILE__, __LINE__)
#endif
inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
{ {
if (cudaSuccess != err) public:
cv::gpu::error(cudaGetErrorString(err), file, line, func); DeviceProps();
~DeviceProps();
cudaDeviceProp* get(int devID);
private:
std::vector<cudaDeviceProp*> props_;
};
DeviceProps::DeviceProps()
{
props_.resize(10, 0);
} }
inline void ___nppSafeCall(int err, const char *file, const int line, const char *func = "") DeviceProps::~DeviceProps()
{ {
if (err < 0) for (size_t i = 0; i < props_.size(); ++i)
{ {
std::ostringstream msg; if (props_[i])
msg << "NPP API Call Error: " << err; delete props_[i];
cv::gpu::error(msg.str().c_str(), file, line, func);
} }
props_.clear();
} }
cudaDeviceProp* DeviceProps::get(int devID)
{
if (devID >= (int) props_.size())
props_.resize(devID + 5, 0);
if (!props_[devID])
{
props_[devID] = new cudaDeviceProp;
cudaSafeCall( cudaGetDeviceProperties(props_[devID], devID) );
}
return props_[devID];
}
DeviceProps deviceProps;
} }
int cv::gpu::getCudaEnabledDeviceCount() size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const
{ {
int count; return deviceProps.get(device_id_)->sharedMemPerBlock;
cudaError_t error = cudaGetDeviceCount( &count );
if (error == cudaErrorInsufficientDriver)
return -1;
if (error == cudaErrorNoDevice)
return 0;
cudaSafeCall(error);
return count;
} }
void cv::gpu::setDevice(int device) void cv::gpu::DeviceInfo::queryMemory(size_t& _totalMemory, size_t& _freeMemory) const
{ {
cudaSafeCall( cudaSetDevice( device ) ); int prevDeviceID = getDevice();
} if (prevDeviceID != device_id_)
setDevice(device_id_);
int cv::gpu::getDevice() cudaSafeCall( cudaMemGetInfo(&_freeMemory, &_totalMemory) );
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
void cv::gpu::resetDevice() if (prevDeviceID != device_id_)
{ setDevice(prevDeviceID);
cudaSafeCall( cudaDeviceReset() );
} }
size_t cv::gpu::DeviceInfo::freeMemory() const size_t cv::gpu::DeviceInfo::freeMemory() const
{ {
size_t free_memory, total_memory; size_t _totalMemory, _freeMemory;
queryMemory(free_memory, total_memory); queryMemory(_totalMemory, _freeMemory);
return free_memory; return _freeMemory;
} }
size_t cv::gpu::DeviceInfo::totalMemory() const size_t cv::gpu::DeviceInfo::totalMemory() const
{ {
size_t free_memory, total_memory; size_t _totalMemory, _freeMemory;
queryMemory(free_memory, total_memory); queryMemory(_totalMemory, _freeMemory);
return total_memory; return _totalMemory;
} }
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet feature_set) const bool cv::gpu::DeviceInfo::supports(FeatureSet feature_set) const
{ {
int version = majorVersion() * 10 + minorVersion(); int version = majorVersion() * 10 + minorVersion();
return version >= feature_set; return version >= feature_set;
@ -366,27 +408,12 @@ bool cv::gpu::DeviceInfo::isCompatible() const
void cv::gpu::DeviceInfo::query() void cv::gpu::DeviceInfo::query()
{ {
cudaDeviceProp prop; const cudaDeviceProp* prop = deviceProps.get(device_id_);
cudaSafeCall(cudaGetDeviceProperties(&prop, device_id_));
name_ = prop.name;
multi_processor_count_ = prop.multiProcessorCount;
majorVersion_ = prop.major;
minorVersion_ = prop.minor;
sharedMemPerBlock_ = prop.sharedMemPerBlock;
}
size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const {return sharedMemPerBlock_;} name_ = prop->name;
multi_processor_count_ = prop->multiProcessorCount;
void cv::gpu::DeviceInfo::queryMemory(size_t& free_memory, size_t& total_memory) const majorVersion_ = prop->major;
{ minorVersion_ = prop->minor;
int prev_device_id = getDevice();
if (prev_device_id != device_id_)
setDevice(device_id_);
cudaSafeCall(cudaMemGetInfo(&free_memory, &total_memory));
if (prev_device_id != device_id_)
setDevice(prev_device_id);
} }
namespace namespace
@ -642,7 +669,7 @@ cv::gpu::GpuMat::GpuMat(const Mat& m) :
upload(m); upload(m);
} }
cv::gpu::GpuMat& cv::gpu::GpuMat::operator = (const cv::gpu::GpuMat& m) GpuMat& cv::gpu::GpuMat::operator = (const GpuMat& m)
{ {
if (this != &m) if (this != &m)
{ {
@ -689,7 +716,7 @@ void cv::gpu::GpuMat::locateROI(Size& wholeSize, Point& ofs) const
wholeSize.width = std::max(static_cast<int>((delta2 - step * (wholeSize.height - 1)) / esz), ofs.x + cols); wholeSize.width = std::max(static_cast<int>((delta2 - step * (wholeSize.height - 1)) / esz), ofs.x + cols);
} }
cv::gpu::GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, int dright) GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, int dright)
{ {
Size wholeSize; Size wholeSize;
Point ofs; Point ofs;
@ -715,7 +742,7 @@ cv::gpu::GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, in
return *this; return *this;
} }
cv::gpu::GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
{ {
GpuMat hdr = *this; GpuMat hdr = *this;
@ -758,7 +785,7 @@ cv::gpu::GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
return hdr; return hdr;
} }
cv::Mat::Mat(const cv::gpu::GpuMat& m) : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) cv::Mat::Mat(const GpuMat& m) : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows)
{ {
m.download(*this); m.download(*this);
} }
@ -800,7 +827,7 @@ void cv::gpu::ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m)
} }
} }
cv::gpu::GpuMat cv::gpu::allocMatFromBuf(int rows, int cols, int type, GpuMat &mat) GpuMat cv::gpu::allocMatFromBuf(int rows, int cols, int type, GpuMat &mat)
{ {
if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols) if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols)
return mat(Rect(0, 0, cols, rows)); return mat(Rect(0, 0, cols, rows));
@ -814,41 +841,41 @@ namespace
public: public:
virtual ~GpuFuncTable() {} virtual ~GpuFuncTable() {}
virtual void copy(const cv::Mat& src, cv::gpu::GpuMat& dst) const = 0; virtual void copy(const Mat& src, GpuMat& dst) const = 0;
virtual void copy(const cv::gpu::GpuMat& src, cv::Mat& dst) const = 0; virtual void copy(const GpuMat& src, Mat& dst) const = 0;
virtual void copy(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const = 0; virtual void copy(const GpuMat& src, GpuMat& dst) const = 0;
virtual void copyWithMask(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask) const = 0; virtual void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask) const = 0;
virtual void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const = 0; virtual void convert(const GpuMat& src, GpuMat& dst) const = 0;
virtual void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, double alpha, double beta) const = 0; virtual void convert(const GpuMat& src, GpuMat& dst, double alpha, double beta) const = 0;
virtual void setTo(cv::gpu::GpuMat& m, cv::Scalar s, const cv::gpu::GpuMat& mask) const = 0; virtual void setTo(GpuMat& m, Scalar s, const GpuMat& mask) const = 0;
virtual void mallocPitch(void** devPtr, size_t* step, size_t width, size_t height) const = 0; virtual void mallocPitch(void** devPtr, size_t* step, size_t width, size_t height) const = 0;
virtual void free(void* devPtr) const = 0; virtual void free(void* devPtr) const = 0;
}; };
} }
#if !defined HAVE_CUDA || defined(CUDA_DISABLER_) #ifndef HAVE_CUDA
namespace namespace
{ {
class EmptyFuncTable : public GpuFuncTable class EmptyFuncTable : public GpuFuncTable
{ {
public: public:
void copy(const cv::Mat&, cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void copy(const Mat&, GpuMat&) const { throw_nogpu; }
void copy(const cv::gpu::GpuMat&, cv::Mat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void copy(const GpuMat&, Mat&) const { throw_nogpu; }
void copy(const cv::gpu::GpuMat&, cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void copy(const GpuMat&, GpuMat&) const { throw_nogpu; }
void copyWithMask(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, const cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void copyWithMask(const GpuMat&, GpuMat&, const GpuMat&) const { throw_nogpu; }
void convert(const cv::gpu::GpuMat&, cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void convert(const GpuMat&, GpuMat&) const { throw_nogpu; }
void convert(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, double, double) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void convert(const GpuMat&, GpuMat&, double, double) const { throw_nogpu; }
void setTo(cv::gpu::GpuMat&, cv::Scalar, const cv::gpu::GpuMat&) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void setTo(GpuMat&, Scalar, const GpuMat&) const { throw_nogpu; }
void mallocPitch(void**, size_t*, size_t, size_t) const { CV_Error(CV_GpuNotSupported, "The library is compiled without CUDA support"); } void mallocPitch(void**, size_t*, size_t, size_t) const { throw_nogpu; }
void free(void*) const {} void free(void*) const {}
}; };
@ -876,15 +903,15 @@ namespace cv { namespace gpu { namespace device
namespace namespace
{ {
template <typename T> void kernelSetCaller(cv::gpu::GpuMat& src, cv::Scalar s, cudaStream_t stream) template <typename T> void kernelSetCaller(GpuMat& src, Scalar s, cudaStream_t stream)
{ {
cv::Scalar_<T> sf = s; Scalar_<T> sf = s;
cv::gpu::device::set_to_gpu(src, sf.val, src.channels(), stream); cv::gpu::device::set_to_gpu(src, sf.val, src.channels(), stream);
} }
template <typename T> void kernelSetCaller(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask, cudaStream_t stream) template <typename T> void kernelSetCaller(GpuMat& src, Scalar s, const GpuMat& mask, cudaStream_t stream)
{ {
cv::Scalar_<T> sf = s; Scalar_<T> sf = s;
cv::gpu::device::set_to_gpu(src, sf.val, mask, src.channels(), stream); cv::gpu::device::set_to_gpu(src, sf.val, mask, src.channels(), stream);
} }
} }
@ -992,7 +1019,7 @@ namespace
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t; typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t; typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t;
static void call(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) static void call(const GpuMat& src, GpuMat& dst)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
@ -1007,7 +1034,7 @@ namespace
{ {
typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t; typedef typename NPPTypeTraits<DDEPTH>::npp_type dst_t;
static void call(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) static void call(const GpuMat& src, GpuMat& dst)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
@ -1047,13 +1074,13 @@ namespace
{ {
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t; typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s) static void call(GpuMat& src, Scalar s)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
sz.height = src.rows; sz.height = src.rows;
cv::Scalar_<src_t> nppS = s; Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS.val, src.ptr<src_t>(), static_cast<int>(src.step), sz) ); nppSafeCall( func(nppS.val, src.ptr<src_t>(), static_cast<int>(src.step), sz) );
@ -1064,13 +1091,13 @@ namespace
{ {
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t; typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s) static void call(GpuMat& src, Scalar s)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
sz.height = src.rows; sz.height = src.rows;
cv::Scalar_<src_t> nppS = s; Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS[0], src.ptr<src_t>(), static_cast<int>(src.step), sz) ); nppSafeCall( func(nppS[0], src.ptr<src_t>(), static_cast<int>(src.step), sz) );
@ -1095,13 +1122,13 @@ namespace
{ {
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t; typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask) static void call(GpuMat& src, Scalar s, const GpuMat& mask)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
sz.height = src.rows; sz.height = src.rows;
cv::Scalar_<src_t> nppS = s; Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS.val, src.ptr<src_t>(), static_cast<int>(src.step), sz, mask.ptr<Npp8u>(), static_cast<int>(mask.step)) ); nppSafeCall( func(nppS.val, src.ptr<src_t>(), static_cast<int>(src.step), sz, mask.ptr<Npp8u>(), static_cast<int>(mask.step)) );
@ -1112,13 +1139,13 @@ namespace
{ {
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t; typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask) static void call(GpuMat& src, Scalar s, const GpuMat& mask)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
sz.height = src.rows; sz.height = src.rows;
cv::Scalar_<src_t> nppS = s; Scalar_<src_t> nppS = s;
nppSafeCall( func(nppS[0], src.ptr<src_t>(), static_cast<int>(src.step), sz, mask.ptr<Npp8u>(), static_cast<int>(mask.step)) ); nppSafeCall( func(nppS[0], src.ptr<src_t>(), static_cast<int>(src.step), sz, mask.ptr<Npp8u>(), static_cast<int>(mask.step)) );
@ -1140,7 +1167,7 @@ namespace
{ {
typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t; typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
static void call(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask, cudaStream_t /*stream*/) static void call(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t /*stream*/)
{ {
NppiSize sz; NppiSize sz;
sz.width = src.cols; sz.width = src.cols;
@ -1163,20 +1190,20 @@ namespace
class CudaFuncTable : public GpuFuncTable class CudaFuncTable : public GpuFuncTable
{ {
public: public:
void copy(const cv::Mat& src, cv::gpu::GpuMat& dst) const void copy(const Mat& src, GpuMat& dst) const
{ {
cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyHostToDevice) ); cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyHostToDevice) );
} }
void copy(const cv::gpu::GpuMat& src, cv::Mat& dst) const void copy(const GpuMat& src, Mat& dst) const
{ {
cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost) ); cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost) );
} }
void copy(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const void copy(const GpuMat& src, GpuMat& dst) const
{ {
cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToDevice) ); cudaSafeCall( cudaMemcpy2D(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToDevice) );
} }
void copyWithMask(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask) const void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask) const
{ {
CV_Assert(src.depth() <= CV_64F && src.channels() <= 4); CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(src.size() == dst.size() && src.type() == dst.type()); CV_Assert(src.size() == dst.size() && src.type() == dst.type());
@ -1184,11 +1211,11 @@ namespace
if (src.depth() == CV_64F) if (src.depth() == CV_64F)
{ {
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE)) if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double"); CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
} }
typedef void (*func_t)(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, const cv::gpu::GpuMat& mask, cudaStream_t stream); typedef void (*func_t)(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream);
static const func_t funcs[7][4] = static const func_t funcs[7][4] =
{ {
/* 8U */ {NppCopyMasked<CV_8U , nppiCopy_8u_C1MR >::call, cv::gpu::copyWithMask, NppCopyMasked<CV_8U , nppiCopy_8u_C3MR >::call, NppCopyMasked<CV_8U , nppiCopy_8u_C4MR >::call}, /* 8U */ {NppCopyMasked<CV_8U , nppiCopy_8u_C1MR >::call, cv::gpu::copyWithMask, NppCopyMasked<CV_8U , nppiCopy_8u_C3MR >::call, NppCopyMasked<CV_8U , nppiCopy_8u_C4MR >::call},
@ -1205,9 +1232,9 @@ namespace
func(src, dst, mask, 0); func(src, dst, mask, 0);
} }
void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) const void convert(const GpuMat& src, GpuMat& dst) const
{ {
typedef void (*func_t)(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst); typedef void (*func_t)(const GpuMat& src, GpuMat& dst);
static const func_t funcs[7][7][4] = static const func_t funcs[7][7][4] =
{ {
{ {
@ -1281,7 +1308,7 @@ namespace
if (src.depth() == CV_64F || dst.depth() == CV_64F) if (src.depth() == CV_64F || dst.depth() == CV_64F)
{ {
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE)) if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double"); CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
} }
@ -1298,21 +1325,21 @@ namespace
func(src, dst); func(src, dst);
} }
void convert(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, double alpha, double beta) const void convert(const GpuMat& src, GpuMat& dst, double alpha, double beta) const
{ {
CV_Assert(src.depth() <= CV_64F && src.channels() <= 4); CV_Assert(src.depth() <= CV_64F && src.channels() <= 4);
CV_Assert(dst.depth() <= CV_64F); CV_Assert(dst.depth() <= CV_64F);
if (src.depth() == CV_64F || dst.depth() == CV_64F) if (src.depth() == CV_64F || dst.depth() == CV_64F)
{ {
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE)) if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double"); CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
} }
cv::gpu::convertTo(src, dst, alpha, beta); cv::gpu::convertTo(src, dst, alpha, beta);
} }
void setTo(cv::gpu::GpuMat& m, cv::Scalar s, const cv::gpu::GpuMat& mask) const void setTo(GpuMat& m, Scalar s, const GpuMat& mask) const
{ {
if (mask.empty()) if (mask.empty())
{ {
@ -1328,13 +1355,13 @@ namespace
if (cn == 1 || (cn == 2 && s[0] == s[1]) || (cn == 3 && s[0] == s[1] && s[0] == s[2]) || (cn == 4 && s[0] == s[1] && s[0] == s[2] && s[0] == s[3])) if (cn == 1 || (cn == 2 && s[0] == s[1]) || (cn == 3 && s[0] == s[1] && s[0] == s[2]) || (cn == 4 && s[0] == s[1] && s[0] == s[2] && s[0] == s[3]))
{ {
int val = cv::saturate_cast<uchar>(s[0]); int val = saturate_cast<uchar>(s[0]);
cudaSafeCall( cudaMemset2D(m.data, m.step, val, m.cols * m.elemSize(), m.rows) ); cudaSafeCall( cudaMemset2D(m.data, m.step, val, m.cols * m.elemSize(), m.rows) );
return; return;
} }
} }
typedef void (*func_t)(cv::gpu::GpuMat& src, cv::Scalar s); typedef void (*func_t)(GpuMat& src, Scalar s);
static const func_t funcs[7][4] = static const func_t funcs[7][4] =
{ {
{NppSet<CV_8U , 1, nppiSet_8u_C1R >::call, cv::gpu::setTo , cv::gpu::setTo , NppSet<CV_8U , 4, nppiSet_8u_C4R >::call}, {NppSet<CV_8U , 1, nppiSet_8u_C1R >::call, cv::gpu::setTo , cv::gpu::setTo , NppSet<CV_8U , 4, nppiSet_8u_C4R >::call},
@ -1350,7 +1377,7 @@ namespace
if (m.depth() == CV_64F) if (m.depth() == CV_64F)
{ {
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE)) if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double"); CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
} }
@ -1358,7 +1385,7 @@ namespace
} }
else else
{ {
typedef void (*func_t)(cv::gpu::GpuMat& src, cv::Scalar s, const cv::gpu::GpuMat& mask); typedef void (*func_t)(GpuMat& src, Scalar s, const GpuMat& mask);
static const func_t funcs[7][4] = static const func_t funcs[7][4] =
{ {
{NppSetMask<CV_8U , 1, nppiSet_8u_C1MR >::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_8U , 4, nppiSet_8u_C4MR >::call}, {NppSetMask<CV_8U , 1, nppiSet_8u_C1MR >::call, cv::gpu::setTo, cv::gpu::setTo, NppSetMask<CV_8U , 4, nppiSet_8u_C4MR >::call},
@ -1374,7 +1401,7 @@ namespace
if (m.depth() == CV_64F) if (m.depth() == CV_64F)
{ {
if (!cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) || !cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE)) if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double"); CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
} }
@ -1402,7 +1429,7 @@ namespace
#endif // HAVE_CUDA #endif // HAVE_CUDA
void cv::gpu::GpuMat::upload(const cv::Mat& m) void cv::gpu::GpuMat::upload(const Mat& m)
{ {
CV_DbgAssert(!m.empty()); CV_DbgAssert(!m.empty());
@ -1411,7 +1438,7 @@ void cv::gpu::GpuMat::upload(const cv::Mat& m)
gpuFuncTable()->copy(m, *this); gpuFuncTable()->copy(m, *this);
} }
void cv::gpu::GpuMat::download(cv::Mat& m) const void cv::gpu::GpuMat::download(Mat& m) const
{ {
CV_DbgAssert(!empty()); CV_DbgAssert(!empty());
@ -1420,7 +1447,7 @@ void cv::gpu::GpuMat::download(cv::Mat& m) const
gpuFuncTable()->copy(*this, m); gpuFuncTable()->copy(*this, m);
} }
void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& m) const void cv::gpu::GpuMat::copyTo(GpuMat& m) const
{ {
CV_DbgAssert(!empty()); CV_DbgAssert(!empty());
@ -1429,7 +1456,7 @@ void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& m) const
gpuFuncTable()->copy(*this, m); gpuFuncTable()->copy(*this, m);
} }
void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& mat, const cv::gpu::GpuMat& mask) const void cv::gpu::GpuMat::copyTo(GpuMat& mat, const GpuMat& mask) const
{ {
if (mask.empty()) if (mask.empty())
copyTo(mat); copyTo(mat);
@ -1441,7 +1468,7 @@ void cv::gpu::GpuMat::copyTo(cv::gpu::GpuMat& mat, const cv::gpu::GpuMat& mask)
} }
} }
void cv::gpu::GpuMat::convertTo(cv::gpu::GpuMat& dst, int rtype, double alpha, double beta) const void cv::gpu::GpuMat::convertTo(GpuMat& dst, int rtype, double alpha, double beta) const
{ {
bool noScale = fabs(alpha - 1) < std::numeric_limits<double>::epsilon() && fabs(beta) < std::numeric_limits<double>::epsilon(); bool noScale = fabs(alpha - 1) < std::numeric_limits<double>::epsilon() && fabs(beta) < std::numeric_limits<double>::epsilon();
@ -1458,8 +1485,8 @@ void cv::gpu::GpuMat::convertTo(cv::gpu::GpuMat& dst, int rtype, double alpha, d
return; return;
} }
cv::gpu::GpuMat temp; GpuMat temp;
const cv::gpu::GpuMat* psrc = this; const GpuMat* psrc = this;
if (sdepth != ddepth && psrc == &dst) if (sdepth != ddepth && psrc == &dst)
{ {
temp = *this; temp = *this;
@ -1474,7 +1501,7 @@ void cv::gpu::GpuMat::convertTo(cv::gpu::GpuMat& dst, int rtype, double alpha, d
gpuFuncTable()->convert(*psrc, dst, alpha, beta); gpuFuncTable()->convert(*psrc, dst, alpha, beta);
} }
cv::gpu::GpuMat& cv::gpu::GpuMat::setTo(cv::Scalar s, const cv::gpu::GpuMat& mask) GpuMat& cv::gpu::GpuMat::setTo(Scalar s, const GpuMat& mask)
{ {
CV_Assert(mask.empty() || mask.type() == CV_8UC1); CV_Assert(mask.empty() || mask.type() == CV_8UC1);
CV_DbgAssert(!empty()); CV_DbgAssert(!empty());
@ -1498,7 +1525,7 @@ void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
if (_rows > 0 && _cols > 0) if (_rows > 0 && _cols > 0)
{ {
flags = cv::Mat::MAGIC_VAL + _type; flags = Mat::MAGIC_VAL + _type;
rows = _rows; rows = _rows;
cols = _cols; cols = _cols;
@ -1512,7 +1539,7 @@ void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
step = esz * cols; step = esz * cols;
if (esz * cols == step) if (esz * cols == step)
flags |= cv::Mat::CONTINUOUS_FLAG; flags |= Mat::CONTINUOUS_FLAG;
int64 _nettosize = static_cast<int64>(step) * rows; int64 _nettosize = static_cast<int64>(step) * rows;
size_t nettosize = static_cast<size_t>(_nettosize); size_t nettosize = static_cast<size_t>(_nettosize);
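Most of the edits in this file are mechanical: with using namespace cv / cv::gpu added at the top, the fully qualified cv:: and cv::gpu:: prefixes in the definitions become redundant, and the no-CUDA stubs are grouped at the start of the file. For orientation, a minimal round trip through the code paths touched here (a sketch, assuming a CUDA-enabled build and at least one device):

#include <opencv2/core/gpumat.hpp>

int main()
{
    if (cv::gpu::getCudaEnabledDeviceCount() < 1)
        return 0;                      // without CUDA the calls below would throw

    cv::Mat host(64, 64, CV_8UC1, cv::Scalar(7));
    cv::gpu::GpuMat d_src(host);       // upload via the GpuMat(const Mat&) constructor

    cv::gpu::GpuMat d_dst;
    d_src.convertTo(d_dst, CV_32F, 1.0 / 255.0);   // dispatched through the GpuFuncTable
    d_dst.setTo(cv::Scalar::all(0.5));

    cv::Mat result;
    d_dst.download(result);            // copy back to host memory
    return result.empty() ? 1 : 0;
}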

View File

@ -934,9 +934,9 @@ _InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
_InputArray::_InputArray(const std::vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {} _InputArray::_InputArray(const std::vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
_InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {} _InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
_InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {} _InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {}
_InputArray::_InputArray(const GlBuffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const GlTexture2D &tex) : flags(OPENGL_TEXTURE2D), obj((void*)&tex) {}
_InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {} _InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
_InputArray::_InputArray(const ogl::Buffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const ogl::Texture2D& tex) : flags(OPENGL_TEXTURE), obj((void*)&tex) {}
Mat _InputArray::getMat(int i) const Mat _InputArray::getMat(int i) const
{ {
@ -1076,40 +1076,34 @@ void _InputArray::getMatVector(std::vector<Mat>& mv) const
} }
} }
GlBuffer _InputArray::getGlBuffer() const
{
int k = kind();
CV_Assert(k == OPENGL_BUFFER);
//if( k == OPENGL_BUFFER )
{
const GlBuffer* buf = (const GlBuffer*)obj;
return *buf;
}
}
GlTexture2D _InputArray::getGlTexture2D() const
{
int k = kind();
CV_Assert(k == OPENGL_TEXTURE2D);
//if( k == OPENGL_TEXTURE )
{
const GlTexture2D* tex = (const GlTexture2D*)obj;
return *tex;
}
}
gpu::GpuMat _InputArray::getGpuMat() const gpu::GpuMat _InputArray::getGpuMat() const
{ {
int k = kind(); int k = kind();
CV_Assert(k == GPU_MAT); CV_Assert(k == GPU_MAT);
//if( k == GPU_MAT )
{ const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj; return *d_mat;
return *d_mat; }
}
ogl::Buffer _InputArray::getOGlBuffer() const
{
int k = kind();
CV_Assert(k == OPENGL_BUFFER);
const ogl::Buffer* gl_buf = (const ogl::Buffer*)obj;
return *gl_buf;
}
ogl::Texture2D _InputArray::getOGlTexture2D() const
{
int k = kind();
CV_Assert(k == OPENGL_TEXTURE);
const ogl::Texture2D* gl_tex = (const ogl::Texture2D*)obj;
return *gl_tex;
} }
int _InputArray::kind() const int _InputArray::kind() const
@ -1176,14 +1170,14 @@ Size _InputArray::size(int i) const
if( k == OPENGL_BUFFER ) if( k == OPENGL_BUFFER )
{ {
CV_Assert( i < 0 ); CV_Assert( i < 0 );
const GlBuffer* buf = (const GlBuffer*)obj; const ogl::Buffer* buf = (const ogl::Buffer*)obj;
return buf->size(); return buf->size();
} }
if( k == OPENGL_TEXTURE2D ) if( k == OPENGL_TEXTURE )
{ {
CV_Assert( i < 0 ); CV_Assert( i < 0 );
const GlTexture2D* tex = (const GlTexture2D*)obj; const ogl::Texture2D* tex = (const ogl::Texture2D*)obj;
return tex->size(); return tex->size();
} }
@ -1244,7 +1238,7 @@ int _InputArray::type(int i) const
} }
if( k == OPENGL_BUFFER ) if( k == OPENGL_BUFFER )
return ((const GlBuffer*)obj)->type(); return ((const ogl::Buffer*)obj)->type();
CV_Assert( k == GPU_MAT ); CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT ) //if( k == GPU_MAT )
@ -1296,10 +1290,10 @@ bool _InputArray::empty() const
} }
if( k == OPENGL_BUFFER ) if( k == OPENGL_BUFFER )
return ((const GlBuffer*)obj)->empty(); return ((const ogl::Buffer*)obj)->empty();
if( k == OPENGL_TEXTURE2D ) if( k == OPENGL_TEXTURE )
return ((const GlTexture2D*)obj)->empty(); return ((const ogl::Texture2D*)obj)->empty();
CV_Assert( k == GPU_MAT ); CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT ) //if( k == GPU_MAT )
@ -1312,14 +1306,14 @@ _OutputArray::~_OutputArray() {}
_OutputArray::_OutputArray(Mat& m) : _InputArray(m) {} _OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
_OutputArray::_OutputArray(std::vector<Mat>& vec) : _InputArray(vec) {} _OutputArray::_OutputArray(std::vector<Mat>& vec) : _InputArray(vec) {}
_OutputArray::_OutputArray(gpu::GpuMat& d_mat) : _InputArray(d_mat) {} _OutputArray::_OutputArray(gpu::GpuMat& d_mat) : _InputArray(d_mat) {}
_OutputArray::_OutputArray(GlBuffer& buf) : _InputArray(buf) {} _OutputArray::_OutputArray(ogl::Buffer& buf) : _InputArray(buf) {}
_OutputArray::_OutputArray(GlTexture2D& tex) : _InputArray(tex) {} _OutputArray::_OutputArray(ogl::Texture2D& tex) : _InputArray(tex) {}
_OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;} _OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const std::vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;} _OutputArray::_OutputArray(const std::vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
_OutputArray::_OutputArray(const gpu::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;} _OutputArray::_OutputArray(const gpu::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const GlBuffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;} _OutputArray::_OutputArray(const ogl::Buffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const GlTexture2D& tex) : _InputArray(tex) {flags |= FIXED_SIZE|FIXED_TYPE;} _OutputArray::_OutputArray(const ogl::Texture2D& tex) : _InputArray(tex) {flags |= FIXED_SIZE|FIXED_TYPE;}
bool _OutputArray::fixedSize() const bool _OutputArray::fixedSize() const
@ -1351,9 +1345,9 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
} }
if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 ) if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{ {
CV_Assert(!fixedSize() || ((GlBuffer*)obj)->size() == _sz); CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((GlBuffer*)obj)->type() == mtype); CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
((GlBuffer*)obj)->create(_sz, mtype); ((ogl::Buffer*)obj)->create(_sz, mtype);
return; return;
} }
int sizes[] = {_sz.height, _sz.width}; int sizes[] = {_sz.height, _sz.width};
@ -1379,9 +1373,9 @@ void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransp
} }
if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 ) if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{ {
CV_Assert(!fixedSize() || ((GlBuffer*)obj)->size() == Size(cols, rows)); CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((GlBuffer*)obj)->type() == mtype); CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
((GlBuffer*)obj)->create(rows, cols, mtype); ((ogl::Buffer*)obj)->create(rows, cols, mtype);
return; return;
} }
int sizes[] = {rows, cols}; int sizes[] = {rows, cols};
@ -1605,13 +1599,13 @@ void _OutputArray::release() const
if( k == OPENGL_BUFFER ) if( k == OPENGL_BUFFER )
{ {
((GlBuffer*)obj)->release(); ((ogl::Buffer*)obj)->release();
return; return;
} }
if( k == OPENGL_TEXTURE2D ) if( k == OPENGL_TEXTURE )
{ {
((GlTexture2D*)obj)->release(); ((ogl::Texture2D*)obj)->release();
return; return;
} }
@ -1680,18 +1674,18 @@ gpu::GpuMat& _OutputArray::getGpuMatRef() const
return *(gpu::GpuMat*)obj; return *(gpu::GpuMat*)obj;
} }
GlBuffer& _OutputArray::getGlBufferRef() const ogl::Buffer& _OutputArray::getOGlBufferRef() const
{ {
int k = kind(); int k = kind();
CV_Assert( k == OPENGL_BUFFER ); CV_Assert( k == OPENGL_BUFFER );
return *(GlBuffer*)obj; return *(ogl::Buffer*)obj;
} }
GlTexture2D& _OutputArray::getGlTexture2DRef() const ogl::Texture2D& _OutputArray::getOGlTexture2DRef() const
{ {
int k = kind(); int k = kind();
CV_Assert( k == OPENGL_TEXTURE2D ); CV_Assert( k == OPENGL_TEXTURE );
return *(GlTexture2D*)obj; return *(ogl::Texture2D*)obj;
} }
static _OutputArray _none; static _OutputArray _none;
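With the plumbing above, an ogl::Buffer can also be the destination of a generic OutputArray-based function: create() and release() are forwarded to ogl::Buffer. A hedged sketch (the function is hypothetical; assumes OpenGL support and a current GL context):

#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>

// Hypothetical algorithm that writes into whatever destination it is handed.
static void fillConstant(cv::OutputArray dst, cv::Size size, int type, const cv::Scalar& value)
{
    dst.create(size, type);   // for an ogl::Buffer destination this ends up in ogl::Buffer::create
    if (dst.kind() == cv::_InputArray::OPENGL_BUFFER)
        dst.getOGlBufferRef().copyFrom(cv::Mat(size, type, value));
    else
        dst.getMatRef().setTo(value);
}

int main()
{
    cv::ogl::Buffer buf;
    fillConstant(buf, cv::Size(640, 480), CV_8UC1, cv::Scalar::all(128));
    return buf.size() == cv::Size(640, 480) ? 0 : 1;
}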

View File

@ -53,6 +53,9 @@
#endif #endif
#endif #endif
using namespace cv;
using namespace cv::gpu;
namespace namespace
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
@ -80,7 +83,7 @@ namespace
#endif #endif
} }
bool cv::checkGlError(const char* file, const int line, const char* func) bool cv::ogl::checkError(const char* file, const int line, const char* func)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) file; (void) file;
@ -307,17 +310,17 @@ namespace
#endif #endif
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
// GlBuffer // ogl::Buffer
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
class cv::GlBuffer::Impl class cv::ogl::Buffer::Impl
{ {
}; };
#else #else
class cv::GlBuffer::Impl class cv::ogl::Buffer::Impl
{ {
public: public:
static const Ptr<Impl>& empty(); static const Ptr<Impl>& empty();
@ -359,21 +362,21 @@ private:
#endif #endif
}; };
const cv::Ptr<cv::GlBuffer::Impl>& cv::GlBuffer::Impl::empty() const Ptr<cv::ogl::Buffer::Impl>& cv::ogl::Buffer::Impl::empty()
{ {
static Ptr<Impl> p(new Impl); static Ptr<Impl> p(new Impl);
return p; return p;
} }
cv::GlBuffer::Impl::Impl() : bufId_(0), autoRelease_(true) cv::ogl::Buffer::Impl::Impl() : bufId_(0), autoRelease_(true)
{ {
} }
cv::GlBuffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease) cv::ogl::Buffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{ {
} }
cv::GlBuffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease) cv::ogl::Buffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{ {
gl::GenBuffers(1, &bufId_); gl::GenBuffers(1, &bufId_);
CV_CheckGlError(); CV_CheckGlError();
@ -390,19 +393,19 @@ cv::GlBuffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, boo
CV_CheckGlError(); CV_CheckGlError();
} }
cv::GlBuffer::Impl::~Impl() cv::ogl::Buffer::Impl::~Impl()
{ {
if (autoRelease_ && bufId_) if (autoRelease_ && bufId_)
gl::DeleteBuffers(1, &bufId_); gl::DeleteBuffers(1, &bufId_);
} }
void cv::GlBuffer::Impl::bind(GLenum target) const void cv::ogl::Buffer::Impl::bind(GLenum target) const
{ {
gl::BindBuffer(target, bufId_); gl::BindBuffer(target, bufId_);
CV_CheckGlError(); CV_CheckGlError();
} }
void cv::GlBuffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size) void cv::ogl::Buffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{ {
gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_); gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
CV_CheckGlError(); CV_CheckGlError();
@ -414,7 +417,7 @@ void cv::GlBuffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
CV_CheckGlError(); CV_CheckGlError();
} }
void cv::GlBuffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data) void cv::ogl::Buffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{ {
gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_); gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
CV_CheckGlError(); CV_CheckGlError();
@ -423,7 +426,7 @@ void cv::GlBuffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
CV_CheckGlError(); CV_CheckGlError();
} }
void cv::GlBuffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const void cv::ogl::Buffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{ {
gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_); gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
CV_CheckGlError(); CV_CheckGlError();
@ -432,7 +435,7 @@ void cv::GlBuffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
CV_CheckGlError(); CV_CheckGlError();
} }
void* cv::GlBuffer::Impl::mapHost(GLenum access) void* cv::ogl::Buffer::Impl::mapHost(GLenum access)
{ {
gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_); gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
CV_CheckGlError(); CV_CheckGlError();
@ -443,31 +446,31 @@ void* cv::GlBuffer::Impl::mapHost(GLenum access)
return data; return data;
} }
void cv::GlBuffer::Impl::unmapHost() void cv::ogl::Buffer::Impl::unmapHost()
{ {
gl::UnmapBuffer(gl::COPY_READ_BUFFER); gl::UnmapBuffer(gl::COPY_READ_BUFFER);
} }
#ifdef HAVE_CUDA #ifdef HAVE_CUDA
void cv::GlBuffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream) void cv::ogl::Buffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{ {
cudaResource_.registerBuffer(bufId_); cudaResource_.registerBuffer(bufId_);
cudaResource_.copyFrom(src, spitch, width, height, stream); cudaResource_.copyFrom(src, spitch, width, height, stream);
} }
void cv::GlBuffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const void cv::ogl::Buffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{ {
cudaResource_.registerBuffer(bufId_); cudaResource_.registerBuffer(bufId_);
cudaResource_.copyTo(dst, dpitch, width, height, stream); cudaResource_.copyTo(dst, dpitch, width, height, stream);
} }
void* cv::GlBuffer::Impl::mapDevice(cudaStream_t stream) void* cv::ogl::Buffer::Impl::mapDevice(cudaStream_t stream)
{ {
cudaResource_.registerBuffer(bufId_); cudaResource_.registerBuffer(bufId_);
return cudaResource_.map(stream); return cudaResource_.map(stream);
} }
void cv::GlBuffer::Impl::unmapDevice(cudaStream_t stream) void cv::ogl::Buffer::Impl::unmapDevice(cudaStream_t stream)
{ {
cudaResource_.unmap(stream); cudaResource_.unmap(stream);
} }
@ -475,7 +478,7 @@ void cv::GlBuffer::Impl::unmapHost()
#endif // HAVE_OPENGL #endif // HAVE_OPENGL
cv::GlBuffer::GlBuffer() : rows_(0), cols_(0), type_(0) cv::ogl::Buffer::Buffer() : rows_(0), cols_(0), type_(0)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -484,7 +487,7 @@ cv::GlBuffer::GlBuffer() : rows_(0), cols_(0), type_(0)
#endif #endif
} }
cv::GlBuffer::GlBuffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0) cv::ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arows; (void) arows;
@ -501,7 +504,7 @@ cv::GlBuffer::GlBuffer(int arows, int acols, int atype, unsigned int abufId, boo
#endif #endif
} }
cv::GlBuffer::GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0) cv::ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) asize; (void) asize;
@ -517,17 +520,17 @@ cv::GlBuffer::GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRele
#endif #endif
} }
cv::GlBuffer::GlBuffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0) cv::ogl::Buffer::Buffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{ {
create(arows, acols, atype, target, autoRelease); create(arows, acols, atype, target, autoRelease);
} }
cv::GlBuffer::GlBuffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0) cv::ogl::Buffer::Buffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{ {
create(asize, atype, target, autoRelease); create(asize, atype, target, autoRelease);
} }
cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0) cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -545,7 +548,7 @@ cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(
break; break;
} }
case _InputArray::OPENGL_TEXTURE2D: case _InputArray::OPENGL_TEXTURE:
{ {
copyFrom(arr, target, autoRelease); copyFrom(arr, target, autoRelease);
break; break;
@ -572,7 +575,7 @@ cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(
#endif #endif
} }
void cv::GlBuffer::create(int arows, int acols, int atype, Target target, bool autoRelease) void cv::ogl::Buffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arows; (void) arows;
@ -593,7 +596,7 @@ void cv::GlBuffer::create(int arows, int acols, int atype, Target target, bool a
#endif #endif
} }
void cv::GlBuffer::release() void cv::ogl::Buffer::release()
{ {
#ifdef HAVE_OPENGL #ifdef HAVE_OPENGL
if (*impl_.refcount == 1) if (*impl_.refcount == 1)
@ -605,7 +608,7 @@ void cv::GlBuffer::release()
#endif #endif
} }
void cv::GlBuffer::setAutoRelease(bool flag) void cv::ogl::Buffer::setAutoRelease(bool flag)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) flag; (void) flag;
@ -615,7 +618,7 @@ void cv::GlBuffer::setAutoRelease(bool flag)
#endif #endif
} }
void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease) void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -625,9 +628,9 @@ void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
#else #else
const int kind = arr.kind(); const int kind = arr.kind();
if (kind == _InputArray::OPENGL_TEXTURE2D) if (kind == _InputArray::OPENGL_TEXTURE)
{ {
GlTexture2D tex = arr.getGlTexture2D(); ogl::Texture2D tex = arr.getOGlTexture2D();
tex.copyTo(*this); tex.copyTo(*this);
setAutoRelease(autoRelease); setAutoRelease(autoRelease);
return; return;
@ -641,7 +644,7 @@ void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{ {
case _InputArray::OPENGL_BUFFER: case _InputArray::OPENGL_BUFFER:
{ {
GlBuffer buf = arr.getGlBuffer(); ogl::Buffer buf = arr.getOGlBuffer();
impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype)); impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
break; break;
} }
@ -668,7 +671,7 @@ void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
#endif #endif
} }
void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) const void cv::ogl::Buffer::copyTo(OutputArray arr, Target target, bool autoRelease) const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -682,13 +685,13 @@ void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) cons
{ {
case _InputArray::OPENGL_BUFFER: case _InputArray::OPENGL_BUFFER:
{ {
arr.getGlBufferRef().copyFrom(*this, target, autoRelease); arr.getOGlBufferRef().copyFrom(*this, target, autoRelease);
break; break;
} }
case _InputArray::OPENGL_TEXTURE2D: case _InputArray::OPENGL_TEXTURE:
{ {
arr.getGlTexture2DRef().copyFrom(*this, autoRelease); arr.getOGlTexture2DRef().copyFrom(*this, autoRelease);
break; break;
} }
@ -716,21 +719,21 @@ void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) cons
#endif #endif
} }
cv::GlBuffer cv::GlBuffer::clone(Target target, bool autoRelease) const cv::ogl::Buffer cv::ogl::Buffer::clone(Target target, bool autoRelease) const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) target; (void) target;
(void) autoRelease; (void) autoRelease;
throw_nogl(); throw_nogl();
return GlBuffer(); return cv::ogl::Buffer();
#else #else
GlBuffer buf; ogl::Buffer buf;
buf.copyFrom(*this, target, autoRelease); buf.copyFrom(*this, target, autoRelease);
return buf; return buf;
#endif #endif
} }
void cv::GlBuffer::bind(Target target) const void cv::ogl::Buffer::bind(Target target) const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) target; (void) target;
@ -740,7 +743,7 @@ void cv::GlBuffer::bind(Target target) const
#endif #endif
} }
void cv::GlBuffer::unbind(Target target) void cv::ogl::Buffer::unbind(Target target)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) target; (void) target;
@ -751,18 +754,18 @@ void cv::GlBuffer::unbind(Target target)
#endif #endif
} }
cv::Mat cv::GlBuffer::mapHost(Access access) Mat cv::ogl::Buffer::mapHost(Access access)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) access; (void) access;
throw_nogl(); throw_nogl();
return cv::Mat(); return Mat();
#else #else
return cv::Mat(rows_, cols_, type_, impl_->mapHost(access)); return Mat(rows_, cols_, type_, impl_->mapHost(access));
#endif #endif
} }
void cv::GlBuffer::unmapHost() void cv::ogl::Buffer::unmapHost()
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -771,22 +774,22 @@ void cv::GlBuffer::unmapHost()
#endif #endif
} }
cv::gpu::GpuMat cv::GlBuffer::mapDevice() GpuMat cv::ogl::Buffer::mapDevice()
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
return cv::gpu::GpuMat(); return GpuMat();
#else #else
#if !defined HAVE_CUDA || defined(CUDA_DISABLER) #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda(); throw_nocuda();
return cv::gpu::GpuMat(); return GpuMat();
#else #else
return cv::gpu::GpuMat(rows_, cols_, type_, impl_->mapDevice()); return GpuMat(rows_, cols_, type_, impl_->mapDevice());
#endif #endif
#endif #endif
} }
void cv::GlBuffer::unmapDevice() void cv::ogl::Buffer::unmapDevice()
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -799,7 +802,7 @@ void cv::GlBuffer::unmapDevice()
#endif #endif
} }
unsigned int cv::GlBuffer::bufId() const unsigned int cv::ogl::Buffer::bufId() const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -809,23 +812,23 @@ unsigned int cv::GlBuffer::bufId() const
#endif #endif
} }
template <> void cv::Ptr<cv::GlBuffer::Impl>::delete_obj() template <> void cv::Ptr<cv::ogl::Buffer::Impl>::delete_obj()
{ {
if (obj) delete obj; if (obj) delete obj;
} }
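
Not part of the patch, but for orientation: a minimal usage sketch of the renamed cv::ogl::Buffer as it stands after this hunk. It assumes an OpenCV build with OpenGL enabled (and CUDA for the device mapping), a current OpenGL context, the header locations and default arguments of that build, and that the READ_WRITE access constant keeps its pre-rename name.

    #include "opencv2/core/core.hpp"
    #include "opencv2/core/opengl_interop.hpp"  // assumed location of the cv::ogl classes
    #include "opencv2/gpu/gpu.hpp"              // for cv::gpu::GpuMat (CUDA interop path)

    void bufferSketch()
    {
        // Upload a host image into an OpenGL pixel-unpack buffer object.
        cv::Mat frame(480, 640, CV_8UC3, cv::Scalar::all(0));
        cv::ogl::Buffer buf(frame, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER);

        // Map the buffer into host memory; the returned Mat aliases the buffer storage.
        cv::Mat view = buf.mapHost(cv::ogl::Buffer::READ_WRITE);   // Access enum name assumed
        // ... read or modify 'view' ...
        buf.unmapHost();

        // Map the same buffer as a GpuMat for CUDA work (requires the CUDA build).
        cv::gpu::GpuMat d_view = buf.mapDevice();
        // ... run CUDA kernels on 'd_view' ...
        buf.unmapDevice();

        // Download through the generic OutputArray interface, as used by copyTo() above.
        cv::Mat result;
        buf.copyTo(result);
    }
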
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
// GlTexture2D // ogl::Texture
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
class cv::GlTexture2D::Impl class cv::ogl::Texture2D::Impl
{ {
}; };
#else #else
class cv::GlTexture2D::Impl class cv::ogl::Texture2D::Impl
{ {
public: public:
static const Ptr<Impl> empty(); static const Ptr<Impl> empty();
@ -850,21 +853,21 @@ private:
bool autoRelease_; bool autoRelease_;
}; };
const cv::Ptr<cv::GlTexture2D::Impl> cv::GlTexture2D::Impl::empty() const Ptr<cv::ogl::Texture2D::Impl> cv::ogl::Texture2D::Impl::empty()
{ {
static Ptr<Impl> p(new Impl); static Ptr<Impl> p(new Impl);
return p; return p;
} }
cv::GlTexture2D::Impl::Impl() : texId_(0), autoRelease_(true) cv::ogl::Texture2D::Impl::Impl() : texId_(0), autoRelease_(true)
{ {
} }
cv::GlTexture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease) cv::ogl::Texture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{ {
} }
cv::GlTexture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease) cv::ogl::Texture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{ {
gl::GenTextures(1, &texId_); gl::GenTextures(1, &texId_);
CV_CheckGlError(); CV_CheckGlError();
@ -884,13 +887,13 @@ cv::GlTexture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height,
CV_CheckGlError(); CV_CheckGlError();
} }
cv::GlTexture2D::Impl::~Impl() cv::ogl::Texture2D::Impl::~Impl()
{ {
if (autoRelease_ && texId_) if (autoRelease_ && texId_)
gl::DeleteTextures(1, &texId_); gl::DeleteTextures(1, &texId_);
} }
void cv::GlTexture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels) void cv::ogl::Texture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{ {
gl::BindTexture(gl::TEXTURE_2D, texId_); gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError(); CV_CheckGlError();
@ -905,7 +908,7 @@ void cv::GlTexture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum forma
CV_CheckGlError(); CV_CheckGlError();
} }
void cv::GlTexture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const void cv::ogl::Texture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{ {
gl::BindTexture(gl::TEXTURE_2D, texId_); gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError(); CV_CheckGlError();
@ -917,7 +920,7 @@ void cv::GlTexture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) c
CV_CheckGlError(); CV_CheckGlError();
} }
void cv::GlTexture2D::Impl::bind() const void cv::ogl::Texture2D::Impl::bind() const
{ {
gl::BindTexture(gl::TEXTURE_2D, texId_); gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError(); CV_CheckGlError();
@ -925,7 +928,7 @@ void cv::GlTexture2D::Impl::bind() const
#endif // HAVE_OPENGL #endif // HAVE_OPENGL
cv::GlTexture2D::GlTexture2D() : rows_(0), cols_(0), format_(NONE) cv::ogl::Texture2D::Texture2D() : rows_(0), cols_(0), format_(NONE)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -934,7 +937,7 @@ cv::GlTexture2D::GlTexture2D() : rows_(0), cols_(0), format_(NONE)
#endif #endif
} }
cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE) cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arows; (void) arows;
@ -951,7 +954,7 @@ cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, unsigned int
#endif #endif
} }
cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE) cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) asize; (void) asize;
@ -967,17 +970,17 @@ cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, unsigned int atexId, bo
#endif #endif
} }
cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE) cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{ {
create(arows, acols, aformat, autoRelease); create(arows, acols, aformat, autoRelease);
} }
cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE) cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{ {
create(asize, aformat, autoRelease); create(asize, aformat, autoRelease);
} }
cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE) cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -1008,10 +1011,10 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
{ {
case _InputArray::OPENGL_BUFFER: case _InputArray::OPENGL_BUFFER:
{ {
GlBuffer buf = arr.getGlBuffer(); ogl::Buffer buf = arr.getOGlBuffer();
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER); buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease); impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
break; break;
} }
@ -1021,10 +1024,10 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
throw_nocuda(); throw_nocuda();
#else #else
GpuMat dmat = arr.getGpuMat(); GpuMat dmat = arr.getGpuMat();
GlBuffer buf(dmat, GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER); buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease); impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif #endif
break; break;
@ -1034,7 +1037,7 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
{ {
Mat mat = arr.getMat(); Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() ); CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease); impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease);
break; break;
} }
@ -1046,7 +1049,7 @@ cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_
#endif #endif
} }
void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRelease) void cv::ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arows; (void) arows;
@ -1057,7 +1060,7 @@ void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRele
#else #else
if (rows_ != arows || cols_ != acols || format_ != aformat) if (rows_ != arows || cols_ != acols || format_ != aformat)
{ {
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease); impl_ = new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease);
rows_ = arows; rows_ = arows;
cols_ = acols; cols_ = acols;
@ -1066,7 +1069,7 @@ void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRele
#endif #endif
} }
void cv::GlTexture2D::release() void cv::ogl::Texture2D::release()
{ {
#ifdef HAVE_OPENGL #ifdef HAVE_OPENGL
if (*impl_.refcount == 1) if (*impl_.refcount == 1)
@ -1078,7 +1081,7 @@ void cv::GlTexture2D::release()
#endif #endif
} }
void cv::GlTexture2D::setAutoRelease(bool flag) void cv::ogl::Texture2D::setAutoRelease(bool flag)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) flag; (void) flag;
@ -1088,7 +1091,7 @@ void cv::GlTexture2D::setAutoRelease(bool flag)
#endif #endif
} }
void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease) void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -1121,10 +1124,10 @@ void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
{ {
case _InputArray::OPENGL_BUFFER: case _InputArray::OPENGL_BUFFER:
{ {
GlBuffer buf = arr.getGlBuffer(); ogl::Buffer buf = arr.getOGlBuffer();
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER); buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0); impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
break; break;
} }
@ -1134,10 +1137,10 @@ void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
throw_nocuda(); throw_nocuda();
#else #else
GpuMat dmat = arr.getGpuMat(); GpuMat dmat = arr.getGpuMat();
GlBuffer buf(dmat, GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER); buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0); impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif #endif
break; break;
@ -1147,14 +1150,14 @@ void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
{ {
Mat mat = arr.getMat(); Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() ); CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data); impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
} }
} }
#endif #endif
} }
void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -1171,11 +1174,11 @@ void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) cons
{ {
case _InputArray::OPENGL_BUFFER: case _InputArray::OPENGL_BUFFER:
{ {
GlBuffer& buf = arr.getGlBufferRef(); ogl::Buffer& buf = arr.getOGlBufferRef();
buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), GlBuffer::PIXEL_PACK_BUFFER, autoRelease); buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease);
buf.bind(GlBuffer::PIXEL_PACK_BUFFER); buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], 0); impl_->copyTo(dstFormat, gl_types[ddepth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
break; break;
} }
@ -1184,10 +1187,10 @@ void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) cons
#if !defined HAVE_CUDA || defined(CUDA_DISABLER) #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda(); throw_nocuda();
#else #else
GlBuffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), GlBuffer::PIXEL_PACK_BUFFER); ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER);
buf.bind(GlBuffer::PIXEL_PACK_BUFFER); buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], 0); impl_->copyTo(dstFormat, gl_types[ddepth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
buf.copyTo(arr); buf.copyTo(arr);
#endif #endif
@ -1199,14 +1202,14 @@ void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) cons
arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn)); arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
Mat mat = arr.getMat(); Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() ); CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER); ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], mat.data); impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
} }
} }
#endif #endif
} }
void cv::GlTexture2D::bind() const void cv::ogl::Texture2D::bind() const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -1215,7 +1218,7 @@ void cv::GlTexture2D::bind() const
#endif #endif
} }
unsigned int cv::GlTexture2D::texId() const unsigned int cv::ogl::Texture2D::texId() const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -1225,19 +1228,19 @@ unsigned int cv::GlTexture2D::texId() const
#endif #endif
} }
template <> void cv::Ptr<cv::GlTexture2D::Impl>::delete_obj() template <> void cv::Ptr<cv::ogl::Texture2D::Impl>::delete_obj()
{ {
if (obj) delete obj; if (obj) delete obj;
} }
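
Under the same assumptions, a sketch of the renamed cv::ogl::Texture2D and of the OutputArray dispatch implemented in copyTo() above (illustration only; the autoRelease defaults and the default render rectangles are assumed):

    void textureSketch(const cv::gpu::GpuMat& d_rgba)   // assumed CV_8UC4 device image
    {
        // Device data is routed through an internal PIXEL_UNPACK_BUFFER, as in the constructor above.
        cv::ogl::Texture2D tex(d_rgba);

        // Draw into the current OpenGL viewport (default window/texture rectangles assumed).
        cv::ogl::render(tex);

        // copyTo() dispatches on the OutputArray kind: an ogl::Buffer output stays on the GPU side...
        cv::ogl::Buffer pixels;
        tex.copyTo(pixels, CV_8U);

        // ...while a Mat output is read back into host memory.
        cv::Mat readback;
        tex.copyTo(readback, CV_8U);
    }
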
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
// GlArrays // ogl::Arrays
cv::GlArrays::GlArrays() : size_(0) cv::ogl::Arrays::Arrays() : size_(0)
{ {
} }
void cv::GlArrays::setVertexArray(InputArray vertex) void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{ {
const int cn = vertex.channels(); const int cn = vertex.channels();
const int depth = vertex.depth(); const int depth = vertex.depth();
@ -1246,37 +1249,37 @@ void cv::GlArrays::setVertexArray(InputArray vertex)
CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F ); CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (vertex.kind() == _InputArray::OPENGL_BUFFER) if (vertex.kind() == _InputArray::OPENGL_BUFFER)
vertex_ = vertex.getGlBuffer(); vertex_ = vertex.getOGlBuffer();
else else
vertex_.copyFrom(vertex); vertex_.copyFrom(vertex);
size_ = vertex_.size().area(); size_ = vertex_.size().area();
} }
void cv::GlArrays::resetVertexArray() void cv::ogl::Arrays::resetVertexArray()
{ {
vertex_.release(); vertex_.release();
size_ = 0; size_ = 0;
} }
void cv::GlArrays::setColorArray(InputArray color) void cv::ogl::Arrays::setColorArray(InputArray color)
{ {
const int cn = color.channels(); const int cn = color.channels();
CV_Assert( cn == 3 || cn == 4 ); CV_Assert( cn == 3 || cn == 4 );
if (color.kind() == _InputArray::OPENGL_BUFFER) if (color.kind() == _InputArray::OPENGL_BUFFER)
color_ = color.getGlBuffer(); color_ = color.getOGlBuffer();
else else
color_.copyFrom(color); color_.copyFrom(color);
} }
void cv::GlArrays::resetColorArray() void cv::ogl::Arrays::resetColorArray()
{ {
color_.release(); color_.release();
} }
void cv::GlArrays::setNormalArray(InputArray normal) void cv::ogl::Arrays::setNormalArray(InputArray normal)
{ {
const int cn = normal.channels(); const int cn = normal.channels();
const int depth = normal.depth(); const int depth = normal.depth();
@ -1285,17 +1288,17 @@ void cv::GlArrays::setNormalArray(InputArray normal)
CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F ); CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (normal.kind() == _InputArray::OPENGL_BUFFER) if (normal.kind() == _InputArray::OPENGL_BUFFER)
normal_ = normal.getGlBuffer(); normal_ = normal.getOGlBuffer();
else else
normal_.copyFrom(normal); normal_.copyFrom(normal);
} }
void cv::GlArrays::resetNormalArray() void cv::ogl::Arrays::resetNormalArray()
{ {
normal_.release(); normal_.release();
} }
void cv::GlArrays::setTexCoordArray(InputArray texCoord) void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{ {
const int cn = texCoord.channels(); const int cn = texCoord.channels();
const int depth = texCoord.depth(); const int depth = texCoord.depth();
@ -1304,17 +1307,17 @@ void cv::GlArrays::setTexCoordArray(InputArray texCoord)
CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F ); CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (texCoord.kind() == _InputArray::OPENGL_BUFFER) if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
texCoord_ = texCoord.getGlBuffer(); texCoord_ = texCoord.getOGlBuffer();
else else
texCoord_.copyFrom(texCoord); texCoord_.copyFrom(texCoord);
} }
void cv::GlArrays::resetTexCoordArray() void cv::ogl::Arrays::resetTexCoordArray()
{ {
texCoord_.release(); texCoord_.release();
} }
void cv::GlArrays::release() void cv::ogl::Arrays::release()
{ {
resetVertexArray(); resetVertexArray();
resetColorArray(); resetColorArray();
@ -1322,7 +1325,7 @@ void cv::GlArrays::release()
resetTexCoordArray(); resetTexCoordArray();
} }
void cv::GlArrays::setAutoRelease(bool flag) void cv::ogl::Arrays::setAutoRelease(bool flag)
{ {
vertex_.setAutoRelease(flag); vertex_.setAutoRelease(flag);
color_.setAutoRelease(flag); color_.setAutoRelease(flag);
@ -1330,7 +1333,7 @@ void cv::GlArrays::setAutoRelease(bool flag)
texCoord_.setAutoRelease(flag); texCoord_.setAutoRelease(flag);
} }
void cv::GlArrays::bind() const void cv::ogl::Arrays::bind() const
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
throw_nogl(); throw_nogl();
@ -1349,7 +1352,7 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::TEXTURE_COORD_ARRAY); gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError(); CV_CheckGlError();
texCoord_.bind(GlBuffer::ARRAY_BUFFER); texCoord_.bind(ogl::Buffer::ARRAY_BUFFER);
gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0); gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
CV_CheckGlError(); CV_CheckGlError();
@ -1365,7 +1368,7 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::NORMAL_ARRAY); gl::EnableClientState(gl::NORMAL_ARRAY);
CV_CheckGlError(); CV_CheckGlError();
normal_.bind(GlBuffer::ARRAY_BUFFER); normal_.bind(ogl::Buffer::ARRAY_BUFFER);
gl::NormalPointer(gl_types[normal_.depth()], 0, 0); gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
CV_CheckGlError(); CV_CheckGlError();
@ -1381,7 +1384,7 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::COLOR_ARRAY); gl::EnableClientState(gl::COLOR_ARRAY);
CV_CheckGlError(); CV_CheckGlError();
color_.bind(GlBuffer::ARRAY_BUFFER); color_.bind(ogl::Buffer::ARRAY_BUFFER);
const int cn = color_.channels(); const int cn = color_.channels();
@ -1399,20 +1402,20 @@ void cv::GlArrays::bind() const
gl::EnableClientState(gl::VERTEX_ARRAY); gl::EnableClientState(gl::VERTEX_ARRAY);
CV_CheckGlError(); CV_CheckGlError();
vertex_.bind(GlBuffer::ARRAY_BUFFER); vertex_.bind(ogl::Buffer::ARRAY_BUFFER);
gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0); gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
CV_CheckGlError(); CV_CheckGlError();
} }
GlBuffer::unbind(GlBuffer::ARRAY_BUFFER); ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
#endif #endif
} }
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
// Rendering // Rendering
void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> texRect) void cv::ogl::render(const ogl::Texture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) tex; (void) tex;
@ -1460,7 +1463,7 @@ void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> tex
texRect.x + texRect.width, texRect.y texRect.x + texRect.width, texRect.y
}; };
GlBuffer::unbind(GlBuffer::ARRAY_BUFFER); ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
gl::EnableClientState(gl::TEXTURE_COORD_ARRAY); gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError(); CV_CheckGlError();
@ -1478,13 +1481,13 @@ void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> tex
gl::VertexPointer(3, gl::FLOAT, 0, vertex); gl::VertexPointer(3, gl::FLOAT, 0, vertex);
CV_CheckGlError(); CV_CheckGlError();
gl::DrawArrays(cv::RenderMode::QUADS, 0, 4); gl::DrawArrays(gl::QUADS, 0, 4);
CV_CheckGlError(); CV_CheckGlError();
} }
#endif #endif
} }
void cv::render(const GlArrays& arr, int mode, Scalar color) void cv::ogl::render(const ogl::Arrays& arr, int mode, Scalar color)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -1503,7 +1506,7 @@ void cv::render(const GlArrays& arr, int mode, Scalar color)
#endif #endif
} }
void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color) void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{ {
#ifndef HAVE_OPENGL #ifndef HAVE_OPENGL
(void) arr; (void) arr;
@ -1524,7 +1527,7 @@ void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
{ {
case _InputArray::OPENGL_BUFFER : case _InputArray::OPENGL_BUFFER :
{ {
GlBuffer buf = indices.getGlBuffer(); ogl::Buffer buf = indices.getOGlBuffer();
const int depth = buf.depth(); const int depth = buf.depth();
@ -1539,11 +1542,11 @@ void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
else else
type = gl::UNSIGNED_INT; type = gl::UNSIGNED_INT;
buf.bind(GlBuffer::ELEMENT_ARRAY_BUFFER); buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);
gl::DrawElements(mode, buf.size().area(), type, 0); gl::DrawElements(mode, buf.size().area(), type, 0);
GlBuffer::unbind(GlBuffer::ELEMENT_ARRAY_BUFFER); ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);
break; break;
} }
@ -1566,7 +1569,7 @@ void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
else else
type = gl::UNSIGNED_INT; type = gl::UNSIGNED_INT;
GlBuffer::unbind(GlBuffer::ELEMENT_ARRAY_BUFFER); ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);
gl::DrawElements(mode, mat.size().area(), type, mat.data); gl::DrawElements(mode, mat.size().area(), type, mat.data);
} }
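
And a sketch of the vertex-array path; the renamed render-mode constants are not visible in this hunk, so the raw GL_POINTS value is used and flagged as an assumption:

    void arraysSketch()
    {
        // Three illustrative vertices; any InputArray is accepted, ogl::Buffer inputs are taken by reference.
        cv::Mat pts(1, 3, CV_32FC3);
        pts.at<cv::Point3f>(0) = cv::Point3f(0.f, 0.f, 0.f);
        pts.at<cv::Point3f>(1) = cv::Point3f(1.f, 0.f, 0.f);
        pts.at<cv::Point3f>(2) = cv::Point3f(0.f, 1.f, 0.f);

        cv::ogl::Arrays arr;
        arr.setVertexArray(pts);          // copied into an ARRAY_BUFFER internally

        const int kPoints = 0x0000;       // GL_POINTS; the post-rename mode constant is not shown in this diff
        cv::ogl::render(arr, kPoints, cv::Scalar::all(255));
    }
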

View File

@ -216,6 +216,86 @@ namespace cv { namespace gpu { namespace device
OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0) OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)
#undef OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS #undef OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)
OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)
#undef OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)
#undef OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)
OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)
#undef OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)
#undef OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS
}}} // namespace cv { namespace gpu { namespace device }}} // namespace cv { namespace gpu { namespace device
#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ #endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__

File diff suppressed because one or more lines are too long

View File

@ -25,3 +25,4 @@ if(WIN32)
set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG") set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
endif() endif()
endif() endif()

View File

@ -53,7 +53,7 @@ void cv::gpu::gammaCorrection(const GpuMat&, GpuMat&, bool, Stream&) { throw_nog
#else /* !defined (HAVE_CUDA) */ #else /* !defined (HAVE_CUDA) */
#include <cvt_colot_internal.h> #include "cvt_color_internal.h"
namespace cv { namespace gpu { namespace cv { namespace gpu {
namespace device namespace device
@ -69,7 +69,7 @@ using namespace ::cv::gpu::device;
namespace namespace
{ {
typedef void (*gpu_func_t)(const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream); typedef void (*gpu_func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
void bgr_to_rgb(const GpuMat& src, GpuMat& dst, int, Stream& stream) void bgr_to_rgb(const GpuMat& src, GpuMat& dst, int, Stream& stream)
{ {
@ -1155,154 +1155,420 @@ namespace
funcs[dcn == 4][src.channels() == 4][src.depth()](src, dst, StreamAccessor::getStream(stream)); funcs[dcn == 4][src.channels() == 4][src.depth()](src, dst, StreamAccessor::getStream(stream));
} }
void bgr_to_lab(const GpuMat& src, GpuMat& dst, int dcn, Stream& st) void bgr_to_lab(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
#if (CUDA_VERSION < 5000) using namespace cv::gpu::device;
(void)src; static const gpu_func_t funcs[2][2][2] =
(void)dst; {
(void)dcn; {
(void)st; {bgr_to_lab_8u, bgr_to_lab_32f},
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); {bgra_to_lab_8u, bgra_to_lab_32f}
#else },
CV_Assert(src.depth() == CV_8U); {
CV_Assert(src.channels() == 3); {bgr_to_lab4_8u, bgr_to_lab4_32f},
{bgra_to_lab4_8u, bgra_to_lab4_32f}
}
};
dcn = src.channels(); if (dcn <= 0) dcn = 3;
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn)); CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
cudaStream_t stream = StreamAccessor::getStream(st); dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
NppStreamHandler h(stream);
NppiSize oSizeROI; funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
oSizeROI.width = src.cols;
oSizeROI.height = src.rows;
nppSafeCall( nppiBGRToLab_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
} }
void rgb_to_lab(const GpuMat& src, GpuMat& dst, int, Stream& stream) void rgb_to_lab(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
bgr_to_rgb(src, dst, -1, stream); using namespace cv::gpu::device;
bgr_to_lab(dst, dst, -1, stream); static const gpu_func_t funcs[2][2][2] =
{
{
{rgb_to_lab_8u, rgb_to_lab_32f},
{rgba_to_lab_8u, rgba_to_lab_32f}
},
{
{rgb_to_lab4_8u, rgb_to_lab4_32f},
{rgba_to_lab4_8u, rgba_to_lab4_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
} }
void lab_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& st) void lbgr_to_lab(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
#if (CUDA_VERSION < 5000) using namespace cv::gpu::device;
(void)src; static const gpu_func_t funcs[2][2][2] =
(void)dst; {
(void)dcn; {
(void)st; {lbgr_to_lab_8u, lbgr_to_lab_32f},
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); {lbgra_to_lab_8u, lbgra_to_lab_32f}
#else },
CV_Assert(src.depth() == CV_8U); {
CV_Assert(src.channels() == 3); {lbgr_to_lab4_8u, lbgr_to_lab4_32f},
{lbgra_to_lab4_8u, lbgra_to_lab4_32f}
}
};
dcn = src.channels(); if (dcn <= 0) dcn = 3;
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn)); CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
cudaStream_t stream = StreamAccessor::getStream(st); dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
NppStreamHandler h(stream);
NppiSize oSizeROI; funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
oSizeROI.width = src.cols;
oSizeROI.height = src.rows;
nppSafeCall( nppiLabToBGR_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
} }
void lab_to_rgb(const GpuMat& src, GpuMat& dst, int, Stream& stream) void lrgb_to_lab(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
lab_to_bgr(src, dst, -1, stream); using namespace cv::gpu::device;
bgr_to_rgb(dst, dst, -1, stream); static const gpu_func_t funcs[2][2][2] =
{
{
{lrgb_to_lab_8u, lrgb_to_lab_32f},
{lrgba_to_lab_8u, lrgba_to_lab_32f}
},
{
{lrgb_to_lab4_8u, lrgb_to_lab4_32f},
{lrgba_to_lab4_8u, lrgba_to_lab4_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
} }
void rgb_to_luv(const GpuMat& src, GpuMat& dst, int dcn, Stream& st) void lab_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
#if (CUDA_VERSION < 5000) using namespace cv::gpu::device;
(void)src; static const gpu_func_t funcs[2][2][2] =
(void)dst; {
(void)dcn; {
(void)st; {lab_to_bgr_8u, lab_to_bgr_32f},
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); {lab4_to_bgr_8u, lab4_to_bgr_32f}
#else },
CV_Assert(src.depth() == CV_8U); {
CV_Assert(src.channels() == 3 || src.channels() == 4); {lab_to_bgra_8u, lab_to_bgra_32f},
{lab4_to_bgra_8u, lab4_to_bgra_32f}
}
};
dcn = src.channels(); if (dcn <= 0) dcn = 3;
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn)); CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
cudaStream_t stream = StreamAccessor::getStream(st); dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
NppStreamHandler h(stream);
NppiSize oSizeROI; funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
oSizeROI.width = src.cols;
oSizeROI.height = src.rows;
if (dcn == 3)
nppSafeCall( nppiRGBToLUV_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
else
nppSafeCall( nppiRGBToLUV_8u_AC4R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
} }
void bgr_to_luv(const GpuMat& src, GpuMat& dst, int, Stream& stream) void lab_to_rgb(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
bgr_to_rgb(src, dst, -1, stream); using namespace cv::gpu::device;
rgb_to_luv(dst, dst, -1, stream); static const gpu_func_t funcs[2][2][2] =
{
{
{lab_to_rgb_8u, lab_to_rgb_32f},
{lab4_to_rgb_8u, lab4_to_rgb_32f}
},
{
{lab_to_rgba_8u, lab_to_rgba_32f},
{lab4_to_rgba_8u, lab4_to_rgba_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
} }
void luv_to_rgb(const GpuMat& src, GpuMat& dst, int dcn, Stream& st) void lab_to_lbgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
#if (CUDA_VERSION < 5000) using namespace cv::gpu::device;
(void)src; static const gpu_func_t funcs[2][2][2] =
(void)dst; {
(void)dcn; {
(void)st; {lab_to_lbgr_8u, lab_to_lbgr_32f},
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); {lab4_to_lbgr_8u, lab4_to_lbgr_32f}
#else },
CV_Assert(src.depth() == CV_8U); {
CV_Assert(src.channels() == 3 || src.channels() == 4); {lab_to_lbgra_8u, lab_to_lbgra_32f},
{lab4_to_lbgra_8u, lab4_to_lbgra_32f}
}
};
dcn = src.channels(); if (dcn <= 0) dcn = 3;
dst.create(src.size(), CV_MAKETYPE(src.depth(), dcn)); CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
cudaStream_t stream = StreamAccessor::getStream(st); dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
NppStreamHandler h(stream);
NppiSize oSizeROI; funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
oSizeROI.width = src.cols;
oSizeROI.height = src.rows;
if (dcn == 3)
nppSafeCall( nppiLUVToRGB_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
else
nppSafeCall( nppiLUVToRGB_8u_AC4R(src.ptr<Npp8u>(), static_cast<int>(src.step), dst.ptr<Npp8u>(), static_cast<int>(dst.step), oSizeROI) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
#endif
} }
void luv_to_bgr(const GpuMat& src, GpuMat& dst, int, Stream& stream) void lab_to_lrgb(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{ {
luv_to_rgb(src, dst, -1, stream); using namespace cv::gpu::device;
bgr_to_rgb(dst, dst, -1, stream); static const gpu_func_t funcs[2][2][2] =
{
{
{lab_to_lrgb_8u, lab_to_lrgb_32f},
{lab4_to_lrgb_8u, lab4_to_lrgb_32f}
},
{
{lab_to_lrgba_8u, lab_to_lrgba_32f},
{lab4_to_lrgba_8u, lab4_to_lrgba_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void bgr_to_luv(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{bgr_to_luv_8u, bgr_to_luv_32f},
{bgra_to_luv_8u, bgra_to_luv_32f}
},
{
{bgr_to_luv4_8u, bgr_to_luv4_32f},
{bgra_to_luv4_8u, bgra_to_luv4_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void rgb_to_luv(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{rgb_to_luv_8u, rgb_to_luv_32f},
{rgba_to_luv_8u, rgba_to_luv_32f}
},
{
{rgb_to_luv4_8u, rgb_to_luv4_32f},
{rgba_to_luv4_8u, rgba_to_luv4_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void lbgr_to_luv(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{lbgr_to_luv_8u, lbgr_to_luv_32f},
{lbgra_to_luv_8u, lbgra_to_luv_32f}
},
{
{lbgr_to_luv4_8u, lbgr_to_luv4_32f},
{lbgra_to_luv4_8u, lbgra_to_luv4_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void lrgb_to_luv(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{lrgb_to_luv_8u, lrgb_to_luv_32f},
{lrgba_to_luv_8u, lrgba_to_luv_32f}
},
{
{lrgb_to_luv4_8u, lrgb_to_luv4_32f},
{lrgba_to_luv4_8u, lrgba_to_luv4_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void luv_to_bgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{luv_to_bgr_8u, luv_to_bgr_32f},
{luv4_to_bgr_8u, luv4_to_bgr_32f}
},
{
{luv_to_bgra_8u, luv_to_bgra_32f},
{luv4_to_bgra_8u, luv4_to_bgra_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void luv_to_rgb(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{luv_to_rgb_8u, luv_to_rgb_32f},
{luv4_to_rgb_8u, luv4_to_rgb_32f}
},
{
{luv_to_rgba_8u, luv_to_rgba_32f},
{luv4_to_rgba_8u, luv4_to_rgba_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void luv_to_lbgr(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{luv_to_lbgr_8u, luv_to_lbgr_32f},
{luv4_to_lbgr_8u, luv4_to_lbgr_32f}
},
{
{luv_to_lbgra_8u, luv_to_lbgra_32f},
{luv4_to_lbgra_8u, luv4_to_lbgra_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
}
void luv_to_lrgb(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream)
{
using namespace cv::gpu::device;
static const gpu_func_t funcs[2][2][2] =
{
{
{luv_to_lrgb_8u, luv_to_lrgb_32f},
{luv4_to_lrgb_8u, luv4_to_lrgb_32f}
},
{
{luv_to_lrgba_8u, luv_to_lrgba_32f},
{luv4_to_lrgba_8u, luv4_to_lrgba_32f}
}
};
if (dcn <= 0) dcn = 3;
CV_Assert(src.depth() == CV_8U || src.depth() == CV_32F);
CV_Assert(src.channels() == 3 || src.channels() == 4);
CV_Assert(dcn == 3 || dcn == 4);
dst.create(src.size(), CV_MAKE_TYPE(src.depth(), dcn));
funcs[dcn == 4][src.channels() == 4][src.depth() == CV_32F](src, dst, StreamAccessor::getStream(stream));
} }
void rgba_to_mbgra(const GpuMat& src, GpuMat& dst, int, Stream& st) void rgba_to_mbgra(const GpuMat& src, GpuMat& dst, int, Stream& st)
@ -1475,15 +1741,15 @@ void cv::gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream
hls_to_bgr_full, // CV_HLS2BGR_FULL = 72 hls_to_bgr_full, // CV_HLS2BGR_FULL = 72
hls_to_rgb_full, // CV_HLS2RGB_FULL = 73 hls_to_rgb_full, // CV_HLS2RGB_FULL = 73
0, // CV_LBGR2Lab = 74 lbgr_to_lab, // CV_LBGR2Lab = 74
0, // CV_LRGB2Lab = 75 lrgb_to_lab, // CV_LRGB2Lab = 75
0, // CV_LBGR2Luv = 76 lbgr_to_luv, // CV_LBGR2Luv = 76
0, // CV_LRGB2Luv = 77 lrgb_to_luv, // CV_LRGB2Luv = 77
0, // CV_Lab2LBGR = 78 lab_to_lbgr, // CV_Lab2LBGR = 78
0, // CV_Lab2LRGB = 79 lab_to_lrgb, // CV_Lab2LRGB = 79
0, // CV_Luv2LBGR = 80 luv_to_lbgr, // CV_Luv2LBGR = 80
0, // CV_Luv2LRGB = 81 luv_to_lrgb, // CV_Luv2LRGB = 81
bgr_to_yuv, // CV_BGR2YUV = 82 bgr_to_yuv, // CV_BGR2YUV = 82
rgb_to_yuv, // CV_RGB2YUV = 83 rgb_to_yuv, // CV_RGB2YUV = 83
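
Tying the new dispatch table back to the public API, a hypothetical call sequence (before this patch the Lab/Luv entries went through 8-bit NPP code and the CV_LBGR2Lab..CV_Luv2LRGB slots were zero):

    #include "opencv2/gpu/gpu.hpp"
    #include "opencv2/imgproc/imgproc.hpp"   // for the CV_*2Lab / CV_Lab2* conversion codes

    void cvtColorSketch(const cv::gpu::GpuMat& srcBgr32f)   // CV_32FC3, values in [0, 1]
    {
        cv::gpu::GpuMat lab, back;
        cv::gpu::Stream stream;

        // 32-bit float Lab conversion now runs in the device kernels instead of the 8-bit NPP path.
        cv::gpu::cvtColor(srcBgr32f, lab, CV_BGR2Lab, 0, stream);

        // The linear-RGB variants are newly wired into the dispatch table (CV_LBGR2Lab ... CV_Luv2LRGB).
        cv::gpu::cvtColor(lab, back, CV_Lab2LBGR, 4, stream);

        stream.waitForCompletion();
    }
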

View File

@ -42,10 +42,10 @@
#if !defined CUDA_DISABLER #if !defined CUDA_DISABLER
#include <internal_shared.hpp> #include "internal_shared.hpp"
#include <opencv2/gpu/device/transform.hpp> #include "opencv2/gpu/device/transform.hpp"
#include <opencv2/gpu/device/color.hpp> #include "opencv2/gpu/device/color.hpp"
#include <cvt_colot_internal.h> #include "cvt_color_internal.h"
namespace cv { namespace gpu { namespace device namespace cv { namespace gpu { namespace device
{ {
@ -224,7 +224,7 @@ namespace cv { namespace gpu { namespace device
}; };
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \ #define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
void name(const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream) \ void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) \
{ \ { \
traits::functor_type functor = traits::create_functor(); \ traits::functor_type functor = traits::create_functor(); \
typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::argument_type src_t; \
@ -241,6 +241,10 @@ namespace cv { namespace gpu { namespace device
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \ #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
@ -339,46 +343,119 @@ namespace cv { namespace gpu { namespace device
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra)
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL
}}} // namespace cv { namespace gpu { namespace device }}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ #endif /* CUDA_DISABLER */
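The *_8U32F helper above stamps out one wrapper per depth by pasting a suffix onto both the function name and its traits type, so OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb) produces luv_to_lrgb_8u from luv_to_lrgb_traits<uchar> and luv_to_lrgb_32f from luv_to_lrgb_traits<float>. A tiny self-contained sketch of the token-pasting idea (the helper macros and traits names below are made up):

#include <cstdio>

#define IMPLEMENT_ONE(name, traits) \
    void name() { std::printf("%s instantiated from %s\n", #name, #traits); }

#define IMPLEMENT_8U32F(name) \
    IMPLEMENT_ONE(name ## _8u,  name ## _traits_uchar) \
    IMPLEMENT_ONE(name ## _32f, name ## _traits_float)

IMPLEMENT_8U32F(luv_to_lrgb)   // defines luv_to_lrgb_8u() and luv_to_lrgb_32f()

int main()
{
    luv_to_lrgb_8u();
    luv_to_lrgb_32f();
    return 0;
}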

View File

@ -46,8 +46,7 @@
#include <thrust/device_ptr.h> #include <thrust/device_ptr.h>
#include <thrust/remove.h> #include <thrust/remove.h>
#include <thrust/functional.h> #include <thrust/functional.h>
#include "opencv2/gpu/device/common.hpp"
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device { namespace globmotion { namespace cv { namespace gpu { namespace device { namespace globmotion {
@ -60,10 +59,10 @@ int compactPoints(int N, float *points0, float *points1, const uchar *mask)
thrust::device_ptr<float2> dpoints1((float2*)points1); thrust::device_ptr<float2> dpoints1((float2*)points1);
thrust::device_ptr<const uchar> dmask(mask); thrust::device_ptr<const uchar> dmask(mask);
return thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)), return (int)(thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)),
thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)), thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)),
dmask, thrust::not1(thrust::identity<uchar>())) dmask, thrust::not1(thrust::identity<uchar>()))
- thrust::make_zip_iterator(make_tuple(dpoints0, dpoints1)); - thrust::make_zip_iterator(make_tuple(dpoints0, dpoints1)));
} }
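compactPoints keeps the two point arrays in lockstep by running a single remove_if over a zip iterator, with the mask as the stencil; the change above only casts the resulting iterator difference to int for the return value. A host-side sketch of the same idiom (the container and functor names are invented):

#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <thrust/remove.h>
#include <cstdio>

struct MaskIsZero
{
    bool operator()(unsigned char m) const { return m == 0; }
};

int main()
{
    thrust::host_vector<float> xs0, xs1;          // two parallel "point" arrays
    thrust::host_vector<unsigned char> mask;
    for (int i = 0; i < 8; ++i)
    {
        xs0.push_back((float)i);
        xs1.push_back((float)(10 * i));
        mask.push_back((unsigned char)(i % 2));   // keep odd indices only
    }

    const int newLen = (int)(thrust::remove_if(
        thrust::make_zip_iterator(thrust::make_tuple(xs0.begin(), xs1.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(xs0.end(),   xs1.end())),
        mask.begin(), MaskIsZero())
        - thrust::make_zip_iterator(thrust::make_tuple(xs0.begin(), xs1.begin())));

    std::printf("%d points kept\n", newLen);      // prints 4
    return 0;
}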

View File

@ -295,7 +295,7 @@ namespace cv { namespace gpu { namespace device
int grid = divUp(workAmount, block); int grid = divUp(workAmount, block);
cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1);
Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize); Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize);
lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), integral.step / sizeof(int), objects, classified); lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), (int)integral.step / sizeof(int), objects, classified);
} }
} }
}}} }}}
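The launch above now passes (int)integral.step / sizeof(int): GpuMat::step is the (possibly padded) distance between consecutive rows in bytes, while a kernel that indexes ptr[y * stride + x] wants the stride in elements, so the host code converts it once and hands the kernel a plain int. A small sketch of that conversion:

#include <cstddef>
#include <cstdio>

// Byte pitch -> element stride, as done before the kernel launches above.
int elementStride(std::size_t stepInBytes, std::size_t elemSize)
{
    return (int)(stepInBytes / elemSize);
}

int main()
{
    // e.g. a padded row pitch of 2048 bytes holding int pixels -> 512 ints per row
    std::printf("%d\n", elementStride(2048, sizeof(int)));
    return 0;
}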

View File

@ -907,7 +907,7 @@ namespace minMaxLoc
getLaunchCfg(cols, rows, block, grid); getLaunchCfg(cols, rows, block, grid);
// For values // For values
b1cols = grid.x * grid.y * elem_size; b1cols = (int)(grid.x * grid.y * elem_size);
b1rows = 2; b1rows = 2;
// For locations // For locations

View File

@ -454,7 +454,7 @@ namespace cv { namespace gpu { namespace device
grid.x = divUp(cols, threads.x << 1); grid.x = divUp(cols, threads.x << 1);
grid.y = divUp(rows, threads.y); grid.y = divUp(rows, threads.y);
int elem_step = u.step/sizeof(T); int elem_step = (int)(u.step / sizeof(T));
for(int t = 0; t < iters; ++t) for(int t = 0; t < iters; ++t)
{ {

View File

@ -45,15 +45,19 @@
namespace cv { namespace gpu { namespace device namespace cv { namespace gpu { namespace device
{ {
#define OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name) \ #define OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name) \
void name(const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream); void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#define OPENCV_GPU_DECLARE_CVTCOLOR_ALL(name) \ #define OPENCV_GPU_DECLARE_CVTCOLOR_ALL(name) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _16u) \ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _16u) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f) OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f)
#define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(name) \ #define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(name) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f)
#define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(name) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f) \ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f) \
OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_8u) \ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_8u) \
@ -152,46 +156,119 @@ namespace cv { namespace gpu { namespace device
OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hsv) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hsv) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hsv4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hsv4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hsv) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hsv) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hsv4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hsv4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_rgb) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_rgba) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_rgb) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_rgba) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_bgr) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_bgra) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_bgr) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_bgra) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hls) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hls)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hls) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hls)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hls4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hls4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hls) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hls)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hls) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hls)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hls4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hls4) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_rgb) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_rgba) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_rgb) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_rgba) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_bgr) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_bgra) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_bgr) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_bgra) OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_lab)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_lab4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lrgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lrgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lrgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lrgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lbgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lbgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab_to_lbgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lab4_to_lbgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgb_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lrgba_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_luv)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgr_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(lbgra_to_luv4)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_rgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_rgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_bgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_bgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lrgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lrgb)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lrgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lrgba)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lbgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lbgr)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv_to_lbgra)
OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(luv4_to_lbgra)
#undef OPENCV_GPU_DECLARE_CVTCOLOR_ONE #undef OPENCV_GPU_DECLARE_CVTCOLOR_ONE
#undef OPENCV_GPU_DECLARE_CVTCOLOR_ALL #undef OPENCV_GPU_DECLARE_CVTCOLOR_ALL
#undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F #undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F
#undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F_FULL
}}} }}}
#endif #endif

View File

@ -176,28 +176,11 @@ void cv::gpu::FastNonLocalMeansDenoising::simpleMethod(const GpuMat& src, GpuMat
void cv::gpu::FastNonLocalMeansDenoising::labMethod( const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window, int block_window, Stream& s) void cv::gpu::FastNonLocalMeansDenoising::labMethod( const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window, int block_window, Stream& s)
{ {
#if (CUDA_VERSION < 5000)
(void)src;
(void)dst;
(void)h_luminance;
(void)h_color;
(void)search_window;
(void)block_window;
(void)s;
CV_Error( CV_GpuApiCallError, "Lab method required CUDA 5.0 and higher" );
#else
CV_Assert(src.type() == CV_8UC3); CV_Assert(src.type() == CV_8UC3);
lab.create(src.size(), src.type()); lab.create(src.size(), src.type());
cv::gpu::cvtColor(src, lab, CV_BGR2Lab, 0, s); cv::gpu::cvtColor(src, lab, CV_BGR2Lab, 0, s);
/*Mat t;
cv::cvtColor(Mat(src), t, CV_BGR2Lab);
lab.upload(t);*/
l.create(src.size(), CV_8U); l.create(src.size(), CV_8U);
ab.create(src.size(), CV_8UC2); ab.create(src.size(), CV_8UC2);
device::imgproc::fnlm_split_channels(lab, l, ab, StreamAccessor::getStream(s)); device::imgproc::fnlm_split_channels(lab, l, ab, StreamAccessor::getStream(s));
@ -207,11 +190,6 @@ void cv::gpu::FastNonLocalMeansDenoising::labMethod( const GpuMat& src, GpuMat&
device::imgproc::fnlm_merge_channels(l, ab, lab, StreamAccessor::getStream(s)); device::imgproc::fnlm_merge_channels(l, ab, lab, StreamAccessor::getStream(s));
cv::gpu::cvtColor(lab, dst, CV_Lab2BGR, 0, s); cv::gpu::cvtColor(lab, dst, CV_Lab2BGR, 0, s);
/*cv::cvtColor(Mat(lab), t, CV_Lab2BGR);
dst.upload(t);*/
#endif
} }
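With the CUDA-version guard removed, labMethod always runs the same pipeline: convert BGR to Lab on the GPU, split L from ab, denoise the two planes, merge them back and convert to BGR. A hedged usage sketch against the 2.4-era gpu API follows; the image path and filter strengths are placeholders, not values from the patch.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::Mat host = cv::imread("input.png");      // labMethod expects 8-bit BGR input
    if (host.empty() || host.type() != CV_8UC3)
        return 1;

    cv::gpu::GpuMat src(host), dst;

    cv::gpu::FastNonLocalMeansDenoising denoiser;
    // luminance strength, color strength, search window, block window, stream
    denoiser.labMethod(src, dst, 20.0f, 10.0f, 21, 7, cv::gpu::Stream::Null());

    cv::Mat result(dst);
    cv::imwrite("denoised.png", result);
    return 0;
}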

View File

@ -296,7 +296,7 @@ NCV_EXPORTS void ncvSetDebugOutputHandler(NCVDebugOutputHandler* func);
do \ do \
{ \ { \
cudaError_t res = cudaGetLastError(); \ cudaError_t res = cudaGetLastError(); \
ncvAssertPrintReturn(cudaSuccess==res, "cudaError_t=" << res, errCode); \ ncvAssertPrintReturn(cudaSuccess==res, "cudaError_t=" << (int)res, errCode); \
} while (0) } while (0)

View File

@ -71,9 +71,9 @@ void cv::gpu::VideoWriter_GPU::EncoderParams::save(const std::string&) const { t
#else // !defined HAVE_CUDA || !defined WIN32 #else // !defined HAVE_CUDA || !defined WIN32
#ifdef HAVE_FFMPEG #ifdef HAVE_FFMPEG
#include "cap_ffmpeg_impl.hpp" #include "../src/cap_ffmpeg_impl.hpp"
#else #else
#include "cap_ffmpeg_api.hpp" #include "../src/cap_ffmpeg_api.hpp"
#endif #endif

View File

@ -13,10 +13,50 @@
#include <float.h> #include <float.h>
#if defined(__GNUC__) && !defined(__APPLE__) #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
#include <fpu_control.h> #include <fpu_control.h>
#endif #endif
namespace
{
// http://www.christian-seiler.de/projekte/fpmath/
class FpuControl
{
public:
FpuControl();
~FpuControl();
private:
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
fpu_control_t fpu_oldcw, fpu_cw;
#elif defined(_WIN32) && !defined(_WIN64)
unsigned int fpu_oldcw, fpu_cw;
#endif
};
FpuControl::FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
_FPU_GETCW(fpu_oldcw);
fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
_FPU_SETCW(fpu_cw);
#elif defined(_WIN32) && !defined(_WIN64)
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
#endif
}
FpuControl::~FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
_FPU_SETCW(fpu_oldcw);
#elif defined(_WIN32) && !defined(_WIN64)
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
#endif
}
}
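FpuControl is a small RAII helper: the constructor switches the x87 control word to single-precision rounding (or uses _controlfp_s on 32-bit Windows), and the destructor restores the previous word, so the setting cannot leak past the scope even on an early return, something the open-coded #ifdef blocks it replaces further down had to handle by hand. A usage sketch, assuming the FpuControl class above is in scope (the function below is hypothetical):

#include <cstdio>

// Assumes the FpuControl class defined above is visible here.
float sumSinglePrecision(const float* data, int n)
{
    FpuControl fpu;   // force 24-bit mantissa rounding on x87 builds
    (void)fpu;        // only the constructor/destructor side effects matter

    float s = 0.0f;
    for (int i = 0; i < n; ++i)
        s += data[i];
    return s;         // previous control word restored automatically here
}

int main()
{
    const float v[3] = { 1.0f, 2.0f, 3.0f };
    std::printf("%f\n", sumSinglePrecision(v, 3));
    return 0;
}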
#include "TestHaarCascadeApplication.h" #include "TestHaarCascadeApplication.h"
#include "NCVHaarObjectDetection.hpp" #include "NCVHaarObjectDetection.hpp"
@ -47,12 +87,8 @@ bool TestHaarCascadeApplication::init()
return true; return true;
} }
bool TestHaarCascadeApplication::process() bool TestHaarCascadeApplication::process()
{ {
#if defined(__APPLE)
return true;
#endif
NCVStatus ncvStat; NCVStatus ncvStat;
bool rcode = false; bool rcode = false;
@ -205,56 +241,19 @@ bool TestHaarCascadeApplication::process()
} }
ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false); ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false);
#if !defined(__APPLE__) {
// calculations here
FpuControl fpu;
(void) fpu;
#if defined(__GNUC__) ncvStat = ncvApplyHaarClassifierCascade_host(
//http://www.christian-seiler.de/projekte/fpmath/ h_integralImage, h_rectStdDev, h_pixelMask,
detectionsOnThisScale_h,
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
searchRoiU, 1, 1.0f);
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
}
#ifndef _FPU_EXTENDED
#define _FPU_EXTENDED 0
#endif
#ifndef _FPU_DOUBLE
#define _FPU_DOUBLE 0
#endif
#ifndef _FPU_SINGLE
#define _FPU_SINGLE 0
#endif
fpu_control_t fpu_oldcw, fpu_cw;
_FPU_GETCW(fpu_oldcw); // store old cw
fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
_FPU_SETCW(fpu_cw);
// calculations here
ncvStat = ncvApplyHaarClassifierCascade_host(
h_integralImage, h_rectStdDev, h_pixelMask,
detectionsOnThisScale_h,
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
searchRoiU, 1, 1.0f);
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
_FPU_SETCW(fpu_oldcw); // restore old cw
#else
#ifndef _WIN64
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
#endif
ncvStat = ncvApplyHaarClassifierCascade_host(
h_integralImage, h_rectStdDev, h_pixelMask,
detectionsOnThisScale_h,
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
searchRoiU, 1, 1.0f);
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
#ifndef _WIN64
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
#endif
#endif
#endif
NCV_SKIP_COND_END NCV_SKIP_COND_END
int devId; int devId;

View File

@ -25,7 +25,7 @@
#include "NCVAutoTestLister.hpp" #include "NCVAutoTestLister.hpp"
#include "NCVTestSourceProvider.hpp" #include "NCVTestSourceProvider.hpp"
#include <main_test_nvidia.h> #include "main_test_nvidia.h"
static std::string path; static std::string path;
@ -97,7 +97,7 @@ void generateRectStdDevTests(NCVAutoTestLister &testLister, NCVTestSourceProvide
template <class T> template <class T>
void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src) void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src)
{ {
for (Ncv32u i=1; i<480; i+=3) for (Ncv32u i=2; i<10; ++i)
{ {
char testName[80]; char testName[80];
sprintf(testName, "TestResize_VGA_s%d", i); sprintf(testName, "TestResize_VGA_s%d", i);
@ -105,7 +105,7 @@ void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T>
testLister.add(new TestResize<T>(testName, src, 640, 480, i, false)); testLister.add(new TestResize<T>(testName, src, 640, 480, i, false));
} }
for (Ncv32u i=1; i<1080; i+=5) for (Ncv32u i=2; i<10; ++i)
{ {
char testName[80]; char testName[80];
sprintf(testName, "TestResize_1080_s%d", i); sprintf(testName, "TestResize_1080_s%d", i);
@ -117,7 +117,7 @@ void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T>
void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength) void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)
{ {
//compaction //compaction
for (Ncv32f _i=256.0; _i<maxLength; _i*=1.1f) for (Ncv32f _i=256.0; _i<maxLength; _i*=1.5f)
{ {
Ncv32u i = (Ncv32u)_i; Ncv32u i = (Ncv32u)_i;
char testName[80]; char testName[80];
@ -132,13 +132,13 @@ void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvid
testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 0)); testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 0));
testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 100)); testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 100));
} }
for (Ncv32u i=256*256-256; i<256*256+257; i++) for (Ncv32u i=256*256-10; i<256*256+10; i++)
{ {
char testName[80]; char testName[80];
sprintf(testName, "Compaction%d", i); sprintf(testName, "Compaction%d", i);
testLister.add(new TestCompact(testName, src, i, 0xFFFFFFFF, 40)); testLister.add(new TestCompact(testName, src, i, 0xFFFFFFFF, 40));
} }
for (Ncv32u i=256*256*256-10; i<256*256*256+10; i++) for (Ncv32u i=256*256*256-2; i<256*256*256+2; i++)
{ {
char testName[80]; char testName[80];
sprintf(testName, "Compaction%d", i); sprintf(testName, "Compaction%d", i);
@ -212,7 +212,7 @@ void generateDrawRectsTests(NCVAutoTestLister &testLister,
void generateVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength) void generateVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)
{ {
//growth //growth
for (Ncv32f _i=10.0; _i<maxLength; _i*=1.1f) for (Ncv32f _i=10.0; _i<maxLength; _i*=1.5f)
{ {
Ncv32u i = (Ncv32u)_i; Ncv32u i = (Ncv32u)_i;
char testName[80]; char testName[80];
@ -253,16 +253,16 @@ void generateHaarApplicationTests(NCVAutoTestLister &testLister, NCVTestSourcePr
Ncv32u maxWidth, Ncv32u maxHeight) Ncv32u maxWidth, Ncv32u maxHeight)
{ {
(void)maxHeight; (void)maxHeight;
for (Ncv32u i=20; i<512; i+=11) for (Ncv32u i=100; i<512; i+=41)
{ {
for (Ncv32u j=20; j<128; j+=5) for (Ncv32u j=100; j<128; j+=25)
{ {
char testName[80]; char testName[80];
sprintf(testName, "HaarAppl%d_%d", i, j); sprintf(testName, "HaarAppl%d_%d", i, j);
testLister.add(new TestHaarCascadeApplication(testName, src, path + "haarcascade_frontalface_alt.xml", j, i)); testLister.add(new TestHaarCascadeApplication(testName, src, path + "haarcascade_frontalface_alt.xml", j, i));
} }
} }
for (Ncv32f _i=20.0; _i<maxWidth; _i*=1.1f) for (Ncv32f _i=20.0; _i<maxWidth; _i*=1.5f)
{ {
Ncv32u i = (Ncv32u)_i; Ncv32u i = (Ncv32u)_i;
char testName[80]; char testName[80];
@ -285,11 +285,11 @@ bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel
NCVAutoTestLister testListerII("NPPST Integral Image", outputLevel); NCVAutoTestLister testListerII("NPPST Integral Image", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096); NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
NCVTestSourceProvider<Ncv32f> testSrcRandom_32f(2010, -1.0f, 1.0f, 4096, 4096); NCVTestSourceProvider<Ncv32f> testSrcRandom_32f(2010, -1.0f, 1.0f, 2048, 2048);
generateIntegralTests<Ncv8u, Ncv32u>(testListerII, testSrcRandom_8u, 4096, 4096); generateIntegralTests<Ncv8u, Ncv32u>(testListerII, testSrcRandom_8u, 2048, 2048);
generateIntegralTests<Ncv32f, Ncv32f>(testListerII, testSrcRandom_32f, 4096, 4096); generateIntegralTests<Ncv32f, Ncv32f>(testListerII, testSrcRandom_32f, 2048, 2048);
return testListerII.invoke(); return testListerII.invoke();
} }
@ -301,9 +301,9 @@ bool nvidia_NPPST_Squared_Integral_Image(const std::string& test_data_path, Outp
NCVAutoTestLister testListerSII("NPPST Squared Integral Image", outputLevel); NCVAutoTestLister testListerSII("NPPST Squared Integral Image", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096); NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
generateSquaredIntegralTests(testListerSII, testSrcRandom_8u, 4096, 4096); generateSquaredIntegralTests(testListerSII, testSrcRandom_8u, 2048, 2048);
return testListerSII.invoke(); return testListerSII.invoke();
} }
@ -315,9 +315,9 @@ bool nvidia_NPPST_RectStdDev(const std::string& test_data_path, OutputLevel outp
NCVAutoTestLister testListerRStdDev("NPPST RectStdDev", outputLevel); NCVAutoTestLister testListerRStdDev("NPPST RectStdDev", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096); NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
generateRectStdDevTests(testListerRStdDev, testSrcRandom_8u, 4096, 4096); generateRectStdDevTests(testListerRStdDev, testSrcRandom_8u, 2048, 2048);
return testListerRStdDev.invoke(); return testListerRStdDev.invoke();
} }
@ -329,8 +329,8 @@ bool nvidia_NPPST_Resize(const std::string& test_data_path, OutputLevel outputLe
NCVAutoTestLister testListerResize("NPPST Resize", outputLevel); NCVAutoTestLister testListerResize("NPPST Resize", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096); NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 4096, 4096); NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 2048, 2048);
generateResizeTests(testListerResize, testSrcRandom_32u); generateResizeTests(testListerResize, testSrcRandom_32u);
generateResizeTests(testListerResize, testSrcRandom_64u); generateResizeTests(testListerResize, testSrcRandom_64u);
@ -345,9 +345,9 @@ bool nvidia_NPPST_Vector_Operations(const std::string& test_data_path, OutputLev
NCVAutoTestLister testListerNPPSTVectorOperations("NPPST Vector Operations", outputLevel); NCVAutoTestLister testListerNPPSTVectorOperations("NPPST Vector Operations", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096); NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
generateNPPSTVectorTests(testListerNPPSTVectorOperations, testSrcRandom_32u, 4096*4096); generateNPPSTVectorTests(testListerNPPSTVectorOperations, testSrcRandom_32u, 2048*2048);
return testListerNPPSTVectorOperations.invoke(); return testListerNPPSTVectorOperations.invoke();
} }
@ -359,8 +359,8 @@ bool nvidia_NPPST_Transpose(const std::string& test_data_path, OutputLevel outpu
NCVAutoTestLister testListerTranspose("NPPST Transpose", outputLevel); NCVAutoTestLister testListerTranspose("NPPST Transpose", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096); NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 4096, 4096); NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 2048, 2048);
generateTransposeTests(testListerTranspose, testSrcRandom_32u); generateTransposeTests(testListerTranspose, testSrcRandom_32u);
generateTransposeTests(testListerTranspose, testSrcRandom_64u); generateTransposeTests(testListerTranspose, testSrcRandom_64u);
@ -404,7 +404,7 @@ bool nvidia_NCV_Haar_Cascade_Application(const std::string& test_data_path, Outp
NCVTestSourceProvider<Ncv8u> testSrcFacesVGA_8u(path + "group_1_640x480_VGA.pgm"); NCVTestSourceProvider<Ncv8u> testSrcFacesVGA_8u(path + "group_1_640x480_VGA.pgm");
generateHaarApplicationTests(testListerHaarAppl, testSrcFacesVGA_8u, 1280, 720); generateHaarApplicationTests(testListerHaarAppl, testSrcFacesVGA_8u, 640, 480);
return testListerHaarAppl.invoke(); return testListerHaarAppl.invoke();
} }
@ -416,9 +416,9 @@ bool nvidia_NCV_Hypotheses_Filtration(const std::string& test_data_path, OutputL
NCVAutoTestLister testListerHypFiltration("Hypotheses Filtration", outputLevel); NCVAutoTestLister testListerHypFiltration("Hypotheses Filtration", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096); NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
generateHypothesesFiltrationTests(testListerHypFiltration, testSrcRandom_32u, 1024); generateHypothesesFiltrationTests(testListerHypFiltration, testSrcRandom_32u, 512);
return testListerHypFiltration.invoke(); return testListerHypFiltration.invoke();
} }
@ -430,11 +430,11 @@ bool nvidia_NCV_Visualization(const std::string& test_data_path, OutputLevel out
NCVAutoTestLister testListerVisualize("Visualization", outputLevel); NCVAutoTestLister testListerVisualize("Visualization", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096); NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, RAND_MAX, 4096, 4096); NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, RAND_MAX, 2048, 2048);
generateDrawRectsTests(testListerVisualize, testSrcRandom_8u, testSrcRandom_32u, 4096, 4096); generateDrawRectsTests(testListerVisualize, testSrcRandom_8u, testSrcRandom_32u, 2048, 2048);
generateDrawRectsTests(testListerVisualize, testSrcRandom_32u, testSrcRandom_32u, 4096, 4096); generateDrawRectsTests(testListerVisualize, testSrcRandom_32u, testSrcRandom_32u, 2048, 2048);
return testListerVisualize.invoke(); return testListerVisualize.invoke();
} }
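The generators above shrink their geometric size loops (growth factor 1.1 -> 1.5) and cap the random sources at 2048x2048 instead of 4096x4096, which for the compaction loop, for example, drops the case count from about 117 sizes to about 24. A quick back-of-the-envelope check, purely illustrative and not part of the test suite:

#include <cmath>
#include <cstdio>

// Number of sizes visited by: for (x = from; x < to; x *= factor)
int steps(double from, double to, double factor)
{
    return (int)std::ceil(std::log(to / from) / std::log(factor));
}

int main()
{
    std::printf("compaction sizes, old: %d  new: %d\n",
                steps(256.0, 4096.0 * 4096.0, 1.1),    // ~117 sizes
                steps(256.0, 2048.0 * 2048.0, 1.5));   // ~24 sizes
    return 0;
}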

View File

@ -1609,72 +1609,444 @@ GPU_TEST_P(CvtColor, RGBA2YUV4)
EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5); EXPECT_MAT_NEAR(dst_gold, h_dst, 1e-5);
} }
#if defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
GPU_TEST_P(CvtColor, BGR2Lab) GPU_TEST_P(CvtColor, BGR2Lab)
{ {
if (depth != CV_8U) if (depth == CV_16U)
return; return;
cv::Mat src = readImage("stereobm/aloe-L.png"); cv::Mat src = img;
cv::gpu::GpuMat dst_lab = createMat(src.size(), src.type(), useRoi); cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst_lab, cv::COLOR_BGR2Lab); cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst_bgr = createMat(src.size(), src.type(), useRoi); cv::Mat dst_gold;
cv::gpu::cvtColor(dst_lab, dst_bgr, cv::COLOR_Lab2BGR); cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Lab);
EXPECT_MAT_NEAR(src, dst_bgr, 10); EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
} }
GPU_TEST_P(CvtColor, RGB2Lab) GPU_TEST_P(CvtColor, RGB2Lab)
{ {
if (depth != CV_8U) if (depth == CV_16U)
return; return;
cv::Mat src = readImage("stereobm/aloe-L.png"); cv::Mat src = img;
cv::gpu::GpuMat dst_lab = createMat(src.size(), src.type(), useRoi); cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst_lab, cv::COLOR_RGB2Lab); cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2Lab);
cv::gpu::GpuMat dst_bgr = createMat(src.size(), src.type(), useRoi); cv::Mat dst_gold;
cv::gpu::cvtColor(dst_lab, dst_bgr, cv::COLOR_Lab2RGB); cv::cvtColor(src, dst_gold, cv::COLOR_RGB2Lab);
EXPECT_MAT_NEAR(src, dst_bgr, 10); EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, BGRA2Lab4)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Lab, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Lab);
cv::Mat h_dst(dst);
cv::Mat channels[4];
cv::split(h_dst, channels);
cv::merge(channels, 3, h_dst);
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, LBGR2Lab)
{
if (depth == CV_16U)
return;
cv::Mat src = img;
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Lab);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Lab);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, LRGB2Lab)
{
if (depth == CV_16U)
return;
cv::Mat src = img;
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LRGB2Lab);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_LRGB2Lab);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, LBGRA2Lab4)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Lab, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Lab);
cv::Mat h_dst(dst);
cv::Mat channels[4];
cv::split(h_dst, channels);
cv::merge(channels, 3, h_dst);
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, Lab2BGR)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2BGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Lab2BGR);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
GPU_TEST_P(CvtColor, Lab2RGB)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2RGB);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Lab2RGB);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
GPU_TEST_P(CvtColor, Lab2BGRA)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2BGR, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Lab2BGR, 4);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
GPU_TEST_P(CvtColor, Lab2LBGR)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LBGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LBGR);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
GPU_TEST_P(CvtColor, Lab2LRGB)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LRGB);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LRGB);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
}
GPU_TEST_P(CvtColor, Lab2LRGBA)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Lab);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Lab2LRGB, 4);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Lab2LRGB, 4);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-5);
} }
GPU_TEST_P(CvtColor, BGR2Luv) GPU_TEST_P(CvtColor, BGR2Luv)
{ {
if (depth != CV_8U) if (depth == CV_16U)
return; return;
cv::Mat src = img; cv::Mat src = img;
cv::gpu::GpuMat dst_luv = createMat(src.size(), src.type(), useRoi); cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst_luv, cv::COLOR_BGR2Luv); cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst_rgb = createMat(src.size(), src.type(), useRoi); cv::Mat dst_gold;
cv::gpu::cvtColor(dst_luv, dst_rgb, cv::COLOR_Luv2BGR); cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Luv);
EXPECT_MAT_NEAR(src, dst_rgb, 10); EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
} }
GPU_TEST_P(CvtColor, RGB2Luv) GPU_TEST_P(CvtColor, RGB2Luv)
{ {
if (depth != CV_8U) if (depth == CV_16U)
return; return;
cv::Mat src = img; cv::Mat src = img;
cv::gpu::GpuMat dst_luv = createMat(src.size(), src.type(), useRoi); cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst_luv, cv::COLOR_RGB2Luv); cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_RGB2Luv);
cv::gpu::GpuMat dst_rgb = createMat(src.size(), src.type(), useRoi); cv::Mat dst_gold;
cv::gpu::cvtColor(dst_luv, dst_rgb, cv::COLOR_Luv2RGB); cv::cvtColor(src, dst_gold, cv::COLOR_RGB2Luv);
EXPECT_MAT_NEAR(src, dst_rgb, 10); EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
} }
GPU_TEST_P(CvtColor, BGRA2Luv4)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_BGR2Luv, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_BGR2Luv);
cv::Mat h_dst(dst);
cv::Mat channels[4];
cv::split(h_dst, channels);
cv::merge(channels, 3, h_dst);
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, LBGR2Luv)
{
if (depth == CV_16U)
return;
cv::Mat src = img;
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Luv);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Luv);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, LRGB2Luv)
{
if (depth == CV_16U)
return;
cv::Mat src = img;
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LRGB2Luv);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_LRGB2Luv);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, LBGRA2Luv4)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2RGBA);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_LBGR2Luv, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_LBGR2Luv);
cv::Mat h_dst(dst);
cv::Mat channels[4];
cv::split(h_dst, channels);
cv::merge(channels, 3, h_dst);
EXPECT_MAT_NEAR(dst_gold, h_dst, depth == CV_8U ? 1 : 1e-3);
}
GPU_TEST_P(CvtColor, Luv2BGR)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2BGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Luv2BGR);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
GPU_TEST_P(CvtColor, Luv2RGB)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2RGB);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Luv2RGB);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
GPU_TEST_P(CvtColor, Luv2BGRA)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2BGR, 4);
ASSERT_EQ(4, dst.channels());
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Luv2BGR, 4);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
GPU_TEST_P(CvtColor, Luv2LBGR)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LBGR);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LBGR);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
GPU_TEST_P(CvtColor, Luv2LRGB)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LRGB);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LRGB);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
GPU_TEST_P(CvtColor, Luv2LRGBA)
{
if (depth == CV_16U)
return;
cv::Mat src;
cv::cvtColor(img, src, cv::COLOR_BGR2Luv);
cv::gpu::GpuMat dst;
cv::gpu::cvtColor(loadMat(src, useRoi), dst, cv::COLOR_Luv2LRGB, 4);
cv::Mat dst_gold;
cv::cvtColor(src, dst_gold, cv::COLOR_Luv2LRGB, 4);
EXPECT_MAT_NEAR(dst_gold, dst, depth == CV_8U ? 1 : 1e-4);
}
#if defined (CUDA_VERSION) && (CUDA_VERSION >= 5000)
GPU_TEST_P(CvtColor, RGBA2mRGBA) GPU_TEST_P(CvtColor, RGBA2mRGBA)
{ {
if (depth != CV_8U) if (depth != CV_8U)

View File

@ -1124,7 +1124,7 @@ GPU_TEST_P(Divide_Scalar, WithOutScale)
cv::Mat dst_gold; cv::Mat dst_gold;
cv::divide(mat, val, dst_gold, 1, depth.second); cv::divide(mat, val, dst_gold, 1, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0); EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
} }
} }
@ -1154,7 +1154,7 @@ GPU_TEST_P(Divide_Scalar, WithScale)
cv::Mat dst_gold; cv::Mat dst_gold;
cv::divide(mat, val, dst_gold, scale, depth.second); cv::divide(mat, val, dst_gold, scale, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 0.0); EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 1.0);
} }
} }
@ -1210,7 +1210,7 @@ GPU_TEST_P(Divide_Scalar_Inv, Accuracy)
cv::Mat dst_gold; cv::Mat dst_gold;
cv::divide(scale, mat, dst_gold, depth.second); cv::divide(scale, mat, dst_gold, depth.second);
EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0); EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
} }
} }
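These hunks relax the tolerance for integer destination types from 0 to 1. A plausible reason (a guess, not stated in the patch) is that the CPU and GPU paths can round the quotient differently, for instance truncation versus round-to-nearest or a reciprocal multiply, which shifts the stored integer by one unit:

#include <cstdio>

int main()
{
    const double x = 7.0, v = 2.0;
    const int truncated = (int)(x / v);         // 3
    const int rounded   = (int)(x / v + 0.5);   // 4: differs by exactly one unit
    std::printf("%d vs %d\n", truncated, rounded);
    return 0;
}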

View File

@ -166,7 +166,7 @@ struct Labeling : testing::TestWithParam<cv::gpu::DeviceInfo>
} }
}; };
GPU_TEST_P(Labeling, ConnectedComponents) GPU_TEST_P(Labeling, DISABLED_ConnectedComponents)
{ {
cv::Mat image; cv::Mat image;
cvtColor(loat_image(), image, CV_BGR2GRAY); cvtColor(loat_image(), image, CV_BGR2GRAY);
@ -186,7 +186,7 @@ GPU_TEST_P(Labeling, ConnectedComponents)
cv::gpu::connectivityMask(cv::gpu::GpuMat(image), mask, cv::Scalar::all(0), cv::Scalar::all(2)); cv::gpu::connectivityMask(cv::gpu::GpuMat(image), mask, cv::Scalar::all(0), cv::Scalar::all(2));
ASSERT_NO_THROW(cv::gpu::labelComponents(mask, components)); cv::gpu::labelComponents(mask, components);
host.checkCorrectness(cv::Mat(components)); host.checkCorrectness(cv::Mat(components));
} }
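The DISABLED_ prefix is Google Test's convention for keeping a test compiled but skipped by default; it can still be forced from the command line, e.g. (binary name following the usual opencv_test_<module> pattern):

./opencv_test_gpu --gtest_also_run_disabled_tests --gtest_filter=*ConnectedComponents*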

View File

@ -44,9 +44,9 @@
#if defined(HAVE_CUDA) && defined(HAVE_OPENGL) #if defined(HAVE_CUDA) && defined(HAVE_OPENGL)
///////////////////////////////////////////// /////////////////////////////////////////////
// GlBuffer // Buffer
PARAM_TEST_CASE(GlBuffer, cv::Size, MatType) PARAM_TEST_CASE(Buffer, cv::Size, MatType)
{ {
static void SetUpTestCase() static void SetUpTestCase()
{ {
@ -68,29 +68,29 @@ PARAM_TEST_CASE(GlBuffer, cv::Size, MatType)
} }
}; };
GPU_TEST_P(GlBuffer, Constructor1) GPU_TEST_P(Buffer, Constructor1)
{ {
cv::GlBuffer buf(size.height, size.width, type, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(size.height, size.width, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(size.height, buf.rows()); EXPECT_EQ(size.height, buf.rows());
EXPECT_EQ(size.width, buf.cols()); EXPECT_EQ(size.width, buf.cols());
EXPECT_EQ(type, buf.type()); EXPECT_EQ(type, buf.type());
} }
GPU_TEST_P(GlBuffer, Constructor2) GPU_TEST_P(Buffer, Constructor2)
{ {
cv::GlBuffer buf(size, type, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(size.height, buf.rows()); EXPECT_EQ(size.height, buf.rows());
EXPECT_EQ(size.width, buf.cols()); EXPECT_EQ(size.width, buf.cols());
EXPECT_EQ(type, buf.type()); EXPECT_EQ(type, buf.type());
} }
GPU_TEST_P(GlBuffer, ConstructorFromMat) GPU_TEST_P(Buffer, ConstructorFromMat)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData; cv::Mat bufData;
buf.copyTo(bufData); buf.copyTo(bufData);
@ -98,12 +98,12 @@ GPU_TEST_P(GlBuffer, ConstructorFromMat)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, ConstructorFromGpuMat) GPU_TEST_P(Buffer, ConstructorFromGpuMat)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::gpu::GpuMat d_gold(gold); cv::gpu::GpuMat d_gold(gold);
cv::GlBuffer buf(d_gold, cv::GlBuffer::ARRAY_BUFFER); cv::ogl::Buffer buf(d_gold, cv::ogl::Buffer::ARRAY_BUFFER);
cv::Mat bufData; cv::Mat bufData;
buf.copyTo(bufData); buf.copyTo(bufData);
@ -111,11 +111,11 @@ GPU_TEST_P(GlBuffer, ConstructorFromGpuMat)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, ConstructorFromGlBuffer) GPU_TEST_P(Buffer, ConstructorFromBuffer)
{ {
cv::GlBuffer buf_gold(size, type, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf_gold(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::GlBuffer buf(buf_gold); cv::ogl::Buffer buf(buf_gold);
EXPECT_EQ(buf_gold.bufId(), buf.bufId()); EXPECT_EQ(buf_gold.bufId(), buf.bufId());
EXPECT_EQ(buf_gold.rows(), buf.rows()); EXPECT_EQ(buf_gold.rows(), buf.rows());
@ -123,7 +123,7 @@ GPU_TEST_P(GlBuffer, ConstructorFromGlBuffer)
EXPECT_EQ(buf_gold.type(), buf.type()); EXPECT_EQ(buf_gold.type(), buf.type());
} }
GPU_TEST_P(GlBuffer, ConstructorFromGlTexture2D) GPU_TEST_P(Buffer, ConstructorFromTexture2D)
{ {
const int depth = CV_MAT_DEPTH(type); const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type); const int cn = CV_MAT_CN(type);
@ -132,9 +132,9 @@ GPU_TEST_P(GlBuffer, ConstructorFromGlTexture2D)
return; return;
cv::Mat gold = randomMat(size, type, 0, 1.0); cv::Mat gold = randomMat(size, type, 0, 1.0);
cv::GlTexture2D tex_gold(gold, true); cv::ogl::Texture2D tex_gold(gold, true);
cv::GlBuffer buf(tex_gold, cv::GlBuffer::PIXEL_PACK_BUFFER, true); cv::ogl::Buffer buf(tex_gold, cv::ogl::Buffer::PIXEL_PACK_BUFFER, true);
cv::Mat bufData; cv::Mat bufData;
buf.copyTo(bufData); buf.copyTo(bufData);
@ -142,22 +142,22 @@ GPU_TEST_P(GlBuffer, ConstructorFromGlTexture2D)
EXPECT_MAT_NEAR(gold, bufData, 1e-2); EXPECT_MAT_NEAR(gold, bufData, 1e-2);
} }
GPU_TEST_P(GlBuffer, Create) GPU_TEST_P(Buffer, Create)
{ {
cv::GlBuffer buf; cv::ogl::Buffer buf;
buf.create(size.height, size.width, type, cv::GlBuffer::ARRAY_BUFFER, true); buf.create(size.height, size.width, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(size.height, buf.rows()); EXPECT_EQ(size.height, buf.rows());
EXPECT_EQ(size.width, buf.cols()); EXPECT_EQ(size.width, buf.cols());
EXPECT_EQ(type, buf.type()); EXPECT_EQ(type, buf.type());
} }
GPU_TEST_P(GlBuffer, CopyFromMat) GPU_TEST_P(Buffer, CopyFromMat)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf; cv::ogl::Buffer buf;
buf.copyFrom(gold, cv::GlBuffer::ARRAY_BUFFER, true); buf.copyFrom(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData; cv::Mat bufData;
buf.copyTo(bufData); buf.copyTo(bufData);
@ -165,13 +165,13 @@ GPU_TEST_P(GlBuffer, CopyFromMat)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, CopyFromGpuMat) GPU_TEST_P(Buffer, CopyFromGpuMat)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::gpu::GpuMat d_gold(gold); cv::gpu::GpuMat d_gold(gold);
cv::GlBuffer buf; cv::ogl::Buffer buf;
buf.copyFrom(d_gold, cv::GlBuffer::ARRAY_BUFFER, true); buf.copyFrom(d_gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData; cv::Mat bufData;
buf.copyTo(bufData); buf.copyTo(bufData);
@ -179,13 +179,13 @@ GPU_TEST_P(GlBuffer, CopyFromGpuMat)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, CopyFromGlBuffer) GPU_TEST_P(Buffer, CopyFromBuffer)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf_gold(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::GlBuffer buf; cv::ogl::Buffer buf;
buf.copyFrom(buf_gold, cv::GlBuffer::ARRAY_BUFFER, true); buf.copyFrom(buf_gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_NE(buf_gold.bufId(), buf.bufId()); EXPECT_NE(buf_gold.bufId(), buf.bufId());
@ -195,7 +195,7 @@ GPU_TEST_P(GlBuffer, CopyFromGlBuffer)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, CopyFromGlTexture2D) GPU_TEST_P(Buffer, CopyFromTexture2D)
{ {
const int depth = CV_MAT_DEPTH(type); const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type); const int cn = CV_MAT_CN(type);
@ -204,10 +204,10 @@ GPU_TEST_P(GlBuffer, CopyFromGlTexture2D)
return; return;
cv::Mat gold = randomMat(size, type, 0, 1.0); cv::Mat gold = randomMat(size, type, 0, 1.0);
cv::GlTexture2D tex_gold(gold, true); cv::ogl::Texture2D tex_gold(gold, true);
cv::GlBuffer buf; cv::ogl::Buffer buf;
buf.copyFrom(tex_gold, cv::GlBuffer::ARRAY_BUFFER, true); buf.copyFrom(tex_gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData; cv::Mat bufData;
buf.copyTo(bufData); buf.copyTo(bufData);
@ -215,11 +215,11 @@ GPU_TEST_P(GlBuffer, CopyFromGlTexture2D)
EXPECT_MAT_NEAR(gold, bufData, 1e-2); EXPECT_MAT_NEAR(gold, bufData, 1e-2);
} }
GPU_TEST_P(GlBuffer, CopyToGpuMat) GPU_TEST_P(Buffer, CopyToGpuMat)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::gpu::GpuMat dst; cv::gpu::GpuMat dst;
buf.copyTo(dst); buf.copyTo(dst);
@ -227,14 +227,14 @@ GPU_TEST_P(GlBuffer, CopyToGpuMat)
EXPECT_MAT_NEAR(gold, dst, 0); EXPECT_MAT_NEAR(gold, dst, 0);
} }
GPU_TEST_P(GlBuffer, CopyToGlBuffer) GPU_TEST_P(Buffer, CopyToBuffer)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::GlBuffer dst; cv::ogl::Buffer dst;
buf.copyTo(dst, cv::GlBuffer::ARRAY_BUFFER, true); buf.copyTo(dst, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_NE(buf.bufId(), dst.bufId()); EXPECT_NE(buf.bufId(), dst.bufId());
@ -244,7 +244,7 @@ GPU_TEST_P(GlBuffer, CopyToGlBuffer)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, CopyToGlTexture2D) GPU_TEST_P(Buffer, CopyToTexture2D)
{ {
const int depth = CV_MAT_DEPTH(type); const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type); const int cn = CV_MAT_CN(type);
@ -254,10 +254,10 @@ GPU_TEST_P(GlBuffer, CopyToGlTexture2D)
cv::Mat gold = randomMat(size, type, 0, 1.0); cv::Mat gold = randomMat(size, type, 0, 1.0);
cv::GlBuffer buf(gold, cv::GlBuffer::PIXEL_PACK_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::PIXEL_PACK_BUFFER, true);
cv::GlTexture2D tex; cv::ogl::Texture2D tex;
buf.copyTo(tex, cv::GlBuffer::PIXEL_PACK_BUFFER, true); buf.copyTo(tex, cv::ogl::Buffer::PIXEL_PACK_BUFFER, true);
cv::Mat texData; cv::Mat texData;
tex.copyTo(texData); tex.copyTo(texData);
@ -265,13 +265,13 @@ GPU_TEST_P(GlBuffer, CopyToGlTexture2D)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlBuffer, Clone) GPU_TEST_P(Buffer, Clone)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::GlBuffer dst = buf.clone(cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer dst = buf.clone(cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_NE(buf.bufId(), dst.bufId()); EXPECT_NE(buf.bufId(), dst.bufId());
@ -281,26 +281,26 @@ GPU_TEST_P(GlBuffer, Clone)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, MapHostRead) GPU_TEST_P(Buffer, MapHostRead)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat dst = buf.mapHost(cv::GlBuffer::READ_ONLY); cv::Mat dst = buf.mapHost(cv::ogl::Buffer::READ_ONLY);
EXPECT_MAT_NEAR(gold, dst, 0); EXPECT_MAT_NEAR(gold, dst, 0);
buf.unmapHost(); buf.unmapHost();
} }
GPU_TEST_P(GlBuffer, MapHostWrite) GPU_TEST_P(Buffer, MapHostWrite)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(size, type, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat dst = buf.mapHost(cv::GlBuffer::WRITE_ONLY); cv::Mat dst = buf.mapHost(cv::ogl::Buffer::WRITE_ONLY);
gold.copyTo(dst); gold.copyTo(dst);
buf.unmapHost(); buf.unmapHost();
dst.release(); dst.release();
@ -311,11 +311,11 @@ GPU_TEST_P(GlBuffer, MapHostWrite)
EXPECT_MAT_NEAR(gold, bufData, 0); EXPECT_MAT_NEAR(gold, bufData, 0);
} }
GPU_TEST_P(GlBuffer, MapDevice) GPU_TEST_P(Buffer, MapDevice)
{ {
cv::Mat gold = randomMat(size, type); cv::Mat gold = randomMat(size, type);
cv::GlBuffer buf(gold, cv::GlBuffer::ARRAY_BUFFER, true); cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::gpu::GpuMat dst = buf.mapDevice(); cv::gpu::GpuMat dst = buf.mapDevice();
@ -324,12 +324,12 @@ GPU_TEST_P(GlBuffer, MapDevice)
buf.unmapDevice(); buf.unmapDevice();
} }
INSTANTIATE_TEST_CASE_P(OpenGL, GlBuffer, testing::Combine(DIFFERENT_SIZES, ALL_TYPES)); INSTANTIATE_TEST_CASE_P(OpenGL, Buffer, testing::Combine(DIFFERENT_SIZES, ALL_TYPES));
///////////////////////////////////////////// /////////////////////////////////////////////
// GlTexture2D // Texture2D
PARAM_TEST_CASE(GlTexture2D, cv::Size, MatType) PARAM_TEST_CASE(Texture2D, cv::Size, MatType)
{ {
static void SetUpTestCase() static void SetUpTestCase()
{ {
@ -345,7 +345,7 @@ PARAM_TEST_CASE(GlTexture2D, cv::Size, MatType)
int type; int type;
int depth; int depth;
int cn; int cn;
cv::GlTexture2D::Format format; cv::ogl::Texture2D::Format format;
virtual void SetUp() virtual void SetUp()
{ {
@ -354,33 +354,33 @@ PARAM_TEST_CASE(GlTexture2D, cv::Size, MatType)
depth = CV_MAT_DEPTH(type); depth = CV_MAT_DEPTH(type);
cn = CV_MAT_CN(type); cn = CV_MAT_CN(type);
format = cn == 1 ? cv::GlTexture2D::DEPTH_COMPONENT : cn == 3 ? cv::GlTexture2D::RGB : cn == 4 ? cv::GlTexture2D::RGBA : cv::GlTexture2D::NONE; format = cn == 1 ? cv::ogl::Texture2D::DEPTH_COMPONENT : cn == 3 ? cv::ogl::Texture2D::RGB : cn == 4 ? cv::ogl::Texture2D::RGBA : cv::ogl::Texture2D::NONE;
} }
}; };
GPU_TEST_P(GlTexture2D, Constructor1) GPU_TEST_P(Texture2D, Constructor1)
{ {
cv::GlTexture2D tex(size.height, size.width, format, true); cv::ogl::Texture2D tex(size.height, size.width, format, true);
EXPECT_EQ(size.height, tex.rows()); EXPECT_EQ(size.height, tex.rows());
EXPECT_EQ(size.width, tex.cols()); EXPECT_EQ(size.width, tex.cols());
EXPECT_EQ(format, tex.format()); EXPECT_EQ(format, tex.format());
} }
GPU_TEST_P(GlTexture2D, Constructor2) GPU_TEST_P(Texture2D, Constructor2)
{ {
cv::GlTexture2D tex(size, format, true); cv::ogl::Texture2D tex(size, format, true);
EXPECT_EQ(size.height, tex.rows()); EXPECT_EQ(size.height, tex.rows());
EXPECT_EQ(size.width, tex.cols()); EXPECT_EQ(size.width, tex.cols());
EXPECT_EQ(format, tex.format()); EXPECT_EQ(format, tex.format());
} }
GPU_TEST_P(GlTexture2D, ConstructorFromMat) GPU_TEST_P(Texture2D, ConstructorFromMat)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::GlTexture2D tex(gold, true); cv::ogl::Texture2D tex(gold, true);
cv::Mat texData; cv::Mat texData;
tex.copyTo(texData, depth); tex.copyTo(texData, depth);
@ -388,12 +388,12 @@ GPU_TEST_P(GlTexture2D, ConstructorFromMat)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlTexture2D, ConstructorFromGpuMat) GPU_TEST_P(Texture2D, ConstructorFromGpuMat)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::gpu::GpuMat d_gold(gold); cv::gpu::GpuMat d_gold(gold);
cv::GlTexture2D tex(d_gold, true); cv::ogl::Texture2D tex(d_gold, true);
cv::Mat texData; cv::Mat texData;
tex.copyTo(texData, depth); tex.copyTo(texData, depth);
@ -401,12 +401,12 @@ GPU_TEST_P(GlTexture2D, ConstructorFromGpuMat)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlTexture2D, ConstructorFromGlBuffer) GPU_TEST_P(Texture2D, ConstructorFromBuffer)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::GlBuffer buf_gold(gold, cv::GlBuffer::PIXEL_UNPACK_BUFFER, true); cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
cv::GlTexture2D tex(buf_gold, true); cv::ogl::Texture2D tex(buf_gold, true);
cv::Mat texData; cv::Mat texData;
tex.copyTo(texData, depth); tex.copyTo(texData, depth);
@ -414,10 +414,10 @@ GPU_TEST_P(GlTexture2D, ConstructorFromGlBuffer)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlTexture2D, ConstructorFromGlTexture2D) GPU_TEST_P(Texture2D, ConstructorFromTexture2D)
{ {
cv::GlTexture2D tex_gold(size, format, true); cv::ogl::Texture2D tex_gold(size, format, true);
cv::GlTexture2D tex(tex_gold); cv::ogl::Texture2D tex(tex_gold);
EXPECT_EQ(tex_gold.texId(), tex.texId()); EXPECT_EQ(tex_gold.texId(), tex.texId());
EXPECT_EQ(tex_gold.rows(), tex.rows()); EXPECT_EQ(tex_gold.rows(), tex.rows());
@ -425,9 +425,9 @@ GPU_TEST_P(GlTexture2D, ConstructorFromGlTexture2D)
EXPECT_EQ(tex_gold.format(), tex.format()); EXPECT_EQ(tex_gold.format(), tex.format());
} }
GPU_TEST_P(GlTexture2D, Create) GPU_TEST_P(Texture2D, Create)
{ {
cv::GlTexture2D tex; cv::ogl::Texture2D tex;
tex.create(size.height, size.width, format, true); tex.create(size.height, size.width, format, true);
EXPECT_EQ(size.height, tex.rows()); EXPECT_EQ(size.height, tex.rows());
@ -435,11 +435,11 @@ GPU_TEST_P(GlTexture2D, Create)
EXPECT_EQ(format, tex.format()); EXPECT_EQ(format, tex.format());
} }
GPU_TEST_P(GlTexture2D, CopyFromMat) GPU_TEST_P(Texture2D, CopyFromMat)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::GlTexture2D tex; cv::ogl::Texture2D tex;
tex.copyFrom(gold, true); tex.copyFrom(gold, true);
cv::Mat texData; cv::Mat texData;
@ -448,12 +448,12 @@ GPU_TEST_P(GlTexture2D, CopyFromMat)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlTexture2D, CopyFromGpuMat) GPU_TEST_P(Texture2D, CopyFromGpuMat)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::gpu::GpuMat d_gold(gold); cv::gpu::GpuMat d_gold(gold);
cv::GlTexture2D tex; cv::ogl::Texture2D tex;
tex.copyFrom(d_gold, true); tex.copyFrom(d_gold, true);
cv::Mat texData; cv::Mat texData;
@ -462,12 +462,12 @@ GPU_TEST_P(GlTexture2D, CopyFromGpuMat)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlTexture2D, CopyFromGlBuffer) GPU_TEST_P(Texture2D, CopyFromBuffer)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::GlBuffer buf_gold(gold, cv::GlBuffer::PIXEL_UNPACK_BUFFER, true); cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
cv::GlTexture2D tex; cv::ogl::Texture2D tex;
tex.copyFrom(buf_gold, true); tex.copyFrom(buf_gold, true);
cv::Mat texData; cv::Mat texData;
@ -476,11 +476,11 @@ GPU_TEST_P(GlTexture2D, CopyFromGlBuffer)
EXPECT_MAT_NEAR(gold, texData, 1e-2); EXPECT_MAT_NEAR(gold, texData, 1e-2);
} }
GPU_TEST_P(GlTexture2D, CopyToGpuMat) GPU_TEST_P(Texture2D, CopyToGpuMat)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::GlTexture2D tex(gold, true); cv::ogl::Texture2D tex(gold, true);
cv::gpu::GpuMat dst; cv::gpu::GpuMat dst;
tex.copyTo(dst, depth); tex.copyTo(dst, depth);
@ -488,13 +488,13 @@ GPU_TEST_P(GlTexture2D, CopyToGpuMat)
EXPECT_MAT_NEAR(gold, dst, 1e-2); EXPECT_MAT_NEAR(gold, dst, 1e-2);
} }
GPU_TEST_P(GlTexture2D, CopyToGlBuffer) GPU_TEST_P(Texture2D, CopyToBuffer)
{ {
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1); cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::GlTexture2D tex(gold, true); cv::ogl::Texture2D tex(gold, true);
cv::GlBuffer dst; cv::ogl::Buffer dst;
tex.copyTo(dst, depth, true); tex.copyTo(dst, depth, true);
cv::Mat bufData; cv::Mat bufData;
@ -503,6 +503,6 @@ GPU_TEST_P(GlTexture2D, CopyToGlBuffer)
EXPECT_MAT_NEAR(gold, bufData, 1e-2); EXPECT_MAT_NEAR(gold, bufData, 1e-2);
} }
INSTANTIATE_TEST_CASE_P(OpenGL, GlTexture2D, testing::Combine(DIFFERENT_SIZES, testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4))); INSTANTIATE_TEST_CASE_P(OpenGL, Texture2D, testing::Combine(DIFFERENT_SIZES, testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4)));
#endif #endif
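
The tests above exercise the renamed interop classes; for reference, a minimal usage sketch (not part of this patch) of the cv::ogl::Buffer API as the tests use it. It assumes an OpenGL-enabled build, an active OpenGL context, and an interop header path that may differ between branches.

// Minimal usage sketch of the renamed cv::ogl::Buffer API (not part of the
// patch). Assumes an OpenGL-enabled build; the interop header path is an
// assumption and may differ between branches.
#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::namedWindow("ctx", cv::WINDOW_OPENGL);   // provides the GL context

    cv::Mat gold(64, 64, CV_32FC1);
    cv::randu(gold, 0.0, 1.0);

    // Upload host data into an OpenGL buffer object (autoRelease = true).
    cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);

    // Map the buffer back into host memory and compare with the source.
    cv::Mat mapped = buf.mapHost(cv::ogl::Buffer::READ_ONLY);
    double err = cv::norm(gold, mapped, cv::NORM_INF);
    buf.unmapHost();

    std::cout << "max abs difference: " << err << std::endl;
    return 0;
}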

View File

@ -241,15 +241,15 @@ void cv::updateWindow(const std::string& windowName)
#ifdef HAVE_OPENGL #ifdef HAVE_OPENGL
namespace namespace
{ {
std::map<std::string, cv::GlTexture2D> wndTexs; std::map<std::string, cv::ogl::Texture2D> wndTexs;
std::map<std::string, cv::GlTexture2D> ownWndTexs; std::map<std::string, cv::ogl::Texture2D> ownWndTexs;
std::map<std::string, cv::GlBuffer> ownWndBufs; std::map<std::string, cv::ogl::Buffer> ownWndBufs;
void CV_CDECL glDrawTextureCallback(void* userdata) void glDrawTextureCallback(void* userdata)
{ {
cv::GlTexture2D* texObj = static_cast<cv::GlTexture2D*>(userdata); cv::ogl::Texture2D* texObj = static_cast<cv::ogl::Texture2D*>(userdata);
cv::render(*texObj); cv::ogl::render(*texObj);
} }
} }
#endif // HAVE_OPENGL #endif // HAVE_OPENGL
@ -281,11 +281,11 @@ void cv::imshow( const std::string& winname, InputArray _img )
setOpenGlContext(winname); setOpenGlContext(winname);
if (_img.kind() == _InputArray::OPENGL_TEXTURE2D) if (_img.kind() == _InputArray::OPENGL_TEXTURE)
{ {
cv::GlTexture2D& tex = wndTexs[winname]; cv::ogl::Texture2D& tex = wndTexs[winname];
tex = _img.getGlTexture2D(); tex = _img.getOGlTexture2D();
tex.setAutoRelease(false); tex.setAutoRelease(false);
@ -293,11 +293,11 @@ void cv::imshow( const std::string& winname, InputArray _img )
} }
else else
{ {
cv::GlTexture2D& tex = ownWndTexs[winname]; cv::ogl::Texture2D& tex = ownWndTexs[winname];
if (_img.kind() == _InputArray::GPU_MAT) if (_img.kind() == _InputArray::GPU_MAT)
{ {
cv::GlBuffer& buf = ownWndBufs[winname]; cv::ogl::Buffer& buf = ownWndBufs[winname];
buf.copyFrom(_img); buf.copyFrom(_img);
buf.setAutoRelease(false); buf.setAutoRelease(false);
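
With the OPENGL_TEXTURE input kind handled above, imshow() can take an ogl::Texture2D directly; a caller-side sketch under the same assumptions as before (OpenGL-enabled highgui build, assumed header path, illustrative window name):

// Caller-side sketch of the imshow() path changed above (not part of the
// patch). Window name is illustrative; the interop header path is assumed.
#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>
#include <opencv2/highgui/highgui.hpp>

void showTexture(const cv::ogl::Texture2D& tex)
{
    cv::namedWindow("result", cv::WINDOW_OPENGL);

    // OPENGL_TEXTURE input: imshow keeps the texture in the per-window map
    // (wndTexs) and renders it through glDrawTextureCallback on redraw.
    cv::imshow("result", tex);
    cv::waitKey();
}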

View File

@ -474,21 +474,6 @@ CV_EXPORTS void PrintTo(const Size& sz, ::std::ostream* os);
INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\ INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
void fixture##_##name::PerfTestBody() void fixture##_##name::PerfTestBody()
#define GPU_PERF_TEST_P(fixture, name, params) \
class fixture##_##name : public fixture {\
public:\
fixture##_##name() {}\
protected:\
virtual void PerfTestBody();\
};\
TEST_P(fixture##_##name, name /*perf*/) \
{ \
try { RunPerfTestBody(); } \
catch (...) { cv::gpu::resetDevice(); throw; } \
} \
INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
void fixture##_##name::PerfTestBody()
#define CV_PERF_TEST_MAIN(testsuitname, ...) \ #define CV_PERF_TEST_MAIN(testsuitname, ...) \
int main(int argc, char **argv)\ int main(int argc, char **argv)\

View File

@ -37,41 +37,20 @@ const int win_height = 640;
struct DrawData struct DrawData
{ {
GlArrays arr; ogl::Arrays arr;
GlTexture2D tex; ogl::Texture2D tex;
GlBuffer indices; ogl::Buffer indices;
}; };
void CV_CDECL draw(void* userdata); void draw(void* userdata);
void CV_CDECL draw(void* userdata) void draw(void* userdata)
{ {
static double angle = 0.0;
DrawData* data = static_cast<DrawData*>(userdata); DrawData* data = static_cast<DrawData*>(userdata);
glMatrixMode(GL_PROJECTION); glRotated(0.6, 0, 1, 0);
glLoadIdentity();
gluPerspective(45.0, (double)win_width / win_height, 0.1, 100.0);
glMatrixMode(GL_MODELVIEW); ogl::render(data->arr, data->indices, ogl::TRIANGLES);
glLoadIdentity();
gluLookAt(0, 0, 3, 0, 0, 0, 0, 1, 0);
glRotated(angle, 0, 1, 0);
glEnable(GL_TEXTURE_2D);
data->tex.bind();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvi(GL_TEXTURE_2D, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glDisable(GL_CULL_FACE);
render(data->arr, data->indices, RenderMode::TRIANGLES);
angle += 0.3;
} }
int main(int argc, char* argv[]) int main(int argc, char* argv[])
@ -108,12 +87,28 @@ int main(int argc, char* argv[])
data.indices.copyFrom(indices); data.indices.copyFrom(indices);
data.tex.copyFrom(img); data.tex.copyFrom(img);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45.0, (double)win_width / win_height, 0.1, 100.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0, 0, 3, 0, 0, 0, 0, 1, 0);
glEnable(GL_TEXTURE_2D);
data.tex.bind();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvi(GL_TEXTURE_2D, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glDisable(GL_CULL_FACE);
setOpenGlDrawCallback("OpenGL", draw, &data); setOpenGlDrawCallback("OpenGL", draw, &data);
for (;;) for (;;)
{ {
updateWindow("OpenGL"); updateWindow("OpenGL");
int key = waitKey(10); int key = waitKey(40);
if ((key & 0xff) == 27) if ((key & 0xff) == 27)
break; break;
} }
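
After this change the sample does its one-time fixed-function setup (projection, modelview, texture parameters) in main(), and the draw callback only applies the incremental rotation and issues the draw call. A condensed sketch of the resulting structure, not part of the diff: the 0.6-degree rotation step, waitKey(40) and ESC handling match the hunks above, while the includes, data setup and window sizing are assumptions.

// Condensed sketch of the sample's structure after this patch (not part of
// the diff). Data setup and the one-time GL state are elided; see the hunks
// above for the exact calls.
#include <GL/gl.h>                            // platform GL header (Windows needs <windows.h> first)

#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>    // header path assumed
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

struct DrawData
{
    ogl::Arrays arr;       // vertex / texture-coordinate arrays
    ogl::Texture2D tex;    // texture bound once in main()
    ogl::Buffer indices;   // element index buffer
};

static void draw(void* userdata)
{
    DrawData* data = static_cast<DrawData*>(userdata);

    // Per-frame work only: accumulate the rotation into the current
    // modelview matrix and draw through the ogl wrapper.
    glRotated(0.6, 0, 1, 0);
    ogl::render(data->arr, data->indices, ogl::TRIANGLES);
}

int main()
{
    namedWindow("OpenGL", WINDOW_OPENGL);

    DrawData data;
    // ... fill data.arr, data.indices and data.tex as in the sample above ...

    // One-time projection / modelview / texture setup now lives here in
    // main() instead of being repeated inside draw() (see the hunk above).

    setOpenGlDrawCallback("OpenGL", draw, &data);

    for (;;)
    {
        updateWindow("OpenGL");
        if ((waitKey(40) & 0xff) == 27)   // ESC exits
            break;
    }
    return 0;
}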