added support to build without cuda.

Author: Anatoly Baksheev
Date:   2010-07-15 13:36:00 +00:00
parent  e1bd5aeadd
commit  2c84a66ec7
9 changed files with 169 additions and 138 deletions
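Every file touched below applies the same pattern: CUDA runtime calls and GPU entry points are routed through HAVE_CUDA-guarded macros and stubs, so the gpu module still compiles when CUDA is unavailable and fails loudly only when a GPU code path is actually used. The following is a minimal, self-contained sketch of that pattern, not the library's code: std::runtime_error stands in for OpenCV's CV_Error(CV_GpuNotFound, ...) / cv::error machinery, and the function bodies are simplified.

    #include <iostream>
    #include <stdexcept>

    // Sketch only: a plain exception replaces CV_Error(CV_GpuNotFound, ...).
    #ifndef HAVE_CUDA
        #define cudaSafeCall(expr) \
            throw std::runtime_error("The library is compiled with no GPU support")
    #else
        #include <cuda_runtime_api.h>
        #define cudaSafeCall(expr) \
            do { cudaError_t e = (expr); if (e != cudaSuccess) \
                     throw std::runtime_error(cudaGetErrorString(e)); } while (0)
    #endif

    int getCudaEnabledDeviceCount()
    {
    #ifndef HAVE_CUDA
        return 0;                 // no-CUDA build: report zero devices, never fail
    #else
        int count = 0;
        cudaSafeCall(cudaGetDeviceCount(&count));
        return count;
    #endif
    }

    void setDevice(int device)
    {
    #ifndef HAVE_CUDA
        (void)device;
        cudaSafeCall(0);          // stub: any real GPU operation raises an error
    #else
        cudaSafeCall(cudaSetDevice(device));
    #endif
    }

    int main()
    {
        std::cout << "CUDA devices: " << getCudaEnabledDeviceCount() << "\n";
    }

Compiled without -DHAVE_CUDA this prints 0 and never touches the CUDA runtime; compiled with it, the same source forwards to cudaGetDeviceCount/cudaSetDevice.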

View File

@@ -41,33 +41,37 @@
 //M*/
 #include "precomp.hpp"
 #include "opencv2/gpu/stream_access.hpp"
 using namespace cv;
 using namespace cv::gpu;
-cv::gpu::CudaStream::CudaStream() : impl( fastMalloc(sizeof(cudaStream_t)) )
+cv::gpu::CudaStream::CudaStream() : impl( (Impl*)fastMalloc(sizeof(Impl)) )
 {
-    cudaSafeCall( cudaStreamCreate((cudaStream_t*)impl) );
+    //cudaSafeCall( cudaStreamCreate( &impl->stream) );
 }
 cv::gpu::CudaStream::~CudaStream()
 {
-    cudaSafeCall( cudaStreamDestroy( *(cudaStream_t*)impl ) );
-    cv::fastFree( impl );
+    if (impl)
+    {
+        cudaSafeCall( cudaStreamDestroy( *(cudaStream_t*)impl ) );
+        cv::fastFree( impl );
+    }
 }
 bool cv::gpu::CudaStream::queryIfComplete()
 {
-    cudaError_t err = cudaStreamQuery( *(cudaStream_t*)impl );
+    //cudaError_t err = cudaStreamQuery( *(cudaStream_t*)impl );
-    if (err == cudaSuccess)
-        return true;
+    //if (err == cudaSuccess)
+    //    return true;
-    if (err == cudaErrorNotReady)
-        return false;
+    //if (err == cudaErrorNotReady)
+    //    return false;
-    //cudaErrorInvalidResourceHandle
-    cudaSafeCall( err );
+    ////cudaErrorInvalidResourceHandle
+    //cudaSafeCall( err );
     return true;
 }
 void cv::gpu::CudaStream::waitForCompletion()

View File

@@ -45,6 +45,16 @@
 using namespace cv;
 using namespace cv::gpu;
+#ifndef HAVE_CUDA
+CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
+CV_EXPORTS string cv::gpu::getDeviceName(int /*device*/) { cudaSafeCall(0); return 0; }
+CV_EXPORTS void cv::gpu::setDevice(int /*device*/) { cudaSafeCall(0); }
+CV_EXPORTS void cv::gpu::getComputeCapability(int /*device*/, int* /*major*/, int* /*minor*/) { cudaSafeCall(0); }
+CV_EXPORTS int cv::gpu::getNumberOfSMs(int /*device*/) { cudaSafeCall(0); return 0; }
+#else
 CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount()
 {
     int count;
@@ -78,4 +88,6 @@ CV_EXPORTS int cv::gpu::getNumberOfSMs(int device)
     cudaDeviceProp prop;
     cudaSafeCall( cudaGetDeviceProperties( &prop, device ) );
     return prop.multiProcessorCount;
-}
+}
+#endif
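Because the no-CUDA stub of getCudaEnabledDeviceCount above simply returns 0, application code can use the same run-time probe whether or not the library was built with CUDA. A hedged usage sketch: the opencv2/gpu/gpu.hpp include path is an assumption about the module layout of that era, while the functions themselves are the ones declared in this diff.

    #include <iostream>
    #include "opencv2/gpu/gpu.hpp"   // assumed header location for the gpu module

    int main()
    {
        // 0 both when OpenCV was built without CUDA and when no device is present.
        if (cv::gpu::getCudaEnabledDeviceCount() == 0)
        {
            std::cout << "No CUDA support available\n";
            return 0;
        }

        int major = 0, minor = 0;
        cv::gpu::getComputeCapability(0, &major, &minor);
        std::cout << cv::gpu::getDeviceName(0) << " (compute " << major << "." << minor
                  << ", " << cv::gpu::getNumberOfSMs(0) << " multiprocessors)\n";
        return 0;
    }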

View File

@@ -57,41 +57,39 @@
 #include "cuda_shared.hpp"
+#ifndef HAVE_CUDA
+#define cudaSafeCall(err) CV_Error(CV_GpuNotFound, "The library is compilled with no GPU support")
+#define cudaCallerSafeCall(err) CV_Error(CV_GpuNotFound, "The library is compilled with no GPU support")
+#else /* HAVE_CUDA */
 #if _MSC_VER >= 1200
-#pragma warning (disable : 4100 4211 4201 4408)
+    #pragma warning (disable : 4100 4211 4201 4408)
 #endif
 #include "cuda_runtime_api.h"
+#define cudaCallerSafeCall(err) err;
-#define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
 //inline void __cudaSafeCall( cudaError err, const char *file, const int line )
 //{
 //    if( cudaSuccess != err)
 //        CV_Error_(CV_StsAssert, ("%s(%i) : Runtime API error : %s.\n", cudaGetErrorString(err)));
 //}
+#ifdef __GNUC__
+    #define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, __func__)
+#else
+    #define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
+#endif
 namespace cv
 {
     namespace gpu
     {
-        inline void __cudaSafeCall( cudaError err, const char *file, const int line )
-        {
+        static inline void __cudaSafeCall( cudaError err, const char *file, const int line, const char *func = "")
+        {
             if( cudaSuccess != err)
-            {
-                std::cerr << file << "(" << line << ") : cudaSafeCall() Runtime API error : " << cudaGetErrorString(err) << "\n";
-                exit(-1);
-            }
-        }
-        template<class T>
-        inline DevMem2D_<T> getDevMem(const GpuMat& mat)
-        {
-            return DevMem2D_<T>(mat.rows, mat.cols, mat.data, mat.step);
-        }
+                cv::error( cv::Exception(CV_GpuApiCallError, cudaGetErrorString(err), func, file, line) );
+        }
     }
 }
+#endif /* HAVE_CUDA */
 #endif
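The reworked __cudaSafeCall above no longer prints to std::cerr and calls exit(-1); it hands the failure to cv::error as a cv::Exception carrying the file, the line and, under GCC, the enclosing function name picked up through __func__. A small standalone sketch of that reporting shape: reportFailure and apiSafeCall are illustrative names, and a plain fprintf stands in for cv::error.

    #include <cstdio>

    // Stand-in for cv::error(cv::Exception(CV_GpuApiCallError, ...)): just reports.
    static inline void reportFailure(int err, const char* file, int line, const char* func = "")
    {
        if (err != 0)
            std::fprintf(stderr, "%s(%d) in %s: API call failed with code %d\n",
                         file, line, *func ? func : "<unknown>", err);
    }

    // GCC provides __func__, so the macro can forward the caller's name;
    // other compilers fall back to the default "" argument, as in the diff above.
    #ifdef __GNUC__
        #define apiSafeCall(expr) reportFailure((expr), __FILE__, __LINE__, __func__)
    #else
        #define apiSafeCall(expr) reportFailure((expr), __FILE__, __LINE__)
    #endif

    static int failingCall() { return 7; }   // pretend runtime call returning an error code

    int main()
    {
        apiSafeCall(failingCall());   // reports the call site's file, line and function
        return 0;
    }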

View File

@@ -41,7 +41,6 @@
 //M*/
 #include "precomp.hpp"
-#include <limits>
 using namespace cv;
 using namespace cv::gpu;
@@ -69,5 +68,5 @@ void StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat&
     DevMem2D disp = disparity;
     DevMem2D_<uint> mssd = minSSD;
-    impl::stereoBM_GPU(left, right, disp, ndisp, mssd);
+    cudaCallerSafeCall( impl::stereoBM_GPU(left, right, disp, ndisp, mssd) );
 }