compilation with no CUDA refactored

This commit is contained in:
Anatoly Baksheev
2010-07-19 09:31:12 +00:00
parent 20e2dc84b0
commit 07825bad1e
15 changed files with 555 additions and 587 deletions


@@ -0,0 +1,80 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CUDA_SHARED_HPP__
#define __OPENCV_CUDA_SHARED_HPP__
#include "opencv2/gpu/devmem2d.hpp"
#include "cuda_runtime_api.h"
namespace cv
{
namespace gpu
{
typedef unsigned char uchar;
typedef unsigned short ushort;
typedef unsigned int uint;
extern "C" void error( const char *error_string, const char *file, const int line, const char *func = "");
namespace impl
{
static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }
extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, DevMem2D& disp, int maxdisp, DevMem2D_<uint>& minSSD_buf);
}
}
}
#if defined(__GNUC__)
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__GNUC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#endif
static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
{
if( cudaSuccess != err)
cv::gpu::error(cudaGetErrorString(err), file, line, func);
}
#endif /* __OPENCV_CUDA_SHARED_HPP__ */
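A minimal sketch of how the helpers declared above would typically be used from host code; the kernel name and image argument are placeholders for illustration, not part of this header:

// Hypothetical host-side wrapper showing cudaSafeCall and impl::divUp in use.
// someKernel is a placeholder; only the grid math and error checking are the point.
#include "cuda_shared.hpp"

void launchExample(const cv::gpu::DevMem2D& img)
{
    dim3 threads(16, 16, 1);
    // divUp rounds up so the grid covers every pixel even when the image size
    // is not a multiple of the block size
    dim3 grid(cv::gpu::impl::divUp(img.cols, threads.x),
              cv::gpu::impl::divUp(img.rows, threads.y), 1);

    // someKernel<<<grid, threads>>>(img.ptr, img.step, img.cols, img.rows);

    // cudaSafeCall forwards any failure to cv::gpu::error with file/line context
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaThreadSynchronize() );
}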


@@ -0,0 +1,318 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "cuda_shared.hpp"
#define ROWSperTHREAD 21 // the number of rows a thread will process
#define BLOCK_W 128 // the thread block width (464)
#define N_DISPARITIES 8
#define STEREO_MIND 0 // The minimum d range to check
#define STEREO_DISP_STEP N_DISPARITIES // the disparity step: each pass of the kernel evaluates N_DISPARITIES disparities
#define RADIUS 9 // kernel radius; the matching window is WINSZ x WINSZ = 19x19
#define WINSZ (2 * RADIUS + 1)
#define N_DIRTY_PIXELS (2 * RADIUS)
#define COL_SSD_SIZE (BLOCK_W + N_DIRTY_PIXELS)
#define SHARED_MEM_SIZE (COL_SSD_SIZE) // amount of shared memory used
__constant__ unsigned int* cminSSDImage;
__constant__ size_t cminSSD_step;
__constant__ int cwidth;
__constant__ int cheight;
namespace device_code
{
__device__ int SQ(int a)
{
return a * a;
}
__device__ unsigned int CalcSSD(unsigned int *col_ssd_cache, unsigned int *col_ssd)
{
unsigned int cache = 0;
unsigned int cache2 = 0;
for(int i = 1; i <= RADIUS; i++)
cache += col_ssd[i];
col_ssd_cache[0] = cache;
__syncthreads();
if (threadIdx.x < BLOCK_W - RADIUS)
cache2 = col_ssd_cache[RADIUS];
else
for(int i = RADIUS + 1; i < WINSZ; i++)
cache2 += col_ssd[i];
return col_ssd[0] + cache + cache2;
}
__device__ uint2 MinSSD(unsigned int *col_ssd_cache, unsigned int *col_ssd)
{
unsigned int ssd[N_DISPARITIES];
ssd[0] = CalcSSD(col_ssd_cache, col_ssd + 0 * SHARED_MEM_SIZE);
ssd[1] = CalcSSD(col_ssd_cache, col_ssd + 1 * SHARED_MEM_SIZE);
ssd[2] = CalcSSD(col_ssd_cache, col_ssd + 2 * SHARED_MEM_SIZE);
ssd[3] = CalcSSD(col_ssd_cache, col_ssd + 3 * SHARED_MEM_SIZE);
ssd[4] = CalcSSD(col_ssd_cache, col_ssd + 4 * SHARED_MEM_SIZE);
ssd[5] = CalcSSD(col_ssd_cache, col_ssd + 5 * SHARED_MEM_SIZE);
ssd[6] = CalcSSD(col_ssd_cache, col_ssd + 6 * SHARED_MEM_SIZE);
ssd[7] = CalcSSD(col_ssd_cache, col_ssd + 7 * SHARED_MEM_SIZE);
int mssd = min(min(min(ssd[0], ssd[1]), min(ssd[4], ssd[5])), min(min(ssd[2], ssd[3]), min(ssd[6], ssd[7])));
int bestIdx = 0;
for (int i = 0; i < N_DISPARITIES; i++)
{
if (mssd == ssd[i])
bestIdx = i;
}
return make_uint2(mssd, bestIdx);
}
__device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, unsigned int *col_ssd)
{
unsigned char leftPixel1;
unsigned char leftPixel2;
unsigned char rightPixel1[8];
unsigned char rightPixel2[8];
unsigned int diff1, diff2;
leftPixel1 = imageL[idx1];
leftPixel2 = imageL[idx2];
idx1 = idx1 - d;
idx2 = idx2 - d;
rightPixel1[7] = imageR[idx1 - 7];
rightPixel1[0] = imageR[idx1 - 0];
rightPixel1[1] = imageR[idx1 - 1];
rightPixel1[2] = imageR[idx1 - 2];
rightPixel1[3] = imageR[idx1 - 3];
rightPixel1[4] = imageR[idx1 - 4];
rightPixel1[5] = imageR[idx1 - 5];
rightPixel1[6] = imageR[idx1 - 6];
rightPixel2[7] = imageR[idx2 - 7];
rightPixel2[0] = imageR[idx2 - 0];
rightPixel2[1] = imageR[idx2 - 1];
rightPixel2[2] = imageR[idx2 - 2];
rightPixel2[3] = imageR[idx2 - 3];
rightPixel2[4] = imageR[idx2 - 4];
rightPixel2[5] = imageR[idx2 - 5];
rightPixel2[6] = imageR[idx2 - 6];
diff1 = leftPixel1 - rightPixel1[0];
diff2 = leftPixel2 - rightPixel2[0];
col_ssd[0 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[1];
diff2 = leftPixel2 - rightPixel2[1];
col_ssd[1 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[2];
diff2 = leftPixel2 - rightPixel2[2];
col_ssd[2 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[3];
diff2 = leftPixel2 - rightPixel2[3];
col_ssd[3 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[4];
diff2 = leftPixel2 - rightPixel2[4];
col_ssd[4 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[5];
diff2 = leftPixel2 - rightPixel2[5];
col_ssd[5 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[6];
diff2 = leftPixel2 - rightPixel2[6];
col_ssd[6 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[7];
diff2 = leftPixel2 - rightPixel2[7];
col_ssd[7 * SHARED_MEM_SIZE] += SQ(diff2) - SQ(diff1);
}
__device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, unsigned int *col_ssd)
{
unsigned char leftPixel1;
int idx;
unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};
for(int i = 0; i < WINSZ; i++)
{
idx = y_tex * im_pitch + x_tex;
leftPixel1 = imageL[idx];
idx = idx - d;
diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);
y_tex += 1;
}
col_ssd[0 * SHARED_MEM_SIZE] = diffa[0];
col_ssd[1 * SHARED_MEM_SIZE] = diffa[1];
col_ssd[2 * SHARED_MEM_SIZE] = diffa[2];
col_ssd[3 * SHARED_MEM_SIZE] = diffa[3];
col_ssd[4 * SHARED_MEM_SIZE] = diffa[4];
col_ssd[5 * SHARED_MEM_SIZE] = diffa[5];
col_ssd[6 * SHARED_MEM_SIZE] = diffa[6];
col_ssd[7 * SHARED_MEM_SIZE] = diffa[7];
}
extern "C" __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, unsigned char* disp, size_t disp_pitch, int maxdisp)
{
extern __shared__ unsigned int col_ssd_cache[];
unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
unsigned int *col_ssd_extra = threadIdx.x < N_DIRTY_PIXELS ? col_ssd + BLOCK_W : 0;
//#define X (blockIdx.x * BLOCK_W + threadIdx.x + STEREO_MAXD)
int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp);
//#define Y (__mul24(blockIdx.y, ROWSperTHREAD) + RADIUS)
#define Y (blockIdx.y * ROWSperTHREAD + RADIUS)
//int Y = blockIdx.y * ROWSperTHREAD + RADIUS;
unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
unsigned char* disparImage = disp + X + Y * disp_pitch;
/* if (X < cwidth)
{
unsigned int *minSSDImage_end = minSSDImage + min(ROWSperTHREAD, cheight - Y) * minssd_step;
for(uint *ptr = minSSDImage; ptr != minSSDImage_end; ptr += minssd_step )
*ptr = 0xFFFFFFFF;
}*/
int end_row = min(ROWSperTHREAD, cheight - Y);
int y_tex;
int x_tex = X - RADIUS;
for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
{
y_tex = Y - RADIUS;
InitColSSD(x_tex, y_tex, img_step, left, right, d, col_ssd);
if (col_ssd_extra > 0)
InitColSSD(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);
__syncthreads(); //before MinSSD function
if (X < cwidth - RADIUS && Y < cheight - RADIUS)
{
uint2 minSSD = MinSSD(col_ssd_cache + threadIdx.x, col_ssd);
if (minSSD.x < minSSDImage[0])
{
disparImage[0] = (unsigned char)(d + minSSD.y);
minSSDImage[0] = minSSD.x;
}
}
for(int row = 1; row < end_row; row++)
{
int idx1 = y_tex * img_step + x_tex;
int idx2 = (y_tex + WINSZ) * img_step + x_tex;
__syncthreads();
StepDown(idx1, idx2, left, right, d, col_ssd);
if (col_ssd_extra)
StepDown(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);
y_tex += 1;
__syncthreads(); //before MinSSD function
if (X < cwidth - RADIUS && row < cheight - RADIUS - Y)
{
int idx = row * cminSSD_step;
uint2 minSSD = MinSSD(col_ssd_cache + threadIdx.x, col_ssd);
if (minSSD.x < minSSDImage[idx])
{
disparImage[disp_pitch * row] = (unsigned char)(d + minSSD.y);
minSSDImage[idx] = minSSD.x;
}
}
} // for row loop
} // for d loop
}
}
extern "C" void cv::gpu::impl::stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, DevMem2D& disp, int maxdisp, DevMem2D_<unsigned int>& minSSD_buf)
{
//cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferL1) );
//cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferShared) );
size_t smem_size = (BLOCK_W + N_DISPARITIES * SHARED_MEM_SIZE) * sizeof(unsigned int);
cudaSafeCall( cudaMemset2D(disp.ptr, disp.step, 0, disp.cols, disp.rows) );
cudaSafeCall( cudaMemset2D(minSSD_buf.ptr, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows) );
dim3 grid(1,1,1);
dim3 threads(BLOCK_W, 1, 1);
grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);
cudaSafeCall( cudaMemcpyToSymbol( cwidth, &left.cols, sizeof (left.cols) ) );
cudaSafeCall( cudaMemcpyToSymbol( cheight, &left.rows, sizeof (left.rows) ) );
cudaSafeCall( cudaMemcpyToSymbol( cminSSDImage, &minSSD_buf.ptr, sizeof (minSSD_buf.ptr) ) );
size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
cudaSafeCall( cudaMemcpyToSymbol( cminSSD_step, &minssd_step, sizeof (minssd_step) ) );
device_code::stereoKernel<<<grid, threads, smem_size>>>(left.ptr, right.ptr, left.step, disp.ptr, disp.step, maxdisp);
cudaSafeCall( cudaThreadSynchronize() );
}
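As a quick sanity check of the launch configuration above, the dynamic shared-memory request can be recomputed from the constants defined at the top of this file (a standalone sketch; the values follow directly from the #defines):

// Recomputes smem_size from the constants at the top of this file.
#include <iostream>

int main()
{
    const int block_w = 128;                            // BLOCK_W
    const int radius = 9;                               // RADIUS
    const int n_disparities = 8;                        // N_DISPARITIES
    const int n_dirty_pixels = 2 * radius;              // 18
    const int col_ssd_size = block_w + n_dirty_pixels;  // COL_SSD_SIZE = 146

    // one cache slot per thread plus one SSD column per disparity
    const size_t smem_size =
        (block_w + n_disparities * col_ssd_size) * sizeof(unsigned int);

    std::cout << "smem_size = " << smem_size << " bytes" << std::endl;   // 5184
    return 0;
}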


@@ -41,56 +41,118 @@
//M*/
#include "precomp.hpp"
//#include "opencv2/gpu/stream_access.hpp"
using namespace cv;
using namespace cv::gpu;
cv::gpu::CudaStream::CudaStream() //: impl( (Impl*)fastMalloc(sizeof(Impl)) )
#if !defined (HAVE_CUDA)
void cv::gpu::CudaStream::create() { throw_nogpu(); }
void cv::gpu::CudaStream::release() { throw_nogpu(); }
cv::gpu::CudaStream::CudaStream() : impl(0) { throw_nogpu(); }
cv::gpu::CudaStream::~CudaStream() { throw_nogpu(); }
cv::gpu::CudaStream::CudaStream(const CudaStream& stream) { throw_nogpu(); }
CudaStream& cv::gpu::CudaStream::operator=(const CudaStream& stream) { throw_nogpu(); return *this; }
bool cv::gpu::CudaStream::queryIfComplete() { throw_nogpu(); return true; }
void cv::gpu::CudaStream::waitForCompletion() { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, Mat& dst) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, MatPL& dst) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueUpload(const MatPL& src, GpuMat& dst) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueUpload(const Mat& src, GpuMat& dst) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueCopy(const GpuMat& src, GpuMat& dst) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& src, Scalar val) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& src, Scalar val, const GpuMat& mask) { throw_nogpu(); }
void cv::gpu::CudaStream::enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a, double b) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
#include "opencv2/gpu/stream_accessor.hpp"
struct CudaStream::Impl
{
//cudaSafeCall( cudaStreamCreate( &impl->stream) );
cudaStream_t stream;
int ref_counter;
};
namespace
{
template<class S, class D> void devcopy(const S& src, D& dst, cudaStream_t s, cudaMemcpyKind k)
{
dst.create(src.size(), src.type());
size_t bwidth = src.cols * src.elemSize();
cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, k, s) );
};
}
cv::gpu::CudaStream::~CudaStream()
CV_EXPORTS cudaStream_t cv::gpu::StreamAccessor::getStream(const CudaStream& stream) { return stream.impl->stream; };
void cv::gpu::CudaStream::create()
{
if (impl)
release();
cudaStream_t stream;
cudaSafeCall( cudaStreamCreate( &stream ) );
impl = (CudaStream::Impl*)fastMalloc(sizeof(CudaStream::Impl));
impl->stream = stream;
impl->ref_counter = 1;
}
void cv::gpu::CudaStream::release()
{
if( impl && CV_XADD(&impl->ref_counter, -1) == 1 )
{
cudaSafeCall( cudaStreamDestroy( *(cudaStream_t*)impl ) );
cudaSafeCall( cudaStreamDestroy( impl->stream ) );
cv::fastFree( impl );
}
}
cv::gpu::CudaStream::CudaStream() : impl(0) { create(); }
cv::gpu::CudaStream::~CudaStream() { release(); }
cv::gpu::CudaStream::CudaStream(const CudaStream& stream) : impl(stream.impl)
{
if( impl )
CV_XADD(&impl->ref_counter, 1);
}
CudaStream& cv::gpu::CudaStream::operator=(const CudaStream& stream)
{
if( this != &stream )
{
if( stream.impl )
CV_XADD(&stream.impl->ref_counter, 1);
release();
impl = stream.impl;
}
return *this;
}
bool cv::gpu::CudaStream::queryIfComplete()
{
//cudaError_t err = cudaStreamQuery( *(cudaStream_t*)impl );
cudaError_t err = cudaStreamQuery( impl->stream );
//if (err == cudaSuccess)
// return true;
if (err == cudaErrorNotReady || err == cudaSuccess)
return err == cudaSuccess;
//if (err == cudaErrorNotReady)
// return false;
////cudaErrorInvalidResourceHandle
//cudaSafeCall( err );
return true;
}
void cv::gpu::CudaStream::waitForCompletion()
{
cudaSafeCall( cudaStreamSynchronize( *(cudaStream_t*)impl ) );
cudaSafeCall(err);
}
void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, Mat& dst)
{
// cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost,
}
void cv::gpu::CudaStream::enqueueUpload(const Mat& src, GpuMat& dst)
{
CV_Assert(!"Not implemented");
}
void cv::gpu::CudaStream::enqueueCopy(const GpuMat& src, GpuMat& dst)
{
CV_Assert(!"Not implemented");
void cv::gpu::CudaStream::waitForCompletion() { cudaSafeCall( cudaStreamSynchronize( impl->stream ) ); }
void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, Mat& dst)
{
// dst must already match src: otherwise devcopy() reallocates it, and the new buffer would not be page-locked
CV_Assert(src.cols == dst.cols && src.rows == dst.rows && src.type() == dst.type());
devcopy(src, dst, impl->stream, cudaMemcpyDeviceToHost);
}
void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, MatPL& dst) { devcopy(src, dst, impl->stream, cudaMemcpyDeviceToHost); }
void cv::gpu::CudaStream::enqueueUpload(const MatPL& src, GpuMat& dst){ devcopy(src, dst, impl->stream, cudaMemcpyHostToDevice); }
void cv::gpu::CudaStream::enqueueUpload(const Mat& src, GpuMat& dst) { devcopy(src, dst, impl->stream, cudaMemcpyHostToDevice); }
void cv::gpu::CudaStream::enqueueCopy(const GpuMat& src, GpuMat& dst) { devcopy(src, dst, impl->stream, cudaMemcpyDeviceToDevice); }
void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& src, Scalar val)
{
@@ -102,11 +164,10 @@ void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& src, Scalar val, const Gpu
CV_Assert(!"Not implemented");
}
void cv::gpu::CudaStream::enqueueConvert(const GpuMat& src, GpuMat& dst, int type)
void cv::gpu::CudaStream::enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a, double b)
{
CV_Assert(!"Not implemented");
}
//struct cudaStream_t& cv::gpu::CudaStream::getStream() { return stream; }
#endif /* !defined (HAVE_CUDA) */
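A minimal usage sketch of the refactored asynchronous API in a CUDA-enabled build; the helper name and the assumption that both host buffers are MatPL (page-locked) are illustrative:

// Hypothetical round trip through a CudaStream: upload, (user work), download.
#include "opencv2/gpu/gpu.hpp"

void asyncRoundTrip(const cv::gpu::MatPL& pinned_src, cv::gpu::MatPL& pinned_dst)
{
    cv::gpu::CudaStream stream;                 // the constructor calls create()
    cv::gpu::GpuMat d_img;

    stream.enqueueUpload(pinned_src, d_img);    // page-locked host -> device, asynchronous
    // ... enqueue GPU work on the same stream here ...
    stream.enqueueDownload(d_img, pinned_dst);  // device -> page-locked host, asynchronous

    if (!stream.queryIfComplete())              // non-blocking status check
        stream.waitForCompletion();             // blocks until all queued work finishes
}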


@@ -45,15 +45,18 @@
using namespace cv;
using namespace cv::gpu;
#ifndef HAVE_CUDA
#if !defined (HAVE_CUDA)
CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
CV_EXPORTS string cv::gpu::getDeviceName(int /*device*/) { cudaSafeCall(0); return 0; }
CV_EXPORTS void cv::gpu::setDevice(int /*device*/) { cudaSafeCall(0); }
CV_EXPORTS void cv::gpu::getComputeCapability(int /*device*/, int* /*major*/, int* /*minor*/) { cudaSafeCall(0); }
CV_EXPORTS int cv::gpu::getNumberOfSMs(int /*device*/) { cudaSafeCall(0); return 0; }
CV_EXPORTS string cv::gpu::getDeviceName(int /*device*/) { throw_nogpu(); return 0; }
CV_EXPORTS void cv::gpu::setDevice(int /*device*/) { throw_nogpu(); }
CV_EXPORTS int cv::gpu::getDevice() { throw_nogpu(); return 0; }
CV_EXPORTS void cv::gpu::getComputeCapability(int /*device*/, int* /*major*/, int* /*minor*/) { throw_nogpu(); }
CV_EXPORTS int cv::gpu::getNumberOfSMs(int /*device*/) { throw_nogpu(); return 0; }
#else
#else /* !defined (HAVE_CUDA) */
CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount()
{
@@ -73,6 +76,12 @@ CV_EXPORTS void cv::gpu::setDevice(int device)
{
cudaSafeCall( cudaSetDevice( device ) );
}
CV_EXPORTS int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
return device;
}
CV_EXPORTS void cv::gpu::getComputeCapability(int device, int* major, int* minor)
{
@@ -90,4 +99,5 @@ CV_EXPORTS int cv::gpu::getNumberOfSMs(int device)
return prop.multiProcessorCount;
}
#endif
#endif
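The device-query helpers added in this file combine naturally into a small device report; a sketch (output formatting is illustrative):

#include <iostream>
#include "opencv2/gpu/gpu.hpp"

int main()
{
    using namespace cv::gpu;

    if (getCudaEnabledDeviceCount() == 0)
    {
        std::cout << "No CUDA-capable device found" << std::endl;
        return 0;
    }

    int device = getDevice();                   // currently selected device
    int major = 0, minor = 0;
    getComputeCapability(device, &major, &minor);

    std::cout << "Device " << device << ": " << getDeviceName(device)
              << ", compute capability " << major << "." << minor
              << ", " << getNumberOfSMs(device) << " multiprocessors" << std::endl;
    return 0;
}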


@@ -45,23 +45,53 @@
using namespace cv;
using namespace cv::gpu;
////////////////////////////////////////////////////////////////////////
//////////////////////////////// GpuMat ////////////////////////////////
////////////////////////////////////////////////////////////////////////
void GpuMat::upload(const Mat& m)
#if !defined (HAVE_CUDA)
namespace cv
{
namespace gpu
{
void GpuMat::upload(const Mat& /*m*/) { throw_nogpu(); }
void GpuMat::download(cv::Mat& /*m*/) const { throw_nogpu(); }
void GpuMat::copyTo( GpuMat& /*m*/ ) const { throw_nogpu(); }
void GpuMat::copyTo( GpuMat& /*m*/, const GpuMat&/* mask */) const { throw_nogpu(); }
void GpuMat::convertTo( GpuMat& /*m*/, int /*rtype*/, double /*alpha*/, double /*beta*/ ) const { throw_nogpu(); }
GpuMat& GpuMat::operator = (const Scalar& /*s*/) { throw_nogpu(); return *this; }
GpuMat& GpuMat::setTo(const Scalar& /*s*/, const GpuMat& /*mask*/) { throw_nogpu(); return *this; }
GpuMat GpuMat::reshape(int /*new_cn*/, int /*new_rows*/) const { throw_nogpu(); return GpuMat(); }
void GpuMat::create(int /*_rows*/, int /*_cols*/, int /*_type*/) { throw_nogpu(); }
void GpuMat::release() { throw_nogpu(); }
void MatPL::create(int /*_rows*/, int /*_cols*/, int /*_type*/) { throw_nogpu(); }
void MatPL::release() { throw_nogpu(); }
}
}
#else /* !defined (HAVE_CUDA) */
void cv::gpu::GpuMat::upload(const Mat& m)
{
CV_DbgAssert(!m.empty());
create(m.size(), m.type());
cudaSafeCall( cudaMemcpy2D(data, step, m.data, m.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
}
void GpuMat::download(cv::Mat& m) const
void cv::gpu::GpuMat::download(cv::Mat& m) const
{
CV_DbgAssert(!this->empty());
m.create(size(), type());
cudaSafeCall( cudaMemcpy2D(m.data, m.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
}
void GpuMat::copyTo( GpuMat& m ) const
void cv::gpu::GpuMat::copyTo( GpuMat& m ) const
{
CV_DbgAssert(!this->empty());
m.create(size(), type());
@@ -69,45 +99,30 @@ void GpuMat::copyTo( GpuMat& m ) const
cudaSafeCall( cudaThreadSynchronize() );
}
void GpuMat::copyTo( GpuMat& /*m*/, const GpuMat&/* mask */) const
void cv::gpu::GpuMat::copyTo( GpuMat& /*m*/, const GpuMat&/* mask */) const
{
CV_Assert(!"Not implemented");
}
void cv::gpu::GpuMat::convertTo( GpuMat& /*m*/, int /*rtype*/, double /*alpha*/, double /*beta*/ ) const
{
CV_Assert(!"Not implemented");
}
void GpuMat::convertTo( GpuMat& /*m*/, int /*rtype*/, double /*alpha*/, double /*beta*/ ) const
GpuMat& cv::gpu::GpuMat::operator = (const Scalar& /*s*/)
{
CV_Assert(!"Not implemented");
}
GpuMat& GpuMat::operator = (const Scalar& s)
{
cv::gpu::impl::set_to_without_mask(*this, s.val, this->depth(), this->channels());
CV_Assert(!"Not implemented");
return *this;
}
GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
GpuMat& cv::gpu::GpuMat::setTo(const Scalar& /*s*/, const GpuMat& /*mask*/)
{
CV_Assert(mask.type() == CV_32F);
CV_DbgAssert(!this->empty());
this->channels();
this->depth();
if (mask.empty())
{
cv::gpu::impl::set_to_without_mask(*this, s.val, this->depth(), this->channels());
}
else
{
cv::gpu::impl::set_to_with_mask(*this, s.val, mask, this->depth(), this->channels());
}
CV_Assert(!"Not implemented");
return *this;
}
GpuMat GpuMat::reshape(int new_cn, int new_rows) const
GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
{
GpuMat hdr = *this;
@@ -148,7 +163,7 @@ GpuMat GpuMat::reshape(int new_cn, int new_rows) const
return hdr;
}
void GpuMat::create(int _rows, int _cols, int _type)
void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
{
_type &= TYPE_MASK;
if( rows == _rows && cols == _cols && type() == _type && data )
@@ -162,7 +177,7 @@ void GpuMat::create(int _rows, int _cols, int _type)
rows = _rows;
cols = _cols;
size_t esz = elemSize();
size_t esz = elemSize();
void *dev_ptr;
cudaSafeCall( cudaMallocPitch(&dev_ptr, &step, esz * cols, rows) );
@@ -174,19 +189,19 @@ void GpuMat::create(int _rows, int _cols, int _type)
size_t nettosize = (size_t)_nettosize;
datastart = data = (uchar*)dev_ptr;
dataend = data + nettosize;
dataend = data + nettosize;
refcount = (int*)fastMalloc(sizeof(*refcount));
*refcount = 1;
}
}
void GpuMat::release()
void cv::gpu::GpuMat::release()
{
if( refcount && CV_XADD(refcount, -1) == 1 )
{
fastFree(refcount);
cudaSafeCall( cudaFree(datastart) );
cudaSafeCall( cudaFree(datastart) );
}
data = datastart = dataend = 0;
step = rows = cols = 0;
@@ -194,7 +209,52 @@ void GpuMat::release()
}
///////////////////////////////////////////////////////////////////////
//////////////////////////////// MatPL ////////////////////////////////
///////////////////////////////////////////////////////////////////////
void cv::gpu::MatPL::create(int _rows, int _cols, int _type)
{
_type &= TYPE_MASK;
if( rows == _rows && cols == _cols && type() == _type && data )
return;
if( data )
release();
CV_DbgAssert( _rows >= 0 && _cols >= 0 );
if( _rows > 0 && _cols > 0 )
{
flags = Mat::MAGIC_VAL + Mat::CONTINUOUS_FLAG + _type;
rows = _rows;
cols = _cols;
step = elemSize()*cols;
int64 _nettosize = (int64)step*rows;
size_t nettosize = (size_t)_nettosize;
if( _nettosize != (int64)nettosize )
CV_Error(CV_StsNoMem, "Too big buffer is allocated");
size_t datasize = alignSize(nettosize, (int)sizeof(*refcount));
//datastart = data = (uchar*)fastMalloc(datasize + sizeof(*refcount));
void *ptr;
cudaSafeCall( cudaHostAlloc( &ptr, datasize, cudaHostAllocDefault) );
datastart = data = (uchar*)ptr;
dataend = data + nettosize;
refcount = (int*)cv::fastMalloc(sizeof(*refcount));
*refcount = 1;
}
}
void cv::gpu::MatPL::release()
{
if( refcount && CV_XADD(refcount, -1) == 1 )
{
cudaSafeCall( cudaFreeHost(datastart ) );
fastFree(refcount);
}
data = datastart = dataend = 0;
step = rows = cols = 0;
refcount = 0;
}
#endif /* !defined (HAVE_CUDA) */
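A small sketch of the synchronous host/device copies implemented above; the input Mat is assumed to be non-empty:

#include "opencv2/gpu/gpu.hpp"

void roundTrip(const cv::Mat& host_img)
{
    cv::gpu::GpuMat d_img;
    d_img.upload(host_img);       // cudaMemcpy2D, host -> device (allocates d_img)

    cv::gpu::GpuMat d_copy;
    d_img.copyTo(d_copy);         // device -> device copy, then cudaThreadSynchronize

    cv::Mat result;
    d_copy.download(result);      // cudaMemcpy2D, device -> host
}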


@@ -44,7 +44,13 @@
/* End of file. */
extern "C" void cv::gpu::error( const char *error_string, const char *file, const int line, const char *func)
{
cv::error( cv::Exception(CV_GpuApiCallError, error_string, func, file, line) );
}
namespace cv
{
namespace gpu
{
extern "C" void error(const char *error_string, const char *file, const int line, const char *func)
{
cv::error( cv::Exception(CV_GpuApiCallError, error_string, func, file, line) );
}
}
}
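Because error() reports failures through cv::error(), a CUDA failure surfaces as an ordinary cv::Exception; a brief sketch of catching one (the deliberately oversized allocation is only there to provoke an error):

#include <iostream>
#include "opencv2/gpu/gpu.hpp"

void guardedAllocation()
{
    try
    {
        cv::gpu::GpuMat huge;
        huge.create(100000, 100000, CV_8UC1);   // almost certainly exceeds device memory
    }
    catch (const cv::Exception& e)              // cudaSafeCall -> cv::gpu::error -> cv::Exception
    {
        std::cerr << "GPU call failed: " << e.what() << std::endl;
    }
}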


@@ -53,30 +53,17 @@
#include <iostream>
#include "opencv2/gpu/gpu.hpp"
#include "cuda_shared.hpp"
#ifndef HAVE_CUDA
#define cudaSafeCall(expr) CV_Error(CV_GpuNotFound, "The library is compiled with no GPU support")
#define cudaCallerSafeCall(expr) CV_Error(CV_GpuNotFound, "The library is compiled with no GPU support")
#else /* HAVE_CUDA */
#if _MSC_VER >= 1200
#pragma warning (disable : 4100 4211 4201 4408)
#endif
#include "cuda_runtime_api.h"
#ifdef __GNUC__
#define cudaSafeCall(expr) { cudaError_t err = expr; if(cudaSuccess != err) cv::gpu::error(cudaGetErrorString(err), __FILE__, __LINE__, __func__); }
#else
#define cudaSafeCall(expr) { cudaError_t err = expr; if(cudaSuccess != err) cv::gpu::error(cudaGetErrorString(err), __FILE__, __LINE__); }
#endif
#define cudaCallerSafeCall(expr) expr;
#endif /* HAVE_CUDA */
#if defined(HAVE_CUDA)
#endif
#include "cuda_shared.hpp"
#include "cuda_runtime_api.h"
#else /* defined(HAVE_CUDA) */
static inline void throw_nogpu() { CV_Error(CV_GpuNotFound, "The library is compiled with no GPU support"); }
#endif /* defined(HAVE_CUDA) */
#endif /* __OPENCV_PRECOMP_H__ */
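The throw_nogpu()/HAVE_CUDA split defined here is the pattern the rest of this commit follows for every public entry point; a sketch with a hypothetical function name:

// someNewGpuFunction is hypothetical; in a real build it would be declared in
// opencv2/gpu/gpu.hpp. The point is the two-branch pattern used throughout this commit.
namespace cv { namespace gpu { void someNewGpuFunction(); } }

#if !defined (HAVE_CUDA)

void cv::gpu::someNewGpuFunction() { throw_nogpu(); }

#else /* !defined (HAVE_CUDA) */

void cv::gpu::someNewGpuFunction()
{
    cudaSafeCall( cudaThreadSynchronize() );   // real CUDA work goes here
}

#endif /* !defined (HAVE_CUDA) */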


@@ -44,15 +44,45 @@
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA)
cv::gpu::StereoBM_GPU::StereoBM_GPU() { throw_nogpu(); }
cv::gpu::StereoBM_GPU::StereoBM_GPU(int preset_, int ndisparities_) { throw_nogpu(); }
bool cv::gpu::StereoBM_GPU::checkIfGpuCallReasonable() { throw_nogpu(); return false; }
void cv::gpu::StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity) { throw_nogpu(); }
void cv::gpu::StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity, const CudaStream& stream) { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
StereoBM_GPU::StereoBM_GPU() : preset(BASIC_PRESET), ndisp(64) {}
StereoBM_GPU::StereoBM_GPU(int preset_, int ndisparities_) : preset(preset_), ndisp(ndisparities_)
cv::gpu::StereoBM_GPU::StereoBM_GPU() : preset(BASIC_PRESET), ndisp(64) {}
cv::gpu::StereoBM_GPU::StereoBM_GPU(int preset_, int ndisparities_) : preset(preset_), ndisp(ndisparities_)
{
const int max_supported_ndisp = 1 << (sizeof(unsigned char) * 8);
CV_Assert(ndisp <= max_supported_ndisp);
CV_Assert(ndisp % 8 == 0);
}
bool cv::gpu::StereoBM_GPU::checkIfGpuCallReasonable()
{
if (0 == getCudaEnabledDeviceCount())
return false;
int device = getDevice();
int minor, major;
getComputeCapability(device, &major, &minor);
int numSM = getNumberOfSMs(device);
if (major > 1 || numSM > 16)
return true;
return false;
}
void StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity) const
void cv::gpu::StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity)
{
CV_DbgAssert(left.rows == right.rows && left.cols == right.cols);
CV_DbgAssert(left.type() == CV_8UC1);
@@ -67,6 +97,13 @@ void StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat&
}
DevMem2D disp = disparity;
DevMem2D_<uint> mssd = minSSD;
cudaCallerSafeCall( impl::stereoBM_GPU(left, right, disp, ndisp, mssd) );
DevMem2D_<unsigned int> mssd = minSSD;
impl::stereoBM_GPU(left, right, disp, ndisp, mssd);
}
void cv::gpu::StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity, const CudaStream& stream)
{
CV_Assert(!"Not implemented");
}
#endif /* !defined (HAVE_CUDA) */
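A minimal sketch of calling the refactored StereoBM_GPU wrapper; the image file names are illustrative and the inputs must be 8-bit single-channel:

#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    cv::Mat left  = cv::imread("left.png",  0);   // grayscale (CV_8UC1)
    cv::Mat right = cv::imread("right.png", 0);

    cv::gpu::StereoBM_GPU bm(cv::gpu::StereoBM_GPU::BASIC_PRESET, 64); // ndisp must be a multiple of 8

    if (!bm.checkIfGpuCallReasonable())
        return 0;   // weak GPU: the CPU StereoBM is likely the better choice

    cv::gpu::GpuMat d_left, d_right, d_disp;
    d_left.upload(left);
    d_right.upload(right);

    bm(d_left, d_right, d_disp);   // runs impl::stereoBM_GPU on the device

    cv::Mat disp;
    d_disp.download(disp);
    return 0;
}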