added downsample function into gpu module, refactored it a little bit, added guard for CUDA-related include in cascadeclassifier_nvidia_api.cpp

Alexey Spizhevoy
2011-04-08 08:04:56 +00:00
parent 6cec5ff552
commit 97282d8ff8
8 changed files with 127 additions and 14 deletions


@@ -63,8 +63,8 @@ namespace cv { namespace gpu
                                    const PtrStepf weights1, const PtrStepf weights2, PtrStep result);
 }}

-void cv::gpu::blendLinear(const GpuMat& img1, const GpuMat& img2,
-                          const GpuMat& weights1, const GpuMat& weights2, GpuMat& result)
+void cv::gpu::blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
+                          GpuMat& result)
 {
     CV_Assert(img1.size() == img2.size());
     CV_Assert(img1.type() == img2.type());
@@ -94,7 +94,7 @@ void cv::gpu::blendLinear(const GpuMat& img1, const GpuMat& img2,
                                  (const PtrStepf)weights1, (const PtrStepf)weights2, (PtrStepf)result);
         break;
     default:
-        CV_Error(CV_StsBadArg, "unsupported image depth in linear blending method");
+        CV_Error(CV_StsUnsupportedFormat, "bad image depth in linear blending function");
     }
 }
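For context, blendLinear computes a per-pixel weighted sum, result = img1*weights1 + img2*weights2, where the weight maps are single-channel CV_32F images of the same size as the inputs. A minimal usage sketch (not part of this commit; the helper name blendExample and the 50/50 weights are made up for illustration):

#include <opencv2/gpu/gpu.hpp>

void blendExample(const cv::gpu::GpuMat& img1, const cv::gpu::GpuMat& img2)
{
    // Equal 50/50 blend: both weight maps are CV_32F and match the image size.
    cv::gpu::GpuMat w1(img1.size(), CV_32F, cv::Scalar::all(0.5));
    cv::gpu::GpuMat w2(img2.size(), CV_32F, cv::Scalar::all(0.5));

    cv::gpu::GpuMat result;
    cv::gpu::blendLinear(img1, img2, w1, w2, result);  // result = img1*w1 + img2*w2 per pixel
}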


@@ -73,7 +73,7 @@ namespace cv { namespace gpu
        dim3 threads(16, 16);
        dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));

-       blendLinearKernel<T><<<grid, threads>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);
+       blendLinearKernel<<<grid, threads>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);

        cudaSafeCall(cudaThreadSynchronize());
    }
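The launch configuration above sizes the grid with divUp, an integer ceiling division so that partially filled blocks still cover the image border. The gpu module ships its own definition; the snippet below is only an illustrative sketch of what such a helper typically looks like:

// Integer ceiling division: smallest number of 'grain'-sized blocks covering 'total' items.
static inline int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;
}

// e.g. a 640x480 single-channel image with 16x16 blocks launches a
// divUp(640, 16) x divUp(480, 16) = 40 x 30 grid.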


@@ -883,5 +883,32 @@ namespace cv { namespace gpu { namespace imgproc
        cudaSafeCall(cudaThreadSynchronize());
    }

+   /////////////////////////////////////////////////////////////////////////
+   // downsample
+
+   template <typename T>
+   __global__ void downsampleKernel(const PtrStep_<T> src, int rows, int cols, int k, PtrStep_<T> dst)
+   {
+       int x = blockIdx.x * blockDim.x + threadIdx.x;
+       int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+       if (x < cols && y < rows)
+           dst.ptr(y)[x] = src.ptr(y * k)[x * k];
+   }
+
+   template <typename T>
+   void downsampleCaller(const PtrStep_<T> src, int rows, int cols, int k, PtrStep_<T> dst)
+   {
+       dim3 threads(16, 16);
+       dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
+
+       downsampleKernel<<<grid, threads>>>(src, rows, cols, k, dst);
+       cudaSafeCall(cudaThreadSynchronize());
+   }
+
+   template void downsampleCaller(const PtrStep src, int rows, int cols, int k, PtrStep dst);
+   template void downsampleCaller(const PtrStepf src, int rows, int cols, int k, PtrStepf dst);
+
 }}}
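The kernel decimates by keeping every k-th pixel in each dimension: dst(y, x) = src(y*k, x*k), with one thread per output pixel. A hypothetical CPU reference of the same mapping, handy for checking the kernel's output in a test (downsampleRef is not part of this commit):

#include <opencv2/core/core.hpp>

template <typename T>
void downsampleRef(const cv::Mat& src, cv::Mat& dst, int k)
{
    // Same output geometry as the GPU version: ceil(rows/k) x ceil(cols/k).
    dst.create((src.rows + k - 1) / k, (src.cols + k - 1) / k, src.type());

    for (int y = 0; y < dst.rows; ++y)
        for (int x = 0; x < dst.cols; ++x)
            dst.at<T>(y, x) = src.at<T>(y * k, x * k);
}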


@@ -82,6 +82,7 @@ void cv::gpu::dft(const GpuMat&, GpuMat&, Size, int) { throw_nogpu(); }
 void cv::gpu::ConvolveBuf::create(Size, Size) { throw_nogpu(); }
 void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool) { throw_nogpu(); }
 void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&) { throw_nogpu(); }
+void cv::gpu::downsample(const GpuMat&, GpuMat&, int) { throw_nogpu(); }

 #else /* !defined (HAVE_CUDA) */
@@ -1355,7 +1356,33 @@ void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
     cufftSafeCall(cufftDestroy(planC2R));
 }

+////////////////////////////////////////////////////////////////////
+// downsample
+
+namespace cv { namespace gpu { namespace imgproc
+{
+    template <typename T>
+    void downsampleCaller(const PtrStep_<T> src, int rows, int cols, int k, PtrStep_<T> dst);
+}}}
+
+void cv::gpu::downsample(const GpuMat& src, GpuMat& dst, int k)
+{
+    CV_Assert(src.channels() == 1);
+
+    dst.create((src.rows + k - 1) / k, (src.cols + k - 1) / k, src.type());
+
+    switch (src.depth())
+    {
+    case CV_8U:
+        imgproc::downsampleCaller((const PtrStep)src, dst.rows, dst.cols, k, (PtrStep)dst);
+        break;
+    case CV_32F:
+        imgproc::downsampleCaller((const PtrStepf)src, dst.rows, dst.cols, k, (PtrStepf)dst);
+        break;
+    default:
+        CV_Error(CV_StsUnsupportedFormat, "bad image depth in downsample function");
+    }
+}
+
 #endif /* !defined (HAVE_CUDA) */
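A minimal end-to-end usage sketch of the new host API, assuming a CUDA-capable device; the file names and the factor of 2 are placeholders, not part of this commit:

#include <opencv2/gpu/gpu.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat host = cv::imread("input.png", 0);  // single-channel CV_8U, as the assertion requires
    cv::gpu::GpuMat src(host), dst;

    cv::gpu::downsample(src, dst, 2);           // keep every 2nd pixel in each dimension

    cv::Mat result;
    dst.download(result);                       // copy the decimated image back to host memory
    cv::imwrite("input_small.png", result);
    return 0;
}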