refactored gpu module

Alexey Spizhevoy
2010-12-10 10:23:32 +00:00
parent 97484089c5
commit d557c800a7
6 changed files with 141 additions and 75 deletions

View File

@@ -55,7 +55,8 @@ texture<unsigned char, 2> imageTex_8U;
 texture<unsigned char, 2> templTex_8U;
 
-__global__ void matchTemplateNaiveKernel_8U_SQDIFF(int w, int h, DevMem2Df result)
+__global__ void matchTemplateNaiveKernel_8U_SQDIFF(int w, int h,
+                                                   DevMem2Df result)
 {
     int x = blockDim.x * blockIdx.x + threadIdx.x;
     int y = blockDim.y * blockIdx.y + threadIdx.y;
@@ -80,11 +81,12 @@ __global__ void matchTemplateNaiveKernel_8U_SQDIFF(int w, int h, DevMem2Df resul
 }
 
-void matchTemplateNaive_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
+void matchTemplateNaive_8U_SQDIFF(const DevMem2D image, const DevMem2D templ,
+                                  DevMem2Df result)
 {
     dim3 threads(32, 8);
     dim3 grid(divUp(image.cols - templ.cols + 1, threads.x),
-                divUp(image.rows - templ.rows + 1, threads.y));
+              divUp(image.rows - templ.rows + 1, threads.y));
 
     cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
     cudaBindTexture2D(0, imageTex_8U, image.data, desc, image.cols, image.rows, image.step);
@@ -103,7 +105,8 @@ texture<float, 2> imageTex_32F;
 texture<float, 2> templTex_32F;
 
-__global__ void matchTemplateNaiveKernel_32F_SQDIFF(int w, int h, DevMem2Df result)
+__global__ void matchTemplateNaiveKernel_32F_SQDIFF(int w, int h,
+                                                    DevMem2Df result)
 {
     int x = blockDim.x * blockIdx.x + threadIdx.x;
     int y = blockDim.y * blockIdx.y + threadIdx.y;
@@ -128,11 +131,12 @@ __global__ void matchTemplateNaiveKernel_32F_SQDIFF(int w, int h, DevMem2Df resu
 }
 
-void matchTemplateNaive_32F_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
+void matchTemplateNaive_32F_SQDIFF(const DevMem2D image, const DevMem2D templ,
+                                   DevMem2Df result)
 {
     dim3 threads(32, 8);
     dim3 grid(divUp(image.cols - templ.cols + 1, threads.x),
-                divUp(image.rows - templ.rows + 1, threads.y));
+              divUp(image.rows - templ.rows + 1, threads.y));
 
     cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
     cudaBindTexture2D(0, imageTex_32F, image.data, desc, image.cols, image.rows, image.step);
@@ -147,8 +151,9 @@ void matchTemplateNaive_32F_SQDIFF(const DevMem2D image, const DevMem2D templ, D
 }
 
-__global__ void multiplyAndNormalizeSpectsKernel(int n, float scale, const cufftComplex* a,
-                                                 const cufftComplex* b, cufftComplex* c)
+__global__ void multiplyAndNormalizeSpectsKernel(
+        int n, float scale, const cufftComplex* a,
+        const cufftComplex* b, cufftComplex* c)
 {
     int x = blockIdx.x * blockDim.x + threadIdx.x;
     if (x < n)
@@ -159,8 +164,8 @@ __global__ void multiplyAndNormalizeSpectsKernel(int n, float scale, const cufft
 }
 
-void multiplyAndNormalizeSpects(int n, float scale, const cufftComplex* a, const cufftComplex* b,
-                                cufftComplex* c)
+void multiplyAndNormalizeSpects(int n, float scale, const cufftComplex* a,
+                                const cufftComplex* b, cufftComplex* c)
 {
     dim3 threads(256);
     dim3 grid(divUp(n, threads.x));
@@ -169,4 +174,35 @@ void multiplyAndNormalizeSpects(int n, float scale, const cufftComplex* a, const
 }
 
+
+__global__ void matchTemplatePreparedKernel_8U_SQDIFF(
+        int w, int h, const PtrStepf image_sumsq, float templ_sumsq,
+        DevMem2Df result)
+{
+    const int x = blockIdx.x * blockDim.x + threadIdx.x;
+    const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+    if (x < result.cols && y < result.rows)
+    {
+        float image_sq = image_sumsq.ptr(y + h)[x + w]
+                         - image_sumsq.ptr(y)[x + w]
+                         - image_sumsq.ptr(y + h)[x]
+                         + image_sumsq.ptr(y)[x];
+        float ccorr = result.ptr(y)[x];
+        result.ptr(y)[x] = image_sq - 2.f * ccorr + templ_sumsq;
+    }
+}
+
+
+void matchTemplatePrepared_8U_SQDIFF(
+        int w, int h, const DevMem2Df image_sumsq, float templ_sumsq,
+        DevMem2Df result)
+{
+    dim3 threads(32, 8);
+    dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
+    matchTemplatePreparedKernel_8U_SQDIFF<<<grid, threads>>>(
+            w, h, image_sumsq, templ_sumsq, result);
+    cudaSafeCall(cudaThreadSynchronize());
+}
 }}}
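
Note on the prepared SQDIFF path added above: it rests on expanding the squared difference over each template-sized window, sum((I - T)^2) = sum(I^2) - 2*sum(I*T) + sum(T^2). Here sum(I*T) is the cross-correlation already stored in result, sum(T^2) is templ_sumsq, and sum(I^2) comes from the four-corner lookup into the squared integral image image_sumsq, which is exactly what matchTemplatePreparedKernel_8U_SQDIFF evaluates per output pixel. A minimal host-side sketch of that identity for a single window follows; the names image, templ, w and h are illustrative only and are not part of this patch.

// Hypothetical host-side check of the identity used by
// matchTemplatePreparedKernel_8U_SQDIFF for one w x h window:
//   sum((I - T)^2) = sum(I^2) - 2 * sum(I * T) + sum(T^2)
#include <cstdio>
#include <vector>

int main()
{
    const int w = 3, h = 2;                            // template size (illustrative)
    std::vector<float> image = { 1, 2, 3, 4, 5, 6 };   // one w x h image window
    std::vector<float> templ = { 6, 5, 4, 3, 2, 1 };   // template of the same size

    float sqdiff = 0.f, image_sq = 0.f, ccorr = 0.f, templ_sq = 0.f;
    for (int i = 0; i < w * h; ++i)
    {
        float d = image[i] - templ[i];
        sqdiff   += d * d;                 // direct SQDIFF score
        image_sq += image[i] * image[i];   // sum of image squares (integral image role)
        ccorr    += image[i] * templ[i];   // cross-correlation term
        templ_sq += templ[i] * templ[i];   // sum of template squares
    }

    // Same formula the kernel applies per output pixel.
    float prepared = image_sq - 2.f * ccorr + templ_sq;
    printf("direct = %f, prepared = %f\n", sqdiff, prepared);
    return 0;
}

Splitting the work this way means the expensive sum(I*T) term can come from a single FFT-based cross-correlation (the multiplyAndNormalizeSpects path above), while the SQDIFF score is then recovered per pixel with a handful of additions.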

View File

@@ -57,6 +57,26 @@ using namespace cv::gpu::device;
 namespace cv { namespace gpu { namespace mathfunc
 {
 
+    template <int size, typename T>
+    __device__ void sum_in_smem(volatile T* data, const unsigned int tid)
+    {
+        T sum = data[tid];
+
+        if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
+        if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
+        if (size >= 128) { if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads(); }
+
+        if (tid < 32)
+        {
+            if (size >= 64) data[tid] = sum = sum + data[tid + 32];
+            if (size >= 32) data[tid] = sum = sum + data[tid + 16];
+            if (size >= 16) data[tid] = sum = sum + data[tid + 8];
+            if (size >= 8) data[tid] = sum = sum + data[tid + 4];
+            if (size >= 4) data[tid] = sum = sum + data[tid + 2];
+            if (size >= 2) data[tid] = sum = sum + data[tid + 1];
+        }
+    }
+
     struct Nothing
     {
         static __device__ void calc(int, int, float, float, float*, size_t, float)
@@ -1103,27 +1123,6 @@ namespace cv { namespace gpu { namespace mathfunc
     }
 
-
-    template <int size, typename T>
-    __device__ void sum_is_smem(volatile T* data, const unsigned int tid)
-    {
-        T sum = data[tid];
-
-        if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
-        if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
-        if (size >= 128) { if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads(); }
-
-        if (tid < 32)
-        {
-            if (size >= 64) data[tid] = sum = sum + data[tid + 32];
-            if (size >= 32) data[tid] = sum = sum + data[tid + 16];
-            if (size >= 16) data[tid] = sum = sum + data[tid + 8];
-            if (size >= 8) data[tid] = sum = sum + data[tid + 4];
-            if (size >= 4) data[tid] = sum = sum + data[tid + 2];
-            if (size >= 2) data[tid] = sum = sum + data[tid + 1];
-        }
-    }
-
     template <int nthreads, typename T>
     __global__ void count_non_zero_kernel(const DevMem2D src, volatile unsigned int* count)
     {
@@ -1144,7 +1143,7 @@ namespace cv { namespace gpu { namespace mathfunc
         scount[tid] = cnt;
         __syncthreads();
 
-        sum_is_smem<nthreads, unsigned int>(scount, tid);
+        sum_in_smem<nthreads, unsigned int>(scount, tid);
 
 #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
         __shared__ bool is_last;
@@ -1165,7 +1164,7 @@ namespace cv { namespace gpu { namespace mathfunc
         scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
         __syncthreads();
 
-        sum_is_smem<nthreads, unsigned int>(scount, tid);
+        sum_in_smem<nthreads, unsigned int>(scount, tid);
 
         if (tid == 0)
         {
@@ -1213,7 +1212,7 @@ namespace cv { namespace gpu { namespace mathfunc
         unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
 
         scount[tid] = tid < size ? count[tid] : 0;
-        sum_is_smem<nthreads, unsigned int>(scount, tid);
+        sum_in_smem<nthreads, unsigned int>(scount, tid);
 
         if (tid == 0)
         {
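
Note on sum_in_smem (and the sum_is_smem copy it replaces): it is a standard block-wide tree reduction in shared memory. Every thread deposits a partial value into an array sized to the block, and after the call element 0 holds the block total. A minimal sketch of the calling pattern follows; block_sum_kernel and partial_sums are made-up names for illustration, and the sketch assumes the sum_in_smem definition from this patch is in scope.

// Illustrative use of sum_in_smem: per-block sum of a float array.
template <int nthreads>
__global__ void block_sum_kernel(const float* data, int n, float* partial_sums)
{
    __shared__ float smem[nthreads];
    const unsigned int tid = threadIdx.x;
    const int idx = blockIdx.x * nthreads + tid;

    // Each thread contributes one element (or 0 past the end of the input).
    smem[tid] = idx < n ? data[idx] : 0.f;
    __syncthreads();

    // Tree reduction in shared memory; afterwards smem[0] holds the block total.
    sum_in_smem<nthreads, float>(smem, tid);

    if (tid == 0)
        partial_sums[blockIdx.x] = smem[0];
}

Launched, for example, as block_sum_kernel<256><<<grid, 256>>>(...), with a second pass (or the atomic last-block scheme used by count_non_zero_kernel) to combine the per-block results.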