add extra checks to data_step_down to prevent out-of-border access

Vladislav Vinogradov 2015-09-10 10:05:25 +03:00
parent f903192c17
commit 3ef067cc65
2 changed files with 12 additions and 12 deletions
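The reads could step out of bounds because the destination level can be the rounded-up half of the source level: when src_cols (or src_rows) is odd, 2*x+1 (or 2*y+1) equals the source dimension for the last destination column (or row). The fix clamps every read with ::min against src_cols-1 and src_rows-1 and threads src_cols through the call chain. A minimal, self-contained sketch of the same clamped 2x2 read, using a hypothetical kernel name and plain pointers instead of OpenCV's PtrStep:

#include <cuda_runtime.h>

// Illustrative only: sums each 2x2 source block into one destination cell,
// clamping the +1 offsets so an odd-sized source is never read past its border.
__global__ void step_down_clamped(int dst_cols, int dst_rows,
                                  int src_cols, int src_rows,
                                  const float* src, float* dst)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < dst_cols && y < dst_rows)
    {
        // Clamp the four source coordinates to the last valid row/column.
        const int x0 = ::min(2 * x + 0, src_cols - 1);
        const int x1 = ::min(2 * x + 1, src_cols - 1);
        const int y0 = ::min(2 * y + 0, src_rows - 1);
        const int y1 = ::min(2 * y + 1, src_rows - 1);

        float sum = src[y0 * src_cols + x0];
        sum      += src[y1 * src_cols + x0];
        sum      += src[y0 * src_cols + x1];
        sum      += src[y1 * src_cols + x1];

        dst[y * dst_cols + x] = sum;
    }
}

Without the clamp, the last destination column of an odd-width source would read column src_cols, one element past the allocated row.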


@@ -255,7 +255,7 @@ namespace cv { namespace gpu { namespace device
 ///////////////////////////////////////////////////////////////
 template <typename T>
-__global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
+__global__ void data_step_down(int dst_cols, int dst_rows, int src_cols, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
 {
     const int x = blockIdx.x * blockDim.x + threadIdx.x;
     const int y = blockIdx.y * blockDim.y + threadIdx.y;
@@ -264,10 +264,10 @@ namespace cv { namespace gpu { namespace device
     {
         for (int d = 0; d < cndisp; ++d)
         {
-            float dst_reg = src.ptr(d * src_rows + (2*y+0))[(2*x+0)];
-            dst_reg      += src.ptr(d * src_rows + (2*y+1))[(2*x+0)];
-            dst_reg      += src.ptr(d * src_rows + (2*y+0))[(2*x+1)];
-            dst_reg      += src.ptr(d * src_rows + (2*y+1))[(2*x+1)];
+            float dst_reg = src.ptr(d * src_rows + ::min(2*y+0, src_rows-1))[::min(2*x+0, src_cols-1)];
+            dst_reg      += src.ptr(d * src_rows + ::min(2*y+1, src_rows-1))[::min(2*x+0, src_cols-1)];
+            dst_reg      += src.ptr(d * src_rows + ::min(2*y+0, src_rows-1))[::min(2*x+1, src_cols-1)];
+            dst_reg      += src.ptr(d * src_rows + ::min(2*y+1, src_rows-1))[::min(2*x+1, src_cols-1)];
             dst.ptr(d * dst_rows + y)[x] = saturate_cast<T>(dst_reg);
         }
@@ -275,7 +275,7 @@ namespace cv { namespace gpu { namespace device
 }
 template<typename T>
-void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream)
+void data_step_down_gpu(int dst_cols, int dst_rows, int src_cols, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream)
 {
     dim3 threads(32, 8, 1);
     dim3 grid(1, 1, 1);
@@ -283,15 +283,15 @@ namespace cv { namespace gpu { namespace device
     grid.x = divUp(dst_cols, threads.x);
     grid.y = divUp(dst_rows, threads.y);
-    data_step_down<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)src, (PtrStepSz<T>)dst);
+    data_step_down<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_cols, src_rows, (PtrStepSz<T>)src, (PtrStepSz<T>)dst);
     cudaSafeCall( cudaGetLastError() );
     if (stream == 0)
         cudaSafeCall( cudaDeviceSynchronize() );
 }
-template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
-template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
+template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_cols, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
+template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_cols, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
 ///////////////////////////////////////////////////////////////
 /////////////////// level up messages ////////////////////////


@@ -67,7 +67,7 @@ namespace cv { namespace gpu { namespace device
 template<typename T, typename D>
 void comp_data_gpu(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream);
 template<typename T>
-void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
+void data_step_down_gpu(int dst_cols, int dst_rows, int src_cols, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
 template <typename T>
 void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream);
 template <typename T>
@@ -253,7 +253,7 @@ namespace
 void calcBP(GpuMat& disp, Stream& stream)
 {
-    typedef void (*data_step_down_t)(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
+    typedef void (*data_step_down_t)(int dst_cols, int dst_rows, int src_cols, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
     static const data_step_down_t data_step_down_callers[2] =
     {
         data_step_down_gpu<short>, data_step_down_gpu<float>
@@ -288,7 +288,7 @@ namespace
     createContinuous(rows_all[i] * rthis.ndisp, cols_all[i], rthis.msg_type, datas[i]);
-    data_step_down_callers[funcIdx](cols_all[i], rows_all[i], rows_all[i-1], datas[i-1], datas[i], cudaStream);
+    data_step_down_callers[funcIdx](cols_all[i], rows_all[i], cols_all[i-1], rows_all[i-1], datas[i-1], datas[i], cudaStream);
 }
 PtrStepSzb mus[] = {u, u2};