Merge pull request #2543 from apavlenko:24_haar_revert
This commit is contained in: commit 51530d4d8e
@@ -65,15 +65,15 @@ ocl::integral
-----------------

Computes an integral image.

-.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth=-1)
+.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, oclMat &sqsum)

-.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, int sdepth=-1)
+.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum)

:param src: Source image. Only ``CV_8UC1`` images are supported for now.

-:param sum: Integral image containing 32-bit unsigned integer or 32-bit floating-point .
+:param sum: Integral image containing 32-bit unsigned integer values packed into ``CV_32SC1`` .

-:param sqsum: Sqsum values is ``CV_32FC1`` or ``CV_64FC1`` type.
+:param sqsum: Sqsum values is ``CV_32FC1`` type.

.. seealso:: :ocv:func:`integral`
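For orientation, here is a minimal host-side sketch of the reverted interface documented above, assuming a 2.4-era build with the `ocl` module enabled; the input file name is hypothetical. The sum image satisfies sum(X, Y) = Σ over x < X, y < Y of src(x, y), and both outputs are one pixel larger than the source in each dimension.

```cpp
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ocl/ocl.hpp>

int main()
{
    // Hypothetical 8-bit grayscale input; the ocl::integral path only accepts CV_8UC1.
    cv::Mat img = cv::imread("input.png", 0);
    if (img.empty() || img.type() != CV_8UC1)
        return 1;

    cv::ocl::oclMat src(img), sum, sqsum;

    // After the revert: no sdepth argument, sum is CV_32SC1, sqsum is CV_32FC1,
    // and both are (rows + 1) x (cols + 1).
    cv::ocl::integral(src, sum, sqsum);

    cv::Mat h_sum, h_sqsum;
    sum.download(h_sum);
    sqsum.download(h_sqsum);
    return 0;
}
```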
@@ -859,10 +859,10 @@ namespace cv
CV_EXPORTS void warpPerspective(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);

//! computes the integral image and integral for the squared image
-// sum will support CV_32S, CV_32F, sqsum - support CV32F, CV_64F
+// sum will have CV_32S type, sqsum - CV32F type
// supports only CV_8UC1 source type
-CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth=-1 );
-CV_EXPORTS void integral(const oclMat &src, oclMat &sum, int sdepth=-1 );
+CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum);
+CV_EXPORTS void integral(const oclMat &src, oclMat &sum);
CV_EXPORTS void cornerHarris(const oclMat &src, oclMat &dst, int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
CV_EXPORTS void cornerHarris_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);

@@ -936,7 +936,7 @@ namespace cv
Size m_maxSize;
vector<CvSize> sizev;
vector<float> scalev;
-oclMat gimg1, gsum, gsqsum, gsqsum_t;
+oclMat gimg1, gsum, gsqsum;
void * buffers;
};
@@ -237,7 +237,7 @@ OCL_PERF_TEST_P(CornerHarrisFixture, CornerHarris,
typedef tuple<Size, MatDepth> IntegralParams;
typedef TestBaseWithParam<IntegralParams> IntegralFixture;

-OCL_PERF_TEST_P(IntegralFixture, Integral1, ::testing::Combine(OCL_TEST_SIZES, OCL_PERF_ENUM(CV_32S, CV_32F)))
+OCL_PERF_TEST_P(IntegralFixture, DISABLED_Integral1, ::testing::Combine(OCL_TEST_SIZES, OCL_PERF_ENUM(CV_32S, CV_32F)))
{
const IntegralParams params = GetParam();
const Size srcSize = get<0>(params);

@@ -250,7 +250,7 @@ OCL_PERF_TEST_P(IntegralFixture, Integral1, ::testing::Combine(OCL_TEST_SIZES, O
{
ocl::oclMat oclSrc(src), oclDst;

-OCL_TEST_CYCLE() cv::ocl::integral(oclSrc, oclDst, sdepth);
+// OCL_TEST_CYCLE() cv::ocl::integral(oclSrc, oclDst, sdepth);

oclDst.download(dst);
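The perf test above is parked rather than deleted: prefixing a Google Test name with DISABLED_ keeps it compiled but skipped at run time. A generic sketch of that convention (plain gtest, not OpenCV's perf harness):

```cpp
#include <gtest/gtest.h>

// Tests whose names begin with "DISABLED_" still compile but are skipped,
// unless the binary is run with --gtest_also_run_disabled_tests.
TEST(IntegralPerfSketch, DISABLED_SkippedByDefault)
{
    FAIL() << "not executed in a normal run";
}

TEST(IntegralPerfSketch, RunsNormally)
{
    SUCCEED();
}
```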
@@ -109,13 +109,13 @@ OCL_PERF_TEST_P(CV_TM_CCORR_NORMEDFixture, matchTemplate,

oclDst.download(dst);

-SANITY_CHECK(dst, 3e-2);
+SANITY_CHECK(dst, 2e-2);
}
else if (RUN_PLAIN_IMPL)
{
TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED);

-SANITY_CHECK(dst, 3e-2);
+SANITY_CHECK(dst, 2e-2);
}
else
OCL_PERF_ELSE
@@ -747,15 +747,6 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
oclMat gsum(totalheight + 4, gimg.cols + 1, CV_32SC1);
oclMat gsqsum(totalheight + 4, gimg.cols + 1, CV_32FC1);

-int sdepth = 0;
-if(Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE))
-sdepth = CV_64FC1;
-else
-sdepth = CV_32FC1;
-sdepth = CV_MAT_DEPTH(sdepth);
-int type = CV_MAKE_TYPE(sdepth, 1);
-oclMat gsqsum_t(totalheight + 4, gimg.cols + 1, type);
-
cl_mem stagebuffer;
cl_mem nodebuffer;
cl_mem candidatebuffer;
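The block deleted here picked the squared-sum buffer depth from the device's double-precision capability. A standalone sketch of that pre-revert capability check, built only from calls visible in this diff:

```cpp
#include <opencv2/ocl/ocl.hpp>

// Pre-revert behaviour in sketch form: use CV_64FC1 for the squared-sum buffer
// only when the device reports FEATURE_CL_DOUBLE (cl_khr_fp64), else CV_32FC1.
static int squaredSumType()
{
    using namespace cv::ocl;
    bool hasFp64 = Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE);
    return hasFp64 ? CV_64FC1 : CV_32FC1;
}
```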
@@ -763,7 +754,6 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
cv::Rect roi, roi2;
cv::Mat imgroi, imgroisq;
cv::ocl::oclMat resizeroi, gimgroi, gimgroisq;

int grp_per_CU = 12;

size_t blocksize = 8;
@@ -783,7 +773,7 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
roi2 = Rect(0, 0, sz.width - 1, sz.height - 1);
resizeroi = gimg1(roi2);
gimgroi = gsum(roi);
-gimgroisq = gsqsum_t(roi);
+gimgroisq = gsqsum(roi);
int width = gimgroi.cols - 1 - cascade->orig_window_size.width;
int height = gimgroi.rows - 1 - cascade->orig_window_size.height;
scaleinfo[i].width_height = (width << 16) | height;
@@ -797,13 +787,8 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
scaleinfo[i].factor = factor;
cv::ocl::resize(gimg, resizeroi, Size(sz.width - 1, sz.height - 1), 0, 0, INTER_LINEAR);
cv::ocl::integral(resizeroi, gimgroi, gimgroisq);

indexy += sz.height;
}
-if(gsqsum_t.depth() == CV_64F)
-gsqsum_t.convertTo(gsqsum, CV_32FC1);
-else
-gsqsum = gsqsum_t;
-
gcascade = (GpuHidHaarClassifierCascade *)cascade->hid_cascade;
stage = (GpuHidHaarStageClassifier *)(gcascade + 1);
@@ -1040,12 +1025,7 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
int n_factors = 0;
oclMat gsum;
oclMat gsqsum;
-oclMat gsqsum_t;
-cv::ocl::integral(gimg, gsum, gsqsum_t);
-if(gsqsum_t.depth() == CV_64F)
-gsqsum_t.convertTo(gsqsum, CV_32FC1);
-else
-gsqsum = gsqsum_t;
+cv::ocl::integral(gimg, gsum, gsqsum);
CvSize sz;
vector<CvSize> sizev;
vector<float> scalev;
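Several hunks in this file delete the same normalize-back-to-float dance around the temporary gsqsum_t. For reference, the removed pattern looks like the following sketch; the helper name is hypothetical, while the calls are taken from the deleted lines and describe pre-revert semantics (after the revert, integral() always returns a CV_32FC1 sqsum).

```cpp
#include <opencv2/ocl/ocl.hpp>

// Pre-revert pattern: compute sqsum at whatever depth integral() produced
// (possibly CV_64FC1 on fp64-capable devices), then hand the Haar kernels a
// CV_32FC1 buffer, since they only consume float data.
static void integralSquaredAsFloat(const cv::ocl::oclMat &gimg,
                                   cv::ocl::oclMat &gsum,
                                   cv::ocl::oclMat &gsqsum)
{
    cv::ocl::oclMat gsqsum_t;
    cv::ocl::integral(gimg, gsum, gsqsum_t);

    if (gsqsum_t.depth() == CV_64F)
        gsqsum_t.convertTo(gsqsum, CV_32FC1);  // downcast for the float-only kernels
    else
        gsqsum = gsqsum_t;                     // already CV_32FC1
}
```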
@@ -1320,16 +1300,12 @@ void cv::ocl::OclCascadeClassifierBuf::detectMultiScale(oclMat &gimg, CV_OUT std
roi2 = Rect(0, 0, sz.width - 1, sz.height - 1);
resizeroi = gimg1(roi2);
gimgroi = gsum(roi);
-gimgroisq = gsqsum_t(roi);
+gimgroisq = gsqsum(roi);

cv::ocl::resize(gimg, resizeroi, Size(sz.width - 1, sz.height - 1), 0, 0, INTER_LINEAR);
cv::ocl::integral(resizeroi, gimgroi, gimgroisq);
indexy += sz.height;
}
-if(gsqsum_t.depth() == CV_64F)
-gsqsum_t.convertTo(gsqsum, CV_32FC1);
-else
-gsqsum = gsqsum_t;

gcascade = (GpuHidHaarClassifierCascade *)(cascade->hid_cascade);
stage = (GpuHidHaarStageClassifier *)(gcascade + 1);
@@ -1391,11 +1367,7 @@ void cv::ocl::OclCascadeClassifierBuf::detectMultiScale(oclMat &gimg, CV_OUT std
}
else
{
-cv::ocl::integral(gimg, gsum, gsqsum_t);
-if(gsqsum_t.depth() == CV_64F)
-gsqsum_t.convertTo(gsqsum, CV_32FC1);
-else
-gsqsum = gsqsum_t;
+cv::ocl::integral(gimg, gsum, gsqsum);

gcascade = (GpuHidHaarClassifierCascade *)cascade->hid_cascade;
@@ -1621,7 +1593,6 @@ void cv::ocl::OclCascadeClassifierBuf::CreateFactorRelatedBufs(
gimg1.release();
gsum.release();
gsqsum.release();
-gsqsum_t.release();
}
else if (!(m_flags & CV_HAAR_SCALE_IMAGE) && (flags & CV_HAAR_SCALE_IMAGE))
{
@@ -1696,16 +1667,6 @@ void cv::ocl::OclCascadeClassifierBuf::CreateFactorRelatedBufs(
gsum.create(totalheight + 4, cols + 1, CV_32SC1);
gsqsum.create(totalheight + 4, cols + 1, CV_32FC1);

-int sdepth = 0;
-if(Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE))
-sdepth = CV_64FC1;
-else
-sdepth = CV_32FC1;
-sdepth = CV_MAT_DEPTH(sdepth);
-int type = CV_MAKE_TYPE(sdepth, 1);
-
-gsqsum_t.create(totalheight + 4, cols + 1, type);
-
scaleinfo = (detect_piramid_info *)malloc(sizeof(detect_piramid_info) * loopcount);
for( int i = 0; i < loopcount; i++ )
{
@@ -898,7 +898,7 @@ namespace cv
////////////////////////////////////////////////////////////////////////
// integral

-void integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth)
+void integral(const oclMat &src, oclMat &sum, oclMat &sqsum)
{
CV_Assert(src.type() == CV_8UC1);
if (!src.clCxt->supportsFeature(ocl::FEATURE_CL_DOUBLE) && src.depth() == CV_64F)
@@ -907,11 +907,6 @@ namespace cv
return;
}

-if( sdepth <= 0 )
-sdepth = CV_32S;
-sdepth = CV_MAT_DEPTH(sdepth);
-int type = CV_MAKE_TYPE(sdepth, 1);
-
int vlen = 4;
int offset = src.offset / vlen;
int pre_invalid = src.offset % vlen;
@@ -919,26 +914,17 @@ namespace cv

oclMat t_sum , t_sqsum;
int w = src.cols + 1, h = src.rows + 1;

-char build_option[250];
-if(Context::getContext()->supportsFeature(ocl::FEATURE_CL_DOUBLE))
-{
-t_sqsum.create(src.cols, src.rows, CV_64FC1);
-sqsum.create(h, w, CV_64FC1);
-sprintf(build_option, "-D TYPE=double -D TYPE4=double4 -D convert_TYPE4=convert_double4");
-}
-else
-{
-t_sqsum.create(src.cols, src.rows, CV_32FC1);
-sqsum.create(h, w, CV_32FC1);
-sprintf(build_option, "-D TYPE=float -D TYPE4=float4 -D convert_TYPE4=convert_float4");
-}
+int depth = src.depth() == CV_8U ? CV_32S : CV_64F;
+int type = CV_MAKE_TYPE(depth, 1);

t_sum.create(src.cols, src.rows, type);
sum.create(h, w, type);

-int sum_offset = sum.offset / sum.elemSize();
-int sqsum_offset = sqsum.offset / sqsum.elemSize();
+t_sqsum.create(src.cols, src.rows, CV_32FC1);
+sqsum.create(h, w, CV_32FC1);
+
+int sum_offset = sum.offset / vlen;
+int sqsum_offset = sqsum.offset / vlen;

vector<pair<size_t , const void *> > args;
args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data ));
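The restored calls below pass the sum depth to openCLExecuteKernel instead of a TYPE build-option string. Judging from the kernel names later in this diff, that trailing depth appears to pick between the integral_cols_D4 (CV_32S) and integral_cols_D5 (CV_32F) variants; a sketch of that assumed naming rule, not of the actual ocl internals:

```cpp
#include <sstream>
#include <string>

// Assumed dispatch rule (inferred from this diff): CV_32S == 4 and CV_32F == 5,
// so "integral_cols" + "_D4" / "_D5" names the kernel matching the sum depth.
static std::string kernelNameForDepth(const std::string &base, int depth)
{
    std::ostringstream name;
    name << base << "_D" << depth;
    return name.str();
}
```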
@@ -950,9 +936,8 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.cols ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step));
-args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sqsum.step));
size_t gt[3] = {((vcols + 1) / 2) * 256, 1, 1}, lt[3] = {256, 1, 1};
-openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_cols", gt, lt, args, -1, sdepth, build_option);
+openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_cols", gt, lt, args, -1, depth);

args.clear();
args.push_back( make_pair( sizeof(cl_mem) , (void *)&t_sum.data ));
@@ -962,16 +947,15 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.rows ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.cols ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sqsum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sqsum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sqsum_offset));
size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1};
-openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_rows", gt2, lt2, args, -1, sdepth, build_option);
+openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_rows", gt2, lt2, args, -1, depth);
}

-void integral(const oclMat &src, oclMat &sum, int sdepth)
+void integral(const oclMat &src, oclMat &sum)
{
CV_Assert(src.type() == CV_8UC1);
int vlen = 4;
@@ -979,13 +963,10 @@ namespace cv
int pre_invalid = src.offset % vlen;
int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;

-if( sdepth <= 0 )
-sdepth = CV_32S;
-sdepth = CV_MAT_DEPTH(sdepth);
-int type = CV_MAKE_TYPE(sdepth, 1);
-
oclMat t_sum;
int w = src.cols + 1, h = src.rows + 1;
+int depth = src.depth() == CV_8U ? CV_32S : CV_32F;
+int type = CV_MAKE_TYPE(depth, 1);

t_sum.create(src.cols, src.rows, type);
sum.create(h, w, type);
@@ -1001,7 +982,7 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step));
size_t gt[3] = {((vcols + 1) / 2) * 256, 1, 1}, lt[3] = {256, 1, 1};
-openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_cols", gt, lt, args, -1, sdepth);
+openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_cols", gt, lt, args, -1, depth);

args.clear();
args.push_back( make_pair( sizeof(cl_mem) , (void *)&t_sum.data ));
@@ -1012,7 +993,7 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset));
size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1};
-openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, sdepth);
+openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, depth);
}

/////////////////////// corner //////////////////////////////
@@ -245,15 +245,12 @@ namespace cv
void matchTemplate_CCORR_NORMED(
const oclMat &image, const oclMat &templ, oclMat &result, MatchTemplateBuf &buf)
{
-cv::ocl::oclMat temp;
matchTemplate_CCORR(image, templ, result, buf);
buf.image_sums.resize(1);
buf.image_sqsums.resize(1);
-integral(image.reshape(1), buf.image_sums[0], temp);
-if(temp.depth() == CV_64F)
-temp.convertTo(buf.image_sqsums[0], CV_32FC1);
-else
-buf.image_sqsums[0] = temp;
+integral(image.reshape(1), buf.image_sums[0], buf.image_sqsums[0]);

unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];

Context *clCxt = image.clCxt;
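matchTemplate_CCORR_NORMED divides the raw correlation by the window energy, which the squared-sum integral image delivers in O(1) per window. A small host-side sketch of that four-corner lookup, assuming the (rows + 1) x (cols + 1) CV_32FC1 sqsum layout produced above:

```cpp
#include <opencv2/core/core.hpp>

// Energy of a tw x th window whose top-left corner is (x, y), read from a
// squared-sum integral image with the usual four-corner formula.
static float windowSqSum(const cv::Mat &sqsum, int x, int y, int tw, int th)
{
    return sqsum.at<float>(y,      x)
         + sqsum.at<float>(y + th, x + tw)
         - sqsum.at<float>(y,      x + tw)
         - sqsum.at<float>(y + th, x);
}
```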
@@ -419,12 +416,7 @@ namespace cv
{
buf.image_sums.resize(1);
buf.image_sqsums.resize(1);
-cv::ocl::oclMat temp;
-integral(image, buf.image_sums[0], temp);
-if(temp.depth() == CV_64F)
-temp.convertTo(buf.image_sqsums[0], CV_32FC1);
-else
-buf.image_sqsums[0] = temp;
+integral(image, buf.image_sums[0], buf.image_sqsums[0]);

templ_sum[0] = (float)sum(templ)[0];
@@ -460,14 +452,10 @@ namespace cv
templ_sum *= scale;
buf.image_sums.resize(buf.images.size());
buf.image_sqsums.resize(buf.images.size());
-cv::ocl::oclMat temp;

for(int i = 0; i < image.oclchannels(); i ++)
{
-integral(buf.images[i], buf.image_sums[i], temp);
-if(temp.depth() == CV_64F)
-temp.convertTo(buf.image_sqsums[i], CV_32FC1);
-else
-buf.image_sqsums[i] = temp;
+integral(buf.images[i], buf.image_sums[i], buf.image_sqsums[i]);
}

switch(image.oclchannels())
@ -62,13 +62,13 @@ typedef struct __attribute__((aligned (128) )) GpuHidHaarTreeNode
|
||||
GpuHidHaarTreeNode;
|
||||
|
||||
|
||||
//typedef struct __attribute__((aligned (32))) GpuHidHaarClassifier
|
||||
//{
|
||||
// int count __attribute__((aligned (4)));
|
||||
// GpuHidHaarTreeNode* node __attribute__((aligned (8)));
|
||||
// float* alpha __attribute__((aligned (8)));
|
||||
//}
|
||||
//GpuHidHaarClassifier;
|
||||
typedef struct __attribute__((aligned (32))) GpuHidHaarClassifier
|
||||
{
|
||||
int count __attribute__((aligned (4)));
|
||||
GpuHidHaarTreeNode* node __attribute__((aligned (8)));
|
||||
float* alpha __attribute__((aligned (8)));
|
||||
}
|
||||
GpuHidHaarClassifier;
|
||||
|
||||
|
||||
typedef struct __attribute__((aligned (64))) GpuHidHaarStageClassifier
|
||||
@ -84,22 +84,22 @@ typedef struct __attribute__((aligned (64))) GpuHidHaarStageClassifier
|
||||
GpuHidHaarStageClassifier;
|
||||
|
||||
|
||||
//typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
|
||||
//{
|
||||
// int count __attribute__((aligned (4)));
|
||||
// int is_stump_based __attribute__((aligned (4)));
|
||||
// int has_tilted_features __attribute__((aligned (4)));
|
||||
// int is_tree __attribute__((aligned (4)));
|
||||
// int pq0 __attribute__((aligned (4)));
|
||||
// int pq1 __attribute__((aligned (4)));
|
||||
// int pq2 __attribute__((aligned (4)));
|
||||
// int pq3 __attribute__((aligned (4)));
|
||||
// int p0 __attribute__((aligned (4)));
|
||||
// int p1 __attribute__((aligned (4)));
|
||||
// int p2 __attribute__((aligned (4)));
|
||||
// int p3 __attribute__((aligned (4)));
|
||||
// float inv_window_area __attribute__((aligned (4)));
|
||||
//} GpuHidHaarClassifierCascade;
|
||||
typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
|
||||
{
|
||||
int count __attribute__((aligned (4)));
|
||||
int is_stump_based __attribute__((aligned (4)));
|
||||
int has_tilted_features __attribute__((aligned (4)));
|
||||
int is_tree __attribute__((aligned (4)));
|
||||
int pq0 __attribute__((aligned (4)));
|
||||
int pq1 __attribute__((aligned (4)));
|
||||
int pq2 __attribute__((aligned (4)));
|
||||
int pq3 __attribute__((aligned (4)));
|
||||
int p0 __attribute__((aligned (4)));
|
||||
int p1 __attribute__((aligned (4)));
|
||||
int p2 __attribute__((aligned (4)));
|
||||
int p3 __attribute__((aligned (4)));
|
||||
float inv_window_area __attribute__((aligned (4)));
|
||||
} GpuHidHaarClassifierCascade;
|
||||
|
||||
|
||||
#ifdef PACKED_CLASSIFIER
|
||||
@ -196,12 +196,10 @@ __kernel void gpuRunHaarClassifierCascadePacked(
|
||||
for(int stageloop = start_stage; (stageloop < end_stage) && result; stageloop++ )
|
||||
{// iterate until candidate is valid
|
||||
float stage_sum = 0.0f;
|
||||
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
|
||||
((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
|
||||
int lcl_off = (yl*DATA_SIZE_X)+(xl);
|
||||
int stagecount = stageinfo->count;
|
||||
float stagethreshold = stageinfo->threshold;
|
||||
for(int nodeloop = 0; nodeloop < stagecount; nodecounter++,nodeloop++ )
|
||||
int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
|
||||
float stagethreshold = as_float(stageinfo.y);
|
||||
int lcl_off = (lid_y*DATA_SIZE_X)+(lid_x);
|
||||
for(int nodeloop = 0; nodeloop < stageinfo.x; nodecounter++,nodeloop++ )
|
||||
{
|
||||
// simple macro to extract shorts from int
|
||||
#define M0(_t) ((_t)&0xFFFF)
|
||||
@ -357,17 +355,14 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
|
||||
variance_norm_factor = variance_norm_factor * correction - mean * mean;
|
||||
variance_norm_factor = variance_norm_factor >=0.f ? sqrt(variance_norm_factor) : 1.f;
|
||||
|
||||
for(int stageloop = start_stage; (stageloop < split_stage) && result; stageloop++ )
|
||||
for(int stageloop = start_stage; (stageloop < split_stage) && result; stageloop++ )
|
||||
{
|
||||
float stage_sum = 0.f;
|
||||
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
|
||||
((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
|
||||
int stagecount = stageinfo->count;
|
||||
float stagethreshold = stageinfo->threshold;
|
||||
for(int nodeloop = 0; nodeloop < stagecount; )
|
||||
int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
|
||||
float stagethreshold = as_float(stageinfo.y);
|
||||
for(int nodeloop = 0; nodeloop < stageinfo.x; )
|
||||
{
|
||||
__global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
|
||||
(((__global uchar*)nodeptr) + nodecounter * sizeof(GpuHidHaarTreeNode));
|
||||
__global GpuHidHaarTreeNode* currentnodeptr = (nodeptr + nodecounter);
|
||||
|
||||
int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
|
||||
int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
|
||||
@ -423,7 +418,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
|
||||
#endif
|
||||
}
|
||||
|
||||
result = (stage_sum >= stagethreshold) ? 1 : 0;
|
||||
result = (stage_sum >= stagethreshold);
|
||||
}
|
||||
if(factor < 2)
|
||||
{
|
||||
@ -452,17 +447,14 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
|
||||
lclcount[0]=0;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
//int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
|
||||
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
|
||||
((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
|
||||
int stagecount = stageinfo->count;
|
||||
float stagethreshold = stageinfo->threshold;
|
||||
int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
|
||||
float stagethreshold = as_float(stageinfo.y);
|
||||
|
||||
int perfscale = queuecount > 4 ? 3 : 2;
|
||||
int queuecount_loop = (queuecount + (1<<perfscale)-1) >> perfscale;
|
||||
int lcl_compute_win = lcl_sz >> perfscale;
|
||||
int lcl_compute_win_id = (lcl_id >>(6-perfscale));
|
||||
int lcl_loops = (stagecount + lcl_compute_win -1) >> (6-perfscale);
|
||||
int lcl_loops = (stageinfo.x + lcl_compute_win -1) >> (6-perfscale);
|
||||
int lcl_compute_id = lcl_id - (lcl_compute_win_id << (6-perfscale));
|
||||
for(int queueloop=0; queueloop<queuecount_loop; queueloop++)
|
||||
{
|
||||
@ -477,10 +469,10 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
|
||||
float part_sum = 0.f;
|
||||
const int stump_factor = STUMP_BASED ? 1 : 2;
|
||||
int root_offset = 0;
|
||||
for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stagecount;)
|
||||
for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stageinfo.x;)
|
||||
{
|
||||
__global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
|
||||
(((__global uchar*)nodeptr) + sizeof(GpuHidHaarTreeNode) * ((nodecounter + tempnodecounter) * stump_factor + root_offset));
|
||||
__global GpuHidHaarTreeNode* currentnodeptr =
|
||||
nodeptr + (nodecounter + tempnodecounter) * stump_factor + root_offset;
|
||||
|
||||
int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
|
||||
int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
|
||||
@ -557,7 +549,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
|
||||
|
||||
queuecount = lclcount[0];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
nodecounter += stagecount;
|
||||
nodecounter += stageinfo.x;
|
||||
}//end for(int stageloop = splitstage; stageloop< endstage && queuecount>0;stageloop++)
|
||||
|
||||
if(lcl_id<queuecount)
|
||||
|
@ -59,13 +59,13 @@ typedef struct __attribute__((aligned(128))) GpuHidHaarTreeNode
|
||||
int right __attribute__((aligned(4)));
|
||||
}
|
||||
GpuHidHaarTreeNode;
|
||||
//typedef struct __attribute__((aligned(32))) GpuHidHaarClassifier
|
||||
//{
|
||||
// int count __attribute__((aligned(4)));
|
||||
// GpuHidHaarTreeNode *node __attribute__((aligned(8)));
|
||||
// float *alpha __attribute__((aligned(8)));
|
||||
//}
|
||||
//GpuHidHaarClassifier;
|
||||
typedef struct __attribute__((aligned(32))) GpuHidHaarClassifier
|
||||
{
|
||||
int count __attribute__((aligned(4)));
|
||||
GpuHidHaarTreeNode *node __attribute__((aligned(8)));
|
||||
float *alpha __attribute__((aligned(8)));
|
||||
}
|
||||
GpuHidHaarClassifier;
|
||||
typedef struct __attribute__((aligned(64))) GpuHidHaarStageClassifier
|
||||
{
|
||||
int count __attribute__((aligned(4)));
|
||||
@ -77,29 +77,29 @@ typedef struct __attribute__((aligned(64))) GpuHidHaarStageClassifier
|
||||
int reserved3 __attribute__((aligned(8)));
|
||||
}
|
||||
GpuHidHaarStageClassifier;
|
||||
//typedef struct __attribute__((aligned(64))) GpuHidHaarClassifierCascade
|
||||
//{
|
||||
// int count __attribute__((aligned(4)));
|
||||
// int is_stump_based __attribute__((aligned(4)));
|
||||
// int has_tilted_features __attribute__((aligned(4)));
|
||||
// int is_tree __attribute__((aligned(4)));
|
||||
// int pq0 __attribute__((aligned(4)));
|
||||
// int pq1 __attribute__((aligned(4)));
|
||||
// int pq2 __attribute__((aligned(4)));
|
||||
// int pq3 __attribute__((aligned(4)));
|
||||
// int p0 __attribute__((aligned(4)));
|
||||
// int p1 __attribute__((aligned(4)));
|
||||
// int p2 __attribute__((aligned(4)));
|
||||
// int p3 __attribute__((aligned(4)));
|
||||
// float inv_window_area __attribute__((aligned(4)));
|
||||
//} GpuHidHaarClassifierCascade;
|
||||
typedef struct __attribute__((aligned(64))) GpuHidHaarClassifierCascade
|
||||
{
|
||||
int count __attribute__((aligned(4)));
|
||||
int is_stump_based __attribute__((aligned(4)));
|
||||
int has_tilted_features __attribute__((aligned(4)));
|
||||
int is_tree __attribute__((aligned(4)));
|
||||
int pq0 __attribute__((aligned(4)));
|
||||
int pq1 __attribute__((aligned(4)));
|
||||
int pq2 __attribute__((aligned(4)));
|
||||
int pq3 __attribute__((aligned(4)));
|
||||
int p0 __attribute__((aligned(4)));
|
||||
int p1 __attribute__((aligned(4)));
|
||||
int p2 __attribute__((aligned(4)));
|
||||
int p3 __attribute__((aligned(4)));
|
||||
float inv_window_area __attribute__((aligned(4)));
|
||||
} GpuHidHaarClassifierCascade;
|
||||
|
||||
__kernel void gpuRunHaarClassifierCascade_scaled2(
|
||||
global GpuHidHaarStageClassifier *stagecascadeptr_,
|
||||
global GpuHidHaarStageClassifier *stagecascadeptr,
|
||||
global int4 *info,
|
||||
global GpuHidHaarTreeNode *nodeptr_,
|
||||
global GpuHidHaarTreeNode *nodeptr,
|
||||
global const int *restrict sum,
|
||||
global const float *restrict sqsum,
|
||||
global const float *restrict sqsum,
|
||||
global int4 *candidate,
|
||||
const int rows,
|
||||
const int cols,
|
||||
@ -132,7 +132,8 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
|
||||
int max_idx = rows * cols - 1;
|
||||
for (int scalei = 0; scalei < loopcount; scalei++)
|
||||
{
|
||||
int4 scaleinfo1 = info[scalei];
|
||||
int4 scaleinfo1;
|
||||
scaleinfo1 = info[scalei];
|
||||
int grpnumperline = (scaleinfo1.y & 0xffff0000) >> 16;
|
||||
int totalgrp = scaleinfo1.y & 0xffff;
|
||||
float factor = as_float(scaleinfo1.w);
|
||||
@ -173,18 +174,15 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
|
||||
for (int stageloop = start_stage; (stageloop < end_stage) && result; stageloop++)
|
||||
{
|
||||
float stage_sum = 0.f;
|
||||
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
|
||||
(((__global uchar*)stagecascadeptr_)+stageloop*sizeof(GpuHidHaarStageClassifier));
|
||||
int stagecount = stageinfo->count;
|
||||
int stagecount = stagecascadeptr[stageloop].count;
|
||||
for (int nodeloop = 0; nodeloop < stagecount;)
|
||||
{
|
||||
__global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
|
||||
(((__global uchar*)nodeptr_) + nodecounter * sizeof(GpuHidHaarTreeNode));
|
||||
__global GpuHidHaarTreeNode *currentnodeptr = (nodeptr + nodecounter);
|
||||
int4 info1 = *(__global int4 *)(&(currentnodeptr->p[0][0]));
|
||||
int4 info2 = *(__global int4 *)(&(currentnodeptr->p[1][0]));
|
||||
int4 info3 = *(__global int4 *)(&(currentnodeptr->p[2][0]));
|
||||
float4 w = *(__global float4 *)(&(currentnodeptr->weight[0]));
|
||||
float3 alpha3 = *(__global float3*)(&(currentnodeptr->alpha[0]));
|
||||
float3 alpha3 = *(__global float3 *)(&(currentnodeptr->alpha[0]));
|
||||
float nodethreshold = w.w * variance_norm_factor;
|
||||
|
||||
info1.x += p_offset;
|
||||
@ -206,7 +204,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
|
||||
sum[clamp(mad24(info3.w, step, info3.x), 0, max_idx)]
|
||||
+ sum[clamp(mad24(info3.w, step, info3.z), 0, max_idx)]) * w.z;
|
||||
|
||||
bool passThres = (classsum >= nodethreshold) ? 1 : 0;
|
||||
bool passThres = classsum >= nodethreshold;
|
||||
|
||||
#if STUMP_BASED
|
||||
stage_sum += passThres ? alpha3.y : alpha3.x;
|
||||
@ -236,8 +234,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
result = (stage_sum >= stageinfo->threshold) ? 1 : 0;
|
||||
result = (int)(stage_sum >= stagecascadeptr[stageloop].threshold);
|
||||
}
|
||||
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
@ -284,14 +281,11 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
|
||||
}
|
||||
}
|
||||
}
|
||||
__kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuHidHaarTreeNode *newnode, float scale, float weight_scale, const int nodenum)
|
||||
__kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuHidHaarTreeNode *newnode, float scale, float weight_scale, int nodenum)
|
||||
{
|
||||
const int counter = get_global_id(0);
|
||||
int counter = get_global_id(0);
|
||||
int tr_x[3], tr_y[3], tr_h[3], tr_w[3], i = 0;
|
||||
GpuHidHaarTreeNode t1 = *(__global GpuHidHaarTreeNode*)
|
||||
(((__global uchar*)orinode) + counter * sizeof(GpuHidHaarTreeNode));
|
||||
__global GpuHidHaarTreeNode* pNew = (__global GpuHidHaarTreeNode*)
|
||||
(((__global uchar*)newnode) + (counter + nodenum) * sizeof(GpuHidHaarTreeNode));
|
||||
GpuHidHaarTreeNode t1 = *(orinode + counter);
|
||||
|
||||
#pragma unroll
|
||||
for (i = 0; i < 3; i++)
|
||||
@ -303,21 +297,22 @@ __kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuH
|
||||
}
|
||||
|
||||
t1.weight[0] = -(t1.weight[1] * tr_h[1] * tr_w[1] + t1.weight[2] * tr_h[2] * tr_w[2]) / (tr_h[0] * tr_w[0]);
|
||||
counter += nodenum;
|
||||
|
||||
#pragma unroll
|
||||
for (i = 0; i < 3; i++)
|
||||
{
|
||||
pNew->p[i][0] = tr_x[i];
|
||||
pNew->p[i][1] = tr_y[i];
|
||||
pNew->p[i][2] = tr_x[i] + tr_w[i];
|
||||
pNew->p[i][3] = tr_y[i] + tr_h[i];
|
||||
pNew->weight[i] = t1.weight[i] * weight_scale;
|
||||
newnode[counter].p[i][0] = tr_x[i];
|
||||
newnode[counter].p[i][1] = tr_y[i];
|
||||
newnode[counter].p[i][2] = tr_x[i] + tr_w[i];
|
||||
newnode[counter].p[i][3] = tr_y[i] + tr_h[i];
|
||||
newnode[counter].weight[i] = t1.weight[i] * weight_scale;
|
||||
}
|
||||
|
||||
pNew->left = t1.left;
|
||||
pNew->right = t1.right;
|
||||
pNew->threshold = t1.threshold;
|
||||
pNew->alpha[0] = t1.alpha[0];
|
||||
pNew->alpha[1] = t1.alpha[1];
|
||||
pNew->alpha[2] = t1.alpha[2];
|
||||
newnode[counter].left = t1.left;
|
||||
newnode[counter].right = t1.right;
|
||||
newnode[counter].threshold = t1.threshold;
|
||||
newnode[counter].alpha[0] = t1.alpha[0];
|
||||
newnode[counter].alpha[1] = t1.alpha[1];
|
||||
newnode[counter].alpha[2] = t1.alpha[2];
|
||||
}
|
||||
|
@ -49,9 +49,6 @@
|
||||
#elif defined (cl_khr_fp64)
|
||||
#pragma OPENCL EXTENSION cl_khr_fp64:enable
|
||||
#endif
|
||||
#define CONVERT(step) ((step)>>1)
|
||||
#else
|
||||
#define CONVERT(step) ((step))
|
||||
#endif
|
||||
|
||||
#define LSIZE 256
|
||||
@ -64,17 +61,17 @@
|
||||
#define GET_CONFLICT_OFFSET(lid) ((lid) >> LOG_NUM_BANKS)
|
||||
|
||||
|
||||
kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global TYPE *sqsum,
|
||||
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step,int dst1_step)
|
||||
kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global float *sqsum,
|
||||
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
int4 src_t[2], sum_t[2];
|
||||
TYPE4 sqsum_t[2];
|
||||
float4 sqsum_t[2];
|
||||
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local int* sum_p;
|
||||
__local TYPE* sqsum_p;
|
||||
__local float* sqsum_p;
|
||||
src_step = src_step >> 2;
|
||||
gid = gid << 1;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
@ -83,237 +80,17 @@ kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global TY
|
||||
src_t[1] = (i + lid < rows ? convert_int4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : 0);
|
||||
|
||||
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
|
||||
lm_sqsum[0][bf_loc] = convert_float4(src_t[0] * src_t[0]);
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
offset <<= 1;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(lid < 2)
|
||||
{
|
||||
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
}
|
||||
for(int d = 1; d < LSIZE; d <<= 1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
offset >>= 1;
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
|
||||
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step /4, loc_s1 = loc_s0 + dst_step ;
|
||||
int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE),loc_sq1 = loc_sq0 + CONVERT(dst1_step);
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
|
||||
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
|
||||
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
kernel void integral_rows_D4(__global int4 *srcsum,__global TYPE4 * srcsqsum,__global int *sum ,
|
||||
__global TYPE *sqsum,int rows,int cols,int src_step,int src1_step,int sum_step,
|
||||
int sqsum_step,int sum_offset,int sqsum_offset)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
int4 src_t[2], sum_t[2];
|
||||
TYPE4 sqsrc_t[2],sqsum_t[2];
|
||||
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local int *sum_p;
|
||||
__local TYPE *sqsum_p;
|
||||
src_step = src_step >> 4;
|
||||
src1_step = (src1_step / sizeof(TYPE)) >> 2 ;
|
||||
gid <<= 1;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid ] : (int4)0;
|
||||
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid ] : (TYPE4)0;
|
||||
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid + 1] : (int4)0;
|
||||
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid + 1] : (TYPE4)0;
|
||||
|
||||
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = sqsrc_t[0];
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = sqsrc_t[1];
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
offset <<= 1;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(lid < 2)
|
||||
{
|
||||
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
}
|
||||
for(int d = 1; d < LSIZE; d <<= 1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
offset >>= 1;
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
|
||||
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(gid == 0 && (i + lid) <= rows)
|
||||
{
|
||||
sum[sum_offset + i + lid] = 0;
|
||||
sqsum[sqsum_offset + i + lid] = 0;
|
||||
}
|
||||
if(i + lid == 0)
|
||||
{
|
||||
int loc0 = gid * sum_step;
|
||||
int loc1 = gid * CONVERT(sqsum_step);
|
||||
for(int k = 1; k <= 8; k++)
|
||||
{
|
||||
if(gid * 4 + k > cols) break;
|
||||
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
|
||||
sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
|
||||
}
|
||||
}
|
||||
int loc_s0 = sum_offset + gid * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
|
||||
int loc_sq0 = sqsum_offset + gid * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;
|
||||
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k >= cols) break;
|
||||
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + 4 + k >= cols) break;
|
||||
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global TYPE *sqsum,
|
||||
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step, int dst1_step)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
float4 src_t[2], sum_t[2];
|
||||
TYPE4 sqsum_t[2];
|
||||
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local float* sum_p;
|
||||
__local TYPE* sqsum_p;
|
||||
src_step = src_step >> 2;
|
||||
gid = gid << 1;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid, cols - 1)]) : (float4)0);
|
||||
src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : (float4)0);
|
||||
|
||||
sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);
|
||||
lm_sqsum[1][bf_loc] = convert_float4(src_t[1] * src_t[1]);
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
@ -355,28 +132,27 @@ kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step ;
|
||||
int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE), loc_sq1 = loc_sq0 + CONVERT(dst1_step);
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
|
||||
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
|
||||
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
sqsum[loc_s0 + k * dst_step / 4] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
|
||||
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
|
||||
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
sqsum[loc_s1 + k * dst_step / 4] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
@ -384,31 +160,30 @@ kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global
|
||||
}
|
||||
|
||||
|
||||
kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,__global float *sum ,
|
||||
__global TYPE *sqsum,int rows,int cols,int src_step,int src1_step, int sum_step,
|
||||
kernel void integral_rows_D4(__global int4 *srcsum,__global float4 * srcsqsum,__global int *sum ,
|
||||
__global float *sqsum,int rows,int cols,int src_step,int sum_step,
|
||||
int sqsum_step,int sum_offset,int sqsum_offset)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
float4 src_t[2], sum_t[2];
|
||||
TYPE4 sqsrc_t[2],sqsum_t[2];
|
||||
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local float *sum_p;
|
||||
__local TYPE *sqsum_p;
|
||||
int4 src_t[2], sum_t[2];
|
||||
float4 sqsrc_t[2],sqsum_t[2];
|
||||
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local int *sum_p;
|
||||
__local float *sqsum_p;
|
||||
src_step = src_step >> 4;
|
||||
src1_step = (src1_step / sizeof(TYPE)) >> 2;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (float4)0;
|
||||
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid * 2] : (TYPE4)0;
|
||||
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
|
||||
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid * 2 + 1] : (TYPE4)0;
|
||||
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (int4)0;
|
||||
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2] : (float4)0;
|
||||
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (int4)0;
|
||||
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
|
||||
|
||||
sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
@ -465,16 +240,114 @@ kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,_
|
||||
if(i + lid == 0)
|
||||
{
|
||||
int loc0 = gid * 2 * sum_step;
|
||||
int loc1 = gid * 2 * CONVERT(sqsum_step);
|
||||
int loc1 = gid * 2 * sqsum_step;
|
||||
for(int k = 1; k <= 8; k++)
|
||||
{
|
||||
if(gid * 8 + k > cols) break;
|
||||
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
|
||||
sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
|
||||
sqsum[sqsum_offset + loc1 + k * sqsum_step / 4] = 0;
|
||||
}
|
||||
}
|
||||
int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
|
||||
int loc_sq0 = sqsum_offset + gid * 2 * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;
|
||||
int loc_sq0 = sqsum_offset + gid * 2 * sqsum_step + sqsum_step / 4 + i + lid, loc_sq1 = loc_sq0 + sqsum_step ;
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 8 + k >= cols) break;
|
||||
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * sqsum_step / 4] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 8 + 4 + k >= cols) break;
|
||||
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * sqsum_step / 4] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global float *sqsum,
|
||||
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
float4 src_t[2], sum_t[2];
|
||||
float4 sqsum_t[2];
|
||||
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local float* sum_p;
|
||||
__local float* sqsum_p;
|
||||
src_step = src_step >> 2;
|
||||
gid = gid << 1;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid, cols - 1)]) : (float4)0);
|
||||
src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : (float4)0);
|
||||
|
||||
sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = convert_float4(src_t[0] * src_t[0]);
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = convert_float4(src_t[1] * src_t[1]);
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
offset <<= 1;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(lid < 2)
|
||||
{
|
||||
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
}
|
||||
for(int d = 1; d < LSIZE; d <<= 1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
offset >>= 1;
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
|
||||
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step ;
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
@ -482,20 +355,138 @@ kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,_
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 8 + k >= cols) break;
|
||||
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
|
||||
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_s0 + k * dst_step / 4] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 8 + 4 + k >= cols) break;
|
||||
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
|
||||
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_s1 + k * dst_step / 4] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
kernel void integral_rows_D5(__global float4 *srcsum,__global float4 * srcsqsum,__global float *sum ,
|
||||
__global float *sqsum,int rows,int cols,int src_step,int sum_step,
|
||||
int sqsum_step,int sum_offset,int sqsum_offset)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
float4 src_t[2], sum_t[2];
|
||||
float4 sqsrc_t[2],sqsum_t[2];
|
||||
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local float *sum_p;
|
||||
__local float *sqsum_p;
|
||||
src_step = src_step >> 4;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (float4)0;
|
||||
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2] : (float4)0;
|
||||
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
|
||||
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
|
||||
|
||||
sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = sqsrc_t[0];
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = sqsrc_t[1];
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
offset <<= 1;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(lid < 2)
|
||||
{
|
||||
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
}
|
||||
for(int d = 1; d < LSIZE; d <<= 1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
offset >>= 1;
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
|
||||
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(gid == 0 && (i + lid) <= rows)
|
||||
{
|
||||
sum[sum_offset + i + lid] = 0;
|
||||
sqsum[sqsum_offset + i + lid] = 0;
|
||||
}
|
||||
if(i + lid == 0)
|
||||
{
|
||||
int loc0 = gid * 2 * sum_step;
|
||||
int loc1 = gid * 2 * sqsum_step;
|
||||
for(int k = 1; k <= 8; k++)
|
||||
{
|
||||
if(gid * 8 + k > cols) break;
|
||||
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
|
||||
sqsum[sqsum_offset + loc1 + k * sqsum_step / 4] = 0;
|
||||
}
|
||||
}
|
||||
int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
|
||||
int loc_sq0 = sqsum_offset + gid * 2 * sqsum_step + sqsum_step / 4 + i + lid, loc_sq1 = loc_sq0 + sqsum_step ;
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 8 + k >= cols) break;
|
||||
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * sqsum_step / 4] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 8 + 4 + k >= cols) break;
|
||||
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * sqsum_step / 4] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
@@ -295,33 +295,23 @@ OCL_TEST_P(CornerHarris, Mat)

//////////////////////////////////integral/////////////////////////////////////////////////

-struct Integral :
-public ImgprocTestBase
-{
-int sdepth;
-
-virtual void SetUp()
-{
-type = GET_PARAM(0);
-blockSize = GET_PARAM(1);
-sdepth = GET_PARAM(2);
-useRoi = GET_PARAM(3);
-}
-};
+typedef ImgprocTestBase Integral;
+
OCL_TEST_P(Integral, Mat1)
{
for (int j = 0; j < LOOP_TIMES; j++)
{
random_roi();

-ocl::integral(gsrc_roi, gdst_roi, sdepth);
-integral(src_roi, dst_roi, sdepth);
+ocl::integral(gsrc_roi, gdst_roi);
+integral(src_roi, dst_roi);

Near();
}
}

-OCL_TEST_P(Integral, Mat2)
+// TODO wrong output type
+OCL_TEST_P(Integral, DISABLED_Mat2)
{
Mat dst1;
ocl::oclMat gdst1;
@@ -330,12 +320,10 @@ OCL_TEST_P(Integral, Mat2)
{
random_roi();

-integral(src_roi, dst_roi, dst1, sdepth);
-ocl::integral(gsrc_roi, gdst_roi, gdst1, sdepth);
+integral(src_roi, dst1, dst_roi);
+ocl::integral(gsrc_roi, gdst1, gdst_roi);

Near();
-if(gdst1.clCxt->supportsFeature(ocl::FEATURE_CL_DOUBLE))
-EXPECT_MAT_NEAR(dst1, Mat(gdst1), 0.);
}
}
@@ -575,7 +563,7 @@ INSTANTIATE_TEST_CASE_P(Imgproc, CornerHarris, Combine(
INSTANTIATE_TEST_CASE_P(Imgproc, Integral, Combine(
Values((MatType)CV_8UC1), // TODO does not work with CV_32F, CV_64F
Values(0), // not used
-Values((MatType)CV_32SC1, (MatType)CV_32FC1),
+Values(0), // not used
Bool()));

INSTANTIATE_TEST_CASE_P(Imgproc, Threshold, Combine(
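The instantiation above feeds every combination of the listed values into the Integral fixture. A generic Google Test sketch of the same Combine()/Values() mechanism (plain gtest, outside OpenCV's test wrappers; the type names and values here are illustrative):

```cpp
#include <gtest/gtest.h>
#include <tuple>

class IntegralParamSketch : public ::testing::TestWithParam<std::tuple<int, bool> > {};

// One test body, run once per (value, flag) combination produced by Combine().
TEST_P(IntegralParamSketch, CoversEachCombination)
{
    const int  matType = std::get<0>(GetParam());
    const bool useRoi  = std::get<1>(GetParam());
    (void)matType;
    (void)useRoi;   // a real test would call integral() here and compare results
    SUCCEED();
}

INSTANTIATE_TEST_CASE_P(Imgproc, IntegralParamSketch,
                        ::testing::Combine(::testing::Values(0, 1),
                                           ::testing::Bool()));
```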