move gpu version of soft cascade to dedicated module

marina.kolpakova
2013-03-03 11:11:42 +04:00
parent 9b00c14fff
commit 5120322cea
16 changed files with 504 additions and 249 deletions


@@ -1,567 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/saturate_cast.hpp>
#include <icf.hpp>
#include <float.h>
#include <stdio.h>
namespace cv { namespace gpu { namespace device {
namespace icf {
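// Downsamples the stacked channel planes by an integer FACTOR: each output
// pixel is the box average of the corresponding FACTOR x FACTOR input block
// (the host wrapper below launches it with FACTOR = 4).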
template <int FACTOR>
__device__ __forceinline__ uchar shrink(const uchar* ptr, const int pitch, const int y, const int x)
{
int out = 0;
#pragma unroll
for(int dy = 0; dy < FACTOR; ++dy)
#pragma unroll
for(int dx = 0; dx < FACTOR; ++dx)
{
out += ptr[dy * pitch + dx];
}
return static_cast<uchar>(out / (FACTOR * FACTOR));
}
template<int FACTOR>
__global__ void shrink(const uchar* __restrict__ hogluv, const int inPitch,
uchar* __restrict__ shrank, const int outPitch )
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar* ptr = hogluv + (FACTOR * y) * inPitch + (FACTOR * x);
shrank[ y * outPitch + x] = shrink<FACTOR>(ptr, inPitch, y, x);
}
void shrink(const cv::gpu::PtrStepSzb& channels, cv::gpu::PtrStepSzb shrunk)
{
dim3 block(32, 8);
dim3 grid(shrunk.cols / 32, shrunk.rows / 8);
shrink<4><<<grid, block>>>((uchar*)channels.ptr(), channels.step, (uchar*)shrunk.ptr(), shrunk.step);
cudaSafeCall(cudaDeviceSynchronize());
}
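// Converts a normalized BGR pixel to 8-bit CIE L*u*v*: BGR -> XYZ with the D65
// white point, then L, u and v are computed and rescaled from their nominal
// ranges into [0, 255]. Note that bgr2Luv_d stacks the three output planes
// vertically using a hard-coded 480-row frame height.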
__device__ __forceinline__ void luv(const float& b, const float& g, const float& r, uchar& __l, uchar& __u, uchar& __v)
{
// rgb -> XYZ
float x = 0.412453f * r + 0.357580f * g + 0.180423f * b;
float y = 0.212671f * r + 0.715160f * g + 0.072169f * b;
float z = 0.019334f * r + 0.119193f * g + 0.950227f * b;
// computed for D65
const float _ur = 0.19783303699678276f;
const float _vr = 0.46833047435252234f;
const float divisor = fmax((x + 15.f * y + 3.f * z), FLT_EPSILON);
const float _u = __fdividef(4.f * x, divisor);
const float _v = __fdividef(9.f * y, divisor);
float hack = static_cast<float>(__float2int_rn(y * 2047)) / 2047;
const float L = fmax(0.f, ((116.f * cbrtf(hack)) - 16.f));
const float U = 13.f * L * (_u - _ur);
const float V = 13.f * L * (_v - _vr);
// L in [0, 100], u in [-134, 220], v in [-140, 122]
__l = static_cast<uchar>( L * (255.f / 100.f));
__u = static_cast<uchar>((U + 134.f) * (255.f / (220.f + 134.f )));
__v = static_cast<uchar>((V + 140.f) * (255.f / (122.f + 140.f )));
}
__global__ void bgr2Luv_d(const uchar* rgb, const int rgbPitch, uchar* luvg, const int luvgPitch)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
uchar3 color = ((uchar3*)(rgb + rgbPitch * y))[x];
uchar l, u, v;
luv(color.x / 255.f, color.y / 255.f, color.z / 255.f, l, u, v);
luvg[luvgPitch * y + x] = l;
luvg[luvgPitch * (y + 480) + x] = u;
luvg[luvgPitch * (y + 2 * 480) + x] = v;
}
void bgr2Luv(const PtrStepSzb& bgr, PtrStepSzb luv)
{
dim3 block(32, 8);
dim3 grid(bgr.cols / 32, bgr.rows / 8);
bgr2Luv_d<<<grid, block>>>((const uchar*)bgr.ptr(0), bgr.step, (uchar*)luv.ptr(0), luv.step);
cudaSafeCall(cudaDeviceSynchronize());
}
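// Quantizes a gradient direction into one of six orientation bins (30 degrees
// each). The generic version uses atan2; the specialization for the default
// bin count instead picks the bin whose direction vector has the largest
// absolute dot product with the gradient, avoiding the transcendental call.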
template<bool isDefaultNum>
__device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy)
{
const float angle_quantum = CV_PI / 6.f;
float angle = atan2(dx, dy) + (angle_quantum / 2.f);
if (angle < 0) angle += CV_PI;
const float angle_scaling = 1.f / angle_quantum;
return static_cast<int>(angle * angle_scaling) % 6;
}
template<>
__device__ __forceinline__ int fast_angle_bin<true>(const float& dy, const float& dx)
{
int index = 0;
float max_dot = fabs(dx);
{
const float dot_product = fabs(dx * 0.8660254037844386f + dy * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 1;
}
}
{
const float dot_product = fabs(dy * 0.8660254037844386f + dx * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 2;
}
}
{
int i = 3;
float2 bin_vector_i;
bin_vector_i.x = ::cos(i * (CV_PI / 6.f));
bin_vector_i.y = ::sin(i * (CV_PI / 6.f));
const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = i;
}
}
{
const float dot_product = fabs(dx * (-0.4999999999999998f) + dy * 0.8660254037844387f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 4;
}
}
{
const float dot_product = fabs(dx * (-0.8660254037844387f) + dy * 0.49999999999999994f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 5;
}
}
return index;
}
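// Computes HOG-like channels from a grayscale texture: central-difference
// gradients give a magnitude (scaled by 1/sqrt(2) so it fits a uchar) that is
// written both into the dedicated magnitude plane (row offset 480 * 6) and
// into the plane of its quantized orientation bin.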
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tgray;
template<bool isDefaultNum>
__global__ void gray2hog(PtrStepSzb mag)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const float dx = tex2D(tgray, x + 1, y + 0) - tex2D(tgray, x - 1, y - 0);
const float dy = tex2D(tgray, x + 0, y + 1) - tex2D(tgray, x - 0, y - 1);
const float magnitude = sqrtf((dx * dx) + (dy * dy)) * (1.0f / sqrtf(2));
const uchar cmag = static_cast<uchar>(magnitude);
mag( 480 * 6 + y, x) = cmag;
mag( 480 * fast_angle_bin<isDefaultNum>(dy, dx) + y, x) = cmag;
}
void gray2hog(const PtrStepSzb& gray, PtrStepSzb mag, const int bins)
{
dim3 block(32, 8);
dim3 grid(gray.cols / 32, gray.rows / 8);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar>();
cudaSafeCall( cudaBindTexture2D(0, tgray, gray.data, desc, gray.cols, gray.rows, gray.step) );
if (bins == 6)
gray2hog<true><<<grid, block>>>(mag);
else
gray2hog<false><<<grid, block>>>(mag);
cudaSafeCall(cudaDeviceSynchronize());
}
// ToDo: use textures or uncached load instruction.
__global__ void magToHist(const uchar* __restrict__ mag,
const float* __restrict__ angle, const int angPitch,
uchar* __restrict__ hog, const int hogPitch, const int fh)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int bin = (int)(angle[y * angPitch + x]);
const uchar val = mag[y * hogPitch + x];
hog[((fh * bin) + y) * hogPitch + x] = val;
}
void fillBins(cv::gpu::PtrStepSzb hogluv, const cv::gpu::PtrStepSzf& nangle,
const int fw, const int fh, const int bins, cudaStream_t stream )
{
const uchar* mag = (const uchar*)hogluv.ptr(fh * bins);
uchar* hog = (uchar*)hogluv.ptr();
const float* angle = (const float*)nangle.ptr();
dim3 block(32, 8);
dim3 grid(fw / 32, fh / 8);
magToHist<<<grid, block, 0, stream>>>(mag, angle, nangle.step / sizeof(float), hog, hogluv.step, fh);
if (!stream)
{
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
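// Non-maximum suppression: overlap() marks a detection as suppressed when its
// overlap ratio (intersection area over the smaller detection's area) with a
// higher-confidence detection exceeds 0.65; collect() then compacts the
// surviving detections using an atomic counter.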
__device__ __forceinline__ float overlapArea(const Detection &a, const Detection &b)
{
int w = ::min(a.x + a.w, b.x + b.w) - ::max(a.x, b.x);
int h = ::min(a.y + a.h, b.y + b.h) - ::max(a.y, b.y);
return (w < 0 || h < 0)? 0.f : (float)(w * h);
}
texture<uint4, cudaTextureType2D, cudaReadModeElementType> tdetections;
__global__ void overlap(const uint* n, uchar* overlaps)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx + 1; i < total; i += 192)
{
const uint4 _a = tex2D(tdetections, i, 0);
const Detection& a = *((Detection*)(&_a));
bool excluded = false;
for (int j = i + 1; j < total; ++j)
{
const uint4 _b = tex2D(tdetections, j, 0);
const Detection& b = *((Detection*)(&_b));
float ovl = overlapArea(a, b) / ::min(a.w * a.h, b.w * b.h);
if (ovl > 0.65f)
{
int suppressed = (a.confidence > b.confidence)? j : i;
overlaps[suppressed] = 1;
excluded = excluded || (suppressed == i);
}
#if __CUDA_ARCH__ >= 120
if (__all(excluded)) break;
#endif
}
}
}
__global__ void collect(const uint* n, uchar* overlaps, uint* ctr, uint4* suppressed)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx; i < total; i += 192)
{
if (!overlaps[i])
{
int oidx = atomicInc(ctr, 50);
suppressed[oidx] = tex2D(tdetections, i + 1, 0);
}
}
}
void suppress(const PtrStepSzb& objects, PtrStepSzb overlaps, PtrStepSzi ndetections,
PtrStepSzb suppressed, cudaStream_t stream)
{
int block = 192;
int grid = 1;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uint4>();
size_t offset;
cudaSafeCall( cudaBindTexture2D(&offset, tdetections, objects.data, desc, objects.cols / sizeof(uint4), objects.rows, objects.step));
overlap<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0));
collect<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0), (uint*)suppressed.ptr(0), ((uint4*)suppressed.ptr(0)) + 1);
if (!stream)
{
cudaSafeCall( cudaGetLastError());
cudaSafeCall( cudaDeviceSynchronize());
}
}
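// Inclusive prefix sum across a warp: on sm_30+ it uses __shfl_up, otherwise a
// shared-memory Hillis-Steele scan over the 32 lanes of the block row.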
template<typename Policy>
struct PrefixSum
{
__device static void apply(float& impact)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
#pragma unroll
// scan on shuffle functions
for (int i = 1; i < Policy::WARP; i *= 2)
{
const float n = __shfl_up(impact, i, Policy::WARP);
if (threadIdx.x >= i)
impact += n;
}
#else
__shared__ volatile float ptr[Policy::STA_X * Policy::STA_Y];
const int idx = threadIdx.y * Policy::STA_X + threadIdx.x;
ptr[idx] = impact;
if ( threadIdx.x >= 1) ptr [idx ] = (ptr [idx - 1] + ptr [idx]);
if ( threadIdx.x >= 2) ptr [idx ] = (ptr [idx - 2] + ptr [idx]);
if ( threadIdx.x >= 4) ptr [idx ] = (ptr [idx - 4] + ptr [idx]);
if ( threadIdx.x >= 8) ptr [idx ] = (ptr [idx - 8] + ptr [idx]);
if ( threadIdx.x >= 16) ptr [idx ] = (ptr [idx - 16] + ptr [idx]);
impact = ptr[idx];
#endif
}
};
texture<int, cudaTextureType2D, cudaReadModeElementType> thogluv;
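// Rescales a node's feature rectangle to the current level and compensates the
// node threshold for integer rounding: the threshold is multiplied by the ratio
// of the actual rescaled area to the expected one, and by a per-channel factor
// (power-law correction for gradient channels, relScale^2 for the LUV channels).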
template<bool isUp>
__device__ __forceinline__ float rescale(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
const float expected_new_area = farea * relScale * relScale;
float approx = (sarea == 0)? 1: __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<>
__device__ __forceinline__ float rescale<true>(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = scaledRect.z * scaledRect.w;
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = scaledRect.z * scaledRect.w;
const float expected_new_area = farea * relScale * relScale;
float approx = __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
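// Sums an integral-channel rectangle with four texture reads at its corners
// (a - b + c - d); the isUp specialization interprets the rect as
// x, y, width, height instead of two corner points.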
template<bool isUp>
__device__ __forceinline__ int get(int x, int y, uchar4 area)
{
int a = tex2D(thogluv, x + area.x, y + area.y);
int b = tex2D(thogluv, x + area.z, y + area.y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x + area.x, y + area.w);
return (a - b + c - d);
}
template<>
__device__ __forceinline__ int get<true>(int x, int y, uchar4 area)
{
x += area.x;
y += area.y;
int a = tex2D(thogluv, x, y);
int b = tex2D(thogluv, x + area.z, y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x, y + area.w);
return (a - b + c - d);
}
texture<float2, cudaTextureType2D, cudaReadModeElementType> troi;
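// Per-candidate cascade evaluation: blockIdx.x selects the x position,
// blockIdx.z the scale level, and each block row handles one y position. The
// 32 threads of a warp evaluate consecutive depth-2 trees, their impacts are
// accumulated with the warp prefix sum, and the warp terminates the stage loop
// early once the accumulated confidence fails the per-stage rejection threshold.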
template<typename Policy>
template<bool isUp>
__device void CascadeInvoker<Policy>::detect(Detection* objects, const uint ndetections, uint* ctr, const int downscales) const
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x;
// load Level
__shared__ Level level;
// check POI
__shared__ volatile char roiCache[Policy::STA_Y];
if (!threadIdx.y && !threadIdx.x)
((float2*)roiCache)[threadIdx.x] = tex2D(troi, blockIdx.y, x);
__syncthreads();
if (!roiCache[threadIdx.y]) return;
if (!threadIdx.x)
level = levels[downscales + blockIdx.z];
if(x >= level.workRect.x || y >= level.workRect.y) return;
int st = level.octave * level.step;
const int stEnd = st + level.step;
const int hogluvStep = gridDim.y * Policy::STA_Y;
float confidence = 0.f;
for(; st < stEnd; st += Policy::WARP)
{
const int nId = (st + threadIdx.x) * 3;
Node node = nodes[nId];
float threshold = rescale<isUp>(level, node);
int sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
int next = 1 + (int)(sum >= threshold);
node = nodes[nId + next];
threshold = rescale<isUp>(level, node);
sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
const int lShift = (next - 1) * 2 + (int)(sum >= threshold);
float impact = leaves[(st + threadIdx.x) * 4 + lShift];
PrefixSum<Policy>::apply(impact);
#if __CUDA_ARCH__ >= 120
if(__any((confidence + impact <= stages[(st + threadIdx.x)]))) st += 2048;
#endif
#if __CUDA_ARCH__ >= 300
impact = __shfl(impact, 31);
#endif
confidence += impact;
}
if(!threadIdx.x && st == stEnd && ((confidence - FLT_EPSILON) >= 0))
{
int idx = atomicInc(ctr, ndetections);
objects[idx] = Detection(__float2int_rn(x * Policy::SHRINKAGE),
__float2int_rn(y * Policy::SHRINKAGE), level.objSize.x, level.objSize.y, confidence);
}
}
template<typename Policy, bool isUp>
__global__ void soft_cascade(const CascadeInvoker<Policy> invoker, Detection* objects, const uint n, uint* ctr, const int downs)
{
invoker.template detect<isUp>(objects, n, ctr, downs);
}
template<typename Policy>
void CascadeInvoker<Policy>::operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv,
PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const
{
int fw = roi.rows;
int fh = roi.cols;
dim3 grid(fw, fh / Policy::STA_Y, downscales);
uint* ctr = (uint*)(objects.ptr(0));
Detection* det = ((Detection*)objects.ptr(0)) + 1;
uint max_det = objects.cols / sizeof(Detection);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
cudaSafeCall( cudaBindTexture2D(0, thogluv, hogluv.data, desc, hogluv.cols, hogluv.rows, hogluv.step));
cudaChannelFormatDesc desc_roi = cudaCreateChannelDesc<typename Policy::roi_type>();
cudaSafeCall( cudaBindTexture2D(0, troi, roi.data, desc_roi, roi.cols / Policy::STA_Y, roi.rows, roi.step));
const CascadeInvoker<Policy> inv = *this;
soft_cascade<Policy, false><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, 0);
cudaSafeCall( cudaGetLastError());
grid = dim3(fw, fh / Policy::STA_Y, min(38, scales) - downscales);
soft_cascade<Policy, true><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, downscales);
if (!stream)
{
cudaSafeCall( cudaGetLastError());
cudaSafeCall( cudaDeviceSynchronize());
}
}
template void CascadeInvoker<GK107PolicyX4>::operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv,
PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const;
}
}}}


@@ -1,59 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv { namespace gpu
{
CV_INIT_ALGORITHM(SCascade, "CascadeDetector.SCascade",
obj.info()->addParam(obj, "minScale", obj.minScale);
obj.info()->addParam(obj, "maxScale", obj.maxScale);
obj.info()->addParam(obj, "scales", obj.scales));
bool initModule_gpu(void)
{
Ptr<Algorithm> sc = createSCascade();
return sc->info() != 0;
}
} }


@@ -1,154 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_ICF_HPP__
#define __OPENCV_ICF_HPP__
#include <opencv2/gpu/device/common.hpp>
#if defined __CUDACC__
# define __device __device__ __forceinline__
#else
# define __device
#endif
namespace cv { namespace gpu { namespace device {
namespace icf {
struct Octave
{
ushort index;
ushort stages;
ushort shrinkage;
ushort2 size;
float scale;
Octave(const ushort i, const ushort s, const ushort sh, const ushort2 sz, const float sc)
: index(i), stages(s), shrinkage(sh), size(sz), scale(sc) {}
};
struct Level //is actually 24 bytes
{
int octave;
int step;
float relScale;
float scaling[2]; // calculated according to Dollár's paper
// for 640x480 we cannot get overflow
uchar2 workRect;
uchar2 objSize;
Level(int idx, const Octave& oct, const float scale, const int w, const int h);
__device Level(){}
};
struct Node
{
uchar4 rect;
// ushort channel;
unsigned int threshold;
enum { THRESHOLD_MASK = 0x0FFFFFFF };
Node(const uchar4 r, const unsigned int ch, const unsigned int t) : rect(r), threshold(t + (ch << 28)) {}
};
struct Detection
{
ushort x;
ushort y;
ushort w;
ushort h;
float confidence;
int kind;
Detection(){}
__device Detection(int _x, int _y, uchar _w, uchar _h, float c)
: x(_x), y(_y), w(_w), h(_h), confidence(c), kind(0) {};
};
struct GK107PolicyX4
{
enum {WARP = 32, STA_X = WARP, STA_Y = 8, SHRINKAGE = 4};
typedef float2 roi_type;
static const dim3 block()
{
return dim3(STA_X, STA_Y);
}
};
template<typename Policy>
struct CascadeInvoker
{
CascadeInvoker(): levels(0), stages(0), nodes(0), leaves(0), scales(0) {}
CascadeInvoker(const PtrStepSzb& _levels, const PtrStepSzf& _stages,
const PtrStepSzb& _nodes, const PtrStepSzf& _leaves)
: levels((const Level*)_levels.ptr()),
stages((const float*)_stages.ptr()),
nodes((const Node*)_nodes.ptr()), leaves((const float*)_leaves.ptr()),
scales(_levels.cols / sizeof(Level))
{}
const Level* levels;
const float* stages;
const Node* nodes;
const float* leaves;
int scales;
void operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv, PtrStepSz<uchar4> objects,
const int downscales, const cudaStream_t& stream = 0) const;
template<bool isUp>
__device void detect(Detection* objects, const unsigned int ndetections, unsigned int* ctr, const int downscales) const;
};
}
}}}
#endif


@@ -1,679 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if !defined (HAVE_CUDA)
cv::gpu::SCascade::SCascade(const double, const double, const int, const int) { throw_nogpu(); }
cv::gpu::SCascade::~SCascade() { throw_nogpu(); }
bool cv::gpu::SCascade::load(const FileNode&) { throw_nogpu(); return false;}
void cv::gpu::SCascade::detect(InputArray, InputArray, OutputArray, Stream&) const { throw_nogpu(); }
void cv::gpu::SCascade::read(const FileNode& fn) { Algorithm::read(fn); }
cv::gpu::ChannelsProcessor::ChannelsProcessor() { throw_nogpu(); }
cv::gpu::ChannelsProcessor::~ChannelsProcessor() { throw_nogpu(); }
cv::Ptr<cv::gpu::ChannelsProcessor> cv::gpu::ChannelsProcessor::create(const int, const int, const int)
{ throw_nogpu(); return cv::Ptr<cv::gpu::ChannelsProcessor>(0); }
#else
# include "icf.hpp"
cv::gpu::device::icf::Level::Level(int idx, const Octave& oct, const float scale, const int w, const int h)
: octave(idx), step(oct.stages), relScale(scale / oct.scale)
{
workRect.x = cvRound(w / (float)oct.shrinkage);
workRect.y = cvRound(h / (float)oct.shrinkage);
objSize.x = cv::saturate_cast<uchar>(oct.size.x * relScale);
objSize.y = cv::saturate_cast<uchar>(oct.size.y * relScale);
// according to the papers of R. Benenson, M. Mathias, R. Timofte, L. Van Gool, and P. Dollár
if (fabs(relScale - 1.f) < FLT_EPSILON)
scaling[0] = scaling[1] = 1.f;
else
{
scaling[0] = (relScale < 1.f) ? 0.89f * ::pow(relScale, 1.099f / ::log(2.0f)) : 1.f;
scaling[1] = relScale * relScale;
}
}
namespace cv { namespace gpu { namespace device {
namespace icf {
void fillBins(cv::gpu::PtrStepSzb hogluv, const cv::gpu::PtrStepSzf& nangle,
const int fw, const int fh, const int bins, cudaStream_t stream);
void suppress(const PtrStepSzb& objects, PtrStepSzb overlaps, PtrStepSzi ndetections,
PtrStepSzb suppressed, cudaStream_t stream);
void bgr2Luv(const PtrStepSzb& bgr, PtrStepSzb luv);
void gray2hog(const PtrStepSzb& gray, PtrStepSzb mag, const int bins);
void shrink(const cv::gpu::PtrStepSzb& channels, cv::gpu::PtrStepSzb shrunk);
}
}}}
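// Host-side cascade state: parseCascade() flattens the XML cascade (octaves,
// per-tree thresholds, nodes and leaves) into contiguous host matrices that
// are uploaded to the GPU, while createLevels() precomputes the detection
// levels for the requested scale range.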
struct cv::gpu::SCascade::Fields
{
static Fields* parseCascade(const FileNode &root, const float mins, const float maxs, const int totals, const int method)
{
static const char *const SC_STAGE_TYPE = "stageType";
static const char *const SC_BOOST = "BOOST";
static const char *const SC_FEATURE_TYPE = "featureType";
static const char *const SC_ICF = "ICF";
static const char *const SC_ORIG_W = "width";
static const char *const SC_ORIG_H = "height";
static const char *const SC_FEATURE_FORMAT = "featureFormat";
static const char *const SC_SHRINKAGE = "shrinkage";
static const char *const SC_OCTAVES = "octaves";
static const char *const SC_OCT_SCALE = "scale";
static const char *const SC_OCT_WEAKS = "weaks";
static const char *const SC_TREES = "trees";
static const char *const SC_WEAK_THRESHOLD = "treeThreshold";
static const char *const SC_FEATURES = "features";
static const char *const SC_INTERNAL = "internalNodes";
static const char *const SC_LEAF = "leafValues";
static const char *const SC_F_CHANNEL = "channel";
static const char *const SC_F_RECT = "rect";
// only AdaBoost is supported
std::string stageTypeStr = (std::string)root[SC_STAGE_TYPE];
CV_Assert(stageTypeStr == SC_BOOST);
// only HOG-like integral channel features supported
std::string featureTypeStr = (std::string)root[SC_FEATURE_TYPE];
CV_Assert(featureTypeStr == SC_ICF);
int origWidth = (int)root[SC_ORIG_W];
int origHeight = (int)root[SC_ORIG_H];
std::string fformat = (std::string)root[SC_FEATURE_FORMAT];
bool useBoxes = (fformat == "BOX");
ushort shrinkage = cv::saturate_cast<ushort>((int)root[SC_SHRINKAGE]);
FileNode fn = root[SC_OCTAVES];
if (fn.empty()) return 0;
using namespace device::icf;
std::vector<Octave> voctaves;
std::vector<float> vstages;
std::vector<Node> vnodes;
std::vector<float> vleaves;
FileNodeIterator it = fn.begin(), it_end = fn.end();
for (ushort octIndex = 0; it != it_end; ++it, ++octIndex)
{
FileNode fns = *it;
float scale = powf(2.f,saturate_cast<float>((int)fns[SC_OCT_SCALE]));
bool isUPOctave = scale >= 1;
ushort nweaks = saturate_cast<ushort>((int)fns[SC_OCT_WEAKS]);
ushort2 size;
size.x = cvRound(origWidth * scale);
size.y = cvRound(origHeight * scale);
Octave octave(octIndex, nweaks, shrinkage, size, scale);
CV_Assert(octave.stages > 0);
voctaves.push_back(octave);
FileNode ffs = fns[SC_FEATURES];
if (ffs.empty()) return 0;
std::vector<cv::Rect> feature_rects;
std::vector<int> feature_channels;
FileNodeIterator ftrs = ffs.begin(), ftrs_end = ffs.end();
int feature_offset = 0;
for (; ftrs != ftrs_end; ++ftrs, ++feature_offset )
{
cv::FileNode ftn = (*ftrs)[SC_F_RECT];
cv::FileNodeIterator r_it = ftn.begin();
int x = (int)*(r_it++);
int y = (int)*(r_it++);
int w = (int)*(r_it++);
int h = (int)*(r_it++);
if (useBoxes)
{
if (isUPOctave)
{
w -= x;
h -= y;
}
}
else
{
if (!isUPOctave)
{
w += x;
h += y;
}
}
feature_rects.push_back(cv::Rect(x, y, w, h));
feature_channels.push_back((int)(*ftrs)[SC_F_CHANNEL]);
}
fns = fns[SC_TREES];
if (fns.empty()) return 0;
// for each stage (~ decision tree with H = 2)
FileNodeIterator st = fns.begin(), st_end = fns.end();
for (; st != st_end; ++st )
{
FileNode octfn = *st;
float threshold = (float)octfn[SC_WEAK_THRESHOLD];
vstages.push_back(threshold);
FileNode intfns = octfn[SC_INTERNAL];
FileNodeIterator inIt = intfns.begin(), inIt_end = intfns.end();
for (; inIt != inIt_end;)
{
inIt +=2;
int featureIdx = (int)(*(inIt++));
float orig_threshold = (float)(*(inIt++));
unsigned int th = saturate_cast<unsigned int>((int)orig_threshold);
cv::Rect& r = feature_rects[featureIdx];
uchar4 rect;
rect.x = saturate_cast<uchar>(r.x);
rect.y = saturate_cast<uchar>(r.y);
rect.z = saturate_cast<uchar>(r.width);
rect.w = saturate_cast<uchar>(r.height);
unsigned int channel = saturate_cast<unsigned int>(feature_channels[featureIdx]);
vnodes.push_back(Node(rect, channel, th));
}
intfns = octfn[SC_LEAF];
inIt = intfns.begin(), inIt_end = intfns.end();
for (; inIt != inIt_end; ++inIt)
{
vleaves.push_back((float)(*inIt));
}
}
}
cv::Mat hoctaves(1, (int) (voctaves.size() * sizeof(Octave)), CV_8UC1, (uchar*)&(voctaves[0]));
CV_Assert(!hoctaves.empty());
cv::Mat hstages(cv::Mat(vstages).reshape(1,1));
CV_Assert(!hstages.empty());
cv::Mat hnodes(1, (int) (vnodes.size() * sizeof(Node)), CV_8UC1, (uchar*)&(vnodes[0]) );
CV_Assert(!hnodes.empty());
cv::Mat hleaves(cv::Mat(vleaves).reshape(1,1));
CV_Assert(!hleaves.empty());
Fields* fields = new Fields(mins, maxs, totals, origWidth, origHeight, shrinkage, 0,
hoctaves, hstages, hnodes, hleaves, method);
fields->voctaves = voctaves;
fields->createLevels(DEFAULT_FRAME_HEIGHT, DEFAULT_FRAME_WIDTH);
return fields;
}
bool check(float mins,float maxs, int scales)
{
bool updated = (minScale != mins) || (maxScale != maxs) || (totals != scales);
minScale = mins;
maxScale = maxs;
totals = scales;
return updated;
}
int createLevels(const int fh, const int fw)
{
using namespace device::icf;
std::vector<Level> vlevels;
float logFactor = (::log(maxScale) - ::log(minScale)) / (totals -1);
float scale = minScale;
int dcs = 0;
for (int sc = 0; sc < totals; ++sc)
{
int width = (int)::std::max(0.0f, fw - (origObjWidth * scale));
int height = (int)::std::max(0.0f, fh - (origObjHeight * scale));
float logScale = ::log(scale);
int fit = fitOctave(voctaves, logScale);
Level level(fit, voctaves[fit], scale, width, height);
if (!width || !height)
break;
else
{
vlevels.push_back(level);
if (voctaves[fit].scale < 1) ++dcs;
}
if (::fabs(scale - maxScale) < FLT_EPSILON) break;
scale = ::std::min(maxScale, ::expf(::log(scale) + logFactor));
}
cv::Mat hlevels = cv::Mat(1, (int) (vlevels.size() * sizeof(Level)), CV_8UC1, (uchar*)&(vlevels[0]) );
CV_Assert(!hlevels.empty());
levels.upload(hlevels);
downscales = dcs;
return dcs;
}
bool update(int fh, int fw, int shr)
{
shrunk.create(fh / shr * HOG_LUV_BINS, fw / shr, CV_8UC1);
integralBuffer.create(shrunk.rows, shrunk.cols, CV_32SC1);
hogluv.create((fh / shr) * HOG_LUV_BINS + 1, fw / shr + 1, CV_32SC1);
hogluv.setTo(cv::Scalar::all(0));
overlaps.create(1, 5000, CV_8UC1);
suppressed.create(1, sizeof(Detection) * 51, CV_8UC1);
return true;
}
Fields( const float mins, const float maxs, const int tts, const int ow, const int oh, const int shr, const int ds,
cv::Mat hoctaves, cv::Mat hstages, cv::Mat hnodes, cv::Mat hleaves, int method)
: minScale(mins), maxScale(maxs), totals(tts), origObjWidth(ow), origObjHeight(oh), shrinkage(shr), downscales(ds)
{
update(DEFAULT_FRAME_HEIGHT, DEFAULT_FRAME_WIDTH, shr);
octaves.upload(hoctaves);
stages.upload(hstages);
nodes.upload(hnodes);
leaves.upload(hleaves);
preprocessor = ChannelsProcessor::create(shrinkage, 6, method);
}
void detect(cv::gpu::GpuMat& objects, Stream& s) const
{
if (s)
s.enqueueMemSet(objects, 0);
else
cudaMemset(objects.data, 0, sizeof(Detection));
cudaSafeCall( cudaGetLastError());
device::icf::CascadeInvoker<device::icf::GK107PolicyX4> invoker
= device::icf::CascadeInvoker<device::icf::GK107PolicyX4>(levels, stages, nodes, leaves);
cudaStream_t stream = StreamAccessor::getStream(s);
invoker(mask, hogluv, objects, downscales, stream);
}
void suppress(GpuMat& objects, Stream& s)
{
GpuMat ndetections = GpuMat(objects, cv::Rect(0, 0, sizeof(Detection), 1));
ensureSizeIsEnough(objects.rows, objects.cols, CV_8UC1, overlaps);
if (s)
{
s.enqueueMemSet(overlaps, 0);
s.enqueueMemSet(suppressed, 0);
}
else
{
overlaps.setTo(0);
suppressed.setTo(0);
}
cudaStream_t stream = StreamAccessor::getStream(s);
device::icf::suppress(objects, overlaps, ndetections, suppressed, stream);
}
private:
typedef std::vector<device::icf::Octave>::const_iterator octIt_t;
static int fitOctave(const std::vector<device::icf::Octave>& octs, const float& logFactor)
{
float minAbsLog = FLT_MAX;
int res = 0;
for (int oct = 0; oct < (int)octs.size(); ++oct)
{
const device::icf::Octave& octave =octs[oct];
float logOctave = ::log(octave.scale);
float logAbsScale = ::fabs(logFactor - logOctave);
if(logAbsScale < minAbsLog)
{
res = oct;
minAbsLog = logAbsScale;
}
}
return res;
}
public:
cv::Ptr<ChannelsProcessor> preprocessor;
// scales range
float minScale;
float maxScale;
int totals;
int origObjWidth;
int origObjHeight;
const int shrinkage;
int downscales;
// 160x120x10
GpuMat shrunk;
// temporary buffer for the integral image
GpuMat integralBuffer;
// 161x121x10
GpuMat hogluv;
// used for suppression
GpuMat suppressed;
// used for area overlap computation during non-maximum suppression
GpuMat overlaps;
// Cascade from xml
GpuMat octaves;
GpuMat stages;
GpuMat nodes;
GpuMat leaves;
GpuMat levels;
// For ROI
GpuMat mask;
GpuMat genRoiTmp;
// GpuMat collected;
std::vector<device::icf::Octave> voctaves;
// DeviceInfo info;
enum { BOOST = 0 };
enum
{
DEFAULT_FRAME_WIDTH = 640,
DEFAULT_FRAME_HEIGHT = 480,
HOG_LUV_BINS = 10
};
};
cv::gpu::SCascade::SCascade(const double mins, const double maxs, const int sc, const int fl)
: fields(0), minScale(mins), maxScale(maxs), scales(sc), flags(fl) {}
cv::gpu::SCascade::~SCascade() { delete fields; }
bool cv::gpu::SCascade::load(const FileNode& fn)
{
if (fields) delete fields;
fields = Fields::parseCascade(fn, (float)minScale, (float)maxScale, scales, flags);
return fields != 0;
}
void cv::gpu::SCascade::detect(InputArray _image, InputArray _rois, OutputArray _objects, Stream& s) const
{
CV_Assert(fields);
// only color images and precomputed integrals are supported
int type = _image.type();
CV_Assert(type == CV_8UC3 || type == CV_32SC1 || (!_rois.empty()));
const GpuMat image = _image.getGpuMat();
if (_objects.empty()) _objects.create(1, 4096 * sizeof(Detection), CV_8UC1);
GpuMat rois = _rois.getGpuMat(), objects = _objects.getGpuMat();
/// roi
Fields& flds = *fields;
int shr = flds.shrinkage;
flds.mask.create( rois.cols / shr, rois.rows / shr, rois.type());
cv::gpu::resize(rois, flds.genRoiTmp, cv::Size(), 1.f / shr, 1.f / shr, CV_INTER_AREA, s);
cv::gpu::transpose(flds.genRoiTmp, flds.mask, s);
if (type == CV_8UC3)
{
flds.update(image.rows, image.cols, flds.shrinkage);
if (flds.check((float)minScale, (float)maxScale, scales))
flds.createLevels(image.rows, image.cols);
flds.preprocessor->apply(image, flds.shrunk);
cv::gpu::integralBuffered(flds.shrunk, flds.hogluv, flds.integralBuffer, s);
}
else
{
if (s)
s.enqueueCopy(image, flds.hogluv);
else
image.copyTo(flds.hogluv);
}
flds.detect(objects, s);
if ( (flags & NMS_MASK) != NO_REJECT)
{
GpuMat spr(objects, cv::Rect(0, 0, flds.suppressed.cols, flds.suppressed.rows));
flds.suppress(objects, s);
flds.suppressed.copyTo(spr);
}
}
void cv::gpu::SCascade::read(const FileNode& fn)
{
Algorithm::read(fn);
}
namespace {
using cv::InputArray;
using cv::OutputArray;
using cv::gpu::Stream;
using cv::gpu::GpuMat;
inline void setZero(cv::gpu::GpuMat& m, Stream& s)
{
if (s)
s.enqueueMemSet(m, 0);
else
m.setTo(0);
}
struct GenricPreprocessor : public cv::gpu::ChannelsProcessor
{
GenricPreprocessor(const int s, const int b) : cv::gpu::ChannelsProcessor(), shrinkage(s), bins(b) {}
virtual ~GenricPreprocessor() {}
virtual void apply(InputArray _frame, OutputArray _shrunk, Stream& s = Stream::Null())
{
const GpuMat frame = _frame.getGpuMat();
_shrunk.create(frame.rows * (4 + bins) / shrinkage, frame.cols / shrinkage, CV_8UC1);
GpuMat shrunk = _shrunk.getGpuMat();
channels.create(frame.rows * (4 + bins), frame.cols, CV_8UC1);
setZero(channels, s);
cv::gpu::cvtColor(frame, gray, CV_BGR2GRAY, s);
createHogBins(s);
createLuvBins(frame, s);
cv::gpu::resize(channels, shrunk, cv::Size(), 1.f / shrinkage, 1.f / shrinkage, CV_INTER_AREA, s);
}
private:
void createHogBins(Stream& s)
{
static const int fw = gray.cols;
static const int fh = gray.rows;
fplane.create(fh * HOG_BINS, fw, CV_32FC1);
GpuMat dfdx(fplane, cv::Rect(0, 0, fw, fh));
GpuMat dfdy(fplane, cv::Rect(0, fh, fw, fh));
cv::gpu::Sobel(gray, dfdx, CV_32F, 1, 0, sobelBuf, 3, 1, cv::BORDER_DEFAULT, -1, s);
cv::gpu::Sobel(gray, dfdy, CV_32F, 0, 1, sobelBuf, 3, 1, cv::BORDER_DEFAULT, -1, s);
GpuMat mag(fplane, cv::Rect(0, 2 * fh, fw, fh));
GpuMat ang(fplane, cv::Rect(0, 3 * fh, fw, fh));
cv::gpu::cartToPolar(dfdx, dfdy, mag, ang, true, s);
// normalize magnitude to uchar interval and angles to 6 bins
GpuMat nmag(fplane, cv::Rect(0, 4 * fh, fw, fh));
GpuMat nang(fplane, cv::Rect(0, 5 * fh, fw, fh));
cv::gpu::multiply(mag, cv::Scalar::all(1.f / (8 *::log(2.0f))), nmag, 1, -1, s);
cv::gpu::multiply(ang, cv::Scalar::all(1.f / 60.f), nang, 1, -1, s);
//create uchar magnitude
GpuMat cmag(channels, cv::Rect(0, fh * HOG_BINS, fw, fh));
if (s)
s.enqueueConvert(nmag, cmag, CV_8UC1);
else
nmag.convertTo(cmag, CV_8UC1);
cudaStream_t stream = cv::gpu::StreamAccessor::getStream(s);
cv::gpu::device::icf::fillBins(channels, nang, fw, fh, HOG_BINS, stream);
}
void createLuvBins(const cv::gpu::GpuMat& colored, Stream& s)
{
static const int fw = colored.cols;
static const int fh = colored.rows;
cv::gpu::cvtColor(colored, luv, CV_BGR2Luv, s);
std::vector<GpuMat> splited;
for(int i = 0; i < LUV_BINS; ++i)
{
splited.push_back(GpuMat(channels, cv::Rect(0, fh * (7 + i), fw, fh)));
}
cv::gpu::split(luv, splited, s);
}
enum {HOG_BINS = 6, LUV_BINS = 3};
const int shrinkage;
const int bins;
GpuMat gray;
GpuMat luv;
GpuMat channels;
// preallocated buffer for floating point operations
GpuMat fplane;
GpuMat sobelBuf;
};
struct SeparablePreprocessor : public cv::gpu::ChannelsProcessor
{
SeparablePreprocessor(const int s, const int b) : cv::gpu::ChannelsProcessor(), shrinkage(s), bins(b) {}
virtual ~SeparablePreprocessor() {}
virtual void apply(InputArray _frame, OutputArray _shrunk, Stream& s = Stream::Null())
{
const GpuMat frame = _frame.getGpuMat();
cv::gpu::GaussianBlur(frame, bgr, cv::Size(3, 3), -1.0);
_shrunk.create(frame.rows * (4 + bins) / shrinkage, frame.cols / shrinkage, CV_8UC1);
GpuMat shrunk = _shrunk.getGpuMat();
channels.create(frame.rows * (4 + bins), frame.cols, CV_8UC1);
setZero(channels, s);
cv::gpu::cvtColor(bgr, gray, CV_BGR2GRAY);
cv::gpu::device::icf::gray2hog(gray, channels(cv::Rect(0, 0, bgr.cols, bgr.rows * (bins + 1))), bins);
cv::gpu::GpuMat luv(channels, cv::Rect(0, bgr.rows * (bins + 1), bgr.cols, bgr.rows * 3));
cv::gpu::device::icf::bgr2Luv(bgr, luv);
cv::gpu::device::icf::shrink(channels, shrunk);
}
private:
const int shrinkage;
const int bins;
GpuMat bgr;
GpuMat gray;
GpuMat channels;
};
}
cv::Ptr<cv::gpu::ChannelsProcessor> cv::gpu::ChannelsProcessor::create(const int s, const int b, const int m)
{
CV_Assert((m & SEPARABLE) || (m & GENERIC));
if (m & GENERIC)
return cv::Ptr<cv::gpu::ChannelsProcessor>(new GenricPreprocessor(s, b));
return cv::Ptr<cv::gpu::ChannelsProcessor>(new SeparablePreprocessor(s, b));
}
cv::gpu::ChannelsProcessor::ChannelsProcessor() { }
cv::gpu::ChannelsProcessor::~ChannelsProcessor() { }
#endif
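
A minimal host-side usage sketch of the detector moved by this commit, assuming the public cv::gpu::SCascade API declared in the gpu module headers (load from a FileNode, detect into a packed byte buffer); the file names "cascade.xml" and "frame.png" are placeholders.

#include <opencv2/gpu/gpu.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    // Load a trained soft cascade from its XML description.
    cv::gpu::SCascade cascade;
    cv::FileStorage fs("cascade.xml", cv::FileStorage::READ);
    if (!fs.isOpened() || !cascade.load(fs.getFirstTopLevelNode())) return -1;

    // Upload a color frame and a full-frame ROI mask.
    cv::Mat frame = cv::imread("frame.png");
    cv::gpu::GpuMat image(frame);
    cv::gpu::GpuMat rois(frame.size(), CV_8UC1, cv::Scalar::all(1));

    // Packed output: a counter in the first slot, Detection records after it.
    cv::gpu::GpuMat objects(1, 4096 * sizeof(cv::gpu::SCascade::Detection), CV_8UC1);
    cascade.detect(image, rois, objects, cv::gpu::Stream::Null());
    return 0;
}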