Set stricter warning rules for gcc

Andrey Kamaev
2012-06-07 17:21:29 +00:00
parent 0395f7c63f
commit 49a1ba6038
241 changed files with 9054 additions and 8947 deletions

View File

@@ -5,7 +5,7 @@
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
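Why this hunk matters under the stricter rules: with GCC's -Wundef (presumably among the newly enabled warnings; the build-system change itself is not shown in this excerpt), an undefined macro evaluated inside #if silently becomes 0 and triggers a warning, while #ifdef only asks whether the macro is defined. A minimal sketch of the difference:

/* hypothetical file compiled with -Wundef */
#if MY_FEATURE_FLAG       /* warns: MY_FEATURE_FLAG is not defined, treated as 0 */
#endif
#ifdef MY_FEATURE_FLAG    /* no warning: only tests whether the macro is defined */
#endif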

View File

@@ -9,8 +9,8 @@
//
//
// API
// int GetPointOfIntersection(const float *f,
const float a, const float b,
int q1, int q2, float *point);
// INPUT
// f - function on the regular grid
@@ -23,15 +23,15 @@
// RESULT
// Error status
*/
int GetPointOfIntersection(const float *f,
const float a, const float b,
int q1, int q2, float *point)
{
if (q1 == q2)
{
return DISTANCE_TRANSFORM_EQUAL_POINTS;
} /* if (q1 == q2) */
(*point) = ( (f[q2] - a * q2 + b *q2 * q2) -
(f[q1] - a * q1 + b * q1 * q1) ) / (2 * b * (q2 - q1));
return DISTANCE_TRANSFORM_OK;
}
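The comment block above documents the parabola-intersection helper used by the one-dimensional generalized distance transform. A hypothetical call, with values chosen only to make the arithmetic easy to check:

/* Illustrative values, not from the library. */
float f[4] = { 0.0f, 1.0f, 4.0f, 9.0f };
float point = 0.0f;
int status = GetPointOfIntersection(f, /* a */ 0.0f, /* b */ 1.0f,
                                    /* q1 */ 0, /* q2 */ 2, &point);
/* point = ((f[2] - 0*2 + 1*2*2) - (f[0] - 0*0 + 1*0*0)) / (2 * 1 * (2 - 0))
         = (8 - 0) / 4 = 2.0f, and status == DISTANCE_TRANSFORM_OK */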
@@ -43,9 +43,9 @@ int GetPointOfIntersection(const float *f,
//
// API
// int DistanceTransformOneDimensionalProblem(const float *f, const int n,
const float a, const float b,
float *distanceTransform,
int *points);
// INPUT
// f - function on the regular grid
// n - grid dimension
@@ -58,7 +58,7 @@ int GetPointOfIntersection(const float *f,
// Error status
*/
int DistanceTransformOneDimensionalProblem(const float *f, const int n,
const float a, const float b,
float *distanceTransform,
int *points)
{
@@ -73,7 +73,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
// Allocation memory (must be free in this function)
v = (int *)malloc (sizeof(int) * n);
z = (float *)malloc (sizeof(float) * (n + 1));
v[0] = 0;
z[0] = (float)F_MIN; // left border of envelope
z[1] = (float)F_MAX; // right border of envelope
@@ -89,7 +89,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
} /* if (tmp != DISTANCE_TRANSFORM_OK) */
if (pointIntersection <= z[k])
{
// Envelope doesn't contain current parabola
do
{
k--;
@@ -144,7 +144,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
// INPUT
// k - index of the previous cycle element
// n - number of matrix rows
// q - parameter that equal
(number_of_rows * number_of_columns - 1)
// OUTPUT
// None
@@ -196,7 +196,7 @@ void TransposeCycleElements(float *a, int *cycle, int cycle_len)
// RESULT
// Error status
*/
-void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)
+static void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)
{
int i;
int buf;
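A large share of this commit simply adds static to helpers that are used only inside their own translation unit. Under stricter GCC warnings (most likely -Wmissing-declarations or -Wmissing-prototypes; the exact flags are not part of this excerpt), an externally visible function with no prior declaration in a header is reported. A minimal sketch of the pattern, not the real OpenCV code:

/* Without 'static': external linkage and no prototype in any header,
   so GCC reports something like "no previous declaration for 'swap_int'".
   With 'static': internal linkage, the definition is its own declaration. */
static void swap_int(int *a, int i, int j)
{
    int tmp = a[i];
    a[i] = a[j];
    a[j] = tmp;
}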
@@ -229,7 +229,7 @@ void Transpose(float *a, int n, int m)
int max_cycle_len;
max_cycle_len = n * m;
// Allocation memory (must be free in this function)
cycle = (int *)malloc(sizeof(int) * max_cycle_len);
@@ -240,12 +240,12 @@ void Transpose(float *a, int n, int m)
k = GetNextCycleElement(i, n, q);
cycle[cycle_len] = i;
cycle_len++;
while (k > i)
{
cycle[cycle_len] = k;
cycle_len++;
k = GetNextCycleElement(k, n, q);
}
if (k == i)
{
@@ -272,14 +272,14 @@ void Transpose(float *a, int n, int m)
// RESULT
// None
*/
-void Transpose_int(int *a, int n, int m)
+static void Transpose_int(int *a, int n, int m)
{
int *cycle;
int i, k, q, cycle_len;
int max_cycle_len;
max_cycle_len = n * m;
// Allocation memory (must be free in this function)
cycle = (int *)malloc(sizeof(int) * max_cycle_len);
@@ -290,12 +290,12 @@ void Transpose_int(int *a, int n, int m)
k = GetNextCycleElement(i, n, q);
cycle[cycle_len] = i;
cycle_len++;
while (k > i)
{
cycle[cycle_len] = k;
cycle_len++;
k = GetNextCycleElement(k, n, q);
}
if (k == i)
{
@@ -311,21 +311,21 @@ void Transpose_int(int *a, int n, int m)
/*
// Decision of two dimensional problem generalized distance transform
// on the regular grid at all points
// min{d2(y' - y) + d4(y' - y)(y' - y) +
min(d1(x' - x) + d3(x' - x)(x' - x) + f(x',y'))} (on x', y')
//
// API
// int DistanceTransformTwoDimensionalProblem(const float *f,
const int n, const int m,
const float coeff[4],
float *distanceTransform,
int *pointsX, int *pointsY);
// INPUT
// f - function on the regular grid
// n - number of rows
// m - number of columns
// coeff - coefficients of optimizable function
coeff[0] = d1, coeff[1] = d2,
coeff[2] = d3, coeff[3] = d4
// OUTPUT
// distanceTransform - values of generalized distance transform
@@ -334,9 +334,9 @@ void Transpose_int(int *a, int n, int m)
// RESULT
// Error status
*/
int DistanceTransformTwoDimensionalProblem(const float *f,
const int n, const int m,
const float coeff[4],
float *distanceTransform,
int *pointsX, int *pointsY)
{
@@ -349,10 +349,10 @@ int DistanceTransformTwoDimensionalProblem(const float *f,
for (i = 0; i < n; i++)
{
resOneDimProblem = DistanceTransformOneDimensionalProblem(
f + i * m, m,
coeff[0], coeff[2],
&internalDistTrans[i * m],
&internalPointsX[i * m]);
if (resOneDimProblem != DISTANCE_TRANSFORM_OK)
return DISTANCE_TRANSFORM_ERROR;
}
@@ -360,9 +360,9 @@ int DistanceTransformTwoDimensionalProblem(const float *f,
for (j = 0; j < m; j++)
{
resOneDimProblem = DistanceTransformOneDimensionalProblem(
&internalDistTrans[j * n], n,
coeff[1], coeff[3],
distanceTransform + j * n,
pointsY + j * n);
if (resOneDimProblem != DISTANCE_TRANSFORM_OK)
return DISTANCE_TRANSFORM_ERROR;
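The two-dimensional transform above reuses the one-dimensional routine: rows are processed first, the intermediate result is transposed (Transpose / Transpose_int), and the columns are then processed the same way. A hypothetical call, with the grid size and coefficients invented for illustration:

/* Illustrative sizes and coefficients. */
enum { N = 4, M = 5 };                        /* rows, columns                    */
float f[N * M];                               /* input function on the grid       */
float dt[N * M];                              /* output: generalized distance map */
int pointsX[N * M], pointsY[N * M];           /* output: argmin coordinates       */
float coeff[4] = { 1.0f, 1.0f, 0.5f, 0.5f };  /* d1, d2, d3, d4                   */
/* ... fill f ... */
int status = DistanceTransformTwoDimensionalProblem(f, N, M, coeff,
                                                    dt, pointsX, pointsY);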

View File

@@ -30,12 +30,12 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
int height, width, numChannels;
int i, j, kk, c, ii, jj, d;
float * datadx, * datady;
int ch;
float magnitude, x, y, tx, ty;
IplImage * dx, * dy;
int *nearest;
float *w, a_x, b_x;
@@ -51,7 +51,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
int * alfa;
float boundary_x[NUM_SECTOR + 1];
float boundary_y[NUM_SECTOR + 1];
@@ -63,9 +63,9 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
numChannels = image->nChannels;
dx = cvCreateImage(cvSize(image->width, image->height),
IPL_DEPTH_32F, 3);
dy = cvCreateImage(cvSize(image->width, image->height),
IPL_DEPTH_32F, 3);
sizeX = width / k;
@@ -77,7 +77,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));
float arg_vector;
for(i = 0; i <= NUM_SECTOR; i++)
{
@@ -113,20 +113,20 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
y = ty;
}
}/*for(ch = 1; ch < numChannels; ch++)*/
max = boundary_x[0] * x + boundary_y[0] * y;
maxi = 0;
for (kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
if (dotProd > max)
{
max = dotProd;
maxi = kk;
}
else
{
if (-dotProd > max)
{
max = -dotProd;
maxi = kk + NUM_SECTOR;
@@ -134,14 +134,14 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
}
}
alfa[j * width * 2 + i * 2 ] = maxi % NUM_SECTOR;
alfa[j * width * 2 + i * 2 + 1] = maxi;
}/*for(i = 0; i < width; i++)*/
}/*for(j = 0; j < height; j++)*/
nearest = (int *)malloc(sizeof(int ) * k);
w = (float*)malloc(sizeof(float) * (k * 2));
for(i = 0; i < k / 2; i++)
{
nearest[i] = -1;
@@ -155,15 +155,15 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
{
b_x = k / 2 + j + 0.5f;
a_x = k / 2 - j - 0.5f;
w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
}/*for(j = 0; j < k / 2; j++)*/
for(j = k / 2; j < k; j++)
{
a_x = j - k / 2 + 0.5f;
b_x =-j + k / 2 - 0.5f + k;
w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
}/*for(j = k / 2; j < k; j++)*/
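The w array filled above holds the pair of weights used to split each pixel's gradient contribution between its own cell and the neighbouring cell (soft binning). As a worked example with the illustrative values k = 4 and j = 0: a_x = 1.5 and b_x = 2.5, so w[0] = (1/1.5) * (1.5 * 2.5 / 4.0) = 0.625 and w[1] = (1/2.5) * (1.5 * 2.5 / 4.0) = 0.375. Algebraically the two weights always sum to 1, so the magnitude is distributed rather than duplicated.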
@@ -176,40 +176,40 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
{
for(jj = 0; jj < k; jj++)
{
if ((i * k + ii > 0) &&
(i * k + ii < height - 1) &&
(j * k + jj > 0) &&
(j * k + jj < width - 1))
{
d = (k * i + ii) * width + (j * k + jj);
(*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 ]] +=
r[d] * w[ii * 2] * w[jj * 2];
(*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2] * w[jj * 2];
if ((i + nearest[ii] >= 0) &&
(i + nearest[ii] <= sizeY - 1))
{
(*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 ] ] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 ];
(*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 ];
}
if ((j + nearest[jj] >= 0) &&
(j + nearest[jj] <= sizeX - 1))
{
(*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] +=
r[d] * w[ii * 2] * w[jj * 2 + 1];
(*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2] * w[jj * 2 + 1];
}
if ((i + nearest[ii] >= 0) &&
(i + nearest[ii] <= sizeY - 1) &&
(j + nearest[jj] >= 0) &&
(j + nearest[jj] <= sizeX - 1))
{
(*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
(*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
}
}
@@ -217,14 +217,14 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
}/*for(ii = 0; ii < k; ii++)*/
}/*for(j = 1; j < sizeX - 1; j++)*/
}/*for(i = 1; i < sizeY - 1; i++)*/
cvReleaseImage(&dx);
cvReleaseImage(&dy);
free(w);
free(nearest);
free(r);
free(alfa);
@@ -232,7 +232,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
}
/*
// Feature map Normalization and Truncation
//
// API
// int normalizeAndTruncate(featureMap *map, const float alfa);
@@ -270,7 +270,7 @@ int normalizeAndTruncate(CvLSVMFeatureMap *map, const float alfa)
}/*for(j = 0; j < p; j++)*/
partOfNorm[i] = valOfNorm;
}/*for(i = 0; i < sizeX * sizeY; i++)*/
sizeX -= 2;
sizeY -= 2;
@@ -369,13 +369,13 @@ int normalizeAndTruncate(CvLSVMFeatureMap *map, const float alfa)
// Error status
*/
int PCAFeatureMaps(CvLSVMFeatureMap *map)
{
int i,j, ii, jj, k;
int sizeX, sizeY, p, pp, xp, yp, pos1, pos2;
float * newData;
float val;
float nx, ny;
sizeX = map->sizeX;
sizeY = map->sizeY;
p = map->numFeatures;
@@ -424,7 +424,7 @@ int PCAFeatureMaps(CvLSVMFeatureMap *map)
}/*for(jj = 0; jj < xp; jj++)*/
newData[pos2 + k] = val * nx;
k++;
} /*for(ii = 0; ii < yp; ii++)*/
}/*for(j = 0; j < sizeX; j++)*/
}/*for(i = 0; i < sizeY; i++)*/
//swop data
@@ -439,7 +439,7 @@ int PCAFeatureMaps(CvLSVMFeatureMap *map)
}
-int getPathOfFeaturePyramid(IplImage * image,
+static int getPathOfFeaturePyramid(IplImage * image,
float step, int numStep, int startIndex,
int sideLength, CvLSVMFeaturePyramid **maps)
{
@@ -447,7 +447,7 @@ int getPathOfFeaturePyramid(IplImage * image,
IplImage *scaleTmp;
float scale;
int i, err;
for(i = 0; i < numStep; i++)
{
scale = 1.0f / powf(step, (float)i);
@@ -462,13 +462,13 @@ int getPathOfFeaturePyramid(IplImage * image,
}
/*
// Getting feature pyramid
//
// API
// int getFeaturePyramid(IplImage * image, const filterObject **all_F,
const int n_f,
const int lambda, const int k,
const int startX, const int startY,
const int W, const int H, featurePyramid **maps);
// INPUT
// image - image
@@ -484,7 +484,7 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
int numStep;
int maxNumCells;
int W, H;
if(image->depth == IPL_DEPTH_32F)
{
imgResize = image;
@@ -493,9 +493,9 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
{
imgResize = cvCreateImage(cvSize(image->width , image->height) ,
IPL_DEPTH_32F , 3);
cvConvert(image, imgResize);
}
W = imgResize->width;
H = imgResize->height;
@@ -506,14 +506,14 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
maxNumCells = H / SIDE_LENGTH;
}
numStep = (int)(logf((float) maxNumCells / (5.0f)) / logf( step )) + 1;
allocFeaturePyramidObject(maps, numStep + LAMBDA);
getPathOfFeaturePyramid(imgResize, step , LAMBDA, 0,
SIDE_LENGTH / 2, maps);
getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA,
SIDE_LENGTH , maps);
if(image->depth != IPL_DEPTH_32F)
{
cvReleaseImage(&imgResize);

View File

@@ -1,14 +1,14 @@
#include "precomp.hpp"
#include "_lsvm_fft.h"
-int getEntireRes(int number, int divisor, int *entire, int *res)
-{
-*entire = number / divisor;
-*res = number % divisor;
-return FFT_OK;
-}
+// static int getEntireRes(int number, int divisor, int *entire, int *res)
+// {
+// *entire = number / divisor;
+// *res = number % divisor;
+// return FFT_OK;
+// }
-int getMultipliers(int n, int *n1, int *n2)
+static int getMultipliers(int n, int *n1, int *n2)
{
int multiplier, i;
if (n == 1)
@@ -36,13 +36,13 @@ int getMultipliers(int n, int *n1, int *n2)
// 1-dimensional FFT
//
// API
// int fft(float *x_in, float *x_out, int n, int shift);
// INPUT
// x_in - input signal
// n - number of elements for searching Fourier image
// shift - shift between input elements
// OUTPUT
// x_out - output signal (contains 2n elements in order
Re(x_in[0]), Im(x_in[0]), Re(x_in[1]), Im(x_in[1]) and etc.)
// RESULT
// Error status
@@ -107,8 +107,8 @@ int fft(float *x_in, float *x_out, int n, int shift)
// API
// int fftInverse(float *x_in, float *x_out, int n, int shift);
// INPUT
// x_in - Fourier image of 1d input signal(contains 2n elements
in order Re(x_in[0]), Im(x_in[0]),
Re(x_in[1]), Im(x_in[1]) and etc.)
// n - number of elements for searching counter FFT image
// shift - shift between input elements
@@ -180,7 +180,7 @@ int fftInverse(float *x_in, float *x_out, int n, int shift)
// numColls - number of collumns
// OUTPUT
// x_out - output signal (contains (2 * numRows * numColls) elements
in order Re(x_in[0][0]), Im(x_in[0][0]),
Re(x_in[0][1]), Im(x_in[0][1]) and etc.)
// RESULT
// Error status
@@ -193,14 +193,14 @@ int fft2d(float *x_in, float *x_out, int numRows, int numColls)
x_outTmp = (float *)malloc(sizeof(float) * (2 * size));
for (i = 0; i < numRows; i++)
{
fft(x_in + i * 2 * numColls,
x_outTmp + i * 2 * numColls,
numColls, 2);
}
for (i = 0; i < numColls; i++)
{
fft(x_outTmp + 2 * i,
x_out + 2 * i,
numRows, 2 * numColls);
}
free(x_outTmp);
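fft2d builds the 2-D transform from the 1-D routine: one pass over the rows (contiguous Re/Im pairs) and one pass over the columns (stride 2 * numColls). A hypothetical round trip, assuming fftInverse applies the usual 1/N scaling:

/* Illustrative size; buffers hold interleaved Re/Im pairs. */
enum { ROWS = 4, COLS = 8 };
float x[2 * ROWS * COLS];        /* input signal                  */
float X[2 * ROWS * COLS];        /* 2-D Fourier image             */
float back[2 * ROWS * COLS];     /* reconstruction for comparison */
/* ... fill x ... */
fft2d(x, X, ROWS, COLS);
fftInverse2d(X, back, ROWS, COLS);   /* back should approximate x */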
@@ -213,8 +213,8 @@ int fft2d(float *x_in, float *x_out, int numRows, int numColls)
// API
// int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls);
// INPUT
// x_in - Fourier image of matrix (contains (2 * numRows * numColls)
elements in order Re(x_in[0][0]), Im(x_in[0][0]),
Re(x_in[0][1]), Im(x_in[0][1]) and etc.)
// numRows - number of rows
// numColls - number of collumns
@@ -237,8 +237,8 @@ int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls)
}
for (i = 0; i < numColls; i++)
{
fftInverse(x_outTmp + 2 * i,
x_out + 2 * i,
numRows, 2 * numColls);
}
free(x_outTmp);

View File

@@ -653,7 +653,7 @@ double icvEvalHidHaarClassifier( CvHidHaarClassifier* classifier,
}
-CV_IMPL int
+static int
cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
CvPoint pt, double& stage_sum, int start_stage )
{
@@ -759,7 +759,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
sum += calc_sum(node->feature.rect[1],p_offset) * node->feature.rect[1].weight;
if( node->feature.rect[2].p0 )
sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
stage_sum += classifier->alpha[sum >= t];
#else
// ayasin - NHM perf optim. Avoid use of costly flaky jcc
@@ -771,7 +771,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
if( node->feature.rect[2].p0 )
_sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
__m128d sum = _mm_set_sd(_sum);
t = _mm_cmpgt_sd(t, sum);
stage_sum = _mm_add_sd(stage_sum, _mm_blendv_pd(b, a, t));
#endif
@@ -823,7 +823,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
HaarDetectObjects_ScaleImage_Invoker( const CvHaarClassifierCascade* _cascade,
int _stripSize, double _factor,
const Mat& _sum1, const Mat& _sqsum1, Mat* _norm1,
Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec,
std::vector<int>& _levels, std::vector<double>& _weights,
bool _outputLevels )
{
@@ -839,19 +839,19 @@ struct HaarDetectObjects_ScaleImage_Invoker
rejectLevels = _outputLevels ? &_levels : 0;
levelWeights = _outputLevels ? &_weights : 0;
}
void operator()( const BlockedRange& range ) const
{
Size winSize0 = cascade->orig_window_size;
Size winSize(cvRound(winSize0.width*factor), cvRound(winSize0.height*factor));
int y1 = range.begin()*stripSize, y2 = min(range.end()*stripSize, sum1.rows - 1 - winSize0.height);
if (y2 <= y1 || sum1.cols <= 1 + winSize0.width)
return;
Size ssz(sum1.cols - 1 - winSize0.width, y2 - y1);
int x, y, ystep = factor > 2 ? 1 : 2;
#ifdef HAVE_IPP
if( cascade->hid_cascade->ipp_stages )
{
@@ -860,7 +860,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
sqsum1.ptr<double>(y1), sqsum1.step,
norm1->ptr<float>(y1), norm1->step,
ippiSize(ssz.width, ssz.height), iequRect );
int positive = (ssz.width/ystep)*((ssz.height + ystep-1)/ystep);
if( ystep == 1 )
@@ -870,12 +870,12 @@ struct HaarDetectObjects_ScaleImage_Invoker
{
uchar* mask1row = mask1->ptr(y);
memset( mask1row, 0, ssz.width );
if( y % ystep == 0 )
for( x = 0; x < ssz.width; x += ystep )
mask1row[x] = (uchar)1;
}
for( int j = 0; j < cascade->count; j++ )
{
if( ippiApplyHaarClassifier_32f_C1R(
@@ -889,7 +889,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
if( positive <= 0 )
break;
}
if( positive > 0 )
for( y = y1; y < y2; y += ystep )
{
@@ -929,11 +929,11 @@ struct HaarDetectObjects_ScaleImage_Invoker
{
if( result > 0 )
vec->push_back(Rect(cvRound(x*factor), cvRound(y*factor),
winSize.width, winSize.height));
}
}
}
const CvHaarClassifierCascade* cascade;
int stripSize;
double factor;
@@ -943,7 +943,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
std::vector<int>* rejectLevels;
std::vector<double>* levelWeights;
};
struct HaarDetectObjects_ScaleCascade_Invoker
{
@@ -960,7 +960,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
p = _p; pq = _pq;
vec = &_vec;
}
void operator()( const BlockedRange& range ) const
{
int iy, startY = range.begin(), endY = range.end();
@@ -968,14 +968,14 @@ struct HaarDetectObjects_ScaleCascade_Invoker
const int *pq0 = pq[0], *pq1 = pq[1], *pq2 = pq[2], *pq3 = pq[3];
bool doCannyPruning = p0 != 0;
int sstep = (int)(sumstep/sizeof(p0[0]));
for( iy = startY; iy < endY; iy++ )
{
int ix, y = cvRound(iy*ystep), ixstep = 1;
for( ix = xrange.start; ix < xrange.end; ix += ixstep )
{
int x = cvRound(ix*ystep); // it should really be ystep, not ixstep
if( doCannyPruning )
{
int offset = y*sstep + x;
@@ -987,7 +987,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
continue;
}
}
int result = cvRunHaarClassifierCascade( cascade, cvPoint(x, y), 0 );
if( result > 0 )
vec->push_back(Rect(x, y, winsize.width, winsize.height));
@@ -995,7 +995,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
}
}
}
const CvHaarClassifierCascade* cascade;
double ystep;
size_t sumstep;
@@ -1005,16 +1005,16 @@ struct HaarDetectObjects_ScaleCascade_Invoker
const int** pq;
ConcurrentRectVector* vec;
};
}
CvSeq*
cvHaarDetectObjectsForROC( const CvArr* _img,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
double scaleFactor, int minNeighbors, int flags,
CvSize minSize, CvSize maxSize, bool outputRejectLevels )
{
const double GROUP_EPS = 0.2;
@@ -1044,13 +1044,13 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
if( CV_MAT_DEPTH(img->type) != CV_8U )
CV_Error( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );
if( scaleFactor <= 1 )
CV_Error( CV_StsOutOfRange, "scale factor must be > 1" );
if( findBiggestObject )
flags &= ~CV_HAAR_SCALE_IMAGE;
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->rows;
@@ -1132,7 +1132,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
#else
const int stripCount = 1;
#endif
#ifdef HAVE_IPP
if( use_ipp )
{
@@ -1141,8 +1141,8 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
}
else
#endif
cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );
cv::Mat _norm1(&norm1), _mask1(&mask1);
cv::parallel_for(cv::BlockedRange(0, stripCount),
cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
@@ -1242,22 +1242,22 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
{
rectList.resize(allCandidates.size());
std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());
groupRectangles(rectList, std::max(minNeighbors, 1), GROUP_EPS);
if( !rectList.empty() )
{
size_t i, sz = rectList.size();
cv::Rect maxRect;
for( i = 0; i < sz; i++ )
{
if( rectList[i].area() > maxRect.area() )
maxRect = rectList[i];
}
allCandidates.push_back(maxRect);
scanROI = maxRect;
int dx = cvRound(maxRect.width*GROUP_EPS);
int dy = cvRound(maxRect.height*GROUP_EPS);
@@ -1265,7 +1265,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
scanROI.y = std::max(scanROI.y - dy, 0);
scanROI.width = std::min(scanROI.width + dx*2, img->cols-1-scanROI.x);
scanROI.height = std::min(scanROI.height + dy*2, img->rows-1-scanROI.y);
double minScale = roughSearch ? 0.6 : 0.4;
minSize.width = cvRound(maxRect.width*minScale);
minSize.height = cvRound(maxRect.height*minScale);
@@ -1277,7 +1277,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
rectList.resize(allCandidates.size());
if(!allCandidates.empty())
std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());
if( minNeighbors != 0 || findBiggestObject )
{
if( outputRejectLevels )
@@ -1291,11 +1291,11 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
}
else
rweights.resize(rectList.size(),0);
if( findBiggestObject && rectList.size() )
{
CvAvgComp result_comp = {{0,0,0,0},0};
for( size_t i = 0; i < rectList.size(); i++ )
{
cv::Rect r = rectList[i];
@@ -1322,14 +1322,14 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
}
CV_IMPL CvSeq*
cvHaarDetectObjects( const CvArr* _img,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
double scaleFactor,
int minNeighbors, int flags, CvSize minSize, CvSize maxSize )
{
std::vector<int> fakeLevels;
std::vector<double> fakeWeights;
return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights,
scaleFactor, minNeighbors, flags, minSize, maxSize, false );
}
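For context, the wrapper above keeps the long-standing C entry point and forwards to cvHaarDetectObjectsForROC with empty rejection-level vectors. A typical call of the C API (cascade path and parameter values are illustrative only):

/* Hypothetical usage; 'img' is an IplImage* (or CvMat*) prepared elsewhere. */
CvHaarClassifierCascade* cascade =
    (CvHaarClassifierCascade*)cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* faces = cvHaarDetectObjects(img, cascade, storage,
                                   1.1,                  /* scaleFactor  */
                                   3,                    /* minNeighbors */
                                   CV_HAAR_SCALE_IMAGE,  /* flags        */
                                   cvSize(30, 30),       /* minSize      */
                                   cvSize(0, 0));        /* maxSize      */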
@@ -2091,7 +2091,7 @@ namespace cv
HaarClassifierCascade::HaarClassifierCascade() {}
HaarClassifierCascade::HaarClassifierCascade(const String& filename)
{ load(filename); }
bool HaarClassifierCascade::load(const String& filename)
{
cascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));

View File

@@ -3,11 +3,11 @@
#include "_lsvm_matching.h"
/*
// Transformation filter displacement from the block space
// to the space of pixels at the initial image
//
// API
// int convertPoints(int countLevel, CvPoint *points, int *levels,
CvPoint **partsDisplacement, int kPoints, int n);
// INPUT
// countLevel - the number of levels in the feature pyramid
@@ -25,10 +25,10 @@
// RESULT
// Error status
*/
int convertPoints(int /*countLevel*/, int lambda,
int initialImageLevel,
CvPoint *points, int *levels,
CvPoint **partsDisplacement, int kPoints, int n,
int maxXBorder,
int maxYBorder)
{
@@ -37,7 +37,7 @@ int convertPoints(int /*countLevel*/, int lambda,
step = powf( 2.0f, 1.0f / ((float)lambda) );
computeBorderSize(maxXBorder, maxYBorder, &bx, &by);
for (i = 0; i < kPoints; i++)
{
// scaling factor for root filter
@@ -48,10 +48,10 @@ int convertPoints(int /*countLevel*/, int lambda,
// scaling factor for part filters
scale = SIDE_LENGTH * powf(step, (float)(levels[i] - lambda - initialImageLevel));
for (j = 0; j < n; j++)
{
partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x -
2 * bx + 1) * scale);
partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y -
2 * by + 1) * scale);
}
}
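The loop above maps detections from feature-pyramid block coordinates back to pixels: step = 2^(1/lambda), and for the part filters scale = SIDE_LENGTH * step^(levels[i] - lambda - initialImageLevel). As a worked example with illustrative numbers lambda = 10, initialImageLevel = 0 and a detection at level 15: the exponent is 5, step^5 = 2^(5/10) ≈ 1.41, so the displacement is scaled by roughly 1.41 * SIDE_LENGTH.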
@@ -62,7 +62,7 @@ int convertPoints(int /*countLevel*/, int lambda,
// Elimination boxes that are outside the image boudaries
//
// API
// int clippingBoxes(int width, int height,
CvPoint *points, int kPoints);
// INPUT
// width - image wediht
@@ -72,12 +72,12 @@ int convertPoints(int /*countLevel*/, int lambda,
// kPoints - points number
// OUTPUT
// points - updated points (if coordinates less than zero then
set zero coordinate, if coordinates more than image
size then set coordinates equal image size)
// RESULT
// Error status
*/
int clippingBoxes(int width, int height,
CvPoint *points, int kPoints)
{
int i;
@@ -111,7 +111,7 @@ int clippingBoxes(int width, int height,
int maxXBorder, int maxYBorder);
// INPUT
// image - initial image
// maxXBorder - the largest root filter size (X-direction)
// maxYBorder - the largest root filter size (Y-direction)
// OUTPUT
@@ -149,54 +149,54 @@ CvLSVMFeaturePyramid* createFeaturePyramidWithBorder(IplImage *image,
// Computation of the root filter displacement and values of score function
//
// API
// int searchObject(const featurePyramid *H, const filterObject **all_F, int n,
float b,
int maxXBorder,
int maxYBorder,
CvPoint **points, int **levels, int *kPoints, float *score,
CvPoint ***partsDisplacement);
// INPUT
// image - initial image for searhing object
// all_F - the set of filters (the first element is root filter,
other elements - part filters)
// n - the number of part filters
// b - linear term of the score function
// maxXBorder - the largest root filter size (X-direction)
// maxYBorder - the largest root filter size (Y-direction)
// OUTPUT
// points - positions (x, y) of the upper-left corner
of root filter frame
// levels - levels that correspond to each position
// kPoints - number of positions
// score - value of the score function
// partsDisplacement - part filters displacement for each position
of the root filter
// RESULT
// Error status
*/
int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
int n, float b,
int maxXBorder,
int maxYBorder,
CvPoint **points, int **levels, int *kPoints, float *score,
CvPoint ***partsDisplacement)
{
int opResult;
// Matching
opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,
score, points, levels,
kPoints, partsDisplacement);
if (opResult != LATENT_SVM_OK)
{
return LATENT_SVM_SEARCH_OBJECT_FAILED;
}
// Transformation filter displacement from the block space
// to the space of pixels at the initial image
// that settles at the level number LAMBDA
convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),
(*levels), (*partsDisplacement), (*kPoints), n,
maxXBorder, maxYBorder);
return LATENT_SVM_OK;
@@ -206,7 +206,7 @@ int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F
// Computation right bottom corners coordinates of bounding boxes
//
// API
// int estimateBoxes(CvPoint *points, int *levels, int kPoints,
int sizeX, int sizeY, CvPoint **oppositePoints);
// INPUT
// points - left top corners coordinates of bounding boxes
@@ -217,7 +217,7 @@ int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F
// RESULT
// Error status
*/
-int estimateBoxes(CvPoint *points, int *levels, int kPoints,
+static int estimateBoxes(CvPoint *points, int *levels, int kPoints,
int sizeX, int sizeY, CvPoint **oppositePoints)
{
int i;
@@ -237,16 +237,16 @@ int estimateBoxes(CvPoint *points, int *levels, int kPoints,
// Computation of the root filter displacement and values of score function
//
// API
// int searchObjectThreshold(const featurePyramid *H,
const filterObject **all_F, int n,
float b,
int maxXBorder, int maxYBorder,
float scoreThreshold,
CvPoint **points, int **levels, int *kPoints,
float **score, CvPoint ***partsDisplacement);
// INPUT
// H - feature pyramid
// all_F - the set of filters (the first element is root filter,
other elements - part filters)
// n - the number of part filters
// b - linear term of the score function
@@ -254,22 +254,22 @@ int estimateBoxes(CvPoint *points, int *levels, int kPoints,
// maxYBorder - the largest root filter size (Y-direction)
// scoreThreshold - score threshold
// OUTPUT
// points - positions (x, y) of the upper-left corner
of root filter frame
// levels - levels that correspond to each position
// kPoints - number of positions
// score - values of the score function
// partsDisplacement - part filters displacement for each position
of the root filter
// RESULT
// Error status
*/
int searchObjectThreshold(const CvLSVMFeaturePyramid *H,
const CvLSVMFilterObject **all_F, int n,
float b,
int maxXBorder, int maxYBorder,
float scoreThreshold,
CvPoint **points, int **levels, int *kPoints,
float **score, CvPoint ***partsDisplacement,
int numThreads)
{
@@ -284,28 +284,28 @@ int searchObjectThreshold(const CvLSVMFeaturePyramid *H,
return opResult;
}
opResult = tbbThresholdFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,
scoreThreshold, numThreads, score,
points, levels, kPoints,
partsDisplacement);
#else
opResult = thresholdFunctionalScore(all_F, n, H, b,
maxXBorder, maxYBorder,
scoreThreshold,
score, points, levels,
kPoints, partsDisplacement);
(void)numThreads;
#endif
if (opResult != LATENT_SVM_OK)
{
return LATENT_SVM_SEARCH_OBJECT_FAILED;
}
// Transformation filter displacement from the block space
// to the space of pixels at the initial image
// that settles at the level number LAMBDA
convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),
(*levels), (*partsDisplacement), (*kPoints), n,
maxXBorder, maxYBorder);
return LATENT_SVM_OK;
@@ -350,9 +350,9 @@ int getOppositePoint(CvPoint point,
//
// API
// int showRootFilterBoxes(const IplImage *image,
const filterObject *filter,
CvPoint *points, int *levels, int kPoints,
CvScalar color, int thickness,
int line_type, int shift);
// INPUT
// image - initial image
@@ -370,22 +370,22 @@ int getOppositePoint(CvPoint point,
// Error status
*/
int showRootFilterBoxes(IplImage *image,
const CvLSVMFilterObject *filter,
CvPoint *points, int *levels, int kPoints,
CvScalar color, int thickness,
int line_type, int shift)
{
int i;
float step;
CvPoint oppositePoint;
step = powf( 2.0f, 1.0f / ((float)LAMBDA));
for (i = 0; i < kPoints; i++)
{
// Drawing rectangle for filter
getOppositePoint(points[i], filter->sizeX, filter->sizeY,
step, levels[i] - LAMBDA, &oppositePoint);
cvRectangle(image, points[i], oppositePoint,
color, thickness, line_type, shift);
}
#ifdef HAVE_OPENCV_HIGHGUI
@@ -399,9 +399,9 @@ int showRootFilterBoxes(IplImage *image,
//
// API
// int showPartFilterBoxes(const IplImage *image,
const filterObject *filter,
CvPoint *points, int *levels, int kPoints,
CvScalar color, int thickness,
int line_type, int shift);
// INPUT
// image - initial image
@@ -421,9 +421,9 @@ int showRootFilterBoxes(IplImage *image,
*/
int showPartFilterBoxes(IplImage *image,
const CvLSVMFilterObject **filters,
int n, CvPoint **partsDisplacement,
int *levels, int kPoints,
CvScalar color, int thickness,
int line_type, int shift)
{
int i, j;
@@ -437,10 +437,10 @@ int showPartFilterBoxes(IplImage *image,
for (j = 0; j < n; j++)
{
// Drawing rectangles for part filters
getOppositePoint(partsDisplacement[i][j],
filters[j + 1]->sizeX, filters[j + 1]->sizeY,
step, levels[i] - 2 * LAMBDA, &oppositePoint);
cvRectangle(image, partsDisplacement[i][j], oppositePoint,
color, thickness, line_type, shift);
}
}
@@ -454,8 +454,8 @@ int showPartFilterBoxes(IplImage *image,
// Drawing boxes
//
// API
// int showBoxes(const IplImage *img,
const CvPoint *points, const CvPoint *oppositePoints, int kPoints,
CvScalar color, int thickness, int line_type, int shift);
// INPUT
// img - initial image
@@ -470,14 +470,14 @@ int showPartFilterBoxes(IplImage *image,
// RESULT
// Error status
*/
int showBoxes(IplImage *img,
const CvPoint *points, const CvPoint *oppositePoints, int kPoints,
CvScalar color, int thickness, int line_type, int shift)
{
int i;
for (i = 0; i < kPoints; i++)
{
cvRectangle(img, points[i], oppositePoints[i],
color, thickness, line_type, shift);
}
#ifdef HAVE_OPENCV_HIGHGUI
@@ -491,10 +491,10 @@ int showBoxes(IplImage *img,
//
// API
// int getMaxFilterDims(const filterObject **filters, int kComponents,
const int *kPartFilters,
unsigned int *maxXBorder, unsigned int *maxYBorder);
// INPUT
// filters - a set of filters (at first root filter, then part filters
and etc. for all components)
// kComponents - number of components
// kPartFilters - number of part filters for each component
@@ -505,10 +505,10 @@ int showBoxes(IplImage *img,
// Error status
*/
int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
const int *kPartFilters,
unsigned int *maxXBorder, unsigned int *maxYBorder)
{
int i, componentIndex;
*maxXBorder = filters[0]->sizeX;
*maxYBorder = filters[0]->sizeY;
componentIndex = kPartFilters[0] + 1;
@@ -532,7 +532,7 @@ int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
//
// API
// int searchObjectThresholdSomeComponents(const featurePyramid *H,
const filterObject **filters,
int kComponents, const int *kPartFilters,
const float *b, float scoreThreshold,
CvPoint **points, CvPoint **oppPoints,
@@ -553,7 +553,7 @@ int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
// Error status
*/
int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
const CvLSVMFilterObject **filters,
int kComponents, const int *kPartFilters,
const float *b, float scoreThreshold,
CvPoint **points, CvPoint **oppPoints,
@@ -566,7 +566,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
CvPoint **pointsArr, **oppPointsArr, ***partsDisplacementArr;
float **scoreArr;
int *kPointsArr, **levelsArr;
// Allocation memory
pointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);
oppPointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);
@@ -574,7 +574,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
kPointsArr = (int *)malloc(sizeof(int) * kComponents);
levelsArr = (int **)malloc(sizeof(int *) * kComponents);
partsDisplacementArr = (CvPoint ***)malloc(sizeof(CvPoint **) * kComponents);
// Getting maximum filter dimensions
error = getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder);
componentIndex = 0;
@@ -585,7 +585,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
#ifdef HAVE_TBB
error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],
b[i], maxXBorder, maxYBorder, scoreThreshold,
&(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),
&(scoreArr[i]), &(partsDisplacementArr[i]), numThreads);
if (error != LATENT_SVM_OK)
{
@@ -599,17 +599,17 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
return LATENT_SVM_SEARCH_OBJECT_FAILED;
}
#else
(void)numThreads;
searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],
b[i], maxXBorder, maxYBorder, scoreThreshold,
&(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),
&(scoreArr[i]), &(partsDisplacementArr[i]));
#endif
estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i],
filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i]));
componentIndex += (kPartFilters[i] + 1);
*kPoints += kPointsArr[i];
}
*points = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));
*oppPoints = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));

View File

@@ -192,7 +192,7 @@ size_t LatentSvmDetector::getClassCount() const
return classNames.size();
}
-string extractModelName( const string& filename )
+static string extractModelName( const string& filename )
{
size_t startPos = filename.rfind('/');
if( startPos == string::npos )

View File

@@ -91,7 +91,7 @@ void Feature::write(FileStorage& fs) const
*
* \return The bounding box of all the templates in original image coordinates.
*/
-Rect cropTemplates(std::vector<Template>& templates)
+static Rect cropTemplates(std::vector<Template>& templates)
{
int min_x = std::numeric_limits<int>::max();
int min_y = std::numeric_limits<int>::max();
@@ -113,7 +113,7 @@ Rect cropTemplates(std::vector<Template>& templates)
max_y = std::max(max_y, y);
}
}
/// @todo Why require even min_x, min_y?
if (min_x % 2 == 1) --min_x;
if (min_y % 2 == 1) --min_y;
@@ -126,7 +126,7 @@ Rect cropTemplates(std::vector<Template>& templates)
templ.height = (max_y - min_y) >> templ.pyramid_level;
int offset_x = min_x >> templ.pyramid_level;
int offset_y = min_y >> templ.pyramid_level;
for (int j = 0; j < (int)templ.features.size(); ++j)
{
templ.features[j].x -= offset_x;
@@ -265,7 +265,7 @@ void hysteresisGradient(Mat& magnitude, Mat& angle,
* \param threshold Magnitude threshold. Keep only gradients whose norms are
* larger than this.
*/
-void quantizedOrientations(const Mat& src, Mat& magnitude,
+static void quantizedOrientations(const Mat& src, Mat& magnitude,
Mat& angle, float threshold)
{
magnitude.create(src.size(), CV_32F);
@@ -383,7 +383,7 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
{
if (mag_r[c] > threshold)
{
// Compute histogram of quantized bins in 3x3 patch around pixel
int histogram[8] = {0, 0, 0, 0, 0, 0, 0, 0};
uchar* patch3x3_row = &quantized_unfiltered(r-1, c-1);
@@ -391,17 +391,17 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;
patch3x3_row += quantized_unfiltered.step1();
histogram[patch3x3_row[0]]++;
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;
patch3x3_row += quantized_unfiltered.step1();
histogram[patch3x3_row[0]]++;
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;
// Find bin with the most votes from the patch
int max_votes = 0;
int index = -1;
for (int i = 0; i < 8; ++i)
@@ -413,8 +413,8 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
}
}
// Only accept the quantization if majority of pixels in the patch agree
static const int NEIGHBOR_THRESHOLD = 5;
if (max_votes >= NEIGHBOR_THRESHOLD)
quantized_angle.at<uchar>(r, c) = 1 << index;
}
@@ -630,7 +630,7 @@ static void accumBilateral(long delta, long i, long j, long * A, long * b, int t
*
* \todo Should also need camera model, or at least focal lengths? Replace distance_threshold with mask?
*/
-void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
+static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
int difference_threshold)
{
dst = Mat::zeros(src.size(), CV_8U);
@@ -923,7 +923,7 @@ void DepthNormal::write(FileStorage& fs) const
* Response maps *
\****************************************************************************************/
-void orUnaligned8u(const uchar * src, const int src_stride,
+static void orUnaligned8u(const uchar * src, const int src_stride,
uchar * dst, const int dst_stride,
const int width, const int height)
{
@@ -971,7 +971,7 @@ void orUnaligned8u(const uchar * src, const int src_stride,
__m128i* dst_ptr = reinterpret_cast<__m128i*>(dst + c);
*dst_ptr = _mm_or_si128(*dst_ptr, val);
}
}
}
#endif
for ( ; c < width; ++c)
dst[c] |= src[c];
@@ -991,7 +991,7 @@ void orUnaligned8u(const uchar * src, const int src_stride,
* \param[out] dst Destination 8-bit spread image.
* \param T Sampling step. Spread labels T/2 pixels in each direction.
*/
-void spread(const Mat& src, Mat& dst, int T)
+static void spread(const Mat& src, Mat& dst, int T)
{
// Allocate and zero-initialize spread (OR'ed) image
dst = Mat::zeros(src.size(), CV_8U);
@@ -1019,7 +1019,7 @@ CV_DECL_ALIGNED(16) static const unsigned char SIMILARITY_LUT[256] = {0, 4, 3, 4
* \param[in] src The source 8-bit spread quantized image.
* \param[out] response_maps Vector of 8 response maps, one for each bit label.
*/
-void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
+static void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
{
CV_Assert((src.rows * src.cols) % 16 == 0);
@@ -1027,16 +1027,16 @@ void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
response_maps.resize(8);
for (int i = 0; i < 8; ++i)
response_maps[i].create(src.size(), CV_8U);
Mat lsb4(src.size(), CV_8U);
Mat msb4(src.size(), CV_8U);
for (int r = 0; r < src.rows; ++r)
{
const uchar* src_r = src.ptr(r);
uchar* lsb4_r = lsb4.ptr(r);
uchar* msb4_r = msb4.ptr(r);
for (int c = 0; c < src.cols; ++c)
{
// Least significant 4 bits of spread image pixel
@@ -1100,7 +1100,7 @@ void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
* each of which is a linear memory of length (W/T)*(H/T).
* \param T Sampling step.
*/
-void linearize(const Mat& response_map, Mat& linearized, int T)
+static void linearize(const Mat& response_map, Mat& linearized, int T)
{
CV_Assert(response_map.rows % T == 0);
CV_Assert(response_map.cols % T == 0);
@@ -1109,7 +1109,7 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
int mem_width = response_map.cols / T;
int mem_height = response_map.rows / T;
linearized.create(T*T, mem_width * mem_height, CV_8U);
// Outer two for loops iterate over top-left T^2 starting pixels
int index = 0;
for (int r_start = 0; r_start < T; ++r_start)
@@ -1118,7 +1118,7 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
{
uchar* memory = linearized.ptr(index);
++index;
// Inner two loops copy every T-th pixel into the linear memory
for (int r = r_start; r < response_map.rows; r += T)
{
@@ -1134,8 +1134,8 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
* Linearized similarities *
\****************************************************************************************/
-const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
-const Feature& f, int T, int W)
+static const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
+const Feature& f, int T, int W)
{
// Retrieve the TxT grid of linear memories associated with the feature label
const Mat& memory_grid = linear_memories[f.label];
@@ -1170,7 +1170,7 @@ const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
* \param size Size (W, H) of the original input image.
* \param T Sampling step.
*/
-void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
+static void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T)
{
// 63 features or less is a special case because the max similarity per-feature is 4.
@@ -1266,7 +1266,7 @@ void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
* \param T Sampling step.
* \param center Center of the local region.
*/
-void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
+static void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T, Point center)
{
// Similar to whole-image similarity() above. This version takes a position 'center'
@@ -1342,7 +1342,7 @@ void similarityLocal(const std::vector<Mat>& linear_memories, const Template& te
}
}
-void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
+static void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
{
const uchar * end = src1 + length;
@@ -1362,7 +1362,7 @@ void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int
* \param[in] similarities Source 8-bit similarity images.
* \param[out] dst Destination 16-bit similarity image.
*/
-void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
+static void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
{
if (similarities.size() == 1)
{

View File

@@ -4,6 +4,8 @@
#include "_lsvmparser.h"
#include "_lsvm_error.h"
+namespace
+{
int isMODEL (char *str){
char stag [] = "<Model>";
char etag [] = "</Model>";
@@ -213,9 +215,9 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == ERFILTER){
//printf("</RootFilter>\n");
return;
@@ -267,7 +269,7 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -275,7 +277,7 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
tagBuf[j] = ch; j++;
}
}
}
}
}
}
@@ -303,9 +305,9 @@ void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == ETAGV){
//printf(" </V>\n");
return;
@@ -331,7 +333,7 @@ void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
//printf(" <Vy>%d</Vy>\n", model->V.y);
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -339,7 +341,7 @@ void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
@@ -366,9 +368,9 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == ETAGD){
//printf(" </D>\n");
return;
@@ -380,7 +382,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDx){
st = 0;
buf[i] = '\0';
model->fineFunction[0] = (float)atof(buf);
//printf(" <Dx>%f</Dx>\n", model->fineFunction[0]);
}
@@ -391,7 +393,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDy){
st = 0;
buf[i] = '\0';
model->fineFunction[1] = (float)atof(buf);
//printf(" <Dy>%f</Dy>\n", model->fineFunction[1]);
}
@@ -402,7 +404,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDxx){
st = 0;
buf[i] = '\0';
model->fineFunction[2] = (float)atof(buf);
//printf(" <Dxx>%f</Dxx>\n", model->fineFunction[2]);
}
@@ -413,13 +415,13 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDyy){
st = 0;
buf[i] = '\0';
model->fineFunction[3] = (float)atof(buf);
//printf(" <Dyy>%f</Dyy>\n", model->fineFunction[3]);
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -427,7 +429,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
tagBuf[j] = ch; j++;
}
}
}
}
}
}
@@ -465,9 +467,9 @@ void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == EPFILTER){
//printf("</PathFilter>\n");
return;
@@ -515,7 +517,7 @@ void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
//printf("WEIGHTS OK\n");
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -523,7 +525,7 @@ void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last, int *max){
@@ -551,9 +553,9 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == EPFILTERs){
//printf("</PartFilters>\n");
return;
@@ -564,7 +566,7 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
N_path++;
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -572,7 +574,7 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model, float *b, int *last, int *max){
@@ -599,9 +601,9 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == ECOMP){
(*N_comp) ++;
return;
@@ -614,7 +616,7 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
parserPFilterS (xmlf, p, model, last, max);
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -622,7 +624,7 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){
@@ -637,9 +639,9 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
int i,j, ii = 0;
char buf[1024];
char tagBuf[1024];
//printf("<Model>\n");
i = 0;
j = 0;
st = 0;
@@ -654,9 +656,9 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
tagVal = getTeg(tagBuf);
if(tagVal == EMODEL){
//printf("</Model>\n");
for(ii = 0; ii <= *last; ii++){
@@ -671,7 +673,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
bb = (float *)malloc(sizeof(float));
* comp = cmp;
* b = bb;
* count = N_comp + 1;
} else {
cmp = (int *)malloc(sizeof(int) * (N_comp + 1));
bb = (float *)malloc(sizeof(float) * (N_comp + 1));
@@ -683,7 +685,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
free(* b );
* comp = cmp;
* b = bb;
* count = N_comp + 1;
}
parserComp(xmlf, p, &N_comp, model, &((*b)[N_comp]), last, max);
cmp[N_comp - 1] = *last;
@@ -709,7 +711,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
//printf("<ScoreThreshold>%f</ScoreThreshold>\n", score);
}
tag = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -717,10 +719,12 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
tagBuf[j] = ch; j++;
}
}
}
}
}
}
+}//namespace
int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){
int st = 0;
int tag;
@@ -739,7 +743,7 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
xmlf = fopen(filename, "rb");
if(xmlf == NULL)
return LSVM_PARSER_FILE_NOT_FOUND;
i = 0;
j = 0;
st = 0;
@@ -766,9 +770,9 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
tagBuf[j] = ch; j++;
}
}
}
}
}
fclose(xmlf);
return LATENT_SVM_OK;
}
@@ -776,24 +780,24 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
int loadModel(
const char *modelPath,
CvLSVMFilterObject ***filters,
int *kFilters,
int *kComponents,
int **kPartFilters,
float **b,
float *scoreThreshold){
int last;
int max;
int *comp;
int count;
int i;
int err;
float score;
//printf("start_parse\n\n");
err = LSVMparser(modelPath, filters, &last, &max, &comp, b, &count, &score);
if(err != LATENT_SVM_OK){
return err;
}
(*kFilters) = last + 1;
(*kComponents) = count;
(*scoreThreshold) = (float) score;
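A hypothetical call to the loader whose tail is shown above ("model.xml" is a placeholder path):

CvLSVMFilterObject **filters = 0;
int kFilters = 0, kComponents = 0;
int *kPartFilters = 0;
float *b = 0;
float scoreThreshold = 0.f;
int err = loadModel("model.xml", &filters, &kFilters, &kComponents,
                    &kPartFilters, &b, &scoreThreshold);
if (err != LATENT_SVM_OK)
{
    /* handle LSVM_PARSER_FILE_NOT_FOUND and other parser errors */
}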

View File

@@ -547,7 +547,7 @@ int addNullableBorder(CvLSVMFeatureMap *map, int bx, int by)
return LATENT_SVM_OK;
}
-CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,
+static CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,
int maxXBorder, int maxYBorder)
{
int bx, by;
@@ -1366,6 +1366,7 @@ int thresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
return LATENT_SVM_OK;
}
+#ifdef HAVE_TBB
/*
// Creating schedule of pyramid levels processing
//
@@ -1390,7 +1391,7 @@ int thresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
// RESULT
// Error status
*/
-int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
+static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
const int n, const int bx, const int by,
const int threadsNum, int *kLevels, int **processingLevels)
{
@@ -1521,7 +1522,6 @@ int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all
return LATENT_SVM_OK;
}
-#ifdef HAVE_TBB
/*
// int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
const CvLSVMFeaturePyramid *H,
@@ -1679,7 +1679,7 @@ int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
}
#endif
-void sort(int n, const float* x, int* indices)
+static void sort(int n, const float* x, int* indices)
{
int i, j;
for (i = 0; i < n; i++)

View File

@@ -43,11 +43,11 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
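The same -Wundef theme as the first hunk, but for a version check that still needs a numeric comparison: on GCC, _MSC_VER is never defined, so the bare #if _MSC_VER >= 1200 would evaluate an undefined macro in every file including this header; short-circuiting with defined keeps the MSVC 6.0+ check (1200 is the _MSC_VER value of Visual C++ 6) while staying silent for other compilers:

/* Illustrative, matching the idiom above. */
#if defined _MSC_VER && _MSC_VER >= 1200
  /* MSVC-only: disable warnings that would otherwise be noisy */
#endif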