Merge branch 2.4
@@ -1935,20 +1935,14 @@ icvLoadCascadeCART( const char** input_cascade, int n, CvSize orig_window_size )
 CV_IMPL CvHaarClassifierCascade*
 cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
 {
-    const char** input_cascade = 0;
-    CvHaarClassifierCascade *cascade = 0;
-
-    int i, n;
-    const char* slash;
-    char name[_MAX_PATH];
-    int size = 0;
-    char* ptr = 0;
-
     if( !directory )
         CV_Error( CV_StsNullPtr, "Null path is passed" );
 
-    n = (int)strlen(directory)-1;
-    slash = directory[n] == '\\' || directory[n] == '/' ? "" : "/";
+    char name[_MAX_PATH];
+
+    int n = (int)strlen(directory)-1;
+    const char* slash = directory[n] == '\\' || directory[n] == '/' ? "" : "/";
+    int size = 0;
 
     /* try to read the classifier from directory */
     for( n = 0; ; n++ )
@@ -1969,10 +1963,14 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
         CV_Error( CV_StsBadArg, "Invalid path" );
 
     size += (n+1)*sizeof(char*);
-    input_cascade = (const char**)cvAlloc( size );
-    ptr = (char*)(input_cascade + n + 1);
+    const char** input_cascade = (const char**)cvAlloc( size );
+
+    if( !input_cascade )
+        CV_Error( CV_StsNoMem, "Could not allocate memory for input_cascade" );
+
+    char* ptr = (char*)(input_cascade + n + 1);
 
-    for( i = 0; i < n; i++ )
+    for( int i = 0; i < n; i++ )
     {
         sprintf( name, "%s/%d/AdaBoostCARTHaarClassifier.txt", directory, i );
         FILE* f = fopen( name, "rb" );
@@ -1990,7 +1988,8 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
     }
 
     input_cascade[n] = 0;
-    cascade = icvLoadCascadeCART( input_cascade, n, orig_window_size );
+
+    CvHaarClassifierCascade* cascade = icvLoadCascadeCART( input_cascade, n, orig_window_size );
 
     if( input_cascade )
        cvFree( &input_cascade );
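For context on the hunks above, here is a minimal sketch of how the refactored directory-based loader is typically exercised. The expected layout ("<dir>/0/AdaBoostCARTHaarClassifier.txt", "<dir>/1/...", ...) follows the sprintf() visible in the diff; the include path, the sample directory name, and the 24x24 window size are assumptions, not taken from this commit. Note that an invalid or null path raises CV_Error (as the hunks show) rather than returning NULL.

/* Sketch only: drive the directory-based CART cascade loader touched above.
 * Header path, directory name and window size are placeholders/assumptions. */
#include <stdio.h>
#include <opencv2/objdetect/objdetect.hpp>

int main( void )
{
    const char* dir = "./my_cart_cascade";   /* hypothetical directory with numbered stage subdirs */

    /* orig_window_size must match the window the cascade was trained with;
       24x24 is only a placeholder here. */
    CvHaarClassifierCascade* cascade =
        cvLoadHaarClassifierCascade( dir, cvSize( 24, 24 ) );

    if( !cascade )
    {
        fprintf( stderr, "failed to load cascade from %s\n", dir );
        return 1;
    }

    printf( "loaded cascade with %d stages\n", cascade->count );
    cvReleaseHaarClassifierCascade( &cascade );
    return 0;
}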
@@ -1396,7 +1396,7 @@ static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObjec
                           const int n, const int bx, const int by,
                           const int threadsNum, int *kLevels, int **processingLevels)
 {
-    int rootFilterDim, sumPartFiltersDim, i, numLevels, dbx, dby, numDotProducts;
+    int rootFilterDim, sumPartFiltersDim, i, numLevels, dbx, dby;
     int j, minValue, argMin, lambda, maxValue, k;
     int *dotProd, *weights, *disp;
     if (H == NULL || all_F == NULL)
@@ -1420,8 +1420,6 @@ static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObjec
     // of feature map with part filter
     dbx = 2 * bx;
     dby = 2 * by;
-    // Total number of dot products for all levels
-    numDotProducts = 0;
     lambda = LAMBDA;
     for (i = 0; i < numLevels; i++)
     {
@@ -1429,7 +1427,6 @@ static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObjec
             H->pyramid[i + lambda]->sizeY * rootFilterDim +
             (H->pyramid[i]->sizeX + dbx) *
             (H->pyramid[i]->sizeY + dby) * sumPartFiltersDim;
-        numDotProducts += dotProd[i];
     }
     // Allocation memory for saving dot product number performed by each thread
     weights = (int *)malloc(sizeof(int) * threadsNum);
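The latent-SVM hunks drop the numDotProducts accumulator, which is evidently unused elsewhere in the function (removing it would not compile otherwise); the per-level dotProd[i] cost estimates that feed the thread schedule are kept. As an illustration only, and not the literal createSchedule() algorithm, a greedy balancer over such per-level costs could look like the following; the function and variable names (greedyAssign, owner, load) are made up.

/* Illustration only: greedily hand each pyramid level to the currently
 * least-loaded thread, using per-level costs such as the dotProd[] values
 * computed in the hunk above. */
#include <stdio.h>
#include <stdlib.h>

static void greedyAssign( const int *cost, int numLevels, int threadsNum, int *owner )
{
    int *load = (int *)calloc( threadsNum, sizeof(int) );
    for( int i = 0; i < numLevels; i++ )
    {
        int argMin = 0;                        /* thread with the smallest load so far */
        for( int t = 1; t < threadsNum; t++ )
            if( load[t] < load[argMin] )
                argMin = t;
        owner[i] = argMin;
        load[argMin] += cost[i];
    }
    free( load );
}

int main( void )
{
    int cost[6] = { 120, 80, 60, 40, 30, 20 };  /* e.g. dot-product counts per level */
    int owner[6];
    greedyAssign( cost, 6, 2, owner );
    for( int i = 0; i < 6; i++ )
        printf( "level %d -> thread %d\n", i, owner[i] );
    return 0;
}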