replaced alloca() (a.k.a. cvStackAlloc) with AutoBuffer or vector() everywhere. cvStackAlloc() is still defined, but alloca() is no longer needed to compile and run OpenCV (fixes #889 and maybe some others)
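The change applies one pattern throughout the ML module: every place that used to choose between cvStackAlloc() (an alloca() wrapper) for small buffers and cvAlloc()/cvFree() for large ones now declares a single cv::AutoBuffer, which keeps small allocations in an internal (typically stack-resident) array, falls back to the heap otherwise, and releases the memory automatically. Below is a minimal sketch of the before/after, assuming the 2.x-era cv::AutoBuffer interface the hunks rely on (construction from an element count, pointer-style element access); sum_scaled() is an invented helper, not code from this repository.

    // Illustrative sketch, not code from this repository; sum_scaled() is invented.
    #include <opencv2/core/core.hpp>

    static double sum_scaled( const double* src, int n, double scale )
    {
        // Before this commit the equivalent code looked roughly like:
        //     double* buf;
        //     if( n*sizeof(double) <= CV_MAX_LOCAL_SIZE )
        //         buf = (double*)cvStackAlloc( n*sizeof(buf[0]) );  // alloca()-based
        //     else
        //         buf = (double*)cvAlloc( n*sizeof(buf[0]) );       // must be cvFree()'d
        //
        // After: one RAII object; no explicit free, no alloca().
        cv::AutoBuffer<double> buf(n);
        double s = 0;
        for( int i = 0; i < n; i++ )
        {
            buf[i] = src[i]*scale;   // element access through the buffer
            s += buf[i];
        }
        return s;                    // storage released when buf goes out of scope
    }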
@@ -265,30 +265,25 @@ void CvANN_MLP::create( const CvMat* _layer_sizes, int _activ_func,
 float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
 {
-    CV_FUNCNAME( "CvANN_MLP::predict" );
-
-    __BEGIN__;
-
-    double* buf;
     int i, j, n, dn = 0, l_count, dn0, buf_sz, min_buf_sz;
 
     if( !layer_sizes )
-        CV_ERROR( CV_StsError, "The network has not been initialized" );
+        CV_Error( CV_StsError, "The network has not been initialized" );
 
     if( !CV_IS_MAT(_inputs) || !CV_IS_MAT(_outputs) ||
         !CV_ARE_TYPES_EQ(_inputs,_outputs) ||
         (CV_MAT_TYPE(_inputs->type) != CV_32FC1 &&
         CV_MAT_TYPE(_inputs->type) != CV_64FC1) ||
        _inputs->rows != _outputs->rows )
-        CV_ERROR( CV_StsBadArg, "Both input and output must be floating-point matrices "
+        CV_Error( CV_StsBadArg, "Both input and output must be floating-point matrices "
                                 "of the same type and have the same number of rows" );
 
     if( _inputs->cols != layer_sizes->data.i[0] )
-        CV_ERROR( CV_StsBadSize, "input matrix must have the same number of columns as "
+        CV_Error( CV_StsBadSize, "input matrix must have the same number of columns as "
                                  "the number of neurons in the input layer" );
 
     if( _outputs->cols != layer_sizes->data.i[layer_sizes->cols - 1] )
-        CV_ERROR( CV_StsBadSize, "output matrix must have the same number of columns as "
+        CV_Error( CV_StsBadSize, "output matrix must have the same number of columns as "
                                  "the number of neurons in the output layer" );
     n = dn0 = _inputs->rows;
     min_buf_sz = 2*max_count;
@@ -301,7 +296,7 @@ float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
         buf_sz = dn0*min_buf_sz;
     }
 
-    buf = (double*)cvStackAlloc( buf_sz*sizeof(buf[0]) );
+    cv::AutoBuffer<double> buf(buf_sz);
     l_count = layer_sizes->cols;
 
     for( i = 0; i < n; i += dn )
@@ -310,7 +305,7 @@ float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
         dn = MIN( dn0, n - i );
 
         cvGetRows( _inputs, layer_in, i, i + dn );
-        cvInitMatHeader( layer_out, dn, layer_in->cols, CV_64F, buf );
+        cvInitMatHeader( layer_out, dn, layer_in->cols, CV_64F, &buf[0] );
 
         scale_input( layer_in, layer_out );
         CV_SWAP( layer_in, layer_out, temp );
@@ -332,8 +327,6 @@ float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
         scale_output( layer_in, layer_out );
     }
 
-    __END__;
-
     return 0.f;
 }
@@ -1592,32 +1592,22 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
                   CvMat* weak_responses, CvSlice slice,
                   bool raw_mode, bool return_sum ) const
 {
-    float* buf = 0;
-    bool allocated = false;
     float value = -FLT_MAX;
 
-    CV_FUNCNAME( "CvBoost::predict" );
-
-    __BEGIN__;
-
-    int i, weak_count, var_count;
     CvMat sample, missing;
     CvSeqReader reader;
     double sum = 0;
     int wstep = 0;
-    const int* vtype;
-    const int* cmap;
-    const int* cofs;
     const float* sample_data;
 
     if( !weak )
-        CV_ERROR( CV_StsError, "The boosted tree ensemble has not been trained yet" );
+        CV_Error( CV_StsError, "The boosted tree ensemble has not been trained yet" );
 
     if( !CV_IS_MAT(_sample) || CV_MAT_TYPE(_sample->type) != CV_32FC1 ||
         (_sample->cols != 1 && _sample->rows != 1) ||
         (_sample->cols + _sample->rows - 1 != data->var_all && !raw_mode) ||
         (active_vars && _sample->cols + _sample->rows - 1 != active_vars->cols && raw_mode) )
-        CV_ERROR( CV_StsBadArg,
+        CV_Error( CV_StsBadArg,
             "the input sample must be 1d floating-point vector with the same "
             "number of elements as the total number of variables or "
             "as the number of variables used for training" );
@@ -1626,11 +1616,11 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
     {
         if( !CV_IS_MAT(_missing) || !CV_IS_MASK_ARR(_missing) ||
             !CV_ARE_SIZES_EQ(_missing, _sample) )
-            CV_ERROR( CV_StsBadArg,
+            CV_Error( CV_StsBadArg,
                 "the missing data mask must be 8-bit vector of the same size as input sample" );
     }
 
-    weak_count = cvSliceLength( slice, weak );
+    int i, weak_count = cvSliceLength( slice, weak );
     if( weak_count >= weak->total )
     {
         weak_count = weak->total;
@@ -1643,21 +1633,20 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
             CV_MAT_TYPE(weak_responses->type) != CV_32FC1 ||
             (weak_responses->cols != 1 && weak_responses->rows != 1) ||
             weak_responses->cols + weak_responses->rows - 1 != weak_count )
-            CV_ERROR( CV_StsBadArg,
+            CV_Error( CV_StsBadArg,
                 "The output matrix of weak classifier responses must be valid "
                 "floating-point vector of the same number of components as the length of input slice" );
         wstep = CV_IS_MAT_CONT(weak_responses->type) ? 1 : weak_responses->step/sizeof(float);
     }
 
-    var_count = active_vars->cols;
-    vtype = data->var_type->data.i;
-    cmap = data->cat_map->data.i;
-    cofs = data->cat_ofs->data.i;
+    int var_count = active_vars->cols;
+    const int* vtype = data->var_type->data.i;
+    const int* cmap = data->cat_map->data.i;
+    const int* cofs = data->cat_ofs->data.i;
 
     // if need, preprocess the input vector
     if( !raw_mode )
     {
-        int bufsize;
         int step, mstep = 0;
         const float* src_sample;
         const uchar* src_mask = 0;
@@ -1667,16 +1656,9 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
         const int* vidx_abs = active_vars_abs->data.i;
         bool have_mask = _missing != 0;
 
-        bufsize = var_count*(sizeof(float) + sizeof(uchar));
-        if( bufsize <= CV_MAX_LOCAL_SIZE )
-            buf = (float*)cvStackAlloc( bufsize );
-        else
-        {
-            CV_CALL( buf = (float*)cvAlloc( bufsize ));
-            allocated = true;
-        }
-        dst_sample = buf;
-        dst_mask = (uchar*)(buf + var_count);
+        cv::AutoBuffer<float> buf(var_count + (var_count+3)/4);
+        dst_sample = &buf[0];
+        dst_mask = (uchar*)&buf[var_count];
 
         src_sample = _sample->data.fl;
         step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(src_sample[0]);
@@ -1700,7 +1682,7 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
                     c = a;
                     int ival = cvRound(val);
                     if ( (ival != val) && (!m) )
-                        CV_ERROR( CV_StsBadArg,
+                        CV_Error( CV_StsBadArg,
                             "one of input categorical variable is not an integer" );
 
                     while( a < b )
@@ -1741,7 +1723,7 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
     else
     {
         if( !CV_IS_MAT_CONT(_sample->type & (_missing ? _missing->type : -1)) )
-            CV_ERROR( CV_StsBadArg, "In raw mode the input vectors must be continuous" );
+            CV_Error( CV_StsBadArg, "In raw mode the input vectors must be continuous" );
     }
 
     cvStartReadSeq( weak, &reader );
@@ -1830,11 +1812,6 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
         value = (float)cmap[cofs[vtype[data->var_count]] + cls_idx];
     }
 
-    __END__;
-
-    if( allocated )
-        cvFree( &buf );
-
     return value;
 }
@@ -209,39 +209,23 @@ float
 CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
 {
     float* sample_data = 0;
-    void* buffer = 0;
-    int allocated_buffer = 0;
     int cls = 0;
 
-    CV_FUNCNAME( "CvEM::predict" );
-    __BEGIN__;
-
-    int i, k, dims;
-    int nclusters;
     int cov_mat_type = params.cov_mat_type;
     double opt = FLT_MAX;
-    size_t size;
-    CvMat diff, expo;
 
-    dims = means->cols;
-    nclusters = params.nclusters;
+    int i, dims = means->cols;
+    int nclusters = params.nclusters;
 
-    CV_CALL( cvPreparePredictData( _sample, dims, 0, params.nclusters, _probs, &sample_data ));
+    cvPreparePredictData( _sample, dims, 0, params.nclusters, _probs, &sample_data );
 
     // allocate memory and initializing headers for calculating
-    size = sizeof(double) * (nclusters + dims);
-    if( size <= CV_MAX_LOCAL_SIZE )
-        buffer = cvStackAlloc( size );
-    else
-    {
-        CV_CALL( buffer = cvAlloc( size ));
-        allocated_buffer = 1;
-    }
-    expo = cvMat( 1, nclusters, CV_64FC1, buffer );
-    diff = cvMat( 1, dims, CV_64FC1, (double*)buffer + nclusters );
+    cv::AutoBuffer<double> buffer(nclusters + dims);
+    CvMat expo = cvMat(1, nclusters, CV_64F, &buffer[0] );
+    CvMat diff = cvMat(1, dims, CV_64F, &buffer[nclusters] );
 
     // calculate the probabilities
-    for( k = 0; k < nclusters; k++ )
+    for( int k = 0; k < nclusters; k++ )
     {
         const double* mean_k = (const double*)(means->data.ptr + means->step*k);
         const double* w = (const double*)(inv_eigen_values->data.ptr + inv_eigen_values->step*k);
@@ -281,19 +265,15 @@ CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
 
     if( _probs )
     {
-        CV_CALL( cvConvertScale( &expo, &expo, -0.5 ));
-        CV_CALL( cvExp( &expo, &expo ));
+        cvConvertScale( &expo, &expo, -0.5 );
+        cvExp( &expo, &expo );
         if( _probs->cols == 1 )
-            CV_CALL( cvReshape( &expo, &expo, 0, nclusters ));
-        CV_CALL( cvConvertScale( &expo, _probs, 1./cvSum( &expo ).val[0] ));
+            cvReshape( &expo, &expo, 0, nclusters );
+        cvConvertScale( &expo, _probs, 1./cvSum( &expo ).val[0] );
     }
 
-    __END__;
-
     if( sample_data != _sample->data.fl )
         cvFree( &sample_data );
-    if( allocated_buffer )
-        cvFree( &buffer );
 
     return (float)cls;
 }
@@ -903,8 +903,8 @@ CvDTreeSplit* CvForestERTree::find_split_ord_class( CvDTreeNode* node, int vi, f
     // calculate Gini index
     if ( !priors )
     {
-        int* lc = (int*)cvStackAlloc(m*sizeof(lc[0]));
-        int* rc = (int*)cvStackAlloc(m*sizeof(rc[0]));
+        cv::AutoBuffer<int> lrc(m*2);
+        int *lc = lrc, *rc = lc + m;
         int L = 0, R = 0;
 
         // init arrays of class instance counters on both sides of the split
@@ -939,8 +939,8 @@ CvDTreeSplit* CvForestERTree::find_split_ord_class( CvDTreeNode* node, int vi, f
     }
     else
     {
-        double* lc = (double*)cvStackAlloc(m*sizeof(lc[0]));
-        double* rc = (double*)cvStackAlloc(m*sizeof(rc[0]));
+        cv::AutoBuffer<double> lrc(m*2);
+        double *lc = lrc, *rc = lc + m;
         double L = 0, R = 0;
 
         // init arrays of class instance counters on both sides of the split
@@ -1013,7 +1013,7 @@ CvDTreeSplit* CvForestERTree::find_split_cat_class( CvDTreeNode* node, int vi, f
     const double* priors = data->have_priors ? data->priors_mult->data.db : 0;
 
     // create random class mask
-    int *valid_cidx = (int*)cvStackAlloc(vm*sizeof(valid_cidx[0]));
+    cv::AutoBuffer<int> valid_cidx(vm);
     for (int i = 0; i < vm; i++)
    {
         valid_cidx[i] = -1;
@@ -1059,8 +1059,8 @@ CvDTreeSplit* CvForestERTree::find_split_cat_class( CvDTreeNode* node, int vi, f
     double lbest_val = 0, rbest_val = 0;
     if( !priors )
     {
-        int* lc = (int*)cvStackAlloc(cm*sizeof(lc[0]));
-        int* rc = (int*)cvStackAlloc(cm*sizeof(rc[0]));
+        cv::AutoBuffer<int> lrc(cm*2);
+        int *lc = lrc, *rc = lc + cm;
         int L = 0, R = 0;
         // init arrays of class instance counters on both sides of the split
         for(int i = 0; i < cm; i++ )
@@ -1096,8 +1096,8 @@ CvDTreeSplit* CvForestERTree::find_split_cat_class( CvDTreeNode* node, int vi, f
     }
     else
     {
-        double* lc = (double*)cvStackAlloc(cm*sizeof(lc[0]));
-        double* rc = (double*)cvStackAlloc(cm*sizeof(rc[0]));
+        cv::AutoBuffer<int> lrc(cm*2);
+        int *lc = lrc, *rc = lc + cm;
         double L = 0, R = 0;
         // init arrays of class instance counters on both sides of the split
         for(int i = 0; i < cm; i++ )
@@ -1333,7 +1333,7 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
     CvDTreeNode *left = 0, *right = 0;
     int new_buf_idx = data->get_child_buf_idx( node );
     CvMat* buf = data->buf;
-    int* temp_buf = (int*)cvStackAlloc(n*sizeof(temp_buf[0]));
+    cv::AutoBuffer<int> temp_buf(n);
 
     complete_node_dir(node);
@@ -306,43 +306,37 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
     const float** _neighbors, CvMat* _neighbor_responses, CvMat* _dist ) const
 {
     float result = 0.f;
-    bool local_alloc = false;
-    float* buf = 0;
     const int max_blk_count = 128, max_buf_sz = 1 << 12;
 
-    CV_FUNCNAME( "CvKNearest::find_nearest" );
-
-    __BEGIN__;
-
     int i, count, count_scale, blk_count0, blk_count = 0, buf_sz, k1;
 
     if( !samples )
-        CV_ERROR( CV_StsError, "The search tree must be constructed first using train method" );
+        CV_Error( CV_StsError, "The search tree must be constructed first using train method" );
 
     if( !CV_IS_MAT(_samples) ||
         CV_MAT_TYPE(_samples->type) != CV_32FC1 ||
         _samples->cols != var_count )
-        CV_ERROR( CV_StsBadArg, "Input samples must be floating-point matrix (<num_samples>x<var_count>)" );
+        CV_Error( CV_StsBadArg, "Input samples must be floating-point matrix (<num_samples>x<var_count>)" );
 
     if( _results && (!CV_IS_MAT(_results) ||
         (_results->cols != 1 && _results->rows != 1) ||
         _results->cols + _results->rows - 1 != _samples->rows) )
-        CV_ERROR( CV_StsBadArg,
+        CV_Error( CV_StsBadArg,
             "The results must be 1d vector containing as much elements as the number of samples" );
 
     if( _results && CV_MAT_TYPE(_results->type) != CV_32FC1 &&
         (CV_MAT_TYPE(_results->type) != CV_32SC1 || regression))
-        CV_ERROR( CV_StsUnsupportedFormat,
+        CV_Error( CV_StsUnsupportedFormat,
            "The results must be floating-point or integer (in case of classification) vector" );
 
     if( k < 1 || k > max_k )
-        CV_ERROR( CV_StsOutOfRange, "k must be within 1..max_k range" );
+        CV_Error( CV_StsOutOfRange, "k must be within 1..max_k range" );
 
     if( _neighbor_responses )
     {
         if( !CV_IS_MAT(_neighbor_responses) || CV_MAT_TYPE(_neighbor_responses->type) != CV_32FC1 ||
             _neighbor_responses->rows != _samples->rows || _neighbor_responses->cols != k )
-            CV_ERROR( CV_StsBadArg,
+            CV_Error( CV_StsBadArg,
                 "The neighbor responses (if present) must be floating-point matrix of <num_samples> x <k> size" );
     }
 
@@ -350,34 +344,28 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
     {
         if( !CV_IS_MAT(_dist) || CV_MAT_TYPE(_dist->type) != CV_32FC1 ||
             _dist->rows != _samples->rows || _dist->cols != k )
-            CV_ERROR( CV_StsBadArg,
+            CV_Error( CV_StsBadArg,
                 "The distances from the neighbors (if present) must be floating-point matrix of <num_samples> x <k> size" );
     }
 
     count = _samples->rows;
-    count_scale = k*2*sizeof(float);
+    count_scale = k*2;
     blk_count0 = MIN( count, max_blk_count );
     buf_sz = MIN( blk_count0 * count_scale, max_buf_sz );
     blk_count0 = MAX( buf_sz/count_scale, 1 );
     blk_count0 += blk_count0 % 2;
     blk_count0 = MIN( blk_count0, count );
-    buf_sz = blk_count0 * count_scale + k*sizeof(float);
+    buf_sz = blk_count0 * count_scale + k;
     k1 = get_sample_count();
     k1 = MIN( k1, k );
 
-    if( buf_sz <= CV_MAX_LOCAL_SIZE )
-    {
-        buf = (float*)cvStackAlloc( buf_sz );
-        local_alloc = true;
-    }
-    else
-        CV_CALL( buf = (float*)cvAlloc( buf_sz ));
+    cv::AutoBuffer<float> buf(buf_sz);
 
     for( i = 0; i < count; i += blk_count )
     {
         blk_count = MIN( count - i, blk_count0 );
-        float* neighbor_responses = buf;
-        float* dist = buf + blk_count*k;
+        float* neighbor_responses = &buf[0];
+        float* dist = neighbor_responses + blk_count*k;
         Cv32suf* sort_buf = (Cv32suf*)(dist + blk_count*k);
 
         find_neighbors_direct( _samples, k, i, i + blk_count,
@@ -389,11 +377,6 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
             result = r;
     }
 
-    __END__;
-
-    if( !local_alloc )
-        cvFree( &buf );
-
     return result;
 }
@@ -281,28 +281,20 @@ bool CvNormalBayesClassifier::train( const CvMat* _train_data, const CvMat* _res
 float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) const
 {
     float value = 0;
-    void* buffer = 0;
-    int allocated_buffer = 0;
 
-    CV_FUNCNAME( "CvNormalBayesClassifier::predict" );
-
-    __BEGIN__;
-
-    int i, j, k, cls = -1, _var_count, nclasses;
+    int i, j, cls = -1;
     double opt = FLT_MAX;
-    CvMat diff;
-    int rtype = 0, rstep = 0, size;
-    const int* vidx = 0;
-
-    nclasses = cls_labels->cols;
-    _var_count = avg[0]->cols;
+    int rtype = 0, rstep = 0;
+
+    int nclasses = cls_labels->cols;
+    int _var_count = avg[0]->cols;
 
     if( !CV_IS_MAT(samples) || CV_MAT_TYPE(samples->type) != CV_32FC1 || samples->cols != var_all )
-        CV_ERROR( CV_StsBadArg,
+        CV_Error( CV_StsBadArg,
             "The input samples must be 32f matrix with the number of columns = var_all" );
 
     if( samples->rows > 1 && !results )
-        CV_ERROR( CV_StsNullPtr,
+        CV_Error( CV_StsNullPtr,
             "When the number of input samples is >1, the output vector of results must be passed" );
 
     if( results )
@@ -311,29 +303,20 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
             CV_MAT_TYPE(results->type) != CV_32SC1) ||
             (results->cols != 1 && results->rows != 1) ||
             results->cols + results->rows - 1 != samples->rows )
-            CV_ERROR( CV_StsBadArg, "The output array must be integer or floating-point vector "
+            CV_Error( CV_StsBadArg, "The output array must be integer or floating-point vector "
                 "with the number of elements = number of rows in the input matrix" );
 
         rtype = CV_MAT_TYPE(results->type);
         rstep = CV_IS_MAT_CONT(results->type) ? 1 : results->step/CV_ELEM_SIZE(rtype);
     }
 
-    if( var_idx )
-        vidx = var_idx->data.i;
+    const int* vidx = var_idx ? var_idx->data.i : 0;
 
     // allocate memory and initializing headers for calculating
-    size = sizeof(double) * (nclasses + var_count);
-    if( size <= CV_MAX_LOCAL_SIZE )
-        buffer = cvStackAlloc( size );
-    else
-    {
-        CV_CALL( buffer = cvAlloc( size ));
-        allocated_buffer = 1;
-    }
+    cv::AutoBuffer<double> buffer(nclasses + var_count);
+    CvMat diff = cvMat( 1, var_count, CV_64FC1, &buffer[0] );
 
-    diff = cvMat( 1, var_count, CV_64FC1, buffer );
-
-    for( k = 0; k < samples->rows; k++ )
+    for( int k = 0; k < samples->rows; k++ )
     {
         int ival;
 
@@ -349,7 +332,7 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
             for( j = 0; j < _var_count; j++ )
                 diff.data.db[j] = avg_data[j] - x[vidx ? vidx[j] : j];
 
-            CV_CALL(cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T ));
+            cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T );
             for( j = 0; j < _var_count; j++ )
             {
                 double d = diff.data.db[j];
@@ -385,11 +368,6 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
         }*/
     }
 
-    __END__;
-
-    if( allocated_buffer )
-        cvFree( &buffer );
-
     return value;
 }
@@ -2036,7 +2036,7 @@ void CvDTree::cluster_categories( const int* vectors, int n, int m,
     // TODO: consider adding priors (class weights) and sample weights to the clustering algorithm
     int iters = 0, max_iters = 100;
     int i, j, idx;
-    double* buf = (double*)cvStackAlloc( (n + k)*sizeof(buf[0]) );
+    cv::AutoBuffer<double> buf(n + k);
     double *v_weights = buf, *c_weights = buf + n;
     bool modified = true;
     RNG* r = data->rng;
@@ -3558,40 +3558,30 @@ void CvDTree::free_tree()
 CvDTreeNode* CvDTree::predict( const CvMat* _sample,
     const CvMat* _missing, bool preprocessed_input ) const
 {
-    CvDTreeNode* result = 0;
-    int* catbuf = 0;
+    cv::AutoBuffer<int> catbuf;
 
-    CV_FUNCNAME( "CvDTree::predict" );
-
-    __BEGIN__;
-
-    int i, step, mstep = 0;
-    const float* sample;
+    int i, mstep = 0;
     const uchar* m = 0;
     CvDTreeNode* node = root;
-    const int* vtype;
-    const int* vidx;
-    const int* cmap;
-    const int* cofs;
 
     if( !node )
-        CV_ERROR( CV_StsError, "The tree has not been trained yet" );
+        CV_Error( CV_StsError, "The tree has not been trained yet" );
 
     if( !CV_IS_MAT(_sample) || CV_MAT_TYPE(_sample->type) != CV_32FC1 ||
         (_sample->cols != 1 && _sample->rows != 1) ||
         (_sample->cols + _sample->rows - 1 != data->var_all && !preprocessed_input) ||
         (_sample->cols + _sample->rows - 1 != data->var_count && preprocessed_input) )
-        CV_ERROR( CV_StsBadArg,
+        CV_Error( CV_StsBadArg,
             "the input sample must be 1d floating-point vector with the same "
             "number of elements as the total number of variables used for training" );
 
-    sample = _sample->data.fl;
-    step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(sample[0]);
+    const float* sample = _sample->data.fl;
+    int step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(sample[0]);
 
     if( data->cat_count && !preprocessed_input ) // cache for categorical variables
     {
         int n = data->cat_count->cols;
-        catbuf = (int*)cvStackAlloc(n*sizeof(catbuf[0]));
+        catbuf.allocate(n);
         for( i = 0; i < n; i++ )
             catbuf[i] = -1;
     }
@@ -3599,17 +3589,17 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
     if( _missing )
     {
         if( !CV_IS_MAT(_missing) || !CV_IS_MASK_ARR(_missing) ||
-            !CV_ARE_SIZES_EQ(_missing, _sample) )
-            CV_ERROR( CV_StsBadArg,
+            !CV_ARE_SIZES_EQ(_missing, _sample) )
+            CV_Error( CV_StsBadArg,
                 "the missing data mask must be 8-bit vector of the same size as input sample" );
         m = _missing->data.ptr;
         mstep = CV_IS_MAT_CONT(_missing->type) ? 1 : _missing->step/sizeof(m[0]);
     }
 
-    vtype = data->var_type->data.i;
-    vidx = data->var_idx && !preprocessed_input ? data->var_idx->data.i : 0;
-    cmap = data->cat_map ? data->cat_map->data.i : 0;
-    cofs = data->cat_ofs ? data->cat_ofs->data.i : 0;
+    const int* vtype = data->var_type->data.i;
+    const int* vidx = data->var_idx && !preprocessed_input ? data->var_idx->data.i : 0;
+    const int* cmap = data->cat_map ? data->cat_map->data.i : 0;
+    const int* cofs = data->cat_ofs ? data->cat_ofs->data.i : 0;
 
     while( node->Tn > pruned_tree_idx && node->left )
     {
@@ -3640,7 +3630,7 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
 
                 int ival = cvRound(val);
                 if( ival != val )
-                    CV_ERROR( CV_StsBadArg,
+                    CV_Error( CV_StsBadArg,
                         "one of input categorical variable is not an integer" );
 
                 int sh = 0;
@@ -3678,11 +3668,7 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
         node = dir < 0 ? node->left : node->right;
     }
 
-    result = node;
-
-    __END__;
-
-    return result;
+    return node;
 }
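A closing note on the interop pattern visible in several hunks above: because much of the ML module still calls the C API, the AutoBuffer-backed storage is handed to it by wrapping a CvMat header around &buf[0]; the header only borrows the memory, so it must not outlive the buffer. A minimal sketch under that assumption follows (mean_of_zeros() is an invented helper, and the includes reflect the 2.x-era header layout):

    #include <opencv2/core/core.hpp>
    #include <opencv2/core/core_c.h>

    static double mean_of_zeros( int n )
    {
        cv::AutoBuffer<double> buf(n);                 // stack for small n, heap otherwise
        CvMat row = cvMat( 1, n, CV_64F, &buf[0] );    // non-owning header over buf's memory
        cvZero( &row );                                // legacy C functions see the same data
        return cvAvg( &row ).val[0];                   // 0.0; buf frees itself on return
    }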