Implement weighting according to L. Bourdev and J. Brandt paper "Robust Object Detection Via Soft Cascade"

This commit is contained in:
marina.kolpakova
2012-12-12 15:35:30 +04:00
parent c091092174
commit a8d0e04912
4 changed files with 20 additions and 3 deletions

View File

@@ -158,7 +158,7 @@ protected:
float predict( const Mat& _sample, const cv::Range range) const; float predict( const Mat& _sample, const cv::Range range) const;
private: private:
void traverse(const CvBoostTree* tree, cv::FileStorage& fs, const float* th = 0) const; void traverse(const CvBoostTree* tree, cv::FileStorage& fs, const float* th = 0) const;
virtual void initial_weights(double (&p)[2]);
cv::Rect boundingBox; cv::Rect boundingBox;
int npositives; int npositives;

View File

@@ -295,6 +295,7 @@ void sft::Octave::generateNegatives(const Dataset& dataset)
dprintf("Processing negatives finished:\n\trequested %d negatives, viewed %d samples.\n", nnegatives, total); dprintf("Processing negatives finished:\n\trequested %d negatives, viewed %d samples.\n", nnegatives, total);
} }
template <typename T> int sgn(T val) { template <typename T> int sgn(T val) {
return (T(0) < val) - (val < T(0)); return (T(0) < val) - (val < T(0));
} }
@@ -378,6 +379,13 @@ void sft::Octave::write( cv::FileStorage &fso, const Mat& thresholds) const
<< "}"; << "}";
} }
// Class-balanced boosting weight initialization (Bourdev & Brandt,
// "Robust Object Detection via a Sensitive Soft Cascade" style):
// each class weight is the total sample count divided by the size of
// that class, so positives and negatives contribute equally overall.
// p[0] — weight for the negative class, p[1] — for the positive class.
void sft::Octave::initial_weights(double (&p)[2])
{
    const double total = data->sample_count;
    p[0] = total / static_cast<double>(nnegatives);
    p[1] = total / static_cast<double>(npositives);
}
bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool, int weaks, int treeDepth) bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool, int weaks, int treeDepth)
{ {
CV_Assert(treeDepth == 2); CV_Assert(treeDepth == 2);

View File

@@ -1251,6 +1251,8 @@ protected:
virtual void write_params( CvFileStorage* fs ) const; virtual void write_params( CvFileStorage* fs ) const;
virtual void read_params( CvFileStorage* fs, CvFileNode* node ); virtual void read_params( CvFileStorage* fs, CvFileNode* node );
virtual void initial_weights(double (&p)[2]);
CvDTreeTrainData* data; CvDTreeTrainData* data;
CvBoostParams params; CvBoostParams params;
CvSeq* weak; CvSeq* weak;

View File

@@ -1116,6 +1116,12 @@ bool CvBoost::train( CvMLData* _data,
return result; return result;
} }
// Default boosting weight initialization: both classes start with unit
// weight (no rebalancing). Virtual hook — subclasses (e.g. sft::Octave)
// override this to apply class-balanced weighting.
// p[0]/p[1] — per-class weight multipliers filled in by this call.
void CvBoost::initial_weights(double (&p)[2])
{
    p[0] = p[1] = 1.0;
}
void void
CvBoost::update_weights( CvBoostTree* tree ) CvBoost::update_weights( CvBoostTree* tree )
{ {
@@ -1159,8 +1165,9 @@ CvBoost::update_weights( CvBoostTree* tree )
// in case of logitboost and gentle adaboost each weak tree is a regression tree, // in case of logitboost and gentle adaboost each weak tree is a regression tree,
// so we need to convert class labels to floating-point values // so we need to convert class labels to floating-point values
double w0 = 1./n; double w0 = 1./ n;
double p[2] = { 1, 1 }; double p[2] = { 1., 1. };
initial_weights(p);
cvReleaseMat( &orig_response ); cvReleaseMat( &orig_response );
cvReleaseMat( &sum_response ); cvReleaseMat( &sum_response );