Updated ml module interfaces and documentation
@@ -2802,43 +2802,36 @@ public:

#define CV_PURE_PROPERTY(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(type _##name) = 0;
CV_WRAP virtual void set##name(type val) = 0;

#define CV_PURE_PROPERTY_S(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(const type & _##name) = 0;
CV_WRAP virtual void set##name(const type & val) = 0;

#define CV_PURE_PROPERTY_RO(type, name) \
CV_WRAP virtual type get##name() const = 0;

// basic property implementation

#define CV_IMPL_PROPERTY(type, name, member) \
type get##name() const \
{ \
return member; \
} \
void set##name(type val) \
{ \
member = val; \
}

#define CV_IMPL_PROPERTY_S(type, name, member) \
type get##name() const \
{ \
return member; \
} \
void set##name(const type &val) \
{ \
member = val; \
}

#define CV_IMPL_PROPERTY_RO(type, name, member) \
type get##name() const \
{ \
return member; \
}
inline type get##name() const { return member; }

#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
CV_IMPL_PROPERTY_RO(r_type, name, member) \
inline void set##name(w_type val) { member = val; }

#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
r_type get##name() const { return internal_obj.get##internal_name(); } \
void set##name(w_type val) { internal_obj.set##internal_name(val); }

#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)

#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)

#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
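As an illustration (not part of the patch), the intended pattern is: an abstract interface declares a property with CV_PURE_PROPERTY, and the implementation class generates the trivial accessors with CV_IMPL_PROPERTY. A minimal sketch with a hypothetical MaxDepth property:
@code{.cpp}
class TreeIface
{
public:
    // expands to CV_WRAP-marked pure virtuals:
    //   virtual int getMaxDepth() const = 0;
    //   virtual void setMaxDepth(int val) = 0;
    CV_PURE_PROPERTY(int, MaxDepth)
};

class TreeIfaceImpl : public TreeIface
{
public:
    // expands to inline accessors backed by the member:
    //   inline int getMaxDepth() const { return maxDepth; }
    //   inline void setMaxDepth(int val) { maxDepth = val; }
    CV_IMPL_PROPERTY(int, MaxDepth, maxDepth)
private:
    int maxDepth;
};
@endcode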

struct Param {
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
@@ -449,40 +449,33 @@ classes 0 and 1, one can determine that the given data instance belongs to class
\geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ .

In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
training error and ensuring high training accuracy. cv::ml::LogisticRegression::Params is the
structure that defines parameters that are required to train a Logistic Regression classifier.
training error and ensuring high training accuracy:

The learning rate is specified by cv::ml::LogisticRegression::Params.alpha. It determines how fast
we approach the solution. It is a positive real number.
- The learning rate can be set with the @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate"
method. It determines how fast we approach the solution. It is a positive real number.

Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in
LogisticRegression. It is also important to specify the number of iterations these optimization
algorithms have to run. The number of iterations is specified by
cv::ml::LogisticRegression::Params.num_iters. It can be thought of as the number of steps taken,
while the learning rate specifies whether each step is long or short. These two parameters together
define how fast we arrive at a possible solution.
- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
in LogisticRegression. It is also important to specify the number of iterations these optimization
algorithms have to run. The number of iterations can be set with @ref
cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of as the
number of steps taken, while the learning rate specifies whether each step is long or short.
Together, these two parameters define how fast we arrive at a possible solution.

To compensate for overfitting, regularization is performed, which can be enabled by setting
cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can
specify what kind of regularization has to be performed by setting
cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2 values.
- To compensate for overfitting, regularization is performed, which can be enabled with
@ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
kind of regularization has to be performed by passing one of the @ref
cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.

LogisticRegression provides a choice of two training methods: Batch Gradient Descent or Mini-Batch
Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to
either BATCH or MINI_BATCH. If the training method is set to MINI_BATCH, the size of the mini batch
has to be a positive integer set via cv::ml::LogisticRegression::Params::mini_batch_size.
- The logistic regression implementation provides a choice of two training methods: Batch Gradient
Descent or Mini-Batch Gradient Descent. To specify this, call @ref
cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref
cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If the training method is
set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has
to be a positive integer set with @ref cv::ml::LogisticRegression::setMiniBatchSize
"setMiniBatchSize".

A sample set of training parameters for the Logistic Regression classifier can be initialized as
follows:
@code{.cpp}
using namespace cv::ml;
LogisticRegression::Params params;
params.alpha = 0.5;
params.num_iters = 10000;
params.norm = LogisticRegression::REG_L2;
params.regularized = 1;
params.train_method = LogisticRegression::MINI_BATCH;
params.mini_batch_size = 10;
@endcode
A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
@snippet samples/cpp/logistic_regression.cpp init
@sa cv::ml::LogisticRegression
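For comparison, here is a minimal sketch of the setter-based configuration that replaces the old Params structure, equivalent to the removed @code{.cpp} sample above (accessor names are taken from the properties added in this commit):
@code{.cpp}
using namespace cv::ml;
Ptr<LogisticRegression> lr = LogisticRegression::create();
lr->setLearningRate(0.5);                           // formerly params.alpha
lr->setIterations(10000);                           // formerly params.num_iters
lr->setRegularization(LogisticRegression::REG_L2);  // formerly params.norm + params.regularized
lr->setTrainMethod(LogisticRegression::MINI_BATCH); // formerly params.train_method
lr->setMiniBatchSize(10);                           // formerly params.mini_batch_size
@endcode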

File diff suppressed because it is too large
@@ -42,84 +42,57 @@

namespace cv { namespace ml {

ANN_MLP::Params::Params()
struct AnnParams
{
layerSizes = Mat();
activateFunc = SIGMOID_SYM;
fparam1 = fparam2 = 0;
termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
trainMethod = RPROP;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
}
AnnParams()
{
termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
trainMethod = ANN_MLP::RPROP;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
}

TermCriteria termCrit;
int trainMethod;

ANN_MLP::Params::Params( const Mat& _layerSizes, int _activateFunc, double _fparam1, double _fparam2,
TermCriteria _termCrit, int _trainMethod, double _param1, double _param2 )
double bpDWScale;
double bpMomentScale;

double rpDW0;
double rpDWPlus;
double rpDWMinus;
double rpDWMin;
double rpDWMax;
};

template <typename T>
inline T inBounds(T val, T min_val, T max_val)
{
layerSizes = _layerSizes;
activateFunc = _activateFunc;
fparam1 = _fparam1;
fparam2 = _fparam2;
termCrit = _termCrit;
trainMethod = _trainMethod;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 1.; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;

if( trainMethod == RPROP )
{
rpDW0 = _param1;
if( rpDW0 < FLT_EPSILON )
rpDW0 = 1.;
rpDWMin = _param2;
rpDWMin = std::max( rpDWMin, 0. );
}
else if( trainMethod == BACKPROP )
{
bpDWScale = _param1;
if( bpDWScale <= 0 )
bpDWScale = 0.1;
bpDWScale = std::max( bpDWScale, 1e-3 );
bpDWScale = std::min( bpDWScale, 1. );
bpMomentScale = _param2;
if( bpMomentScale < 0 )
bpMomentScale = 0.1;
bpMomentScale = std::min( bpMomentScale, 1. );
}
else
trainMethod = RPROP;
return std::min(std::max(val, min_val), max_val);
}

class ANN_MLPImpl : public ANN_MLP
{
public:
ANN_MLPImpl()
{
clear();
}

ANN_MLPImpl( const Params& p )
{
clear();
setParams(p);
setActivationFunction( SIGMOID_SYM, 0, 0 );
setLayerSizes(Mat());
setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
}

virtual ~ANN_MLPImpl() {}

void setParams(const Params& p)
{
params = p;
create( params.layerSizes );
set_activ_func( params.activateFunc, params.fparam1, params.fparam2 );
}

Params getParams() const
{
return params;
}
CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.termCrit)
CV_IMPL_PROPERTY(double, BackpropWeightScale, params.bpDWScale)
CV_IMPL_PROPERTY(double, BackpropMomentumScale, params.bpMomentScale)
CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0)
CV_IMPL_PROPERTY(double, RpropDWPlus, params.rpDWPlus)
CV_IMPL_PROPERTY(double, RpropDWMinus, params.rpDWMinus)
CV_IMPL_PROPERTY(double, RpropDWMin, params.rpDWMin)
CV_IMPL_PROPERTY(double, RpropDWMax, params.rpDWMax)

void clear()
{

@@ -132,7 +105,35 @@ public:

int layer_count() const { return (int)layer_sizes.size(); }

void set_activ_func( int _activ_func, double _f_param1, double _f_param2 )
void setTrainMethod(int method, double param1, double param2)
{
if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP)
method = ANN_MLP::RPROP;
params.trainMethod = method;
if(method == ANN_MLP::RPROP )
{
if( param1 < FLT_EPSILON )
param1 = 1.;
params.rpDW0 = param1;
params.rpDWMin = std::max( param2, 0. );
}
else if(method == ANN_MLP::BACKPROP )
{
if( param1 <= 0 )
param1 = 0.1;
params.bpDWScale = inBounds<double>(param1, 1e-3, 1.);
if( param2 < 0 )
param2 = 0.1;
params.bpMomentScale = std::min( param2, 1. );
}
}

int getTrainMethod() const
{
return params.trainMethod;
}

void setActivationFunction(int _activ_func, double _f_param1, double _f_param2 )
{
if( _activ_func < 0 || _activ_func > GAUSSIAN )
CV_Error( CV_StsOutOfRange, "Unknown activation function" );

@@ -201,7 +202,12 @@ public:
}
}

void create( InputArray _layer_sizes )
Mat getLayerSizes() const
{
return Mat_<int>(layer_sizes, true);
}

void setLayerSizes( InputArray _layer_sizes )
{
clear();

@@ -700,7 +706,7 @@ public:
termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1);
termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON);

int iter = params.trainMethod == Params::BACKPROP ?
int iter = params.trainMethod == ANN_MLP::BACKPROP ?
train_backprop( inputs, outputs, sw, termcrit ) :
train_rprop( inputs, outputs, sw, termcrit );

@@ -1113,13 +1119,13 @@ public:
fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1;

fs << "training_params" << "{";
if( params.trainMethod == Params::BACKPROP )
if( params.trainMethod == ANN_MLP::BACKPROP )
{
fs << "train_method" << "BACKPROP";
fs << "dw_scale" << params.bpDWScale;
fs << "moment_scale" << params.bpMomentScale;
}
else if( params.trainMethod == Params::RPROP )
else if( params.trainMethod == ANN_MLP::RPROP )
{
fs << "train_method" << "RPROP";
fs << "dw0" << params.rpDW0;

@@ -1186,7 +1192,7 @@ public:
f_param1 = (double)fn["f_param1"];
f_param2 = (double)fn["f_param2"];

set_activ_func( activ_func, f_param1, f_param2 );
setActivationFunction( activ_func, f_param1, f_param2 );

min_val = (double)fn["min_val"];
max_val = (double)fn["max_val"];

@@ -1194,7 +1200,7 @@ public:
max_val1 = (double)fn["max_val1"];

FileNode tpn = fn["training_params"];
params = Params();
params = AnnParams();

if( !tpn.empty() )
{

@@ -1202,13 +1208,13 @@ public:

if( tmethod_name == "BACKPROP" )
{
params.trainMethod = Params::BACKPROP;
params.trainMethod = ANN_MLP::BACKPROP;
params.bpDWScale = (double)tpn["dw_scale"];
params.bpMomentScale = (double)tpn["moment_scale"];
}
else if( tmethod_name == "RPROP" )
{
params.trainMethod = Params::RPROP;
params.trainMethod = ANN_MLP::RPROP;
params.rpDW0 = (double)tpn["dw0"];
params.rpDWPlus = (double)tpn["dw_plus"];
params.rpDWMinus = (double)tpn["dw_minus"];

@@ -1244,7 +1250,7 @@ public:

vector<int> _layer_sizes;
readVectorOrMat(fn["layer_sizes"], _layer_sizes);
create( _layer_sizes );
setLayerSizes( _layer_sizes );

int i, l_count = layer_count();
read_params(fn);

@@ -1267,11 +1273,6 @@ public:
trained = true;
}

Mat getLayerSizes() const
{
return Mat_<int>(layer_sizes, true);
}

Mat getWeights(int layerIdx) const
{
CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() );

@@ -1304,17 +1305,16 @@ public:
double min_val, max_val, min_val1, max_val1;
int activ_func;
int max_lsize, max_buf_sz;
Params params;
AnnParams params;
RNG rng;
Mutex mtx;
bool trained;
};

Ptr<ANN_MLP> ANN_MLP::create(const ANN_MLP::Params& params)
Ptr<ANN_MLP> ANN_MLP::create()
{
Ptr<ANN_MLPImpl> ann = makePtr<ANN_MLPImpl>(params);
return ann;
return makePtr<ANN_MLPImpl>();
}
}}
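To make the migration concrete, here is a hedged sketch of the new ANN_MLP setup (the method names come from the accessors above; the layer sizes and termination criteria are illustrative only):
@code{.cpp}
Ptr<ANN_MLP> ann = ANN_MLP::create();
Mat layerSizes = (Mat_<int>(3, 1) << 16, 10, 2);  // input, hidden, output neuron counts
ann->setLayerSizes(layerSizes);
ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
ann->setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
ann->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01));
@endcode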

@@ -54,48 +54,33 @@ log_ratio( double val )
}

Boost::Params::Params()
BoostTreeParams::BoostTreeParams()
{
boostType = Boost::REAL;
weakCount = 100;
weightTrimRate = 0.95;
CVFolds = 0;
maxDepth = 1;
}

Boost::Params::Params( int _boostType, int _weak_count,
double _weightTrimRate, int _maxDepth,
bool _use_surrogates, const Mat& _priors )
BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count,
double _weightTrimRate)
{
boostType = _boostType;
weakCount = _weak_count;
weightTrimRate = _weightTrimRate;
CVFolds = 0;
maxDepth = _maxDepth;
useSurrogates = _use_surrogates;
priors = _priors;
}

class DTreesImplForBoost : public DTreesImpl
{
public:
DTreesImplForBoost() {}
DTreesImplForBoost()
{
params.setCVFolds(0);
params.setMaxDepth(1);
}
virtual ~DTreesImplForBoost() {}

bool isClassifier() const { return true; }

void setBParams(const Boost::Params& p)
{
bparams = p;
}

Boost::Params getBParams() const
{
return bparams;
}

void clear()
{
DTreesImpl::clear();

@@ -199,10 +184,6 @@ public:

bool train( const Ptr<TrainData>& trainData, int flags )
{
Params dp(bparams.maxDepth, bparams.minSampleCount, bparams.regressionAccuracy,
bparams.useSurrogates, bparams.maxCategories, 0,
false, false, bparams.priors);
setDParams(dp);
startTraining(trainData, flags);
int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
vector<int> sidx = w->sidx;

@@ -426,12 +407,6 @@ public:
void readParams( const FileNode& fn )
{
DTreesImpl::readParams(fn);
bparams.maxDepth = params0.maxDepth;
bparams.minSampleCount = params0.minSampleCount;
bparams.regressionAccuracy = params0.regressionAccuracy;
bparams.useSurrogates = params0.useSurrogates;
bparams.maxCategories = params0.maxCategories;
bparams.priors = params0.priors;

FileNode tparams_node = fn["training_params"];
// check for old layout

@@ -465,7 +440,7 @@ public:
}
}

Boost::Params bparams;
BoostTreeParams bparams;
vector<double> sumResult;
};

@@ -476,6 +451,20 @@ public:
BoostImpl() {}
virtual ~BoostImpl() {}

CV_IMPL_PROPERTY(int, BoostType, impl.bparams.boostType)
CV_IMPL_PROPERTY(int, WeakCount, impl.bparams.weakCount)
CV_IMPL_PROPERTY(double, WeightTrimRate, impl.bparams.weightTrimRate)

CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)

String getDefaultModelName() const { return "opencv_ml_boost"; }

bool train( const Ptr<TrainData>& trainData, int flags )

@@ -498,9 +487,6 @@ public:
impl.read(fn);
}

void setBParams(const Params& p) { impl.setBParams(p); }
Params getBParams() const { return impl.getBParams(); }

int getVarCount() const { return impl.getVarCount(); }

bool isTrained() const { return impl.isTrained(); }

@@ -515,11 +501,9 @@ public:
};

Ptr<Boost> Boost::create(const Params& params)
Ptr<Boost> Boost::create()
{
Ptr<BoostImpl> p = makePtr<BoostImpl>();
p->setBParams(params);
return p;
return makePtr<BoostImpl>();
}
}}
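A sketch of the equivalent Boost configuration under the new API (values mirror the BoostTreeParams defaults above; setMaxDepth is forwarded to the shared tree parameters through CV_WRAP_SAME_PROPERTY):
@code{.cpp}
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::REAL);  // formerly Params::boostType
boost->setWeakCount(100);          // formerly Params::weakCount
boost->setWeightTrimRate(0.95);    // formerly Params::weightTrimRate
boost->setMaxDepth(1);             // delegated to DTreesImpl::params
@endcode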

@@ -48,37 +48,49 @@ namespace ml

const double minEigenValue = DBL_EPSILON;

EM::Params::Params(int _nclusters, int _covMatType, const TermCriteria& _termCrit)
{
nclusters = _nclusters;
covMatType = _covMatType;
termCrit = _termCrit;
}

class CV_EXPORTS EMImpl : public EM
{
public:
EMImpl(const Params& _params)

int nclusters;
int covMatType;
TermCriteria termCrit;

CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, termCrit)

void setClustersNumber(int val)
{
setParams(_params);
nclusters = val;
CV_Assert(nclusters > 1);
}

int getClustersNumber() const
{
return nclusters;
}

void setCovarianceMatrixType(int val)
{
covMatType = val;
CV_Assert(covMatType == COV_MAT_SPHERICAL ||
covMatType == COV_MAT_DIAGONAL ||
covMatType == COV_MAT_GENERIC);
}

int getCovarianceMatrixType() const
{
return covMatType;
}

EMImpl()
{
nclusters = DEFAULT_NCLUSTERS;
covMatType=EM::COV_MAT_DIAGONAL;
termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6);
}

virtual ~EMImpl() {}

void setParams(const Params& _params)
{
params = _params;
CV_Assert(params.nclusters > 1);
CV_Assert(params.covMatType == COV_MAT_SPHERICAL ||
params.covMatType == COV_MAT_DIAGONAL ||
params.covMatType == COV_MAT_GENERIC);
}

Params getParams() const
{
return params;
}

void clear()
{
trainSamples.release();

@@ -100,10 +112,10 @@ public:
bool train(const Ptr<TrainData>& data, int)
{
Mat samples = data->getTrainSamples(), labels;
return train_(samples, labels, noArray(), noArray());
return trainEM(samples, labels, noArray(), noArray());
}

bool train_(InputArray samples,
bool trainEM(InputArray samples,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs)

@@ -157,7 +169,7 @@ public:
{
if( _outputs.fixedType() )
ptype = _outputs.type();
_outputs.create(samples.rows, params.nclusters, ptype);
_outputs.create(samples.rows, nclusters, ptype);
}
else
nsamples = std::min(nsamples, 1);

@@ -193,7 +205,7 @@ public:
{
if( _probs.fixedType() )
ptype = _probs.type();
_probs.create(1, params.nclusters, ptype);
_probs.create(1, nclusters, ptype);
probs = _probs.getMat();
}

@@ -311,7 +323,6 @@ public:
const std::vector<Mat>* covs0,
const Mat* weights0)
{
int nclusters = params.nclusters, covMatType = params.covMatType;
clear();

checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0);

@@ -350,7 +361,6 @@ public:

void decomposeCovs()
{
int nclusters = params.nclusters, covMatType = params.covMatType;
CV_Assert(!covs.empty());
covsEigenValues.resize(nclusters);
if(covMatType == COV_MAT_GENERIC)

@@ -383,7 +393,6 @@ public:

void clusterTrainSamples()
{
int nclusters = params.nclusters;
int nsamples = trainSamples.rows;

// Cluster samples, compute/update means

@@ -443,7 +452,6 @@ public:

void computeLogWeightDivDet()
{
int nclusters = params.nclusters;
CV_Assert(!covsEigenValues.empty());

Mat logWeights;

@@ -458,7 +466,7 @@ public:
double logDetCov = 0.;
const int evalCount = static_cast<int>(covsEigenValues[clusterIndex].total());
for(int di = 0; di < evalCount; di++)
logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(params.covMatType != COV_MAT_SPHERICAL ? di : 0));
logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0));

logWeightDivDet.at<double>(clusterIndex) = logWeights.at<double>(clusterIndex) - 0.5 * logDetCov;
}

@@ -466,7 +474,6 @@ public:

bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
{
int nclusters = params.nclusters;
int dim = trainSamples.cols;
// Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP
if(startStep != START_M_STEP)

@@ -488,9 +495,9 @@ public:
mStep();

double trainLogLikelihood, prevTrainLogLikelihood = 0.;
int maxIters = (params.termCrit.type & TermCriteria::MAX_ITER) ?
params.termCrit.maxCount : DEFAULT_MAX_ITERS;
double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0.;
int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ?
termCrit.maxCount : DEFAULT_MAX_ITERS;
double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.;

for(int iter = 0; ; iter++)
{

@@ -521,12 +528,12 @@ public:
covs.resize(nclusters);
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
if(params.covMatType == COV_MAT_SPHERICAL)
if(covMatType == COV_MAT_SPHERICAL)
{
covs[clusterIndex].create(dim, dim, CV_64FC1);
setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at<double>(0)));
}
else if(params.covMatType == COV_MAT_DIAGONAL)
else if(covMatType == COV_MAT_DIAGONAL)
{
covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]);
}

@@ -555,7 +562,6 @@ public:
// see Alex Smola's blog http://blog.smola.org/page/2 for
// details on the log-sum-exp trick

int nclusters = params.nclusters, covMatType = params.covMatType;
int stype = sample.type();
CV_Assert(!means.empty());
CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F));

@@ -621,7 +627,7 @@ public:
void eStep()
{
// Compute probs_ik from means_k, covs_k and weights_k.
trainProbs.create(trainSamples.rows, params.nclusters, CV_64FC1);
trainProbs.create(trainSamples.rows, nclusters, CV_64FC1);
trainLabels.create(trainSamples.rows, 1, CV_32SC1);
trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1);

@@ -642,8 +648,6 @@ public:
void mStep()
{
// Update means_k, covs_k and weights_k from probs_ik
int nclusters = params.nclusters;
int covMatType = params.covMatType;
int dim = trainSamples.cols;

// Update weights

@@ -755,12 +759,12 @@ public:

void write_params(FileStorage& fs) const
{
fs << "nclusters" << params.nclusters;
fs << "cov_mat_type" << (params.covMatType == COV_MAT_SPHERICAL ? String("spherical") :
params.covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
params.covMatType == COV_MAT_GENERIC ? String("generic") :
format("unknown_%d", params.covMatType));
writeTermCrit(fs, params.termCrit);
fs << "nclusters" << nclusters;
fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") :
covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
covMatType == COV_MAT_GENERIC ? String("generic") :
format("unknown_%d", covMatType));
writeTermCrit(fs, termCrit);
}

void write(FileStorage& fs) const

@@ -781,15 +785,13 @@ public:

void read_params(const FileNode& fn)
{
Params _params;
_params.nclusters = (int)fn["nclusters"];
nclusters = (int)fn["nclusters"];
String s = (String)fn["cov_mat_type"];
_params.covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
s == "diagonal" ? COV_MAT_DIAGONAL :
s == "generic" ? COV_MAT_GENERIC : -1;
CV_Assert(_params.covMatType >= 0);
_params.termCrit = readTermCrit(fn);
setParams(_params);
CV_Assert(covMatType >= 0);
termCrit = readTermCrit(fn);
}

void read(const FileNode& fn)

@@ -820,8 +822,6 @@ public:
std::copy(covs.begin(), covs.end(), _covs.begin());
}

Params params;

// all inner matrices have type CV_64FC1
Mat trainSamples;
Mat trainProbs;

@@ -838,41 +838,9 @@ public:
Mat logWeightDivDet;
};

Ptr<EM> EM::train(InputArray samples, OutputArray logLikelihoods,
OutputArray labels, OutputArray probs,
const EM::Params& params)
Ptr<EM> EM::create()
{
Ptr<EMImpl> em = makePtr<EMImpl>(params);
if(!em->train_(samples, logLikelihoods, labels, probs))
em.release();
return em;
}

Ptr<EM> EM::train_startWithE(InputArray samples, InputArray means0,
InputArray covs0, InputArray weights0,
OutputArray logLikelihoods, OutputArray labels,
OutputArray probs, const EM::Params& params)
{
Ptr<EMImpl> em = makePtr<EMImpl>(params);
if(!em->trainE(samples, means0, covs0, weights0, logLikelihoods, labels, probs))
em.release();
return em;
}

Ptr<EM> EM::train_startWithM(InputArray samples, InputArray probs0,
OutputArray logLikelihoods, OutputArray labels,
OutputArray probs, const EM::Params& params)
{
Ptr<EMImpl> em = makePtr<EMImpl>(params);
if(!em->trainM(samples, probs0, logLikelihoods, labels, probs))
em.release();
return em;
}

Ptr<EM> EM::create(const Params& params)
{
return makePtr<EMImpl>(params);
return makePtr<EMImpl>();
}
}
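Since the static EM::train* helpers are removed, training becomes a create/configure flow followed by trainEM(); a minimal sketch (the samples matrix is assumed to be prepared elsewhere):
@code{.cpp}
Ptr<EM> em = EM::create();
em->setClustersNumber(3);
em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
Mat samples;  // assumed: training samples, one per row (CV_32F or CV_64F)
Mat labels;
em->trainEM(samples, noArray(), labels, noArray());
@endcode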

@@ -50,46 +50,33 @@
namespace cv {
namespace ml {

KNearest::Params::Params(int k, bool isclassifier_, int Emax_, int algorithmType_) :
defaultK(k),
isclassifier(isclassifier_),
Emax(Emax_),
algorithmType(algorithmType_)
{
}
const String NAME_BRUTE_FORCE = "opencv_ml_knn";
const String NAME_KDTREE = "opencv_ml_knn_kd";

class KNearestImpl : public KNearest
class Impl
{
public:
KNearestImpl(const Params& p)
Impl()
{
params = p;
defaultK = 10;
isclassifier = true;
Emax = INT_MAX;
}

virtual ~KNearestImpl() {}

Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }

bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }

String getDefaultModelName() const { return "opencv_ml_knn"; }

void clear()
{
samples.release();
responses.release();
}

int getVarCount() const { return samples.cols; }
virtual ~Impl() {}
virtual String getModelName() const = 0;
virtual int getType() const = 0;
virtual float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const = 0;

bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty();

CV_Assert( new_samples.type() == CV_32F );

@@ -106,9 +93,53 @@ public:
samples.push_back(new_samples);
responses.push_back(new_responses);

doTrain(samples);

return true;
}

virtual void doTrain(InputArray points) { (void)points; }

void clear()
{
samples.release();
responses.release();
}

void read( const FileNode& fn )
{
clear();
isclassifier = (int)fn["is_classifier"] != 0;
defaultK = (int)fn["default_k"];

fn["samples"] >> samples;
fn["responses"] >> responses;
}

void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)isclassifier;
fs << "default_k" << defaultK;

fs << "samples" << samples;
fs << "responses" << responses;
}

public:
int defaultK;
bool isclassifier;
int Emax;

Mat samples;
Mat responses;
};

class BruteForceImpl : public Impl
{
public:
String getModelName() const { return NAME_BRUTE_FORCE; }
int getType() const { return ml::KNearest::BRUTE_FORCE; }

void findNearestCore( const Mat& _samples, int k0, const Range& range,
Mat* results, Mat* neighbor_responses,
Mat* dists, float* presult ) const

@@ -199,7 +230,7 @@ public:

if( results || testidx+range.start == 0 )
{
if( !params.isclassifier || k == 1 )
if( !isclassifier || k == 1 )
{
float s = 0.f;
for( j = 0; j < k; j++ )

@@ -251,7 +282,7 @@ public:

struct findKNearestInvoker : public ParallelLoopBody
{
findKNearestInvoker(const KNearestImpl* _p, int _k, const Mat& __samples,
findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples,
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
{
p = _p;

@@ -273,7 +304,7 @@ public:
}
}

const KNearestImpl* p;
const BruteForceImpl* p;
int k;
const Mat* _samples;
Mat* _results;

@@ -324,88 +355,18 @@ public:
//invoker(Range(0, testcount));
return result;
}

float predict(InputArray inputs, OutputArray outputs, int) const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
}

void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;

fs << "samples" << samples;
fs << "responses" << responses;
}

void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];

fn["samples"] >> samples;
fn["responses"] >> responses;
}

Mat samples;
Mat responses;
Params params;
};

class KNearestKDTreeImpl : public KNearest
class KDTreeImpl : public Impl
{
public:
KNearestKDTreeImpl(const Params& p)
String getModelName() const { return NAME_KDTREE; }
int getType() const { return ml::KNearest::KDTREE; }

void doTrain(InputArray points)
{
params = p;
}

virtual ~KNearestKDTreeImpl() {}

Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }

bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }

String getDefaultModelName() const { return "opencv_ml_knn_kd"; }

void clear()
{
samples.release();
responses.release();
}

int getVarCount() const { return samples.cols; }

bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();

CV_Assert( new_samples.type() == CV_32F );

if( !update )
{
clear();
}
else
{
CV_Assert( new_samples.cols == samples.cols &&
new_responses.cols == responses.cols );
}

samples.push_back(new_samples);
responses.push_back(new_responses);

tr.build(samples);

return true;
tr.build(points);
}

float findNearest( InputArray _samples, int k,

@@ -460,51 +421,97 @@ public:
{
_d = d.row(i);
}
tr.findNearest(test_samples.row(i), k, params.Emax, _res, _nr, _d, noArray());
tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray());
}

return result; // currently always 0
}

float predict(InputArray inputs, OutputArray outputs, int) const
KDTree tr;
};

//================================================================

class KNearestImpl : public KNearest
{
CV_IMPL_PROPERTY(int, DefaultK, impl->defaultK)
CV_IMPL_PROPERTY(bool, IsClassifier, impl->isclassifier)
CV_IMPL_PROPERTY(int, Emax, impl->Emax)

public:
int getAlgorithmType() const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
return impl->getType();
}
void setAlgorithmType(int val)
{
if (val != BRUTE_FORCE && val != KDTREE)
val = BRUTE_FORCE;
initImpl(val);
}

public:
KNearestImpl()
{
initImpl(BRUTE_FORCE);
}
~KNearestImpl()
{
}

bool isClassifier() const { return impl->isclassifier; }
bool isTrained() const { return !impl->samples.empty(); }

int getVarCount() const { return impl->samples.cols; }

void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;

fs << "samples" << samples;
fs << "responses" << responses;
impl->write(fs);
}

void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];

fn["samples"] >> samples;
fn["responses"] >> responses;
int algorithmType = BRUTE_FORCE;
if (fn.name() == NAME_KDTREE)
algorithmType = KDTREE;
initImpl(algorithmType);
impl->read(fn);
}

KDTree tr;
float findNearest( InputArray samples, int k,
OutputArray results,
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const
{
return impl->findNearest(samples, k, results, neighborResponses, dist);
}

Mat samples;
Mat responses;
Params params;
float predict(InputArray inputs, OutputArray outputs, int) const
{
return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() );
}

bool train( const Ptr<TrainData>& data, int flags )
{
return impl->train(data, flags);
}

String getDefaultModelName() const { return impl->getModelName(); }

protected:
void initImpl(int algorithmType)
{
if (algorithmType != KDTREE)
impl = makePtr<BruteForceImpl>();
else
impl = makePtr<KDTreeImpl>();
}
Ptr<Impl> impl;
};

Ptr<KNearest> KNearest::create(const Params& p)
Ptr<KNearest> KNearest::create()
{
if (KDTREE==p.algorithmType)
{
return makePtr<KNearestKDTreeImpl>(p);
}

return makePtr<KNearestImpl>(p);
return makePtr<KNearestImpl>();
}
}
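A sketch of selecting the k-NN backend at runtime, which replaces Params::algorithmType (names taken from the properties and setAlgorithmType above; trainData is an assumed, already-built Ptr<TrainData>):
@code{.cpp}
Ptr<KNearest> knn = KNearest::create();   // defaults to the BRUTE_FORCE backend
knn->setAlgorithmType(KNearest::KDTREE);  // swaps in the KDTreeImpl backend
knn->setDefaultK(10);
knn->setIsClassifier(true);
knn->train(trainData);
@endcode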

@@ -60,31 +60,41 @@ using namespace std;
namespace cv {
namespace ml {

LogisticRegression::Params::Params(double learning_rate,
int iters,
int method,
int normlization,
int reg,
int batch_size)
class LrParams
{
alpha = learning_rate;
num_iters = iters;
norm = normlization;
regularized = reg;
train_method = method;
mini_batch_size = batch_size;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
public:
LrParams()
{
alpha = 0.001;
num_iters = 1000;
norm = LogisticRegression::REG_L2;
train_method = LogisticRegression::BATCH;
mini_batch_size = 1;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}

double alpha; //!< learning rate.
int num_iters; //!< number of iterations.
int norm;
int train_method;
int mini_batch_size;
TermCriteria term_crit;
};

class LogisticRegressionImpl : public LogisticRegression
{
public:
LogisticRegressionImpl(const Params& pms)
: params(pms)
{
}

LogisticRegressionImpl() { }
virtual ~LogisticRegressionImpl() {}

CV_IMPL_PROPERTY(double, LearningRate, params.alpha)
CV_IMPL_PROPERTY(int, Iterations, params.num_iters)
CV_IMPL_PROPERTY(int, Regularization, params.norm)
CV_IMPL_PROPERTY(int, TrainMethod, params.train_method)
CV_IMPL_PROPERTY(int, MiniBatchSize, params.mini_batch_size)
CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.term_crit)

virtual bool train( const Ptr<TrainData>& trainData, int=0 );
virtual float predict(InputArray samples, OutputArray results, int) const;
virtual void clear();

@@ -103,7 +113,7 @@ protected:
bool set_label_map(const Mat& _labels_i);
Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
protected:
Params params;
LrParams params;
Mat learnt_thetas;
map<int, int> forward_mapper;
map<int, int> reverse_mapper;

@@ -111,9 +121,9 @@ protected:
Mat labels_n;
};

Ptr<LogisticRegression> LogisticRegression::create(const Params& params)
Ptr<LogisticRegression> LogisticRegression::create()
{
return makePtr<LogisticRegressionImpl>(params);
return makePtr<LogisticRegressionImpl>();
}

bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)

@@ -312,7 +322,7 @@ double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels
theta_b = _init_theta(Range(1, n), Range::all());
multiply(theta_b, theta_b, theta_c, 1);

if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
llambda = 1;
}

@@ -367,7 +377,7 @@ Mat LogisticRegressionImpl::compute_batch_gradient(const Mat& _data, const Mat&
m = _data.rows;
n = _data.cols;

if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
llambda = 1;
}

@@ -439,7 +449,7 @@ Mat LogisticRegressionImpl::compute_mini_batch_gradient(const Mat& _data, const
Mat data_d;
Mat labels_l;

if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
lambda_l = 1;
}

@@ -570,7 +580,6 @@ void LogisticRegressionImpl::write(FileStorage& fs) const
fs<<"alpha"<<this->params.alpha;
fs<<"iterations"<<this->params.num_iters;
fs<<"norm"<<this->params.norm;
fs<<"regularized"<<this->params.regularized;
fs<<"train_method"<<this->params.train_method;
if(this->params.train_method == LogisticRegression::MINI_BATCH)
{

@@ -592,7 +601,6 @@ void LogisticRegressionImpl::read(const FileNode& fn)
this->params.alpha = (double)fn["alpha"];
this->params.num_iters = (int)fn["iterations"];
this->params.norm = (int)fn["norm"];
this->params.regularized = (int)fn["regularized"];
this->params.train_method = (int)fn["train_method"];

if(this->params.train_method == LogisticRegression::MINI_BATCH)

@@ -43,7 +43,6 @@
namespace cv {
namespace ml {

NormalBayesClassifier::Params::Params() {}

class NormalBayesClassifierImpl : public NormalBayesClassifier
{

@@ -53,9 +52,6 @@ public:
nallvars = 0;
}

void setParams(const Params&) {}
Params getParams() const { return Params(); }

bool train( const Ptr<TrainData>& trainData, int flags )
{
const float min_variation = FLT_EPSILON;

@@ -455,7 +451,7 @@ public:
};

Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const Params&)
Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
{
Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
return p;

@@ -120,6 +120,91 @@ namespace ml
return termCrit;
}

struct TreeParams
{
TreeParams();
TreeParams( int maxDepth, int minSampleCount,
double regressionAccuracy, bool useSurrogates,
int maxCategories, int CVFolds,
bool use1SERule, bool truncatePrunedTree,
const Mat& priors );

inline void setMaxCategories(int val)
{
if( val < 2 )
CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" );
maxCategories = std::min(val, 15 );
}
inline void setMaxDepth(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" );
maxDepth = std::min( val, 25 );
}
inline void setMinSampleCount(int val)
{
minSampleCount = std::max(val, 1);
}
inline void setCVFolds(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );
if( val == 1 )
val = 0;
CVFolds = val;
}
inline void setRegressionAccuracy(float val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
regressionAccuracy = val;
}

inline int getMaxCategories() const { return maxCategories; }
inline int getMaxDepth() const { return maxDepth; }
inline int getMinSampleCount() const { return minSampleCount; }
inline int getCVFolds() const { return CVFolds; }
inline float getRegressionAccuracy() const { return regressionAccuracy; }

CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates)
CV_IMPL_PROPERTY(bool, Use1SERule, use1SERule)
CV_IMPL_PROPERTY(bool, TruncatePrunedTree, truncatePrunedTree)
CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors)

public:
bool useSurrogates;
bool use1SERule;
bool truncatePrunedTree;
Mat priors;

protected:
int maxCategories;
int maxDepth;
int minSampleCount;
int CVFolds;
float regressionAccuracy;
};
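Note that, unlike the old plain Params fields, these setters validate their input; a hypothetical illustration of the clamping behaviour defined above:
@code{.cpp}
TreeParams tp;
tp.setMaxDepth(100);       // stored as std::min(100, 25) == 25
tp.setCVFolds(1);          // 1-fold cross-validation is meaningless, stored as 0
tp.setMinSampleCount(-5);  // stored as std::max(-5, 1) == 1
// tp.setMaxCategories(1); // would raise CV_StsOutOfRange ("should be >= 2")
@endcode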

struct RTreeParams
{
RTreeParams();
RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit );
bool calcVarImportance;
int nactiveVars;
TermCriteria termCrit;
};

struct BoostTreeParams
{
BoostTreeParams();
BoostTreeParams(int boostType, int weakCount, double weightTrimRate);
int boostType;
int weakCount;
double weightTrimRate;
};

class DTreesImpl : public DTrees
{
public:

@@ -191,6 +276,16 @@ namespace ml
int maxSubsetSize;
};

CV_WRAP_SAME_PROPERTY(int, MaxCategories, params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, params)

DTreesImpl();
virtual ~DTreesImpl();
virtual void clear();

@@ -202,8 +297,7 @@ namespace ml
int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }

virtual void setDParams(const Params& _params);
virtual Params getDParams() const;
virtual void setDParams(const TreeParams& _params);
virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
virtual void endTraining();
virtual void initCompVarIdx();

@@ -250,7 +344,7 @@ namespace ml
virtual const std::vector<Split>& getSplits() const { return splits; }
virtual const std::vector<int>& getSubsets() const { return subsets; }

Params params0, params;
TreeParams params;

vector<int> varIdx;
vector<int> compVarIdx;

@@ -48,21 +48,16 @@ namespace ml {
//////////////////////////////////////////////////////////////////////////////////////////
// Random trees //
//////////////////////////////////////////////////////////////////////////////////////////
RTrees::Params::Params()
: DTrees::Params(5, 10, 0.f, false, 10, 0, false, false, Mat())
RTreeParams::RTreeParams()
{
calcVarImportance = false;
nactiveVars = 0;
termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
}

RTrees::Params::Params( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, const Mat& _priors,
bool _calcVarImportance, int _nactiveVars,
TermCriteria _termCrit )
: DTrees::Params(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates,
_maxCategories, 0, false, false, _priors)
RTreeParams::RTreeParams(bool _calcVarImportance,
int _nactiveVars,
TermCriteria _termCrit )
{
calcVarImportance = _calcVarImportance;
nactiveVars = _nactiveVars;

@@ -73,19 +68,20 @@ RTrees::Params::Params( int _maxDepth, int _minSampleCount,
class DTreesImplForRTrees : public DTreesImpl
{
public:
DTreesImplForRTrees() {}
DTreesImplForRTrees()
{
params.setMaxDepth(5);
params.setMinSampleCount(10);
params.setRegressionAccuracy(0.f);
params.useSurrogates = false;
params.setMaxCategories(10);
params.setCVFolds(0);
params.use1SERule = false;
params.truncatePrunedTree = false;
params.priors = Mat();
}
virtual ~DTreesImplForRTrees() {}

void setRParams(const RTrees::Params& p)
{
rparams = p;
}

RTrees::Params getRParams() const
{
return rparams;
}

void clear()
{
DTreesImpl::clear();

@@ -129,10 +125,6 @@ public:

bool train( const Ptr<TrainData>& trainData, int flags )
{
Params dp(rparams.maxDepth, rparams.minSampleCount, rparams.regressionAccuracy,
rparams.useSurrogates, rparams.maxCategories, rparams.CVFolds,
rparams.use1SERule, rparams.truncatePrunedTree, rparams.priors);
setDParams(dp);
startTraining(trainData, flags);
int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
rparams.termCrit.maxCount : 10000;

@@ -326,12 +318,6 @@ public:
void readParams( const FileNode& fn )
{
DTreesImpl::readParams(fn);
rparams.maxDepth = params0.maxDepth;
rparams.minSampleCount = params0.minSampleCount;
rparams.regressionAccuracy = params0.regressionAccuracy;
rparams.useSurrogates = params0.useSurrogates;
rparams.maxCategories = params0.maxCategories;
rparams.priors = params0.priors;

FileNode tparams_node = fn["training_params"];
rparams.nactiveVars = (int)tparams_node["nactive_vars"];

@@ -361,7 +347,7 @@ public:
}
}

RTrees::Params rparams;
RTreeParams rparams;
double oobError;
vector<float> varImportance;
vector<int> allVars, activeVars;

@@ -372,6 +358,20 @@ public:
class RTreesImpl : public RTrees
{
public:
CV_IMPL_PROPERTY(bool, CalculateVarImportance, impl.rparams.calcVarImportance)
CV_IMPL_PROPERTY(int, ActiveVarCount, impl.rparams.nactiveVars)
CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, impl.rparams.termCrit)

CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)

RTreesImpl() {}
virtual ~RTreesImpl() {}

@@ -397,9 +397,6 @@ public:
impl.read(fn);
}

void setRParams(const Params& p) { impl.setRParams(p); }
Params getRParams() const { return impl.getRParams(); }

Mat getVarImportance() const { return Mat_<float>(impl.varImportance, true); }
int getVarCount() const { return impl.getVarCount(); }

@@ -415,11 +412,9 @@ public:
};

Ptr<RTrees> RTrees::create(const Params& params)
Ptr<RTrees> RTrees::create()
{
Ptr<RTreesImpl> p = makePtr<RTreesImpl>();
p->setRParams(params);
return p;
return makePtr<RTreesImpl>();
}
}}
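A sketch of configuring random trees through the new accessors (formerly RTrees::Params; 0 for the active variable count keeps the library default):
@code{.cpp}
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setCalculateVarImportance(true);  // formerly Params::calcVarImportance
rtrees->setActiveVarCount(0);             // formerly Params::nactiveVars
rtrees->setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1));
@endcode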

@@ -103,54 +103,60 @@ static void checkParamGrid(const ParamGrid& pg)
}

// SVM training parameters
SVM::Params::Params()
struct SvmParams
{
svmType = SVM::C_SVC;
kernelType = SVM::RBF;
degree = 0;
gamma = 1;
coef0 = 0;
C = 1;
nu = 0;
p = 0;
termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
}
int svmType;
int kernelType;
double gamma;
double coef0;
double degree;
double C;
double nu;
double p;
Mat classWeights;
TermCriteria termCrit;

SvmParams()
{
svmType = SVM::C_SVC;
kernelType = SVM::RBF;
degree = 0;
gamma = 1;
coef0 = 0;
C = 1;
nu = 0;
p = 0;
termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
}

SVM::Params::Params( int _svmType, int _kernelType,
double _degree, double _gamma, double _coef0,
double _Con, double _nu, double _p,
const Mat& _classWeights, TermCriteria _termCrit )
{
svmType = _svmType;
kernelType = _kernelType;
degree = _degree;
gamma = _gamma;
coef0 = _coef0;
C = _Con;
nu = _nu;
p = _p;
classWeights = _classWeights;
termCrit = _termCrit;
}
SvmParams( int _svmType, int _kernelType,
double _degree, double _gamma, double _coef0,
double _Con, double _nu, double _p,
const Mat& _classWeights, TermCriteria _termCrit )
{
svmType = _svmType;
kernelType = _kernelType;
degree = _degree;
gamma = _gamma;
coef0 = _coef0;
C = _Con;
nu = _nu;
p = _p;
classWeights = _classWeights;
termCrit = _termCrit;
}
};
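For completeness, a hedged sketch of driving these parameters through the property accessors defined further down (a no-argument SVM::create() is assumed here by analogy with the other *::create() changes in this commit; it is not shown in the truncated diff below):
@code{.cpp}
Ptr<SVM> svm = SVM::create();  // assumed counterpart of the other create() changes
svm->setType(SVM::C_SVC);      // formerly Params::svmType
svm->setKernel(SVM::RBF);      // rebuilds the internal SVMKernelImpl
svm->setGamma(1);
svm->setC(1);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON));
@endcode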
|
||||
|
||||
/////////////////////////////////////// SVM kernel ///////////////////////////////////////
class SVMKernelImpl : public SVM::Kernel
{
public:
SVMKernelImpl()
{
}

SVMKernelImpl( const SVM::Params& _params )
SVMKernelImpl( const SvmParams& _params = SvmParams() )
{
params = _params;
}

virtual ~SVMKernelImpl()
{
}

int getType() const
{
return params.kernelType;
@@ -327,7 +333,7 @@ public:
}
}

SVM::Params params;
SvmParams params;
};

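Because SVMKernelImpl now covers only the built-in kernels, user-defined kernels go through the new setCustomKernel path introduced further down. A hedged sketch of such a kernel, assuming the SVM::Kernel interface exposes getType() and a calc() that evaluates one vector against vcount stored samples:

    class MyLinearKernel : public SVM::Kernel
    {
    public:
        int getType() const { return SVM::CUSTOM; }
        void calc(int vcount, int n, const float* vecs, const float* another, float* results)
        {
            // plain dot product between 'another' and each stored sample vector
            for (int i = 0; i < vcount; i++)
            {
                double s = 0;
                for (int j = 0; j < n; j++)
                    s += vecs[i*n + j] * another[j];
                results[i] = (float)s;
            }
        }
    };
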
@@ -1185,7 +1191,7 @@ public:
int cache_size;
int max_cache_size;
Mat samples;
SVM::Params params;
SvmParams params;
vector<KernelRow> lru_cache;
int lru_first;
int lru_last;
@@ -1215,6 +1221,7 @@ public:
SVMImpl()
{
clear();
checkParams();
}

~SVMImpl()
@@ -1235,33 +1242,69 @@ public:
return sv;
}

void setParams( const Params& _params, const Ptr<Kernel>& _kernel )
CV_IMPL_PROPERTY(int, Type, params.svmType)
CV_IMPL_PROPERTY(double, Gamma, params.gamma)
CV_IMPL_PROPERTY(double, Coef0, params.coef0)
CV_IMPL_PROPERTY(double, Degree, params.degree)
CV_IMPL_PROPERTY(double, C, params.C)
CV_IMPL_PROPERTY(double, Nu, params.nu)
CV_IMPL_PROPERTY(double, P, params.p)
CV_IMPL_PROPERTY_S(cv::Mat, ClassWeights, params.classWeights)
CV_IMPL_PROPERTY_S(cv::TermCriteria, TermCriteria, params.termCrit)

int getKernelType() const
{
params = _params;
return params.kernelType;
}

void setKernel(int kernelType)
{
params.kernelType = kernelType;
if (kernelType != CUSTOM)
kernel = makePtr<SVMKernelImpl>(params);
}

void setCustomKernel(const Ptr<Kernel> &_kernel)
{
params.kernelType = CUSTOM;
kernel = _kernel;
}

void checkParams()
{
int kernelType = params.kernelType;
if (kernelType != CUSTOM)
{
if( kernelType != LINEAR && kernelType != POLY &&
kernelType != SIGMOID && kernelType != RBF &&
kernelType != INTER && kernelType != CHI2)
CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" );

if( kernelType == LINEAR )
params.gamma = 1;
else if( params.gamma <= 0 )
CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" );

if( kernelType != SIGMOID && kernelType != POLY )
params.coef0 = 0;
else if( params.coef0 < 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );

if( kernelType != POLY )
params.degree = 0;
else if( params.degree <= 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );

kernel = makePtr<SVMKernelImpl>(params);
}
else
{
if (!kernel)
CV_Error( CV_StsBadArg, "Custom kernel is not set" );
}

int svmType = params.svmType;

if( kernelType != LINEAR && kernelType != POLY &&
kernelType != SIGMOID && kernelType != RBF &&
kernelType != INTER && kernelType != CHI2)
CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" );

if( kernelType == LINEAR )
params.gamma = 1;
else if( params.gamma <= 0 )
CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" );

if( kernelType != SIGMOID && kernelType != POLY )
params.coef0 = 0;
else if( params.coef0 < 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );

if( kernelType != POLY )
params.degree = 0;
else if( params.degree <= 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );

if( svmType != C_SVC && svmType != NU_SVC &&
svmType != ONE_CLASS && svmType != EPS_SVR &&
svmType != NU_SVR )
@@ -1285,28 +1328,18 @@ public:
if( svmType != C_SVC )
params.classWeights.release();

termCrit = params.termCrit;
if( !(termCrit.type & TermCriteria::EPS) )
termCrit.epsilon = DBL_EPSILON;
termCrit.epsilon = std::max(termCrit.epsilon, DBL_EPSILON);
if( !(termCrit.type & TermCriteria::COUNT) )
termCrit.maxCount = INT_MAX;
termCrit.maxCount = std::max(termCrit.maxCount, 1);

if( _kernel )
kernel = _kernel;
else
kernel = makePtr<SVMKernelImpl>(params);
if( !(params.termCrit.type & TermCriteria::EPS) )
params.termCrit.epsilon = DBL_EPSILON;
params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON);
if( !(params.termCrit.type & TermCriteria::COUNT) )
params.termCrit.maxCount = INT_MAX;
params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1);
}

Params getParams() const
void setParams( const SvmParams& _params)
{
return params;
}

Ptr<Kernel> getKernel() const
{
return kernel;
params = _params;
checkParams();
}

int getSVCount(int i) const
@@ -1335,9 +1368,9 @@ public:
_responses.convertTo(_yf, CV_32F);

bool ok =
svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, termCrit ) :
svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, termCrit ) :
svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, termCrit ) : false;
svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) :
svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) :
svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false;

if( !ok )
return false;
@@ -1397,7 +1430,7 @@ public:
//check that while cross-validation there were the samples from all the classes
if( class_ranges[class_count] <= 0 )
CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have "
"been fell out of the sample. Try to enlarge <CvSVMParams::k_fold>" );
"been fell out of the sample. Try to enlarge <Params::k_fold>" );

if( svmType == NU_SVC )
{
@@ -1448,10 +1481,10 @@ public:
DecisionFunc df;
bool ok = params.svmType == C_SVC ?
Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn,
kernel, _alpha, sinfo, termCrit ) :
kernel, _alpha, sinfo, params.termCrit ) :
params.svmType == NU_SVC ?
Solver::solve_nu_svc( temp_samples, temp_y, params.nu,
kernel, _alpha, sinfo, termCrit ) :
kernel, _alpha, sinfo, params.termCrit ) :
false;
if( !ok )
return false;
@@ -1557,6 +1590,8 @@ public:
{
clear();

checkParams();

int svmType = params.svmType;
Mat samples = data->getTrainSamples();
Mat responses;
@@ -1586,6 +1621,8 @@ public:
ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid,
bool balanced )
{
checkParams();

int svmType = params.svmType;
RNG rng((uint64)-1);

@@ -1708,7 +1745,7 @@ public:
int test_sample_count = (sample_count + k_fold/2)/k_fold;
int train_sample_count = sample_count - test_sample_count;

Params best_params = params;
SvmParams best_params = params;
double min_error = FLT_MAX;

int rtype = responses.type();
@@ -1729,7 +1766,7 @@ public:
FOR_IN_GRID(degree, degree_grid)
{
// make sure we updated the kernel and other parameters
setParams(params, Ptr<Kernel>() );
setParams(params);

double error = 0;
for( k = 0; k < k_fold; k++ )
@@ -1919,7 +1956,9 @@ public:
kernelType == LINEAR ? "LINEAR" :
kernelType == POLY ? "POLY" :
kernelType == RBF ? "RBF" :
kernelType == SIGMOID ? "SIGMOID" : format("Unknown_%d", kernelType);
kernelType == SIGMOID ? "SIGMOID" :
kernelType == CHI2 ? "CHI2" :
kernelType == INTER ? "INTER" : format("Unknown_%d", kernelType);

fs << "svmType" << svm_type_str;

@@ -2036,7 +2075,7 @@ public:

void read_params( const FileNode& fn )
{
Params _params;
SvmParams _params;

// check for old naming
String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]);
@@ -2059,10 +2098,12 @@ public:
kernel_type_str == "LINEAR" ? LINEAR :
kernel_type_str == "POLY" ? POLY :
kernel_type_str == "RBF" ? RBF :
kernel_type_str == "SIGMOID" ? SIGMOID : -1;
kernel_type_str == "SIGMOID" ? SIGMOID :
kernel_type_str == "CHI2" ? CHI2 :
kernel_type_str == "INTER" ? INTER : CUSTOM;

if( kernelType < 0 )
CV_Error( CV_StsParseError, "Missing of invalid SVM kernel type" );
if( kernelType == CUSTOM )
CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" );

_params.svmType = svmType;
_params.kernelType = kernelType;
@@ -2086,7 +2127,7 @@ public:
else
_params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON );

setParams( _params, Ptr<Kernel>() );
setParams( _params );
}

void read( const FileNode& fn )
@@ -2154,8 +2195,7 @@ public:
optimize_linear_svm();
}

Params params;
TermCriteria termCrit;
SvmParams params;
Mat class_labels;
int var_count;
Mat sv;
@@ -2167,11 +2207,9 @@ public:
};


Ptr<SVM> SVM::create(const Params& params, const Ptr<SVM::Kernel>& kernel)
Ptr<SVM> SVM::create()
{
Ptr<SVMImpl> p = makePtr<SVMImpl>();
p->setParams(params, kernel);
return p;
return makePtr<SVMImpl>();
}

}

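Taken together, the SVM configuration that used to travel through Params now happens on the object itself. A minimal sketch of a migrated call site (data and responses assumed prepared; values illustrative):

    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::RBF);
    svm->setGamma(0.5);
    svm->setC(10);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 1000, FLT_EPSILON));
    svm->train(data, ROW_SAMPLE, responses);
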
@@ -48,18 +48,7 @@ namespace ml {

using std::vector;

void DTrees::setDParams(const DTrees::Params&)
{
CV_Error(CV_StsNotImplemented, "");
}

DTrees::Params DTrees::getDParams() const
{
CV_Error(CV_StsNotImplemented, "");
return DTrees::Params();
}

DTrees::Params::Params()
TreeParams::TreeParams()
{
maxDepth = INT_MAX;
minSampleCount = 10;
@@ -72,11 +61,11 @@ DTrees::Params::Params()
priors = Mat();
}

DTrees::Params::Params( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, int _CVFolds,
bool _use1SERule, bool _truncatePrunedTree,
const Mat& _priors )
TreeParams::TreeParams(int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, int _CVFolds,
bool _use1SERule, bool _truncatePrunedTree,
const Mat& _priors)
{
maxDepth = _maxDepth;
minSampleCount = _minSampleCount;
@@ -248,7 +237,7 @@ const vector<int>& DTreesImpl::getActiveVars()

int DTreesImpl::addTree(const vector<int>& sidx )
{
size_t n = (params.maxDepth > 0 ? (1 << params.maxDepth) : 1024) + w->wnodes.size();
size_t n = (params.getMaxDepth() > 0 ? (1 << params.getMaxDepth()) : 1024) + w->wnodes.size();

w->wnodes.reserve(n);
w->wsplits.reserve(n);
@@ -257,7 +246,7 @@ int DTreesImpl::addTree(const vector<int>& sidx )
w->wsplits.clear();
w->wsubsets.clear();

int cv_n = params.CVFolds;
int cv_n = params.getCVFolds();

if( cv_n > 0 )
{
@@ -347,34 +336,9 @@ int DTreesImpl::addTree(const vector<int>& sidx )
return root;
}

DTrees::Params DTreesImpl::getDParams() const
void DTreesImpl::setDParams(const TreeParams& _params)
{
return params0;
}

void DTreesImpl::setDParams(const Params& _params)
{
params0 = params = _params;
if( params.maxCategories < 2 )
CV_Error( CV_StsOutOfRange, "params.max_categories should be >= 2" );
params.maxCategories = std::min( params.maxCategories, 15 );

if( params.maxDepth < 0 )
CV_Error( CV_StsOutOfRange, "params.max_depth should be >= 0" );
params.maxDepth = std::min( params.maxDepth, 25 );

params.minSampleCount = std::max(params.minSampleCount, 1);

if( params.CVFolds < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );

if( params.CVFolds == 1 )
params.CVFolds = 0;

if( params.regressionAccuracy < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
params = _params;
}

int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
@@ -385,7 +349,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )

node.parent = parent;
node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0;
int nfolds = params.CVFolds;
int nfolds = params.getCVFolds();

if( nfolds > 0 )
{
@@ -400,7 +364,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )

calcValue( nidx, sidx );

if( n <= params.minSampleCount || node.depth >= params.maxDepth )
if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() )
can_split = false;
else if( _isClassifier )
{
@@ -415,7 +379,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
}
else
{
if( sqrt(node.node_risk) < params.regressionAccuracy )
if( sqrt(node.node_risk) < params.getRegressionAccuracy() )
can_split = false;
}

@@ -493,7 +457,7 @@ int DTreesImpl::findBestSplit( const vector<int>& _sidx )
void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
{
WNode* node = &w->wnodes[nidx];
int i, j, k, n = (int)_sidx.size(), cv_n = params.CVFolds;
int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds();
int m = (int)classLabels.size();

cv::AutoBuffer<double> buf(std::max(m, 3)*(cv_n+1));
@@ -841,8 +805,8 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
int m = (int)classLabels.size();

int base_size = m*(3 + mi) + mi + 1;
if( m > 2 && mi > params.maxCategories )
base_size += m*std::min(params.maxCategories, n) + mi;
if( m > 2 && mi > params.getMaxCategories() )
base_size += m*std::min(params.getMaxCategories(), n) + mi;
else
base_size += mi;
AutoBuffer<double> buf(base_size + n);
@@ -880,9 +844,9 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si

if( m > 2 )
{
if( mi > params.maxCategories )
if( mi > params.getMaxCategories() )
{
mi = std::min(params.maxCategories, n);
mi = std::min(params.getMaxCategories(), n);
cjk = c_weights + _mi;
cluster_labels = (int*)(cjk + m*mi);
clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels );
@@ -1228,7 +1192,7 @@ int DTreesImpl::pruneCV( int root )
// 2. choose the best tree index (if need, apply 1SE rule).
// 3. store the best index and cut the branches.

int ti, tree_count = 0, j, cv_n = params.CVFolds, n = w->wnodes[root].sample_count;
int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count;
// currently, 1SE for regression is not implemented
bool use_1se = params.use1SERule != 0 && _isClassifier;
double min_err = 0, min_err_se = 0;
@@ -1294,7 +1258,7 @@ int DTreesImpl::pruneCV( int root )

double DTreesImpl::updateTreeRNC( int root, double T, int fold )
{
int nidx = root, pidx = -1, cv_n = params.CVFolds;
int nidx = root, pidx = -1, cv_n = params.getCVFolds();
double min_alpha = DBL_MAX;

for(;;)
@@ -1350,7 +1314,7 @@ double DTreesImpl::updateTreeRNC( int root, double T, int fold )

bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha )
{
int cv_n = params.CVFolds, nidx = root, pidx = -1;
int cv_n = params.getCVFolds(), nidx = root, pidx = -1;
WNode* node = &w->wnodes[root];
if( node->left < 0 )
return true;
@@ -1560,19 +1524,19 @@ float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags

void DTreesImpl::writeTrainingParams(FileStorage& fs) const
{
fs << "use_surrogates" << (params0.useSurrogates ? 1 : 0);
fs << "max_categories" << params0.maxCategories;
fs << "regression_accuracy" << params0.regressionAccuracy;
fs << "use_surrogates" << (params.useSurrogates ? 1 : 0);
fs << "max_categories" << params.getMaxCategories();
fs << "regression_accuracy" << params.getRegressionAccuracy();

fs << "max_depth" << params0.maxDepth;
fs << "min_sample_count" << params0.minSampleCount;
fs << "cross_validation_folds" << params0.CVFolds;
fs << "max_depth" << params.getMaxDepth();
fs << "min_sample_count" << params.getMinSampleCount();
fs << "cross_validation_folds" << params.getCVFolds();

if( params0.CVFolds > 1 )
fs << "use_1se_rule" << (params0.use1SERule ? 1 : 0);
if( params.getCVFolds() > 1 )
fs << "use_1se_rule" << (params.use1SERule ? 1 : 0);

if( !params0.priors.empty() )
fs << "priors" << params0.priors;
if( !params.priors.empty() )
fs << "priors" << params.priors;
}

void DTreesImpl::writeParams(FileStorage& fs) const
@@ -1724,18 +1688,18 @@ void DTreesImpl::readParams( const FileNode& fn )

FileNode tparams_node = fn["training_params"];

params0 = Params();
TreeParams params0 = TreeParams();

if( !tparams_node.empty() ) // training parameters are not necessary
{
params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0;
params0.maxCategories = (int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]);
params0.regressionAccuracy = (float)tparams_node["regression_accuracy"];
params0.maxDepth = (int)tparams_node["max_depth"];
params0.minSampleCount = (int)tparams_node["min_sample_count"];
params0.CVFolds = (int)tparams_node["cross_validation_folds"];
params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]));
params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]);
params0.setMaxDepth((int)tparams_node["max_depth"]);
params0.setMinSampleCount((int)tparams_node["min_sample_count"]);
params0.setCVFolds((int)tparams_node["cross_validation_folds"]);

if( params0.CVFolds > 1 )
if( params0.getCVFolds() > 1 )
{
params.use1SERule = (int)tparams_node["use_1se_rule"] != 0;
}
@@ -1964,11 +1928,9 @@ void DTreesImpl::read( const FileNode& fn )
readTree(fnodes);
}

Ptr<DTrees> DTrees::create(const DTrees::Params& params)
Ptr<DTrees> DTrees::create()
{
Ptr<DTreesImpl> p = makePtr<DTreesImpl>();
p->setDParams(params);
return p;
return makePtr<DTreesImpl>();
}

}

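Note that the range checks formerly performed in setDParams (maxCategories >= 2, maxDepth clamped to 25, and so on) are no longer applied here; presumably they move into the individual TreeParams setters. A sketch of the new call pattern:

    Ptr<DTrees> dtree = DTrees::create();
    dtree->setMaxDepth(8);
    dtree->setMinSampleCount(2);
    dtree->setCVFolds(0);        // 0 = no cross-validation pruning
    dtree->train(trainData);     // Ptr<TrainData> assumed prepared
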
@@ -330,7 +330,8 @@ void CV_KNearestTest::run( int /*start_from*/ )
}

// KNearest KDTree implementation
Ptr<KNearest> knearestKdt = KNearest::create(ml::KNearest::Params(10, true, INT_MAX, ml::KNearest::KDTREE));
Ptr<KNearest> knearestKdt = KNearest::create();
knearestKdt->setAlgorithmType(KNearest::KDTREE);
knearestKdt->train(trainData, ml::ROW_SAMPLE, trainLabels);
knearestKdt->findNearest(testData, 4, bestLabels);
if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
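Only the algorithm type needs restating here, because the other three old Params fields (defaultK = 10, isclassifier = true, Emax = INT_MAX) appear to match the defaults of KNearest::create(); any other value would be set the same way:

    Ptr<KNearest> knn = KNearest::create();   // brute-force search by default (assumed)
    knn->setDefaultK(10);
    knn->setAlgorithmType(KNearest::KDTREE);
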
@@ -394,16 +395,18 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params,
cv::Mat labels;
float err;

Ptr<EM> em;
EM::Params emp(params.nclusters, params.covMatType, params.termCrit);
Ptr<EM> em = EM::create();
em->setClustersNumber(params.nclusters);
em->setCovarianceMatrixType(params.covMatType);
em->setTermCriteria(params.termCrit);
if( params.startStep == EM::START_AUTO_STEP )
em = EM::train( trainData, noArray(), labels, noArray(), emp );
em->trainEM( trainData, noArray(), labels, noArray() );
else if( params.startStep == EM::START_E_STEP )
em = EM::train_startWithE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray(), emp );
em->trainE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray() );
else if( params.startStep == EM::START_M_STEP )
em = EM::train_startWithM( trainData, *params.probs,
noArray(), labels, noArray(), emp );
em->trainM( trainData, *params.probs,
noArray(), labels, noArray() );

// check train error
if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
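The static EM::train* helpers dissolve into the same pattern: construct once, configure, then pick the training entry point (trainEM, trainE, or trainM). A condensed sketch:

    Ptr<EM> em = EM::create();
    em->setClustersNumber(5);
    em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
    em->trainEM(samples, noArray(), labels, noArray());  // samples assumed one row per observation
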
@@ -543,7 +546,9 @@ protected:

Mat labels;

Ptr<EM> em = EM::train(samples, noArray(), labels, noArray(), EM::Params(nclusters));
Ptr<EM> em = EM::create();
em->setClustersNumber(nclusters);
em->trainEM(samples, noArray(), labels, noArray());

Mat firstResult(samples.rows, 1, CV_32SC1);
for( int i = 0; i < samples.rows; i++)
@@ -644,8 +649,13 @@ protected:
samples1.push_back(sample);
}
}
Ptr<EM> model0 = EM::train(samples0, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model1 = EM::train(samples1, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model0 = EM::create();
model0->setClustersNumber(3);
model0->trainEM(samples0, noArray(), noArray(), noArray());

Ptr<EM> model1 = EM::create();
model1->setClustersNumber(3);
model1->trainEM(samples1, noArray(), noArray(), noArray());

Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)),
testConfusionMat(2, 2, CV_32SC1, Scalar(0));

@@ -95,16 +95,13 @@ void CV_LRTest::run( int /*start_from*/ )
string dataFileName = ts->get_data_path() + "iris.data";
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);

LogisticRegression::Params params = LogisticRegression::Params();
params.alpha = 1.0;
params.num_iters = 10001;
params.norm = LogisticRegression::REG_L2;
params.regularized = 1;
params.train_method = LogisticRegression::BATCH;
params.mini_batch_size = 10;

// run LR classifier train classifier
Ptr<LogisticRegression> p = LogisticRegression::create(params);
Ptr<LogisticRegression> p = LogisticRegression::create();
p->setLearningRate(1.0);
p->setIterations(10001);
p->setRegularization(LogisticRegression::REG_L2);
p->setTrainMethod(LogisticRegression::BATCH);
p->setMiniBatchSize(10);
p->train(tdata);

// predict using the same data
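The old regularized flag has no direct counterpart in the new calls; presumably setRegularization subsumes it, with REG_DISABLE turning regularization off. The remaining fields map one-to-one:

    Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(0.001);                            // was params.alpha
    lr->setIterations(100);                                // was params.num_iters
    lr->setRegularization(LogisticRegression::REG_L1);     // was params.norm (+ regularized)
    lr->setTrainMethod(LogisticRegression::MINI_BATCH);    // was params.train_method
    lr->setMiniBatchSize(1);                               // was params.mini_batch_size
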
@@ -157,20 +154,17 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
Mat responses1, responses2;
Mat learnt_mat1, learnt_mat2;

LogisticRegression::Params params1 = LogisticRegression::Params();
params1.alpha = 1.0;
params1.num_iters = 10001;
params1.norm = LogisticRegression::REG_L2;
params1.regularized = 1;
params1.train_method = LogisticRegression::BATCH;
params1.mini_batch_size = 10;

// train and save the classifier
String filename = tempfile(".xml");
try
{
// run LR classifier train classifier
Ptr<LogisticRegression> lr1 = LogisticRegression::create(params1);
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(1.0);
lr1->setIterations(10001);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(10);
lr1->train(tdata);
lr1->predict(tdata->getSamples(), responses1);
learnt_mat1 = lr1->get_learnt_thetas();

@@ -73,30 +73,14 @@ int str_to_svm_kernel_type( String& str )
return -1;
}

Ptr<SVM> svm_train_auto( Ptr<TrainData> _data, SVM::Params _params,
int k_fold, ParamGrid C_grid, ParamGrid gamma_grid,
ParamGrid p_grid, ParamGrid nu_grid, ParamGrid coef_grid,
ParamGrid degree_grid )
{
Mat _train_data = _data->getSamples();
Mat _responses = _data->getResponses();
Mat _var_idx = _data->getVarIdx();
Mat _sample_idx = _data->getTrainSampleIdx();

Ptr<SVM> svm = SVM::create(_params);
if( svm->trainAuto( _data, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ) )
return svm;
return Ptr<SVM>();
}

// 4. em
// 5. ann
int str_to_ann_train_method( String& str )
{
if( !str.compare("BACKPROP") )
return ANN_MLP::Params::BACKPROP;
return ANN_MLP::BACKPROP;
if( !str.compare("RPROP") )
return ANN_MLP::Params::RPROP;
return ANN_MLP::RPROP;
CV_Error( CV_StsBadArg, "incorrect ann train method string" );
return -1;
}
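With the svm_train_auto helper deleted, test code is expected to call the member function directly; a sketch using the stock parameter grids (grid accessors assumed unchanged from the SVM interface):

    Ptr<SVM> svm = SVM::create();
    bool ok = svm->trainAuto( data, 10,
                              SVM::getDefaultGrid(SVM::C),
                              SVM::getDefaultGrid(SVM::GAMMA),
                              SVM::getDefaultGrid(SVM::P),
                              SVM::getDefaultGrid(SVM::NU),
                              SVM::getDefaultGrid(SVM::COEF),
                              SVM::getDefaultGrid(SVM::DEGREE) );
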
@@ -343,16 +327,16 @@ int CV_MLBaseTest::train( int testCaseIdx )
String svm_type_str, kernel_type_str;
modelParamsNode["svm_type"] >> svm_type_str;
modelParamsNode["kernel_type"] >> kernel_type_str;
SVM::Params params;
params.svmType = str_to_svm_type( svm_type_str );
params.kernelType = str_to_svm_kernel_type( kernel_type_str );
modelParamsNode["degree"] >> params.degree;
modelParamsNode["gamma"] >> params.gamma;
modelParamsNode["coef0"] >> params.coef0;
modelParamsNode["C"] >> params.C;
modelParamsNode["nu"] >> params.nu;
modelParamsNode["p"] >> params.p;
model = SVM::create(params);
Ptr<SVM> m = SVM::create();
m->setType(str_to_svm_type( svm_type_str ));
m->setKernel(str_to_svm_kernel_type( kernel_type_str ));
m->setDegree(modelParamsNode["degree"]);
m->setGamma(modelParamsNode["gamma"]);
m->setCoef0(modelParamsNode["coef0"]);
m->setC(modelParamsNode["C"]);
m->setNu(modelParamsNode["nu"]);
m->setP(modelParamsNode["p"]);
model = m;
}
else if( modelName == CV_EM )
{
@@ -371,9 +355,13 @@ int CV_MLBaseTest::train( int testCaseIdx )
data->getVarIdx(), data->getTrainSampleIdx());
int layer_sz[] = { data->getNAllVars(), 100, 100, (int)cls_map.size() };
Mat layer_sizes( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
model = ANN_MLP::create(ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0,
TermCriteria(TermCriteria::COUNT,300,0.01),
str_to_ann_train_method(train_method_str), param1, param2));
Ptr<ANN_MLP> m = ANN_MLP::create();
m->setLayerSizes(layer_sizes);
m->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT,300,0.01));
m->setTrainMethod(str_to_ann_train_method(train_method_str), param1, param2);
model = m;

}
else if( modelName == CV_DTREE )
{
@@ -386,8 +374,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["max_categories"] >> MAX_CATEGORIES;
modelParamsNode["cv_folds"] >> CV_FOLDS;
modelParamsNode["is_pruned"] >> IS_PRUNED;
model = DTrees::create(DTrees::Params(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE,
MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, Mat() ));

Ptr<DTrees> m = DTrees::create();
m->setMaxDepth(MAX_DEPTH);
m->setMinSampleCount(MIN_SAMPLE_COUNT);
m->setRegressionAccuracy(REG_ACCURACY);
m->setUseSurrogates(USE_SURROGATE);
m->setMaxCategories(MAX_CATEGORIES);
m->setCVFolds(CV_FOLDS);
m->setUse1SERule(false);
m->setTruncatePrunedTree(IS_PRUNED);
m->setPriors(Mat());
model = m;
}
else if( modelName == CV_BOOST )
{
@@ -401,7 +399,15 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["weight_trim_rate"] >> WEIGHT_TRIM_RATE;
modelParamsNode["max_depth"] >> MAX_DEPTH;
//modelParamsNode["use_surrogate"] >> USE_SURROGATE;
model = Boost::create( Boost::Params(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, Mat()) );

Ptr<Boost> m = Boost::create();
m->setBoostType(BOOST_TYPE);
m->setWeakCount(WEAK_COUNT);
m->setWeightTrimRate(WEIGHT_TRIM_RATE);
m->setMaxDepth(MAX_DEPTH);
m->setUseSurrogates(USE_SURROGATE);
m->setPriors(Mat());
model = m;
}
else if( modelName == CV_RTREES )
{
@@ -416,9 +422,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["is_pruned"] >> IS_PRUNED;
modelParamsNode["nactive_vars"] >> NACTIVE_VARS;
modelParamsNode["max_trees_num"] >> MAX_TREES_NUM;
model = RTrees::create(RTrees::Params( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
USE_SURROGATE, MAX_CATEGORIES, Mat(), true, // (calc_var_importance == true) <=> RF processes variable importance
NACTIVE_VARS, TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS)));

Ptr<RTrees> m = RTrees::create();
m->setMaxDepth(MAX_DEPTH);
m->setMinSampleCount(MIN_SAMPLE_COUNT);
m->setRegressionAccuracy(REG_ACCURACY);
m->setUseSurrogates(USE_SURROGATE);
m->setMaxCategories(MAX_CATEGORIES);
m->setPriors(Mat());
m->setCalculateVarImportance(true);
m->setActiveVarCount(NACTIVE_VARS);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS));
model = m;
}

if( !model.empty() )

@@ -149,9 +149,8 @@ int CV_SLMLTest::validate_test_results( int testCaseIdx )
}

TEST(ML_NaiveBayes, save_load) { CV_SLMLTest test( CV_NBAYES ); test.safe_run(); }
//CV_SLMLTest lsmlknearest( CV_KNEAREST, "slknearest" ); // does not support save!
TEST(ML_KNearest, save_load) { CV_SLMLTest test( CV_KNEAREST ); test.safe_run(); }
TEST(ML_SVM, save_load) { CV_SLMLTest test( CV_SVM ); test.safe_run(); }
//CV_SLMLTest lsmlem( CV_EM, "slem" ); // does not support save!
TEST(ML_ANN, save_load) { CV_SLMLTest test( CV_ANN ); test.safe_run(); }
TEST(ML_DTree, save_load) { CV_SLMLTest test( CV_DTREE ); test.safe_run(); }
TEST(ML_Boost, save_load) { CV_SLMLTest test( CV_BOOST ); test.safe_run(); }

@@ -104,34 +104,34 @@ namespace cv
*/
virtual void collectGarbage();

//! @name Scale factor
//! @brief Scale factor
CV_PURE_PROPERTY(int, Scale)

//! @name Iterations count
//! @brief Iterations count
CV_PURE_PROPERTY(int, Iterations)

//! @name Asymptotic value of steepest descent method
//! @brief Asymptotic value of steepest descent method
CV_PURE_PROPERTY(double, Tau)

//! @name Weight parameter to balance data term and smoothness term
//! @brief Weight parameter to balance data term and smoothness term
CV_PURE_PROPERTY(double, Labmda)

//! @name Parameter of spatial distribution in Bilateral-TV
//! @brief Parameter of spatial distribution in Bilateral-TV
CV_PURE_PROPERTY(double, Alpha)

//! @name Kernel size of Bilateral-TV filter
//! @brief Kernel size of Bilateral-TV filter
CV_PURE_PROPERTY(int, KernelSize)

//! @name Gaussian blur kernel size
//! @brief Gaussian blur kernel size
CV_PURE_PROPERTY(int, BlurKernelSize)

//! @name Gaussian blur sigma
//! @brief Gaussian blur sigma
CV_PURE_PROPERTY(double, BlurSigma)

//! @name Radius of the temporal search area
//! @brief Radius of the temporal search area
CV_PURE_PROPERTY(int, TemporalAreaRadius)

//! @name Dense optical flow algorithm
//! @brief Dense optical flow algorithm
CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)

protected:

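On the superres side the same property macros now document and declare each knob in one place. A hedged sketch of driving the interface (factory name taken from the superres module):

    Ptr<superres::SuperResolution> sr = superres::createSuperResolution_BTVL1();
    sr->setScale(4);                 // upscaling factor
    sr->setIterations(50);
    sr->setTemporalAreaRadius(2);
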
@@ -98,17 +98,17 @@ namespace cv
class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
//! @name Flow smoothness
//! @brief Flow smoothness
CV_PURE_PROPERTY(double, Alpha)
//! @name Gradient constancy importance
//! @brief Gradient constancy importance
CV_PURE_PROPERTY(double, Gamma)
//! @name Pyramid scale factor
//! @brief Pyramid scale factor
CV_PURE_PROPERTY(double, ScaleFactor)
//! @name Number of lagged non-linearity iterations (inner loop)
//! @brief Number of lagged non-linearity iterations (inner loop)
CV_PURE_PROPERTY(int, InnerIterations)
//! @name Number of warping iterations (number of pyramid levels)
//! @brief Number of warping iterations (number of pyramid levels)
CV_PURE_PROPERTY(int, OuterIterations)
//! @name Number of linear system solver iterations
//! @brief Number of linear system solver iterations
CV_PURE_PROPERTY(int, SolverIterations)
};
CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();

@@ -328,18 +328,6 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()

namespace
{
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) \
type get##name() const \
{ \
return internal_obj->get##internal_name(); \
} \
void set##name(type _name) \
{ \
internal_obj->set##internal_name(_name); \
}

#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)

class DualTVL1 : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow
{
public:
@@ -347,14 +335,14 @@ namespace
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();

CV_WRAP_SAME_PROPERTY(double, Tau, alg_)
CV_WRAP_SAME_PROPERTY(double, Lambda, alg_)
CV_WRAP_SAME_PROPERTY(double, Theta, alg_)
CV_WRAP_SAME_PROPERTY(int, ScalesNumber, alg_)
CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, alg_)
CV_WRAP_SAME_PROPERTY(double, Epsilon, alg_)
CV_WRAP_PROPERTY(int, Iterations, OuterIterations, alg_)
CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, alg_)
CV_WRAP_SAME_PROPERTY(double, Tau, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Lambda, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Theta, (*alg_))
CV_WRAP_SAME_PROPERTY(int, ScalesNumber, (*alg_))
CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Epsilon, (*alg_))
CV_WRAP_PROPERTY(int, Iterations, OuterIterations, (*alg_))
CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, (*alg_))

protected:
void impl(InputArray input0, InputArray input1, OutputArray dst);

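Dropping the file-local wrapper macros in favour of the shared ones changes the member access from -> to value syntax, hence the (*alg_) dereference at each use site. For Tau, the wrapper presumably now expands to:

    double getTau() const { return (*alg_).getTau(); }
    void setTau(double val) { (*alg_).setTau(val); }
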
@@ -440,29 +440,29 @@ Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flo
class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
//! @name Time step of the numerical scheme
//! @brief Time step of the numerical scheme
CV_PURE_PROPERTY(double, Tau)
//! @name Weight parameter for the data term, attachment parameter
//! @brief Weight parameter for the data term, attachment parameter
CV_PURE_PROPERTY(double, Lambda)
//! @name Weight parameter for (u - v)^2, tightness parameter
//! @brief Weight parameter for (u - v)^2, tightness parameter
CV_PURE_PROPERTY(double, Theta)
//! @name Coefficient for additional illumination variation term
//! @brief Coefficient for additional illumination variation term
CV_PURE_PROPERTY(double, Gamma)
//! @name Number of scales used to create the pyramid of images
//! @brief Number of scales used to create the pyramid of images
CV_PURE_PROPERTY(int, ScalesNumber)
//! @name Number of warpings per scale
//! @brief Number of warpings per scale
CV_PURE_PROPERTY(int, WarpingsNumber)
//! @name Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
//! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
CV_PURE_PROPERTY(double, Epsilon)
//! @name Inner iterations (between outlier filtering) used in the numerical scheme
//! @brief Inner iterations (between outlier filtering) used in the numerical scheme
CV_PURE_PROPERTY(int, InnerIterations)
//! @name Outer iterations (number of inner loops) used in the numerical scheme
//! @brief Outer iterations (number of inner loops) used in the numerical scheme
CV_PURE_PROPERTY(int, OuterIterations)
//! @name Use initial flow
//! @brief Use initial flow
CV_PURE_PROPERTY(bool, UseInitialFlow)
//! @name Step between scales (<1)
//! @brief Step between scales (<1)
CV_PURE_PROPERTY(double, ScaleStep)
//! @name Median filter kernel size (1 = no filter) (3 or 5)
//! @brief Median filter kernel size (1 = no filter) (3 or 5)
CV_PURE_PROPERTY(int, MedianFiltering)
};

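For completeness, these documented properties are exercised through the video module's TV-L1 factory; a minimal sketch (frame0/frame1 assumed to be CV_8UC1 frames):

    Ptr<DualTVL1OpticalFlow> tvl1 = createOptFlow_DualTVL1();
    tvl1->setTau(0.25);
    tvl1->setScalesNumber(5);
    tvl1->setUseInitialFlow(false);
    Mat flow;
    tvl1->calc(frame0, frame1, flow);   // flow comes back as CV_32FC2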