a big patch; use special proxy types (Input/OutputArray, Input/OutputArrayOfArrays) for passing in vectors, matrices etc.
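
For readers skimming the diff below, the practical effect of the proxy types is that a single signature can now accept a cv::Mat, a std::vector, a matrix expression and so on, with the conversion handled at the call site. A minimal sketch of that behaviour, not part of this patch and using only the public cv::InputArray interface:

    #include <opencv2/core/core.hpp>
    #include <vector>
    #include <cstdio>

    // One signature serves dense matrices and std::vector containers alike.
    static double sumFirstChannel(cv::InputArray src)
    {
        cv::Mat m = src.getMat();   // a Mat header over the caller's data, no copy
        return cv::sum(m)[0];
    }

    int main()
    {
        cv::Mat img = cv::Mat::ones(4, 4, CV_8U);
        std::vector<cv::Point2f> pts(3, cv::Point2f(1.f, 2.f));

        std::printf("%.0f\n", sumFirstChannel(img));  // 16: a Mat passed directly
        std::printf("%.0f\n", sumFirstChannel(pts));  // 3: a vector through the same API
        return 0;
    }

Inside the callee, getMat() produces a Mat header over whatever container was passed (for a std::vector it wraps the vector's buffer), so no data is copied.
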
@@ -358,7 +358,7 @@ public:
     //! the virtual destructor
     virtual ~BackgroundSubtractor();
     //! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
-    CV_WRAP_AS(apply) virtual void operator()(const Mat& image, CV_OUT Mat& fgmask,
+    CV_WRAP_AS(apply) virtual void operator()(const InputArray& image, OutputArray fgmask,
                                               double learningRate=0);
 };
 
@@ -383,7 +383,7 @@ public:
     //! the destructor
     virtual ~BackgroundSubtractorMOG();
     //! the update operator
-    virtual void operator()(const Mat& image, Mat& fgmask, double learningRate=0);
+    virtual void operator()(const InputArray& image, OutputArray fgmask, double learningRate=0);
 
     //! re-initiaization method
     virtual void initialize(Size frameSize, int frameType);
@@ -400,52 +400,73 @@ public:
 };
 
 
-class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor
+class BackgroundSubtractorMOG2 : public BackgroundSubtractor
 {
 public:
     //! the default constructor
-    CV_WRAP BackgroundSubtractorMOG2();
+    BackgroundSubtractorMOG2();
     //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
-    CV_WRAP BackgroundSubtractorMOG2(double alphaT,
-                                     double sigma=15,
-                                     int nmixtures=5,
-                                     bool postFiltering=false,
-                                     double minArea=15,
-                                     bool detectShadows=true,
-                                     bool removeForeground=false,
-
-                                     double Tb=16,
-                                     double Tg=9,
-                                     double TB=0.9,
-                                     double CT=0.05,
-
-                                     int shadowOutputValue=127,
-                                     double tau=0.5);
-
+    BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=1);
     //! the destructor
     virtual ~BackgroundSubtractorMOG2();
     //! the update operator
-    virtual void operator()(const Mat& image, Mat& fgmask, double learningRate=0);
+    virtual void operator()(const InputArray& image, OutputArray fgmask, double learningRate=-1);
 
     //! re-initiaization method
-    virtual void initialize(Size frameSize,
-                            double alphaT,
-                            double sigma=15,
-                            int nmixtures=5,
-                            bool postFiltering=false,
-                            double minArea=15,
-                            bool detectShadows=true,
-                            bool removeForeground=false,
-
-                            double Tb=16,
-                            double Tg=9,
-                            double TB=0.9,
-                            double CT=0.05,
-
-                            int nShadowDetection=127,
-                            double tau=0.5);
+    virtual void initialize(Size frameSize, int frameType);
 
-    void* model;
+    Size frameSize;
+    int frameType;
+    Mat bgmodel;
+    Mat bgmodelUsedModes;//keep track of number of modes per pixel
+    int nframes;
+    int history;
+    int nmixtures;
+    //! here it is the maximum allowed number of mixture comonents.
+    //! Actual number is determined dynamically per pixel
+    float varThreshold;
+    // threshold on the squared Mahalan. dist. to decide if it is well described
+    //by the background model or not. Related to Cthr from the paper.
+    //This does not influence the update of the background. A typical value could be 4 sigma
+    //and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+    /////////////////////////
+    //less important parameters - things you might change but be carefull
+    ////////////////////////
+    float backgroundRatio;
+    //corresponds to fTB=1-cf from the paper
+    //TB - threshold when the component becomes significant enough to be included into
+    //the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
+    //For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+    //it is considered foreground
+    //float noiseSigma;
+    float varThresholdGen;
+    //correspondts to Tg - threshold on the squared Mahalan. dist. to decide
+    //when a sample is close to the existing components. If it is not close
+    //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
+    //Smaller Tg leads to more generated components and higher Tg might make
+    //lead to small number of components but they can grow too large
+    float fVarInit;
+    float fVarMin;
+    float fVarMax;
+    //initial variance for the newly generated components.
+    //It will will influence the speed of adaptation. A good guess should be made.
+    //A simple way is to estimate the typical standard deviation from the images.
+    //I used here 10 as a reasonable value
+    // min and max can be used to further control the variance
+    float fCT;//CT - complexity reduction prior
+    //this is related to the number of samples needed to accept that a component
+    //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+    //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+    //shadow detection parameters
+    bool bShadowDetection;//default 1 - do shadow detection
+    unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
+    float fTau;
+    // Tau - shadow threshold. The shadow is detected if the pixel is darker
+    //version of the background. Tau is a threshold on how much darker the shadow can be.
+    //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
+    //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
 };
 
 }
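
The hunk above trims BackgroundSubtractorMOG2 down to a (history, varThreshold, bShadowDetection) constructor, switches the update operator to the proxy types, and exposes the model parameters described in the comments. A hypothetical usage sketch written against exactly these 2.x-era declarations (frame contents and the learning rate are placeholders; later OpenCV releases create the subtractor through a factory function instead):

    #include <opencv2/core/core.hpp>
    #include <opencv2/video/background_segm.hpp>
    #include <cstdio>

    int main()
    {
        cv::BackgroundSubtractorMOG2 bg;            // default constructor from the diff
        cv::Mat frame(240, 320, CV_8UC3), fgmask;

        for (int i = 0; i < 50; i++)
        {
            frame.setTo(cv::Scalar(30, 30, 30));    // static background
            cv::Rect box(10 + 2 * i, 100, 30, 30);  // a square sliding to the right
            frame(box).setTo(cv::Scalar(200, 200, 200));

            bg(frame, fgmask, 0.01);                // new InputArray/OutputArray operator()
            std::printf("frame %2d: %d foreground pixels\n", i, cv::countNonZero(fgmask));
        }
        return 0;
    }

With bShadowDetection enabled, shadow pixels are written into fgmask as the nShadowDetection value (127 by default) rather than 255, per the comments above.
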
@@ -244,30 +244,30 @@ namespace cv
 {
 
 //! updates motion history image using the current silhouette
-CV_EXPORTS_W void updateMotionHistory( const Mat& silhouette, Mat& mhi,
-                                       double timestamp, double duration );
+CV_EXPORTS_W void updateMotionHistory( const InputArray& silhouette, InputOutputArray mhi,
+                                       double timestamp, double duration );
 
 //! computes the motion gradient orientation image from the motion history image
-CV_EXPORTS_W void calcMotionGradient( const Mat& mhi, CV_OUT Mat& mask,
-                                      CV_OUT Mat& orientation,
+CV_EXPORTS_W void calcMotionGradient( const InputArray& mhi, OutputArray mask,
+                                      OutputArray orientation,
                                       double delta1, double delta2,
                                       int apertureSize=3 );
 
 //! computes the global orientation of the selected motion history image part
-CV_EXPORTS_W double calcGlobalOrientation( const Mat& orientation, const Mat& mask,
-                                           const Mat& mhi, double timestamp,
+CV_EXPORTS_W double calcGlobalOrientation( const InputArray& orientation, const InputArray& mask,
+                                           const InputArray& mhi, double timestamp,
                                            double duration );
 
-CV_EXPORTS_W void segmentMotion(const Mat& mhi, Mat& segmask,
+CV_EXPORTS_W void segmentMotion(const InputArray& mhi, OutputArray segmask,
                                 vector<Rect>& boundingRects,
                                 double timestamp, double segThresh);
 
 //! updates the object tracking window using CAMSHIFT algorithm
-CV_EXPORTS_W RotatedRect CamShift( const Mat& probImage, CV_IN_OUT Rect& window,
+CV_EXPORTS_W RotatedRect CamShift( const InputArray& probImage, CV_IN_OUT Rect& window,
                                    TermCriteria criteria );
 
 //! updates the object tracking window using meanshift algorithm
-CV_EXPORTS_W int meanShift( const Mat& probImage, CV_IN_OUT Rect& window,
+CV_EXPORTS_W int meanShift( const InputArray& probImage, CV_IN_OUT Rect& window,
                             TermCriteria criteria );
 
 /*!
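
The hunk above switches the motion-template and tracking entry points (updateMotionHistory, calcMotionGradient, calcGlobalOrientation, segmentMotion, CamShift, meanShift) to the proxy types without otherwise changing their parameters. A small sketch of the motion-template pipeline using the new signatures, with synthetic frames standing in for a real video source (2.x-era API; in OpenCV 3.x the motion-template functions moved to the opencv_contrib optflow module):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/video/tracking.hpp>
    #include <cstdio>

    int main()
    {
        const double duration = 1.0;              // how long a motion trace is kept (seconds)
        cv::Mat mhi = cv::Mat::zeros(240, 320, CV_32F);
        cv::Mat prev = cv::Mat::zeros(240, 320, CV_8U), curr, silhouette;

        for (int i = 1; i <= 5; i++)
        {
            double timestamp = 0.1 * i;           // fake capture time of frame i
            curr = cv::Mat::zeros(240, 320, CV_8U);
            cv::rectangle(curr, cv::Rect(50 + 5 * i, 100, 40, 40), cv::Scalar(255), -1);

            cv::absdiff(prev, curr, silhouette);  // changed pixels form the silhouette
            cv::threshold(silhouette, silhouette, 30, 255, cv::THRESH_BINARY);
            cv::updateMotionHistory(silhouette, mhi, timestamp, duration);
            prev = curr;
        }

        cv::Mat mask, orientation;
        cv::calcMotionGradient(mhi, mask, orientation, 0.05, 0.5, 3);
        double angle = cv::calcGlobalOrientation(orientation, mask, mhi, 0.5, duration);
        std::printf("global motion direction: %.1f degrees\n", angle);
        return 0;
    }
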
@@ -315,9 +315,9 @@ public:
 enum { OPTFLOW_USE_INITIAL_FLOW=4, OPTFLOW_FARNEBACK_GAUSSIAN=256 };
 
 //! computes sparse optical flow using multi-scale Lucas-Kanade algorithm
-CV_EXPORTS_W void calcOpticalFlowPyrLK( const Mat& prevImg, const Mat& nextImg,
-                          const vector<Point2f>& prevPts, CV_OUT vector<Point2f>& nextPts,
-                          CV_OUT vector<uchar>& status, CV_OUT vector<float>& err,
+CV_EXPORTS_W void calcOpticalFlowPyrLK( const InputArray& prevImg, const InputArray& nextImg,
+                          const InputArray& prevPts, CV_OUT InputOutputArray nextPts,
+                          OutputArray status, OutputArray err,
                           Size winSize=Size(15,15), int maxLevel=3,
                           TermCriteria criteria=TermCriteria(
                             TermCriteria::COUNT+TermCriteria::EPS,
@@ -326,10 +326,15 @@ CV_EXPORTS_W void calcOpticalFlowPyrLK( const Mat& prevImg, const Mat& nextImg,
                           int flags=0 );
 
 //! computes dense optical flow using Farneback algorithm
-CV_EXPORTS_W void calcOpticalFlowFarneback( const Mat& prev, const Mat& next,
-                          CV_OUT Mat& flow, double pyr_scale, int levels, int winsize,
+CV_EXPORTS_W void calcOpticalFlowFarneback( const InputArray& prev, const InputArray& next,
+                          CV_OUT InputOutputArray flow, double pyr_scale, int levels, int winsize,
                           int iterations, int poly_n, double poly_sigma, int flags );
 
+//! estimates the best-fit Euqcidean, similarity, affine or perspective transformation
+// that maps one 2D point set to another or one image to another.
+CV_EXPORTS_W Mat estimateRigidTransform( const InputArray& src, const InputArray& dst,
+                                         bool fullAffine);
+
 }
 
 #endif
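
With the signature change above, the point lists for calcOpticalFlowPyrLK are ordinary std::vector objects wrapped by the proxy types at the call site, as are status and err. A hypothetical end-to-end sketch with two synthetic frames (the seeded corner coordinates are placeholders chosen for the example):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/video/tracking.hpp>
    #include <vector>
    #include <cstdio>

    int main()
    {
        // a bright square shifted by (3, 2) pixels between the two frames
        cv::Mat prev = cv::Mat::zeros(240, 320, CV_8U);
        cv::Mat next = cv::Mat::zeros(240, 320, CV_8U);
        cv::rectangle(prev, cv::Rect(90, 90, 40, 40), cv::Scalar(255), -1);
        cv::rectangle(next, cv::Rect(93, 92, 40, 40), cv::Scalar(255), -1);

        // plain std::vector containers pass straight through InputArray/OutputArray
        std::vector<cv::Point2f> prevPts, nextPts;
        prevPts.push_back(cv::Point2f(90.f, 90.f));    // top-left corner of the square
        prevPts.push_back(cv::Point2f(129.f, 129.f));  // bottom-right corner

        std::vector<uchar> status;
        std::vector<float> err;
        cv::calcOpticalFlowPyrLK(prev, next, prevPts, nextPts, status, err);

        for (size_t i = 0; i < prevPts.size(); i++)
            if (status[i])
                std::printf("(%.1f, %.1f) -> (%.1f, %.1f)\n",
                            prevPts[i].x, prevPts[i].y, nextPts[i].x, nextPts[i].y);
        return 0;
    }
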