moved part of video to contrib/{optflow, bgsegm}; moved matlab to contrib

This commit is contained in:
Vadim Pisarevsky
2014-08-10 23:24:16 +04:00
parent 4de4ff5682
commit d0137b6d2d
62 changed files with 54 additions and 159912 deletions


@@ -66,39 +66,6 @@ public:
};
/*!
Gaussian Mixture-based Background/Foreground Segmentation Algorithm
The class implements the following algorithm:
"An improved adaptive background mixture model for real-time tracking with shadow detection",
P. KadewTraKuPong and R. Bowden,
Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001.
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
*/
class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
{
public:
CV_WRAP virtual int getHistory() const = 0;
CV_WRAP virtual void setHistory(int nframes) = 0;
CV_WRAP virtual int getNMixtures() const = 0;
CV_WRAP virtual void setNMixtures(int nmix) = 0;
CV_WRAP virtual double getBackgroundRatio() const = 0;
CV_WRAP virtual void setBackgroundRatio(double backgroundRatio) = 0;
CV_WRAP virtual double getNoiseSigma() const = 0;
CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
};
CV_EXPORTS_W Ptr<BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
double backgroundRatio=0.7, double noiseSigma=0);
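This commit relocates the MOG subtractor to opencv_contrib's bgsegm module; below is a minimal usage sketch of the factory declared above. The header <opencv2/bgsegm.hpp> and namespace cv::bgsegm are assumptions about the post-move layout.

// Minimal sketch: feed frames to the MOG subtractor declared above and read back the
// 8-bit foreground mask. The header <opencv2/bgsegm.hpp> and namespace cv::bgsegm are
// assumptions about the post-move opencv_contrib layout.
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/bgsegm.hpp>

int main()
{
    cv::VideoCapture cap(0);                         // any video source
    cv::Ptr<cv::BackgroundSubtractor> mog =
        cv::bgsegm::createBackgroundSubtractorMOG(200 /*history*/, 5 /*nmixtures*/,
                                                  0.7 /*backgroundRatio*/, 0 /*noiseSigma*/);
    cv::Mat frame, fgmask;
    while (cap.read(frame))
        mog->apply(frame, fgmask);                   // fgmask: 8UC1, 255 = foreground, 0 = background
    return 0;
}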
/*!
The class implements the following algorithm:
"Improved adaptive Gausian mixture model for background subtraction"
@@ -189,51 +156,6 @@ CV_EXPORTS_W Ptr<BackgroundSubtractorKNN>
createBackgroundSubtractorKNN(int history=500, double dist2Threshold=400.0,
bool detectShadows=true);
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*/
class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor
{
public:
CV_WRAP virtual int getMaxFeatures() const = 0;
CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
CV_WRAP virtual double getDefaultLearningRate() const = 0;
CV_WRAP virtual void setDefaultLearningRate(double lr) = 0;
CV_WRAP virtual int getNumFrames() const = 0;
CV_WRAP virtual void setNumFrames(int nframes) = 0;
CV_WRAP virtual int getQuantizationLevels() const = 0;
CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0;
CV_WRAP virtual double getBackgroundPrior() const = 0;
CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0;
CV_WRAP virtual int getSmoothingRadius() const = 0;
CV_WRAP virtual void setSmoothingRadius(int radius) = 0;
CV_WRAP virtual double getDecisionThreshold() const = 0;
CV_WRAP virtual void setDecisionThreshold(double thresh) = 0;
CV_WRAP virtual bool getUpdateBackgroundModel() const = 0;
CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0;
CV_WRAP virtual double getMinVal() const = 0;
CV_WRAP virtual void setMinVal(double val) = 0;
CV_WRAP virtual double getMaxVal() const = 0;
CV_WRAP virtual void setMaxVal(double val) = 0;
};
CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,
double decisionThreshold=0.8);
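GMG is moved to bgsegm by this commit as well; a minimal sketch follows, under the same assumption of a post-move <opencv2/bgsegm.hpp> header and cv::bgsegm namespace, and assuming the tuning setters listed in the interface above are kept.

// Sketch: GMG accumulates an observation model over its initialization frames before the
// mask becomes meaningful. Header <opencv2/bgsegm.hpp> and namespace cv::bgsegm are assumed.
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/bgsegm.hpp>

int main()
{
    cv::VideoCapture cap("video.avi");               // placeholder input file
    cv::Ptr<cv::bgsegm::BackgroundSubtractorGMG> gmg =
        cv::bgsegm::createBackgroundSubtractorGMG(120 /*initializationFrames*/,
                                                  0.8 /*decisionThreshold*/);
    gmg->setUpdateBackgroundModel(true);             // keep adapting after initialization
    cv::Mat frame, fgmask;
    while (cap.read(frame))
        gmg->apply(frame, fgmask);                   // mask stays empty during the learning frames
    return 0;
}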
} // cv
#endif


@@ -55,28 +55,6 @@ enum { OPTFLOW_USE_INITIAL_FLOW = 4,
OPTFLOW_FARNEBACK_GAUSSIAN = 256
};
enum { MOTION_TRANSLATION = 0,
MOTION_EUCLIDEAN = 1,
MOTION_AFFINE = 2,
MOTION_HOMOGRAPHY = 3
};
//! updates motion history image using the current silhouette
CV_EXPORTS_W void updateMotionHistory( InputArray silhouette, InputOutputArray mhi,
double timestamp, double duration );
//! computes the motion gradient orientation image from the motion history image
CV_EXPORTS_W void calcMotionGradient( InputArray mhi, OutputArray mask, OutputArray orientation,
double delta1, double delta2, int apertureSize = 3 );
//! computes the global orientation of the selected motion history image part
CV_EXPORTS_W double calcGlobalOrientation( InputArray orientation, InputArray mask, InputArray mhi,
double timestamp, double duration );
CV_EXPORTS_W void segmentMotion( InputArray mhi, OutputArray segmask,
CV_OUT std::vector<Rect>& boundingRects,
double timestamp, double segThresh );
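updateMotionHistory, calcMotionGradient and calcGlobalOrientation above form the core of the classic motion-template pipeline (segmentMotion additionally splits the history image into per-object segments). A minimal sketch of that pipeline follows; the cv::motempl namespace and the <opencv2/optflow/motempl.hpp> header are assumptions about where these functions land in opencv_contrib after this commit.

// Sketch of the motion-template pipeline built from the declarations above:
// frame differencing -> updateMotionHistory -> calcMotionGradient -> calcGlobalOrientation.
// The cv::motempl namespace and <opencv2/optflow/motempl.hpp> header are assumptions.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/optflow/motempl.hpp>

int main()
{
    cv::VideoCapture cap(0);
    cv::Mat frame, gray, prevGray, silhouette, mhi;
    const double MHI_DURATION = 1.0;                 // seconds a motion trace stays in the MHI
    double t = 0;
    while (cap.read(frame))
    {
        t += 1.0 / 30.0;                             // assume ~30 fps timestamps, in seconds
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        if (prevGray.empty())
        {
            prevGray = gray.clone();
            mhi = cv::Mat::zeros(gray.size(), CV_32FC1);
            continue;
        }
        cv::absdiff(gray, prevGray, silhouette);     // crude silhouette from frame differencing
        cv::threshold(silhouette, silhouette, 30, 255, cv::THRESH_BINARY);
        cv::motempl::updateMotionHistory(silhouette, mhi, t, MHI_DURATION);

        cv::Mat mask, orientation;
        cv::motempl::calcMotionGradient(mhi, mask, orientation, 0.25, 0.05, 3);
        double angle = cv::motempl::calcGlobalOrientation(orientation, mask, mhi, t, MHI_DURATION);
        (void)angle;                                 // dominant motion direction, in degrees
        prevGray = gray.clone();
    }
    return 0;
}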
//! updates the object tracking window using CAMSHIFT algorithm
CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
TermCriteria criteria );
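CamShift is typically driven from a hue back-projection of the tracked object's histogram. A minimal sketch of that loop follows; the initial window and histogram settings are illustrative values, not part of the declaration above.

// Sketch: track a region chosen in the first frame by back-projecting its hue histogram
// and handing the back-projection to CamShift. The initial window is an assumed value;
// a real application would let the user select it.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video/tracking.hpp>

int main()
{
    cv::VideoCapture cap(0);
    cv::Mat frame, hsv, hue, hist, backproj;
    cv::Rect window(100, 100, 80, 80);               // initial search window (assumed)
    int histSize = 30, ch[] = {0, 0};
    float hranges[] = {0, 180};
    const float* ranges[] = {hranges};

    for (bool first = true; cap.read(frame); first = false)
    {
        cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
        hue.create(hsv.size(), CV_8UC1);
        cv::mixChannels(&hsv, 1, &hue, 1, ch, 1);    // extract the hue plane
        if (first)                                   // build the model histogram from the ROI once
        {
            cv::Mat roi = hue(window);
            cv::calcHist(&roi, 1, 0, cv::Mat(), hist, 1, &histSize, ranges);
            cv::normalize(hist, hist, 0, 255, cv::NORM_MINMAX);
        }
        cv::calcBackProject(&hue, 1, 0, hist, backproj, ranges);
        cv::RotatedRect box = cv::CamShift(backproj, window,
            cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 10, 1));
        (void)box;                                   // oriented rectangle around the tracked object
    }
    return 0;
}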
@@ -109,6 +87,15 @@ CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, In
// that maps one 2D point set to another or one image to another.
CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine );
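A minimal sketch of the point-set form of estimateRigidTransform declared above; the coordinates are made up for illustration.

// Sketch: recover a 2x3 similarity transform (rotation + uniform scale + translation)
// from matched point pairs by passing fullAffine = false. Coordinates are illustrative.
#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>

int main()
{
    std::vector<cv::Point2f> src = { {0, 0}, {100, 0}, {100, 100}, {0, 100} };
    std::vector<cv::Point2f> dst = { {10, 5}, {110, 5}, {110, 105}, {10, 105} };  // pure translation
    cv::Mat M = cv::estimateRigidTransform(src, dst, false /*fullAffine*/);
    // M is a 2x3 matrix (empty if estimation failed); here it should be close to [1 0 10; 0 1 5].
    return 0;
}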
enum
{
MOTION_TRANSLATION = 0,
MOTION_EUCLIDEAN = 1,
MOTION_AFFINE = 2,
MOTION_HOMOGRAPHY = 3
};
//! estimates the best-fit Translation, Euclidean, Affine or Perspective Transformation
// with respect to the Enhanced Correlation Coefficient (ECC) criterion that maps one image to
// another (area-based alignment)
@@ -120,20 +107,6 @@ CV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray input
InputOutputArray warpMatrix, int motionType = MOTION_AFFINE,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001));
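findTransformECC refines warpMatrix in place and returns the final correlation coefficient. A minimal sketch for the affine case follows, assuming two single-channel views of the same scene; the file names are placeholders.

// Sketch: ECC alignment of inputImage onto templateImage with an affine warp.
// findTransformECC refines the warp in place and returns the final ECC value.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>

int main()
{
    cv::Mat templ = cv::imread("template.png", cv::IMREAD_GRAYSCALE);  // placeholder files
    cv::Mat input = cv::imread("input.png", cv::IMREAD_GRAYSCALE);

    // The warp must be 2x3 CV_32F for MOTION_AFFINE (3x3 for MOTION_HOMOGRAPHY),
    // initialized to identity unless a better guess is available.
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);
    double ecc = cv::findTransformECC(templ, input, warp, cv::MOTION_AFFINE,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 0.001));

    cv::Mat aligned;
    cv::warpAffine(input, aligned, warp, templ.size(),
                   cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);  // map input into the template frame
    (void)ecc;
    return 0;
}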
//! computes dense optical flow using the SimpleFlow algorithm
CV_EXPORTS_W void calcOpticalFlowSF( InputArray from, InputArray to, OutputArray flow,
int layers, int averaging_block_size, int max_flow);
CV_EXPORTS_W void calcOpticalFlowSF( InputArray from, InputArray to, OutputArray flow, int layers,
int averaging_block_size, int max_flow,
double sigma_dist, double sigma_color, int postprocess_window,
double sigma_dist_fix, double sigma_color_fix, double occ_thr,
int upscale_averaging_radius, double upscale_sigma_dist,
double upscale_sigma_color, double speed_up_thr );
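SimpleFlow is one of the pieces this commit moves to opencv_contrib. A minimal sketch of the short overload above follows, assuming the post-move cv::optflow::calcOpticalFlowSF in <opencv2/optflow.hpp>; the file names and parameter values are illustrative.

// Sketch: dense SimpleFlow between two consecutive color frames. Namespace cv::optflow
// and header <opencv2/optflow.hpp> are assumed post-move locations; the parameter values
// (3 layers, block size 2, max flow 4) are illustrative.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/optflow.hpp>

int main()
{
    cv::Mat from = cv::imread("frame0.png", cv::IMREAD_COLOR);   // 3-channel 8-bit input required
    cv::Mat to   = cv::imread("frame1.png", cv::IMREAD_COLOR);

    cv::Mat flow;                                                // output: 32FC2, per-pixel (dx, dy)
    cv::optflow::calcOpticalFlowSF(from, to, flow, 3 /*layers*/,
                                   2 /*averaging_block_size*/, 4 /*max_flow*/);
    return 0;
}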
/*!
Kalman filter.