merged 2.4 into trunk

Commit d5a0088bbe (parent 3f1c6d7357)
Author: Vadim Pisarevsky
Date:   2012-04-30 14:33:52 +00:00

194 changed files with 10158 additions and 8225 deletions

View File

@@ -404,11 +404,11 @@ Updates the predicted state from the measurement.
 BackgroundSubtractor
 --------------------

-.. ocv:class:: BackgroundSubtractor
+.. ocv:class:: BackgroundSubtractor : public Algorithm

 Base class for background/foreground segmentation. ::

-    class BackgroundSubtractor
+    class BackgroundSubtractor : public Algorithm
     {
     public:
         virtual ~BackgroundSubtractor();
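With BackgroundSubtractor now deriving from Algorithm, the subtractors pick up Algorithm's named-parameter interface, so values that previously had to go through the constructors can be read and written by name. A minimal sketch of what that enables, assuming the OpenCV 2.4 Algorithm::set/getDouble API and the parameter names registered later in this commit:

    #include "opencv2/video/background_segm.hpp"

    using namespace cv;

    int main()
    {
        // Fields that used to be reachable only via the constructor can now be
        // read and written by name through the Algorithm base class.
        BackgroundSubtractorMOG2 mog2;
        mog2.set("history", 300);            // registered via addParam() in this commit
        mog2.set("detectShadows", false);
        double varThreshold = mog2.getDouble("varThreshold");
        return varThreshold > 0 ? 0 : 1;
    }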

View File

@@ -54,7 +54,7 @@ namespace cv
  The class is only used to define the common interface for
  the whole family of background/foreground segmentation algorithms.
 */
-class CV_EXPORTS_W BackgroundSubtractor
+class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
 {
 public:
     //! the virtual destructor
@@ -93,6 +93,9 @@ public:
     //! re-initiaization method
     virtual void initialize(Size frameSize, int frameType);
+
+    virtual AlgorithmInfo* info() const;
+
 protected:
     Size frameSize;
     int frameType;
     Mat bgmodel;
@@ -130,6 +133,9 @@ public:
     //! re-initiaization method
     virtual void initialize(Size frameSize, int frameType);
+
+    virtual AlgorithmInfo* info() const;
+
 protected:
     Size frameSize;
     int frameType;
     Mat bgmodel;
@@ -137,24 +143,24 @@ public:
     int nframes;
     int history;
     int nmixtures;
-    //! here it is the maximum allowed number of mixture comonents.
+    //! here it is the maximum allowed number of mixture components.
     //! Actual number is determined dynamically per pixel
-    float varThreshold;
-    // threshold on the squared Mahalan. dist. to decide if it is well described
-    //by the background model or not. Related to Cthr from the paper.
-    //This does not influence the update of the background. A typical value could be 4 sigma
-    //and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+    double varThreshold;
+    // threshold on the squared Mahalanobis distance to decide if it is well described
+    // by the background model or not. Related to Cthr from the paper.
+    // This does not influence the update of the background. A typical value could be 4 sigma
+    // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.

     /////////////////////////
-    //less important parameters - things you might change but be carefull
+    // less important parameters - things you might change but be carefull
     ////////////////////////
     float backgroundRatio;
-    //corresponds to fTB=1-cf from the paper
-    //TB - threshold when the component becomes significant enough to be included into
-    //the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
-    //For alpha=0.001 it means that the mode should exist for approximately 105 frames before
-    //it is considered foreground
-    //float noiseSigma;
+    // corresponds to fTB=1-cf from the paper
+    // TB - threshold when the component becomes significant enough to be included into
+    // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
+    // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+    // it is considered foreground
+    // float noiseSigma;
     float varThresholdGen;
     //correspondts to Tg - threshold on the squared Mahalan. dist. to decide
     //when a sample is close to the existing components. If it is not close
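The new virtual info() override, added to both subtractor classes above, is the hook through which Algorithm exposes the protected fields as named parameters. A rough sketch of how a caller could enumerate them at runtime, assuming the 2.4 Algorithm::getParams() and name() methods:

    #include <iostream>
    #include <string>
    #include <vector>

    #include "opencv2/video/background_segm.hpp"

    int main()
    {
        cv::BackgroundSubtractorMOG2 mog2;

        // getParams() walks the AlgorithmInfo returned by the info() override,
        // so it lists exactly the fields registered with addParam().
        std::vector<std::string> params;
        mog2.getParams(params);
        for (size_t i = 0; i < params.size(); i++)
            std::cout << mog2.name() << ": " << params[i] << std::endl;
        return 0;
    }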

View File

@@ -134,17 +134,19 @@ template<typename VT> struct MixData
 };

-static void process8uC1( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
+static void process8uC1( const Mat& image, Mat& fgmask, double learningRate,
+                         Mat& bgmodel, int nmixtures, double backgroundRatio,
+                         double varThreshold, double noiseSigma )
 {
     int x, y, k, k1, rows = image.rows, cols = image.cols;
-    float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
-    int K = obj.nmixtures;
-    MixData<float>* mptr = (MixData<float>*)obj.bgmodel.data;
+    float alpha = (float)learningRate, T = (float)backgroundRatio, vT = (float)varThreshold;
+    int K = nmixtures;
+    MixData<float>* mptr = (MixData<float>*)bgmodel.data;

     const float w0 = (float)defaultInitialWeight;
     const float sk0 = (float)(w0/(defaultNoiseSigma*2));
     const float var0 = (float)(defaultNoiseSigma*defaultNoiseSigma*4);
-    const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
+    const float minVar = (float)(noiseSigma*noiseSigma);

     for( y = 0; y < rows; y++ )
     {
@@ -259,17 +261,20 @@ static void process8uC1( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fg
     }
 }

-static void process8uC3( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
+static void process8uC3( const Mat& image, Mat& fgmask, double learningRate,
+                         Mat& bgmodel, int nmixtures, double backgroundRatio,
+                         double varThreshold, double noiseSigma )
 {
     int x, y, k, k1, rows = image.rows, cols = image.cols;
-    float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
-    int K = obj.nmixtures;
+    float alpha = (float)learningRate, T = (float)backgroundRatio, vT = (float)varThreshold;
+    int K = nmixtures;

     const float w0 = (float)defaultInitialWeight;
     const float sk0 = (float)(w0/(defaultNoiseSigma*2*sqrt(3.)));
     const float var0 = (float)(defaultNoiseSigma*defaultNoiseSigma*4);
-    const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
-    MixData<Vec3f>* mptr = (MixData<Vec3f>*)obj.bgmodel.data;
+    const float minVar = (float)(noiseSigma*noiseSigma);
+    MixData<Vec3f>* mptr = (MixData<Vec3f>*)bgmodel.data;

     for( y = 0; y < rows; y++ )
     {
@@ -403,9 +408,9 @@ void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask,
     CV_Assert(learningRate >= 0);

     if( image.type() == CV_8UC1 )
-        process8uC1( *this, image, fgmask, learningRate );
+        process8uC1( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
     else if( image.type() == CV_8UC3 )
-        process8uC3( *this, image, fgmask, learningRate );
+        process8uC3( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
     else
         CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
 }
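The process8uC1/process8uC3 helpers no longer receive the whole BackgroundSubtractorMOG object; the model state (bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma) is passed explicitly, which keeps the helpers decoupled from the class layout now that those members double as Algorithm parameters. From the caller's side nothing changes; a short usage sketch of the public operator() that dispatches to them (camera index and learning rate are arbitrary values here):

    #include "opencv2/video/background_segm.hpp"
    #include "opencv2/highgui/highgui.hpp"

    using namespace cv;

    int main()
    {
        VideoCapture cap(0);             // any 8-bit 1- or 3-channel source
        BackgroundSubtractorMOG mog;     // owns bgmodel and the mixture parameters

        Mat frame, fgmask;
        while (cap.read(frame))
        {
            // operator() picks process8uC1 or process8uC3 by image type and
            // forwards the model state explicitly instead of passing *this.
            mog(frame, fgmask, 0.01);
            imshow("foreground", fgmask);
            if (waitKey(30) >= 0)
                break;
        }
        return 0;
    }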

File diff suppressed because it is too large.

View File

@@ -0,0 +1,119 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"

namespace cv
{

///////////////////////////////////////////////////////////////////////////////////////////////////////////

static Algorithm* createMOG()
{
    return new BackgroundSubtractorMOG;
}

static AlgorithmInfo& mog_info()
{
    static AlgorithmInfo mog_info_var("BackgroundSubtractor.MOG", createMOG);
    return mog_info_var;
}

static AlgorithmInfo& mog_info_auto = mog_info();

AlgorithmInfo* BackgroundSubtractorMOG::info() const
{
    static volatile bool initialized = false;
    if( !initialized )
    {
        BackgroundSubtractorMOG obj;
        mog_info().addParam(obj, "history", obj.history);
        mog_info().addParam(obj, "nmixtures", obj.nmixtures);
        mog_info().addParam(obj, "backgroundRatio", obj.backgroundRatio);
        mog_info().addParam(obj, "noiseSigma", obj.noiseSigma);

        initialized = true;
    }
    return &mog_info();
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////

static Algorithm* createMOG2()
{
    return new BackgroundSubtractorMOG2;
}

static AlgorithmInfo& mog2_info()
{
    static AlgorithmInfo mog2_info_var("BackgroundSubtractor.MOG2", createMOG2);
    return mog2_info_var;
}

static AlgorithmInfo& mog2_info_auto = mog2_info();

AlgorithmInfo* BackgroundSubtractorMOG2::info() const
{
    static volatile bool initialized = false;
    if( !initialized )
    {
        BackgroundSubtractorMOG2 obj;
        mog2_info().addParam(obj, "history", obj.history);
        mog2_info().addParam(obj, "varThreshold", obj.varThreshold);
        mog2_info().addParam(obj, "detectShadows", obj.bShadowDetection);

        initialized = true;
    }
    return &mog2_info();
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////

bool initModule_video(void)
{
    Ptr<Algorithm> mog = createMOG(), mog2 = createMOG2();
    return mog->info() != 0 && mog2->info() != 0;
}

}
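This new file registers both subtractors with the Algorithm factory under "BackgroundSubtractor.MOG" and "BackgroundSubtractor.MOG2", and initModule_video() forces the static registration to run. A sketch of creating one by name, assuming initModule_video() is declared in opencv2/video/video.hpp and the 2.4 Algorithm::create<> API:

    #include <iostream>
    #include "opencv2/video/video.hpp"

    using namespace cv;

    int main()
    {
        // Make sure the static registration above has run.
        initModule_video();

        Ptr<BackgroundSubtractor> bg =
            Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
        if (bg.empty())
        {
            std::cerr << "BackgroundSubtractor.MOG2 is not registered" << std::endl;
            return 1;
        }

        // Parameters registered through addParam() above are settable by name.
        bg->set("history", 500);
        bg->set("detectShadows", true);
        std::cout << "created " << bg->name() << std::endl;
        return 0;
    }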