refactored FGD algorithm
parent 697793090d
commit 62edeeed16
@@ -50,9 +50,6 @@
 #include "opencv2/core/gpu.hpp"
 #include "opencv2/video/background_segm.hpp"
 
-#include <memory>
-#include "opencv2/gpufilters.hpp"
-
 namespace cv { namespace gpu {
 
 ////////////////////////////////////////////////////
@@ -105,76 +102,51 @@ public:
 CV_EXPORTS Ptr<gpu::BackgroundSubtractorGMG>
     createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
 
-// Foreground Object Detection from Videos Containing Complex Background.
-// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
-// ACM MM2003 9p
-class CV_EXPORTS FGDStatModel
+////////////////////////////////////////////////////
+// FGD
+
+/**
+ * Foreground Object Detection from Videos Containing Complex Background.
+ * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
+ * ACM MM2003 9p
+ */
+class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
 {
 public:
-    struct CV_EXPORTS Params
-    {
-        int Lc;   // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
-        int N1c;  // Number of color vectors used to model normal background color variation at a given pixel.
-        int N2c;  // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
-                  // Used to allow the first N1c vectors to adapt over time to changing background.
-
-        int Lcc;  // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
-        int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
-        int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
-                  // Used to allow the first N1cc vectors to adapt over time to changing background.
-
-        bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
-        int perform_morphing;      // Number of erode-dilate-erode foreground-blob cleanup iterations.
-                                   // These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
-
-        float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
-        float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
-        float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
-
-        float delta;   // Affects color and color co-occurrence quantization, typically set to 2.
-        float T;       // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
-        float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
-
-        // default Params
-        Params();
-    };
-
-    // out_cn - channels count in output result (can be 3 or 4)
-    // 4-channels require more memory, but a bit faster
-    explicit FGDStatModel(int out_cn = 3);
-    explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
-
-    ~FGDStatModel();
-
-    void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
-    void release();
-
-    int update(const cv::gpu::GpuMat& curFrame);
-
-    //8UC3 or 8UC4 reference background image
-    cv::gpu::GpuMat background;
-
-    //8UC1 foreground image
-    cv::gpu::GpuMat foreground;
-
-    std::vector< std::vector<cv::Point> > foreground_regions;
-
-private:
-    FGDStatModel(const FGDStatModel&);
-    FGDStatModel& operator=(const FGDStatModel&);
-
-    class Impl;
-    std::auto_ptr<Impl> impl_;
+    virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
 };
 
+struct CV_EXPORTS FGDParams
+{
+    int Lc;   // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
+    int N1c;  // Number of color vectors used to model normal background color variation at a given pixel.
+    int N2c;  // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
+              // Used to allow the first N1c vectors to adapt over time to changing background.
+
+    int Lcc;  // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
+    int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
+    int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
+              // Used to allow the first N1cc vectors to adapt over time to changing background.
+
+    bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
+    int perform_morphing;      // Number of erode-dilate-erode foreground-blob cleanup iterations.
+                               // These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
+
+    float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
+    float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
+    float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
+
+    float delta;   // Affects color and color co-occurrence quantization, typically set to 2.
+    float T;       // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
+    float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
+
+    // default Params
+    FGDParams();
+};
+
+CV_EXPORTS Ptr<gpu::BackgroundSubtractorFGD>
+    createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
+
 }} // namespace cv { namespace gpu {
 
 #endif /* __OPENCV_GPUBGSEGM_HPP__ */
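For orientation, here is a minimal sketch of how the refactored interface above is meant to be driven. The types and member functions come from the header shown in this hunk; the include set, helper name and variable names are illustrative assumptions, not part of the commit.

    // Sketch only: assumes the modular GPU headers of this development branch.
    #include <vector>
    #include "opencv2/core.hpp"
    #include "opencv2/gpubgsegm.hpp"

    void runFgd(const cv::Mat& firstFrame)   // hypothetical helper; an 8UC3 frame is assumed
    {
        cv::gpu::FGDParams params;           // defaults filled in by FGDParams()
        params.perform_morphing = 1;         // illustrative override: one erode-dilate-erode cleanup pass

        cv::Ptr<cv::gpu::BackgroundSubtractorFGD> fgd =
            cv::gpu::createBackgroundSubtractorFGD(params);

        cv::gpu::GpuMat d_frame(firstFrame), d_fgmask, d_background;
        std::vector< std::vector<cv::Point> > regions;

        fgd->apply(d_frame, d_fgmask);            // update the model, write the 8UC1 foreground mask
        fgd->getBackgroundImage(d_background);    // current background estimate
        fgd->getForegroundRegions(regions);       // contours of the detected foreground blobs
    }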
@@ -42,6 +42,7 @@
 #include "perf_precomp.hpp"
 #include "opencv2/legacy.hpp"
+#include "opencv2/gpuimgproc.hpp"
 
 using namespace std;
 using namespace testing;
@@ -90,10 +91,10 @@ PERF_TEST_P(Video, FGDStatModel,
 
     if (PERF_RUN_GPU())
     {
-        cv::gpu::GpuMat d_frame(frame);
+        cv::gpu::GpuMat d_frame(frame), foreground, background3, background;
 
-        cv::gpu::FGDStatModel d_model(4);
-        d_model.create(d_frame);
+        cv::Ptr<cv::gpu::BackgroundSubtractorFGD> d_fgd = cv::gpu::createBackgroundSubtractorFGD();
+        d_fgd->apply(d_frame, foreground);
 
         for (int i = 0; i < 10; ++i)
         {
@@ -103,12 +104,12 @@ PERF_TEST_P(Video, FGDStatModel,
             d_frame.upload(frame);
 
             startTimer(); next();
-            d_model.update(d_frame);
+            d_fgd->apply(d_frame, foreground);
             stopTimer();
         }
 
-        const cv::gpu::GpuMat background = d_model.background;
-        const cv::gpu::GpuMat foreground = d_model.foreground;
+        d_fgd->getBackgroundImage(background3);
+        cv::gpu::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
 
         GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
         GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
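One note on this perf hunk: the old test constructed FGDStatModel(4), so its stored sanity baseline for the background image is four-channel, while the refactored subtractor reports a three-channel background. That is presumably why the diff adds opencv2/gpuimgproc.hpp and converts the result before the check; the conversion step, isolated as a fragment of the test body above:

    // Fragment from the test context above; cv::gpu::cvtColor lives in the gpuimgproc module.
    cv::gpu::GpuMat background3, background;
    d_fgd->getBackgroundImage(background3);                          // 8UC3
    cv::gpu::cvtColor(background3, background, cv::COLOR_BGR2BGRA);  // 8UC4, comparable to the old 4-channel output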
@@ -53,7 +53,7 @@
 using namespace cv::gpu;
 using namespace cv::gpu::cudev;
 
-namespace bgfg
+namespace fgd
 {
     ////////////////////////////////////////////////////////////////////////////
     // calcDiffHistogram
@@ -45,7 +45,7 @@
 
 #include "opencv2/core/gpu_types.hpp"
 
-namespace bgfg
+namespace fgd
 {
     struct BGPixelStat
     {
File diff suppressed because it is too large
@@ -72,11 +72,10 @@ namespace cv
 }
 }
 
-PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
+PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string)
 {
     cv::gpu::DeviceInfo devInfo;
     std::string inputFile;
-    int out_cn;
 
     virtual void SetUp()
     {
@@ -84,8 +83,6 @@ PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
         cv::gpu::setDevice(devInfo.deviceID());
 
         inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
-
-        out_cn = GET_PARAM(2);
     }
 };
 
@@ -102,15 +99,10 @@ GPU_TEST_P(FGDStatModel, Update)
     cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
 
     cv::gpu::GpuMat d_frame(frame);
-    cv::gpu::FGDStatModel d_model(out_cn);
-    d_model.create(d_frame);
-
-    cv::Mat h_background;
-    cv::Mat h_foreground;
-    cv::Mat h_background3;
-
-    cv::Mat backgroundDiff;
-    cv::Mat foregroundDiff;
+    cv::Ptr<cv::gpu::BackgroundSubtractorFGD> d_fgd = cv::gpu::createBackgroundSubtractorFGD();
+    cv::gpu::GpuMat d_foreground, d_background;
+    std::vector< std::vector<cv::Point> > foreground_regions;
+    d_fgd->apply(d_frame, d_foreground);
 
     for (int i = 0; i < 5; ++i)
     {
@@ -121,32 +113,23 @@ GPU_TEST_P(FGDStatModel, Update)
         int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
 
         d_frame.upload(frame);
-
-        int count = d_model.update(d_frame);
-
-        ASSERT_EQ(gold_count, count);
+        d_fgd->apply(d_frame, d_foreground);
+        d_fgd->getBackgroundImage(d_background);
+        d_fgd->getForegroundRegions(foreground_regions);
+        int count = (int) foreground_regions.size();
 
         cv::Mat gold_background = cv::cvarrToMat(model->background);
         cv::Mat gold_foreground = cv::cvarrToMat(model->foreground);
 
-        if (out_cn == 3)
-            d_model.background.download(h_background3);
-        else
-        {
-            d_model.background.download(h_background);
-            cv::cvtColor(h_background, h_background3, cv::COLOR_BGRA2BGR);
-        }
-        d_model.foreground.download(h_foreground);
-
-        ASSERT_MAT_NEAR(gold_background, h_background3, 1.0);
-        ASSERT_MAT_NEAR(gold_foreground, h_foreground, 0.0);
+        ASSERT_MAT_NEAR(gold_background, d_background, 1.0);
+        ASSERT_MAT_NEAR(gold_foreground, d_foreground, 0.0);
+        ASSERT_EQ(gold_count, count);
     }
 }
 
 INSTANTIATE_TEST_CASE_P(GPU_BgSegm, FGDStatModel, testing::Combine(
     ALL_DEVICES,
-    testing::Values(std::string("768x576.avi")),
-    testing::Values(Channels(3), Channels(4))));
+    testing::Values(std::string("768x576.avi"))));
 
 #endif
 
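The new getForegroundRegions() call is what replaces update()'s return value in this test: it fills a vector of contours whose size is the blob count. The same container can be fed straight to cv::drawContours. A small illustrative helper, not part of the commit (it additionally assumes opencv2/imgproc.hpp for drawContours):

    // Illustrative only: turn the detected regions into a visualization image.
    static cv::Mat drawForegroundRegions(cv::Ptr<cv::gpu::BackgroundSubtractorFGD> fgd, cv::Size frameSize)
    {
        std::vector< std::vector<cv::Point> > regions;
        fgd->getForegroundRegions(regions);                  // one contour per foreground blob

        cv::Mat vis = cv::Mat::zeros(frameSize, CV_8UC3);
        for (size_t i = 0; i < regions.size(); ++i)
            cv::drawContours(vis, regions, (int)i, cv::Scalar(0, 255, 0), 2);   // outline each blob
        return vis;
    }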
@@ -78,7 +78,7 @@ int main(int argc, const char** argv)
     Ptr<BackgroundSubtractor> mog = gpu::createBackgroundSubtractorMOG();
     Ptr<BackgroundSubtractor> mog2 = gpu::createBackgroundSubtractorMOG2();
     Ptr<BackgroundSubtractor> gmg = gpu::createBackgroundSubtractorGMG(40);
-    FGDStatModel fgd_stat;
+    Ptr<BackgroundSubtractor> fgd = gpu::createBackgroundSubtractorFGD();
 
     GpuMat d_fgmask;
     GpuMat d_fgimg;
@@ -103,7 +103,7 @@ int main(int argc, const char** argv)
         break;
 
     case FGD_STAT:
-        fgd_stat.create(d_frame);
+        fgd->apply(d_frame, d_fgmask);
         break;
     }
 
@@ -142,9 +142,8 @@ int main(int argc, const char** argv)
         break;
 
     case FGD_STAT:
-        fgd_stat.update(d_frame);
-        d_fgmask = fgd_stat.foreground;
-        d_bgimg = fgd_stat.background;
+        fgd->apply(d_frame, d_fgmask);
+        fgd->getBackgroundImage(d_bgimg);
        break;
     }
 
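With these changes the sample's FGD_STAT branch follows the same pattern as the MOG/MOG2/GMG branches: one factory call up front, then apply() plus getBackgroundImage() per frame. A trimmed-down, hedged version of that loop outside the sample (capture source and window names are placeholders; highgui is assumed for display):

    cv::VideoCapture cap("768x576.avi");
    cv::Mat frame, fgmask, bgimg;
    cap >> frame;

    cv::gpu::GpuMat d_frame(frame), d_fgmask, d_bgimg;
    cv::Ptr<cv::BackgroundSubtractor> fgd = cv::gpu::createBackgroundSubtractorFGD();
    fgd->apply(d_frame, d_fgmask);                 // prime the model with the first frame

    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;
        d_frame.upload(frame);

        fgd->apply(d_frame, d_fgmask);             // per-frame foreground mask (8UC1)
        fgd->getBackgroundImage(d_bgimg);          // current background estimate

        d_fgmask.download(fgmask);
        d_bgimg.download(bgimg);
        cv::imshow("foreground mask", fgmask);
        cv::imshow("background", bgimg);
        if (cv::waitKey(30) >= 0)
            break;
    }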
|
@ -1271,14 +1271,14 @@ TEST(FGDStatModel)
|
|||||||
{
|
{
|
||||||
const std::string inputFile = abspath("768x576.avi");
|
const std::string inputFile = abspath("768x576.avi");
|
||||||
|
|
||||||
cv::VideoCapture cap(inputFile);
|
VideoCapture cap(inputFile);
|
||||||
if (!cap.isOpened()) throw runtime_error("can't open 768x576.avi");
|
if (!cap.isOpened()) throw runtime_error("can't open 768x576.avi");
|
||||||
|
|
||||||
cv::Mat frame;
|
Mat frame;
|
||||||
cap >> frame;
|
cap >> frame;
|
||||||
|
|
||||||
IplImage ipl_frame = frame;
|
IplImage ipl_frame = frame;
|
||||||
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
|
Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
|
||||||
|
|
||||||
while (!TestSystem::instance().stop())
|
while (!TestSystem::instance().stop())
|
||||||
{
|
{
|
||||||
@ -1297,8 +1297,10 @@ TEST(FGDStatModel)
|
|||||||
|
|
||||||
cap >> frame;
|
cap >> frame;
|
||||||
|
|
||||||
cv::gpu::GpuMat d_frame(frame);
|
gpu::GpuMat d_frame(frame), d_fgmask;
|
||||||
cv::gpu::FGDStatModel d_model(d_frame);
|
Ptr<BackgroundSubtractor> d_fgd = gpu::createBackgroundSubtractorFGD();
|
||||||
|
|
||||||
|
d_fgd->apply(d_frame, d_fgmask);
|
||||||
|
|
||||||
while (!TestSystem::instance().stop())
|
while (!TestSystem::instance().stop())
|
||||||
{
|
{
|
||||||
@ -1307,7 +1309,7 @@ TEST(FGDStatModel)
|
|||||||
|
|
||||||
TestSystem::instance().gpuOn();
|
TestSystem::instance().gpuOn();
|
||||||
|
|
||||||
d_model.update(d_frame);
|
d_fgd->apply(d_frame, d_fgmask);
|
||||||
|
|
||||||
TestSystem::instance().gpuOff();
|
TestSystem::instance().gpuOff();
|
||||||
}
|
}
|
||||||
|