moved part of video to contrib/{optflow, bgsegm}; moved matlab to contrib
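Note: the samples below are deleted or trimmed because the algorithms they exercise now live in opencv_contrib rather than in the core video module. A minimal sketch of what a caller includes after the move, assuming the contrib bgsegm and optflow modules are built and linked (the helper name is hypothetical; only the headers and namespaces matter):

#include "opencv2/bgsegm.hpp"   // contrib home of the MOG and GMG background subtractors
#include "opencv2/optflow.hpp"  // contrib home of SimpleFlow and the motion-template functions

// Hypothetical helper; it only demonstrates the namespace-qualified factories.
static cv::Ptr<cv::BackgroundSubtractor> makeLegacySubtractor(bool useGMG)
{
    if (useGMG)
        return cv::bgsegm::createBackgroundSubtractorGMG(); // formerly cv::createBackgroundSubtractorGMG()
    return cv::bgsegm::createBackgroundSubtractorMOG();     // formerly cv::createBackgroundSubtractorMOG()
}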
@@ -1,81 +0,0 @@
/*
 * FGBGTest.cpp
 *
 *  Created on: May 7, 2012
 *      Author: Andrew B. Godbehere
 */

#include "opencv2/video.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>

using namespace cv;

static void help()
{
    std::cout <<
    "\nA program demonstrating the use and capabilities of a particular BackgroundSubtraction\n"
    "algorithm described in A. Godbehere, A. Matsukawa, K. Goldberg, \n"
    "\"Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive\n"
    "Audio Art Installation\", American Control Conference, 2012, used in an interactive\n"
    "installation at the Contemporary Jewish Museum in San Francisco, CA from March 31 through\n"
    "July 31, 2011.\n"
    "Call:\n"
    "./BackgroundSubtractorGMG_sample\n"
    "Using OpenCV version " << CV_VERSION << "\n" << std::endl;
}

int main(int argc, char** argv)
{
    help();

    initModule_video();
    setUseOptimized(true);
    setNumThreads(8);

    Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
    if (!fgbg)
    {
        std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
        return -1;
    }

    VideoCapture cap;
    if (argc > 1)
        cap.open(argv[1]);
    else
        cap.open(0);

    if (!cap.isOpened())
    {
        std::cerr << "Cannot read video. Try moving video file to sample directory." << std::endl;
        return -1;
    }

    Mat frame, fgmask, segm;

    namedWindow("FG Segmentation", WINDOW_NORMAL);

    for (;;)
    {
        cap >> frame;

        if (frame.empty())
            break;

        fgbg->apply(frame, fgmask);

        frame.convertTo(segm, CV_8U, 0.5);
        add(frame, Scalar(100, 100, 0), segm, fgmask);

        imshow("FG Segmentation", segm);

        int c = waitKey(30);
        if (c == 'q' || c == 'Q' || (c & 255) == 27)
            break;
    }

    return 0;
}
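Note: the deleted GMG sample above has no in-tree replacement after this commit. A roughly equivalent loop, assuming the factory is taken from the contrib bgsegm module (a sketch mirroring the sample's parameters, not a verified drop-in):

#include "opencv2/bgsegm.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

int main(int argc, char** argv)
{
    // Same parameters as the deleted sample: 20 initialization frames, 0.7 decision threshold.
    cv::Ptr<cv::BackgroundSubtractor> fgbg = cv::bgsegm::createBackgroundSubtractorGMG(20, 0.7);

    cv::VideoCapture cap;
    if (argc > 1) cap.open(argv[1]); else cap.open(0);
    if (fgbg.empty() || !cap.isOpened()) return -1;

    cv::Mat frame, fgmask, segm;
    for (;;)
    {
        cap >> frame;
        if (frame.empty()) break;

        fgbg->apply(frame, fgmask);                              // foreground mask
        frame.convertTo(segm, CV_8U, 0.5);                       // dimmed copy of the frame
        cv::add(frame, cv::Scalar(100, 100, 0), segm, fgmask);   // tint the foreground
        cv::imshow("FG Segmentation", segm);
        if ((cv::waitKey(30) & 255) == 27) break;                // ESC quits
    }
    return 0;
}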
@@ -21,6 +21,8 @@ static void help()
const char* keys =
{
    "{c camera | | use camera or not}"
    "{m method |mog2 | method (knn or mog2) }"
    "{s smooth | | smooth the mask }"
    "{fn file_name|tree.avi | movie file }"
};

@@ -31,7 +33,9 @@ int main(int argc, const char** argv)

    CommandLineParser parser(argc, argv, keys);
    bool useCamera = parser.has("camera");
    bool smoothMask = parser.has("smooth");
    string file = parser.get<string>("file_name");
    string method = parser.get<string>("method");
    VideoCapture cap;
    bool update_bg_model = true;

@@ -53,24 +57,31 @@ int main(int argc, const char** argv)
    namedWindow("foreground image", WINDOW_NORMAL);
    namedWindow("mean background image", WINDOW_NORMAL);

    Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
    Ptr<BackgroundSubtractor> bg_model = method == "knn" ?
            createBackgroundSubtractorKNN().dynamicCast<BackgroundSubtractor>() :
            createBackgroundSubtractorMOG2().dynamicCast<BackgroundSubtractor>();

    Mat img, fgmask, fgimg;
    Mat img0, img, fgmask, fgimg;

    for(;;)
    {
        cap >> img;
        cap >> img0;

        if( img.empty() )
        if( img0.empty() )
            break;

        //cvtColor(_img, img, COLOR_BGR2GRAY);
        resize(img0, img, Size(640, 640*img0.rows/img0.cols), INTER_LINEAR);

        if( fgimg.empty() )
            fgimg.create(img.size(), img.type());

        //update the model
        bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
        if( smoothMask )
        {
            GaussianBlur(fgmask, fgmask, Size(11, 11), 3.5, 3.5);
            threshold(fgmask, fgmask, 10, 255, THRESH_BINARY);
        }

        fgimg = Scalar::all(0);
        img.copyTo(fgimg, fgmask);
@@ -1,204 +0,0 @@
#include "opencv2/video/tracking_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <time.h>
#include <stdio.h>
#include <ctype.h>

static void help(void)
{
    printf(
        "\nThis program demonstrates the use of motion templates -- basically using the gradients\n"
        "of thresholded layers of decaying frame differencing. New movements are stamped on top with floating system\n"
        "time code and motions too old are thresholded away. This is the 'motion history file'. The program reads from the camera of your choice or from\n"
        "a file. Gradients of motion history are used to detect direction of motion etc\n"
        "Usage :\n"
        "./motempl [camera number 0-n or file name, default is camera 0]\n"
        );
}
// various tracking parameters (in seconds)
const double MHI_DURATION = 1;
const double MAX_TIME_DELTA = 0.5;
const double MIN_TIME_DELTA = 0.05;
// number of cyclic frame buffer used for motion detection
// (should, probably, depend on FPS)
const int N = 4;

// ring image buffer
IplImage **buf = 0;
int last = 0;

// temporary images
IplImage *mhi = 0; // MHI
IplImage *orient = 0; // orientation
IplImage *mask = 0; // valid orientation mask
IplImage *segmask = 0; // motion segmentation map
CvMemStorage* storage = 0; // temporary storage

// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}


int main(int argc, char** argv)
{
    IplImage* motion = 0;
    CvCapture* capture = 0;

    help();

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );

    if( capture )
    {
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image = cvQueryFrame( capture );
            if( !image )
                break;

            if( !motion )
            {
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin;
            }

            update_mhi( image, motion, 30 );
            cvShowImage( "Motion", motion );

            if( cvWaitKey(10) >= 0 )
                break;
        }
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }

    return 0;
}

#ifdef _EiC
main(1,"motempl.c");
#endif
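Note: the C motion-template primitives used above (cvUpdateMotionHistory, cvCalcMotionGradient, cvSegmentMotion, cvCalcGlobalOrientation) are part of what this commit relocates; in opencv_contrib they are exposed by the optflow module under cv::motempl. A sketch of the core of update_mhi with that API, assuming the cv::motempl names and the opencv2/optflow.hpp umbrella header from contrib (not a full port of motempl.c):

#include "opencv2/optflow.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>

// mhi must be a zero-initialized CV_32FC1 image of the frame size, owned by the caller.
static double updateMotionHistorySketch(const cv::Mat& prevGray, const cv::Mat& gray,
                                        cv::Mat& mhi, double timestamp)
{
    const double MHI_DURATION = 1, MAX_TIME_DELTA = 0.5, MIN_TIME_DELTA = 0.05;

    cv::Mat silh, mask, orient, segmask;
    cv::absdiff(prevGray, gray, silh);                    // frame difference
    cv::threshold(silh, silh, 30, 1, cv::THRESH_BINARY);  // silhouette
    cv::motempl::updateMotionHistory(silh, mhi, timestamp, MHI_DURATION);

    // Motion gradient and per-component segmentation, as in the C code above.
    cv::motempl::calcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
    std::vector<cv::Rect> regions;
    cv::motempl::segmentMotion(mhi, segmask, regions, timestamp, MAX_TIME_DELTA);

    // Global orientation over the whole image (the i == -1 case above).
    return cv::motempl::calcGlobalOrientation(orient, mask, mhi, timestamp, MHI_DURATION);
}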
@@ -88,8 +88,8 @@ int main(int argc, char** argv)
    namedWindow("video", 1);
    namedWindow("segmented", 1);

    Ptr<BackgroundSubtractorMOG> bgsubtractor=createBackgroundSubtractorMOG();
    bgsubtractor->setNoiseSigma(10);
    Ptr<BackgroundSubtractorMOG2> bgsubtractor=createBackgroundSubtractorMOG2();
    bgsubtractor->setVarThreshold(10);

    for(;;)
    {
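Note: the hunk above switches the sample from MOG with setNoiseSigma(10) to MOG2 with setVarThreshold(10), since MOG leaves the video module. If the original MOG behaviour is wanted, a sketch using the contrib bgsegm factory (argument order and defaults as I understand the contrib header; verify against opencv2/bgsegm.hpp):

#include "opencv2/bgsegm.hpp"

// Sketch: MOG as before, with noiseSigma = 10 and assumed defaults for the rest.
static cv::Ptr<cv::BackgroundSubtractor> makeMogSubtractor()
{
    return cv::bgsegm::createBackgroundSubtractorMOG(/*history=*/200, /*nmixtures=*/5,
                                                     /*backgroundRatio=*/0.7, /*noiseSigma=*/10);
}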
@@ -1,221 +0,0 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"

#include <cstdio>
#include <iostream>

using namespace cv;
using namespace std;

#define APP_NAME "simpleflow_demo : "

static void help()
{
    // print a welcome message, and the OpenCV version
    printf("This is a demo of SimpleFlow optical flow algorithm,\n"
           "Using OpenCV version %s\n\n", CV_VERSION);

    printf("Usage: simpleflow_demo frame1 frame2 output_flow"
           "\nApplication will write estimated flow "
           "\nbetween 'frame1' and 'frame2' in binary format"
           "\ninto file 'output_flow'"
           "\nThen one can use code from http://vision.middlebury.edu/flow/data/"
           "\nto convert flow in binary file to image\n");
}

// binary file format for flow data specified here:
// http://vision.middlebury.edu/flow/data/
static void writeOpticalFlowToFile(const Mat& flow, FILE* file) {
    int cols = flow.cols;
    int rows = flow.rows;

    fprintf(file, "PIEH");

    if (fwrite(&cols, sizeof(int), 1, file) != 1 ||
        fwrite(&rows, sizeof(int), 1, file) != 1) {
        printf(APP_NAME "writeOpticalFlowToFile : problem writing header\n");
        exit(1);
    }

    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            Vec2f flow_at_point = flow.at<Vec2f>(i, j);

            if (fwrite(&(flow_at_point[0]), sizeof(float), 1, file) != 1 ||
                fwrite(&(flow_at_point[1]), sizeof(float), 1, file) != 1) {
                printf(APP_NAME "writeOpticalFlowToFile : problem writing data\n");
                exit(1);
            }
        }
    }
}

static void run(int argc, char** argv) {
    if (argc < 3) {
        printf(APP_NAME "Wrong number of command line arguments for mode `run`: %d (expected %d)\n",
               argc, 3);
        exit(1);
    }

    Mat frame1 = imread(argv[0]);
    Mat frame2 = imread(argv[1]);

    if (frame1.empty()) {
        printf(APP_NAME "Image #1 : %s cannot be read\n", argv[0]);
        exit(1);
    }

    if (frame2.empty()) {
        printf(APP_NAME "Image #2 : %s cannot be read\n", argv[1]);
        exit(1);
    }

    if (frame1.rows != frame2.rows && frame1.cols != frame2.cols) {
        printf(APP_NAME "Images should be of equal sizes\n");
        exit(1);
    }

    if (frame1.type() != 16 || frame2.type() != 16) {
        printf(APP_NAME "Images should be of equal type CV_8UC3\n");
        exit(1);
    }

    printf(APP_NAME "Read two images of size [rows = %d, cols = %d]\n",
           frame1.rows, frame1.cols);

    Mat flow;

    float start = (float)getTickCount();
    calcOpticalFlowSF(frame1, frame2,
                      flow,
                      3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10);
    printf(APP_NAME "calcOpticalFlowSF : %lf sec\n", (getTickCount() - start) / getTickFrequency());

    FILE* file = fopen(argv[2], "wb");
    if (file == NULL) {
        printf(APP_NAME "Unable to open file '%s' for writing\n", argv[2]);
        exit(1);
    }
    printf(APP_NAME "Writing to file\n");
    writeOpticalFlowToFile(flow, file);
    fclose(file);
}

static bool readOpticalFlowFromFile(FILE* file, Mat& flow) {
    char header[5];
    if (fread(header, 1, 4, file) < 4 && (string)header != "PIEH") {
        return false;
    }

    int cols, rows;
    if (fread(&cols, sizeof(int), 1, file) != 1 ||
        fread(&rows, sizeof(int), 1, file) != 1) {
        return false;
    }

    flow = Mat::zeros(rows, cols, CV_32FC2);

    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            Vec2f flow_at_point;
            if (fread(&(flow_at_point[0]), sizeof(float), 1, file) != 1 ||
                fread(&(flow_at_point[1]), sizeof(float), 1, file) != 1) {
                return false;
            }
            flow.at<Vec2f>(i, j) = flow_at_point;
        }
    }

    return true;
}

static bool isFlowCorrect(float u) {
    return !cvIsNaN(u) && (fabs(u) < 1e9);
}

static float calc_rmse(Mat flow1, Mat flow2) {
    float sum = 0;
    int counter = 0;
    const int rows = flow1.rows;
    const int cols = flow1.cols;

    for (int y = 0; y < rows; ++y) {
        for (int x = 0; x < cols; ++x) {
            Vec2f flow1_at_point = flow1.at<Vec2f>(y, x);
            Vec2f flow2_at_point = flow2.at<Vec2f>(y, x);

            float u1 = flow1_at_point[0];
            float v1 = flow1_at_point[1];
            float u2 = flow2_at_point[0];
            float v2 = flow2_at_point[1];

            if (isFlowCorrect(u1) && isFlowCorrect(u2) && isFlowCorrect(v1) && isFlowCorrect(v2)) {
                sum += (u1-u2)*(u1-u2) + (v1-v2)*(v1-v2);
                counter++;
            }
        }
    }
    return (float)sqrt(sum / (1e-9 + counter));
}

static void eval(int argc, char** argv) {
    if (argc < 2) {
        printf(APP_NAME "Wrong number of command line arguments for mode `eval` : %d (expected %d)\n",
               argc, 2);
        exit(1);
    }

    Mat flow1, flow2;

    FILE* flow_file_1 = fopen(argv[0], "rb");
    if (flow_file_1 == NULL) {
        printf(APP_NAME "Cannot open file with first flow : %s\n", argv[0]);
        exit(1);
    }
    if (!readOpticalFlowFromFile(flow_file_1, flow1)) {
        printf(APP_NAME "Cannot read flow data from file %s\n", argv[0]);
        exit(1);
    }
    fclose(flow_file_1);

    FILE* flow_file_2 = fopen(argv[1], "rb");
    if (flow_file_2 == NULL) {
        printf(APP_NAME "Cannot open file with second flow : %s\n", argv[1]);
        exit(1);
    }
    if (!readOpticalFlowFromFile(flow_file_2, flow2)) {
        printf(APP_NAME "Cannot read flow data from file %s\n", argv[1]);
        exit(1);
    }
    fclose(flow_file_2);

    float rmse = calc_rmse(flow1, flow2);
    printf("%lf\n", rmse);
}

int main(int argc, char** argv) {
    if (argc < 2) {
        printf(APP_NAME "Mode is not specified\n");
        help();
        exit(1);
    }
    string mode = (string)argv[1];
    int new_argc = argc - 2;
    char** new_argv = &argv[2];

    if ("run" == mode) {
        run(new_argc, new_argv);
    } else if ("eval" == mode) {
        eval(new_argc, new_argv);
    } else if ("help" == mode)
        help();
    else {
        printf(APP_NAME "Unknown mode : %s\n", argv[1]);
        help();
    }

    return 0;
}
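Note: calcOpticalFlowSF, which the deleted demo above exercises, is one of the functions this commit moves; in opencv_contrib it is provided by the optflow module. A sketch of the demo's run() step with the namespaced call, assuming the short overload (layers, averaging_block_size, max_flow) and, tentatively, a contrib writer for the Middlebury .flo format; if writeOpticalFlow is not available in your build, keep the demo's hand-rolled PIEH writer above:

#include "opencv2/optflow.hpp"
#include "opencv2/imgcodecs.hpp"

static int runSimpleFlow(const char* frame1Path, const char* frame2Path, const char* outPath)
{
    cv::Mat frame1 = cv::imread(frame1Path);
    cv::Mat frame2 = cv::imread(frame2Path);
    if (frame1.empty() || frame2.empty() || frame1.size() != frame2.size())
        return -1;                                 // same checks as the demo, condensed

    cv::Mat flow;
    // 3 layers, 2x2 averaging block, max flow 4, mirroring the demo's first three parameters.
    cv::optflow::calcOpticalFlowSF(frame1, frame2, flow, 3, 2, 4);

    // Assumed contrib helper for the Middlebury format (PIEH header, float pairs).
    return cv::optflow::writeOpticalFlow(outPath, flow) ? 0 : -1;
}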
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -20,9 +20,7 @@ using namespace std;

// Global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard; //input from keyboard

@@ -63,11 +61,9 @@ int main(int argc, char* argv[])

    //create GUI windows
    namedWindow("Frame");
    namedWindow("FG Mask MOG");
    namedWindow("FG Mask MOG 2");

    //create Background Subtractor objects
    pMOG = createBackgroundSubtractorMOG(); //MOG approach
    pMOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach

    if(strcmp(argv[1], "-vid") == 0) {
@@ -109,7 +105,6 @@ void processVideo(char* videoFilename) {
            exit(EXIT_FAILURE);
        }
        //update the background model
        pMOG->apply(frame, fgMaskMOG);
        pMOG2->apply(frame, fgMaskMOG2);
        //get the frame number and write it on the current frame
        stringstream ss;
@@ -121,7 +116,6 @@ void processVideo(char* videoFilename) {
                FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG", fgMaskMOG);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey( 30 );
@@ -146,7 +140,6 @@ void processImages(char* fistFrameFilename) {
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
        //update the background model
        pMOG->apply(frame, fgMaskMOG);
        pMOG2->apply(frame, fgMaskMOG2);
        //get the frame number and write it on the current frame
        size_t index = fn.find_last_of("/");
@@ -166,7 +159,6 @@ void processImages(char* fistFrameFilename) {
                FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG", fgMaskMOG);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey( 30 );