propagated some more changes from 2.3 to trunk

This commit is contained in:
Vadim Pisarevsky
2011-06-23 12:15:35 +00:00
parent 6e613bca9b
commit f0624c08dc
116 changed files with 22049 additions and 192420 deletions

View File

@@ -17,8 +17,8 @@ void help()
{
printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
"Correspondences are drawn\n");
printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n");
printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n");
printf("Format: \n./one_way_sample <path_to_samples> <image1> <image2>\n");
printf("For example: ./one_way_sample . ../c/scene_l.bmp ../c/scene_r.bmp\n");
}
using namespace cv;
@@ -32,7 +32,7 @@ int main(int argc, char** argv)
const CvSize patch_size = cvSize(24, 24);
const int pose_count = 50;
if (argc != 3 && argc != 4)
if (argc != 4)
{
help();
return 0;

View File

@@ -9,9 +9,9 @@ using namespace cv;
void help()
{
printf("Use the SURF descriptor for matching keypoints between 2 images\n");
printf("Format: \n./generic_descriptor_match [image1] [image2] [algorithm] [XML params]\n");
printf("For example: ./generic_descriptor_match scene_l.bmp scene_r.bmp FERN fern_params.xml\n");
printf("Use the SURF descriptor for matching keypoints between 2 images\n");
printf("Format: \n./generic_descriptor_match <image1> <image2> <algorithm> <XML params>\n");
printf("For example: ./generic_descriptor_match ../c/scene_l.bmp ../c/scene_r.bmp FERN fern_params.xml\n");
}
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,

View File

@@ -1,12 +0,0 @@
# -------------------------------------------------------------------------
# CMake file for Octave samples. See root CMakeLists.txt
# -------------------------------------------------------------------------
# Collect every Octave sample script (*.m) in this directory.
file(GLOB OCTAVE_SAMPLES *.m)
# Samples are only installed on non-Windows platforms (no standard
# share/ prefix layout on Windows).
if(NOT WIN32)
install(FILES ${OCTAVE_SAMPLES}
DESTINATION share/opencv/samples/octave
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()

View File

@@ -1,184 +0,0 @@
#! /usr/bin/env octave
## -------------------------------------------------------------------------
## Hue-histogram camera demo: grabs frames from a capture device, mirrors
## them, converts to HSV, computes a hue histogram over the whole frame
## (masked to the S/V ranges below) and displays frame + histogram until
## ESC is pressed.
## NOTE(review): the addpath/source lines hard-code one developer's home
## directory, so the script only runs on that machine as-is.
## -------------------------------------------------------------------------
## import the necessary things for OpenCV
addpath("/home/x/opencv2/interfaces/swig/octave");
source("/home/x/opencv2/interfaces/swig/octave/PKG_ADD_template");
debug_on_error(true);
debug_on_warning(true);
crash_dumps_octave_core (0)
cv;
highgui;
#############################################################################
## definition of some constants
## how many bins we want for the histogram, and their ranges
hdims = 16;
hranges = {0, 180};
## ranges for the limitation of the histogram
vmin = 10;
vmax = 256;
smin = 30;
## the range we want to monitor
hsv_min = cv.cvScalar (0, smin, vmin, 0);
hsv_max = cv.cvScalar (180, 256, vmax, 0);
#############################################################################
## some useful functions
## Map a hue value in [0,180) to a BGR cvScalar, for coloring histogram bars.
function rgb = hsv2rgb (hue)
global cv;
## convert the hue value to the corresponding rgb value
sector_data = [0, 2, 1; 1, 2, 0; 1, 0, 2; 2, 0, 1; 2, 1, 0; 0, 1, 2]+1;
## 0.1/3 == 1/30: maps hue 0..180 onto the six 30-degree sectors 0..5
hue *= 0.1 / 3;
sector = cv.cvFloor (hue);
p = cv.cvRound (255 * (hue - sector));
## odd sectors ramp down instead of up
if (bitand(sector,1))
p = bitxor(p,255);
endif
rgb = zeros(1,3);
rgb (sector_data (sector+1, 1)) = 255;
rgb (sector_data (sector+1, 2)) = 0;
rgb (sector_data (sector+1, 3)) = p;
## cvScalar takes B, G, R order here
rgb = cv.cvScalar (rgb (3), rgb (2), rgb (1), 0);
endfunction
#############################################################################
## so, here is the main part of the program
## a small welcome
printf("OpenCV Octave wrapper test\n");
printf("OpenCV version: %s (%d, %d, %d)\n",
cv.CV_VERSION,cv.CV_MAJOR_VERSION,
cv.CV_MINOR_VERSION,cv.CV_SUBMINOR_VERSION);
## first, create the necessary windows
highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE);
highgui.cvNamedWindow ('Histogram', highgui.CV_WINDOW_AUTOSIZE);
## move the new window to a better place
#highgui.cvMoveWindow ('Camera', 10, 40);
#highgui.cvMoveWindow ('Histogram', 10, 270);
try
## try to get the device number from the command line
device = int32 (argv(){1});
## NOTE(review): have_device is set but never read again in this script
have_device = true;
catch
## no device number on the command line, assume we want the 1st device
device = -1;
end_try_catch
## no argument on the command line, try to use the camera
capture = highgui.cvCreateCameraCapture (device);
## set the wanted image size from the camera
highgui.cvSetCaptureProperty (capture, \
highgui.CV_CAP_PROP_FRAME_WIDTH, 320);
highgui.cvSetCaptureProperty (capture, \
highgui.CV_CAP_PROP_FRAME_HEIGHT, 240);
## create an image to put in the histogram
histimg = cv.cvCreateImage (cv.cvSize (320,240), 8, 3);
## init the image of the histogram to black
cv.cvSetZero (histimg);
## capture the 1st frame to get some propertie on it
frame = highgui.cvQueryFrame (capture);
## get some properties of the frame
frame_size = cv.cvGetSize (frame);
## compute which selection of the frame we want to monitor
## (here: the whole frame)
selection = cv.cvRect (0, 0, frame.width, frame.height);
## create some images usefull later
hue = cv.cvCreateImage (frame_size, 8, 1);
mask = cv.cvCreateImage (frame_size, 8, 1);
hsv = cv.cvCreateImage (frame_size, 8, 3 );
## create the histogram
hist = cv.cvCreateHist ({hdims}, cv.CV_HIST_ARRAY, {hranges}, 1);
while (1) ## do forever
## 1. capture the current image
frame = highgui.cvQueryFrame (capture);
if (swig_this(frame)==0);
## no image captured... end the processing
break
endif
## mirror the captured image
cv.cvFlip (frame, [], 1);
## compute the hsv version of the image
cv.cvCvtColor (frame, hsv, cv.CV_BGR2HSV);
## compute which pixels are in the wanted range
cv.cvInRangeS (hsv, hsv_min, hsv_max, mask);
## extract the hue from the hsv array
cv.cvSplit (hsv, hue, [], [], []);
## select the rectangle of interest in the hue/mask arrays
hue_roi = cv.cvGetSubRect (hue, selection);
mask_roi = cv.cvGetSubRect (mask, selection);
## it's time to compute the histogram
cv.cvCalcHist (hue_roi, hist, 0, mask_roi);
## extract the min and max value of the histogram
[min_val, max_val, min_idx, max_idx] = cv.cvGetMinMaxHistValue (hist);
## compute the scale factor
if (max_val > 0)
scale = 255. / max_val;
else
scale = 0.;
endif
## scale the histograms
cv.cvConvertScale (hist.bins, hist.bins, scale, 0);
## clear the histogram image
cv.cvSetZero (histimg);
## compute the width for each bin do display
bin_w = histimg.width / hdims;
for (i=0:hdims-1)
## for all the bins
## get the value, and scale to the size of the hist image
val = cv.cvRound (cv.cvGetReal1D (hist.bins, i)
* histimg.height / 255);
## compute the color
color = hsv2rgb (i * 180. / hdims);
## draw the rectangle in the wanted color
cv.cvRectangle (histimg,
cv.cvPoint (i * bin_w, histimg.height),
cv.cvPoint ((i + 1) * bin_w, histimg.height - val),
color, -1, 8, 0);
## we can now display the images
## NOTE(review): these two cvShowImage calls are inside the per-bin
## loop, so both windows are refreshed hdims times per frame; they
## look like they belong just after the endfor below.
highgui.cvShowImage ('Camera', frame);
highgui.cvShowImage ('Histogram', histimg);
endfor
## handle events
k = highgui.cvWaitKey (5);
if (k == 27)
## user has press the ESC key, so exit
break;
endif
endwhile

View File

@@ -1,107 +0,0 @@
#! /usr/bin/env octave
## -------------------------------------------------------------------------
## Capture-and-record demo: reads frames from a camera, writes them to
## "captured.mpg" (MPEG1) and shows them in a window until ESC is pressed.
## NOTE(review): the addpath/source lines hard-code one developer's home
## directory, so the script only runs on that machine as-is.
## -------------------------------------------------------------------------
## import the necessary things for OpenCV
addpath("/home/x/opencv2/interfaces/swig/octave");
source("/home/x/opencv2/interfaces/swig/octave/PKG_ADD_template");
debug_on_error(true);
debug_on_warning(true);
crash_dumps_octave_core (0)
cv;
highgui;
## the codec existing in cvcapp.cpp,
## need to have a better way to specify them in the future
## WARNING: I have see only MPEG1VIDEO working on my computer
## (these are FOURCC codes spelled out as 32-bit integers)
H263 = 0x33363255;
H263I = 0x33363249;
MSMPEG4V3 = 0x33564944;
MPEG4 = 0x58564944;
MSMPEG4V2 = 0x3234504D;
MJPEG = 0x47504A4D;
MPEG1VIDEO = 0x314D4950;
AC3 = 0x2000;
MP2 = 0x50;
FLV1 = 0x31564C46;
#############################################################################
## so, here is the main part of the program
## a small welcome
printf("OpenCV Octave capture video\n");
## first, create the necessary window
highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE);
## move the new window to a better place
#highgui.cvMoveWindow ('Camera', 10, 10);
try
## try to get the device number from the command line
device = int32 (argv(){1});
catch
## no device number on the command line, assume we want the 1st device
device = -1;
end_try_catch
## no argument on the command line, try to use the camera
capture = highgui.cvCreateCameraCapture (device);
## check that capture device is OK
if (!swig_this(capture))
printf("Error opening capture device\n");
exit (1);
endif
## capture the 1st frame to get some propertie on it
frame = highgui.cvQueryFrame (capture);
## get size of the frame
frame_size = cv.cvGetSize (frame);
## get the frame rate of the capture device
fps = highgui.cvGetCaptureProperty (capture, highgui.CV_CAP_PROP_FPS);
if (fps == 0)
## no fps getted, so set it to 30 by default
fps = 30;
endif
## create the writer
writer = highgui.cvCreateVideoWriter ("captured.mpg", MPEG1VIDEO,
fps, frame_size, true);
## check the writer is OK
if (!swig_this(writer))
printf("Error opening writer\n");
exit(1);
endif
while (1)
## do forever
## 1. capture the current image
frame = highgui.cvQueryFrame (capture);
if (swig_this(frame) == 0)
## no image captured... end the processing
break
endif
## write the frame to the output file
highgui.cvWriteFrame (writer, frame);
## display the frames to have a visual output
highgui.cvShowImage ('Camera', frame);
## handle events
k = highgui.cvWaitKey (5);
## NOTE(review): in Octave '==' binds tighter than '&', so this reads
## k & (0x100 == 27), i.e. k & 0, which is always false — ESC never
## exits this loop. The intent was probably bitand(k, 0xff) == 27.
if (k & 0x100 == 27)
## user has press the ESC key, so exit
break
endif
endwhile
## end working with the writer
## not working at this time... Need to implement some typemaps...
## but exiting without calling it is OK in this simple application
##highgui.cvReleaseVideoWriter (writer)

View File

@@ -1,23 +0,0 @@
#! /usr/bin/env octave
## -------------------------------------------------------------------------
## Chessboard demo: loads the image named on the command line, detects the
## inner corners of a 5x6 chessboard pattern and draws them on a color copy.
## -------------------------------------------------------------------------
cv;
highgui;
arg_list=argv();
cvNamedWindow("win");
## an image filename argument is mandatory
if (!size(arg_list,1))
error("must specify filename");
exit
endif
filename = arg_list{1};
## load twice: a grayscale copy for detection, a color copy for drawing
im = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE);
im3 = cvLoadImage(filename, CV_LOAD_IMAGE_COLOR);
chessboard_dim = cvSize( 5, 6 );
[found_all, corners] = cvFindChessboardCorners( im, chessboard_dim );
cvDrawChessboardCorners( im3, chessboard_dim, corners, found_all );
cvShowImage("win", im3);
cvWaitKey();

View File

@@ -1,140 +0,0 @@
#! /usr/bin/env octave
printf("OpenCV Octave version of contours\n");
## -------------------------------------------------------------------------
## Contours demo: draws six cartoon "faces" made of ellipses into a
## synthetic image, extracts their contours, and lets a trackbar select
## how many contour levels are displayed.
## -------------------------------------------------------------------------
## import the necessary things for OpenCV
cv;
global _SIZE;
global _red;
global _green;
## NOTE(review): _blue is declared global here and in on_trackbar but is
## never assigned anywhere in this script.
global _blue;
global contours;
## some default constants
_SIZE = 500;
_DEFAULT_LEVEL = 3;
## definition of some colors
_red = cvScalar (0, 0, 255, 0);
_green = cvScalar (0, 255, 0, 0);
_white = cvRealScalar (255);
_black = cvRealScalar (0);
## the callback on the trackbar, to set the level of contours we want
## to display
function on_trackbar (position)
global cv;
global _SIZE;
global _red;
global _green;
global _blue;
global contours;
## create the image for putting in it the founded contours
contours_image = cvCreateImage (cvSize (_SIZE, _SIZE), 8, 3);
## compute the real level of display, given the current position
levels = position - 3;
## initialisation
_contours = contours;
if (levels <= 0)
## zero or negative value
## => get to the nearest face to make it look more funny
_contours = contours.h_next.h_next.h_next;
endif
## first, clear the image where we will draw contours
cvSetZero (contours_image);
## draw contours in red and green
cvDrawContours (contours_image, _contours, _red, _green, levels, 3, cv.CV_AA, cvPoint (0, 0));
## finally, show the image
cvShowImage ("contours", contours_image);
endfunction
## create the image where we want to display results
image = cvCreateImage (cvSize (_SIZE, _SIZE), 8, 1);
## start with an empty image
cvSetZero (image);
## draw the original picture: six faces, each built from white/black
## ellipses (head, eyes, pupils, mouth, ears)
for i=0:6-1,
dx = mod(i,2) * 250 - 30;
dy = (i / 2) * 150;
cvEllipse (image,
cvPoint (dx + 150, dy + 100),
cvSize (100, 70),
0, 0, 360, _white, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 115, dy + 70),
cvSize (30, 20),
0, 0, 360, _black, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 185, dy + 70),
cvSize (30, 20),
0, 0, 360, _black, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 115, dy + 70),
cvSize (15, 15),
0, 0, 360, _white, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 185, dy + 70),
cvSize (15, 15),
0, 0, 360, _white, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 115, dy + 70),
cvSize (5, 5),
0, 0, 360, _black, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 185, dy + 70),
cvSize (5, 5),
0, 0, 360, _black, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 150, dy + 100),
cvSize (10, 5),
0, 0, 360, _black, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 150, dy + 150),
cvSize (40, 10),
0, 0, 360, _black, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 27, dy + 100),
cvSize (20, 35),
0, 0, 360, _white, -1, 8, 0);
cvEllipse (image,
cvPoint (dx + 273, dy + 100),
cvSize (20, 35),
0, 0, 360, _white, -1, 8, 0);
endfor
## create window and display the original picture in it
cvNamedWindow ("image", 1);
cvShowImage ("image", image);
## create the storage area
storage = cvCreateMemStorage (0);
## find the contours
[nb_contours, contours] = cvFindContours (image, storage, sizeof_CvContour, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0,0));
## comment this out if you do not want approximation
contours = cvApproxPoly (contours, sizeof_CvContour, storage, CV_POLY_APPROX_DP, 3, 1);
## create the window for the contours
cvNamedWindow ("contours", 1);
## create the trackbar, to enable the change of the displayed level
cvCreateTrackbar ("levels+3", "contours", 3, 7, @on_trackbar);
## call one time the callback, so we will have the 1st display done
on_trackbar (_DEFAULT_LEVEL);
## wait a key pressed to end
cvWaitKey (0);

View File

@@ -1,74 +0,0 @@
#! /usr/bin/env octave
printf("OpenCV Octave version of convexhull\n");
## -------------------------------------------------------------------------
## Convex-hull demo: repeatedly scatters random points in the middle of a
## 500x500 image, computes their convex hull and draws points + hull edges.
## Press ESC to quit; any other key generates a new point set.
## -------------------------------------------------------------------------
## import the necessary things for OpenCV
cv;
highgui;
## how many points we want at max
_MAX_POINTS = 100;
## create the image where we want to display results
image = cv.cvCreateImage (cv.cvSize (500, 500), 8, 3);
## create the window to put the image in
highgui.cvNamedWindow ('hull', highgui.CV_WINDOW_AUTOSIZE);
while (true)
## do forever
## get a random number of points
## NOTE(review): the missing semicolon makes Octave echo count to the
## terminal each iteration — possibly intentional as a trace.
count = int32(rand()*_MAX_POINTS)+1
## initialisations
points = {};
for i=1:count,
## generate a random point inside the central half of the image
points{i} = cv.cvPoint \
(int32(rand() * (image.width / 2) + image.width / 4), \
int32(rand() * (image.height / 2) + image.height / 4)); \
endfor
## compute the convex hull
hull = cv.cvConvexHull2 (points, cv.CV_CLOCKWISE, 0);
## start with an empty image
cv.cvSetZero (image);
for i=1:count,
## draw all the points
cv.cvCircle (image, points {i}, 2, \
cv.cvScalar (0, 0, 255, 0), \
cv.CV_FILLED, cv.CV_AA, 0);
endfor
## start the line from the last point
## NOTE(review): hull [-1] looks like a leftover Python negative index;
## Octave does not index from the end this way — likely should be
## hull(hull.rows) (or equivalent) to fetch the last hull vertex.
pt0 = points {hull [-1]};
for point_index = 1:hull.rows,
## connect the previous point to the current one
## get the current one
pt1 = points {point_index};
## draw
cv.cvLine (image, pt0, pt1, \
cv.cvScalar (0, 255, 0, 0), \
1, cv.CV_AA, 0);
## now, current one will be the previous one for the next iteration
pt0 = pt1;
endfor
## display the final image
highgui.cvShowImage ('hull', image);
## handle events, and wait a key pressed
k = highgui.cvWaitKey (0);
if (k == '\x1b')
## user has press the ESC key, so exit
break
endif
endwhile

View File

@@ -1,170 +0,0 @@
#! /usr/bin/env octave
## the script demostrates iterative construction of
## delaunay triangulation and voronoi tesselation
## Original Author (C version): ?
## Converted to Python by: Roman Stanchak
## Converted to Octave by: Xavier Delacour
## NOTE(review): the Python->Octave conversion looks unfinished — several
## Python constructs remain below ("for x in ...", "assert", ".append",
## "len", "randint") which are not valid Octave; flagged inline.
cv;
highgui;
## draw a small filled circle at subdivision point fp
function draw_subdiv_point( img, fp, color )
global CV_FILLED;
cvCircle( img, cvPoint(cvRound(fp.x), cvRound(fp.y)), 3, color, \
CV_FILLED, 8, 0 );
endfunction
## draw one subdivision edge as an antialiased line segment
function draw_subdiv_edge( img, edge, color )
global CV_AA;
org_pt = cvSubdiv2DEdgeOrg(edge);
dst_pt = cvSubdiv2DEdgeDst(edge);
if (org_pt && dst_pt )
org = org_pt.pt;
dst = dst_pt.pt;
iorg = cvPoint( cvRound( org.x ), cvRound( org.y ));
idst = cvPoint( cvRound( dst.x ), cvRound( dst.y ));
cvLine( img, iorg, idst, color, 1, CV_AA, 0 );
endif
endfunction
## draw the whole subdivision: voronoi edges + delaunay edges
function draw_subdiv( img, subdiv, delaunay_color, voronoi_color )
total = subdiv.edges.total;
elem_size = subdiv.edges.elem_size;
## NOTE(review): "for edge in ..." is Python syntax, not valid Octave
for edge in subdiv.edges,
edge_rot = cvSubdiv2DRotateEdge( edge, 1 );
if( CV_IS_SET_ELEM( edge ))
draw_subdiv_edge( img, edge_rot, voronoi_color );
draw_subdiv_edge( img, edge, delaunay_color );
endif
endfor
endfunction
## highlight the edges around the facet containing fp
function locate_point( subdiv, fp, img, active_color )
[res, e0, p] = cvSubdiv2DLocate( subdiv, fp );
if (e0)
e = e0
while (true)
draw_subdiv_edge( img, e, active_color );
e = cvSubdiv2DGetEdge(e,CV_NEXT_AROUND_LEFT);
if (e == e0)
break
endif
endwhile
endif
draw_subdiv_point( img, fp, active_color );
endfunction
## fill one voronoi facet with a random color
function draw_subdiv_facet( img, edge )
t = edge;
count = 0;
## count number of edges in facet
while (count == 0 || t != edge)
count+=1;
t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
endwhile
buf = []
## gather points
t = edge;
for i=0:count-1,
## NOTE(review): "assert t>4" is Python syntax, not valid Octave
assert t>4
pt = cvSubdiv2DEdgeOrg( t );
if (! pt)
break;
endif
## NOTE(review): .append/len/randint below are Python leftovers too
buf.append( cvPoint( cvRound(pt.pt.x), cvRound(pt.pt.y) ) );
t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
endfor
if( len(buf)==count )
pt = cvSubdiv2DEdgeDst( cvSubdiv2DRotateEdge( edge, 1 ));
cvFillConvexPoly( img, buf, CV_RGB(randint(0,255),randint(0,255),randint(0,255)), CV_AA, 0 );
cvPolyLine( img, [buf], 1, CV_RGB(0,0,0), 1, CV_AA, 0);
draw_subdiv_point( img, pt.pt, CV_RGB(0,0,0));
endif
endfunction
## paint the full voronoi diagram (both facets of every set edge)
function paint_voronoi( subdiv, img )
total = subdiv.edges.total;
elem_size = subdiv.edges.elem_size;
cvCalcSubdivVoronoi2D( subdiv );
## NOTE(review): "for edge in ..." is Python syntax, not valid Octave
for edge in subdiv.edges,
if( CV_IS_SET_ELEM( edge ))
## left
draw_subdiv_facet( img, cvSubdiv2DRotateEdge( edge, 1 ));
## right
draw_subdiv_facet( img, cvSubdiv2DRotateEdge( edge, 3 ));
endif
endfor
endfunction
win = "source";
rect = cvRect( 0, 0, 600, 600 );
active_facet_color = CV_RGB( 255, 0, 0 );
delaunay_color = CV_RGB( 0,0,0);
voronoi_color = CV_RGB(0, 180, 0);
bkgnd_color = CV_RGB(255,255,255);
img = cvCreateImage( cvSize(rect.width,rect.height), 8, 3 );
cvSet( img, bkgnd_color );
cvNamedWindow( win, 1 );
storage = cvCreateMemStorage(0);
subdiv = cvCreateSubdivDelaunay2D( rect, storage );
printf("Delaunay triangulation will be build now interactively.\n");
printf("To stop the process, press any key\n");
## insert up to 200 random points, redrawing after each insertion
for i=0:200-1,
fp = cvPoint2D32f( int32(rand()*(rect.width-10)+5), int32(rand()*(rect.height-10)+5) )
locate_point( subdiv, fp, img, active_facet_color );
cvShowImage( win, img );
if( cvWaitKey( 100 ) >= 0 )
break;
endif
cvSubdivDelaunay2DInsert( subdiv, fp );
cvCalcSubdivVoronoi2D( subdiv );
cvSet( img, bkgnd_color );
draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
cvShowImage( win, img );
if( cvWaitKey( 100 ) >= 0 )
break;
endif
endfor
## final display: the voronoi tessellation alone
cvSet( img, bkgnd_color );
paint_voronoi( subdiv, img );
cvShowImage( win, img );
cvWaitKey(0);
cvDestroyWindow( win );

View File

@@ -1,129 +0,0 @@
#! /usr/bin/env octave
## -------------------------------------------------------------------------
## Brightness/contrast demo: loads an image (default ../c/baboon.jpg),
## applies a brightness/contrast LUT driven by two trackbars, and shows
## the transformed image together with its gray-level histogram.
## -------------------------------------------------------------------------
cv;
highgui;
file_name = "../c/baboon.jpg";
global Gbrightness;
global Gcontrast;
global hist_size;
global ranges;
global src_image;
global dst_image;
global hist_image;
global hist;
global lut;
_brightness = 100;
_contrast = 100;
Gbrightness = 100;
Gcontrast = 100;
hist_size = 64;
range_0={0,256};
ranges = { range_0 };
src_image=[];
dst_image=[];
hist_image=[];
hist=[];
lut=cvCreateMat(256,1,CV_8U);
## brightness/contrast callback function
function update_brightness( val )
global Gbrightness # global declaration required so the script-level value is updated
Gbrightness = val;
update_brightcont( );
endfunction
function update_contrast( val )
global Gcontrast; # global declaration required so the script-level value is updated
Gcontrast = val;
update_brightcont( );
endfunction
## recompute the LUT from the current trackbar values, apply it, and
## redraw both the image and the histogram
function update_brightcont()
global Gbrightness;
global Gcontrast;
global hist_size;
global ranges;
global src_image;
global dst_image;
global hist_image;
global hist;
global lut;
global cvCalcHist; # use cv namespace for these instead
global cvZero;
global cvScale;
brightness = Gbrightness - 100;
contrast = Gcontrast - 100;
max_value = 0;
## The algorithm is by Werner D. Streidt
## (http://visca.com/ffactory/archives/5-99/msg00021.html)
if( contrast > 0 )
delta = 127.*contrast/100;
a = 255./(255. - delta*2);
b = a*(brightness - delta);
else
delta = -128.*contrast/100;
a = (256.-delta*2)/255.;
b = a*brightness + delta;
endif
for i=0:256-1,
v = cvRound(a*i + b);
if( v < 0 )
v = 0;
endif
if( v > 255 )
v = 255;
endif
## NOTE(review): Octave matrices are 1-based; lut(0) on the first
## iteration is an index error — presumably this should be lut(i+1).
lut(i) = v;
endfor
cvLUT( src_image, dst_image, lut );
cvShowImage( "image", dst_image );
cvCalcHist( dst_image, hist, 0, [] );
cvZero( dst_image );
[min_value, max_value] = cvGetMinMaxHistValue( hist );
## scale bins so the tallest bar spans the histogram image height
cvScale( hist.bins, hist.bins, double(hist_image.height)/max_value, 0 );
##cvNormalizeHist( hist, 1000 );
cvSet( hist_image, cvScalarAll(255));
bin_w = cvRound(double(hist_image.width)/hist_size);
for i=0:hist_size-1,
cvRectangle( hist_image, cvPoint(i*bin_w, hist_image.height), cvPoint((i+1)*bin_w, hist_image.height - cvRound(cvGetReal1D(hist.bins,i))), cvScalarAll(0), -1, 8, 0 );
endfor
cvShowImage( "histogram", hist_image );
endfunction
## Load the source image. HighGUI use.
if size(argv, 1)>1
file_name = argv(){1}
endif
src_image = cvLoadImage( file_name, 0 );
if (!swig_this(src_image))
printf("Image was not loaded.\n");
exit(-1);
endif
dst_image = cvCloneImage(src_image);
hist_image = cvCreateImage(cvSize(320,200), 8, 1);
hist = cvCreateHist({hist_size}, CV_HIST_ARRAY, ranges, 1);
cvNamedWindow("image", 0);
cvNamedWindow("histogram", 0);
cvCreateTrackbar("brightness", "image", _brightness, 200, @update_brightness);
cvCreateTrackbar("contrast", "image", _contrast, 200, @update_contrast);
update_brightcont();
cvWaitKey(0);

View File

@@ -1,115 +0,0 @@
#! /usr/bin/env octave
## -------------------------------------------------------------------------
## DFT demo: loads a grayscale image, computes its 2D DFT (padded to an
## optimal size), and displays log(1 + |spectrum|) with the origin shifted
## to the image center.
## -------------------------------------------------------------------------
cv;
highgui;
## Rearrange the quadrants of Fourier image so that the origin is at
## the image center
## src & dst arrays of equal size & type
function cvShiftDFT(src_arr, dst_arr )
size = cvGetSize(src_arr);
dst_size = cvGetSize(dst_arr);
if(dst_size.width != size.width || \
dst_size.height != size.height)
cvError( CV_StsUnmatchedSizes, "cvShiftDFT", \
"Source and Destination arrays must have equal sizes", \
__FILE__, __LINE__ );
endif
## a scratch quadrant is only needed for the in-place case below
if(swig_this(src_arr) == swig_this(dst_arr))
tmp = cvCreateMat(size.height/2, size.width/2, cvGetElemType(src_arr));
endif
cx = size.width/2;
cy = size.height/2; # image center
## source quadrants (q1..q4) and destination quadrants (d1..d4)
q1 = cvGetSubRect( src_arr, cvRect(0,0,cx, cy) );
q2 = cvGetSubRect( src_arr, cvRect(cx,0,cx,cy) );
q3 = cvGetSubRect( src_arr, cvRect(cx,cy,cx,cy) );
q4 = cvGetSubRect( src_arr, cvRect(0,cy,cx,cy) );
## NOTE(review): d1..d4 are taken from src_arr, not dst_arr — for the
## out-of-place branch below this writes back into the source; verify
## against the original C sample, which uses dst_arr here.
d1 = cvGetSubRect( src_arr, cvRect(0,0,cx,cy) );
d2 = cvGetSubRect( src_arr, cvRect(cx,0,cx,cy) );
d3 = cvGetSubRect( src_arr, cvRect(cx,cy,cx,cy) );
d4 = cvGetSubRect( src_arr, cvRect(0,cy,cx,cy) );
if(swig_this(src_arr) != swig_this(dst_arr))
if( !CV_ARE_TYPES_EQ( q1, d1 ))
cvError( CV_StsUnmatchedFormats, \
"cvShiftDFT", "Source and Destination arrays must have the same format", \
__FILE__, __LINE__ );
endif
## swap quadrants diagonally: q3->d1, q4->d2, q1->d3, q2->d4
cvCopy(q3, d1);
cvCopy(q4, d2);
cvCopy(q1, d3);
cvCopy(q2, d4);
else
## in-place diagonal swap via the tmp quadrant
cvCopy(q3, tmp);
cvCopy(q1, q3);
cvCopy(tmp, q1);
cvCopy(q4, tmp);
cvCopy(q2, q4);
cvCopy(tmp, q2);
endif
endfunction
im = cvLoadImage( argv(){1}, CV_LOAD_IMAGE_GRAYSCALE);
realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
cvScale(im, realInput, 1.0, 0.0);
cvZero(imaginaryInput);
cvMerge(realInput, imaginaryInput, [], [], complexInput);
dft_M = cvGetOptimalDFTSize( im.height - 1 );
dft_N = cvGetOptimalDFTSize( im.width - 1 );
dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
## copy A to dft_A and pad dft_A with zeros
tmp = cvGetSubRect( dft_A, cvRect(0,0, im.width, im.height));
cvCopy( complexInput, tmp, [] );
if(dft_A.width > im.width)
tmp = cvGetSubRect( dft_A, cvRect(im.width,0, dft_N - im.width, im.height));
cvZero( tmp );
endif
## no need to pad bottom part of dft_A with zeros because of
## use nonzero_rows parameter in cvDFT() call below
cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput.height );
cvNamedWindow("win", 0);
cvNamedWindow("magnitude", 0);
cvShowImage("win", im);
## Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, [], [] );
## Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, []);
cvPow( image_Re, image_Re, 0.5 );
## Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, [] ); # 1 + Mag
cvLog( image_Re, image_Re ); # log(1 + Mag)
## Rearrange the quadrants of Fourier image so that the origin is at
## the image center
cvShiftDFT( image_Re, image_Re );
## normalize to [0,1] for display
[min, max] = cvMinMaxLoc(image_Re);
cvScale(image_Re, image_Re, 1.0/(max-min), 1.0*(-min)/(max-min));
cvShowImage("magnitude", image_Re);
cvWaitKey(-1);

View File

@@ -1,76 +0,0 @@
#! /usr/bin/env octave
## -------------------------------------------------------------------------
## Distance-transform demo: thresholds a grayscale image at a trackbar-
## controlled level, computes the L2 distance transform of the result,
## and displays it with a two-channel false-color mapping.
## -------------------------------------------------------------------------
cv;
highgui;
## all shared state lives in the struct g so the trackbar callback can
## reach it through a single global
global g;
g.wndname = "Distance transform";
g.tbarname = "Threshold";
## The output images
g.dist = 0;
g.dist8u1 = 0;
g.dist8u2 = 0;
g.dist8u = 0;
g.dist32s = 0;
g.gray = 0;
g.edge = 0;
## define a trackbar callback
function on_trackbar( edge_thresh )
global g;
global cv;
cvThreshold( g.gray, g.edge, double(edge_thresh), double(edge_thresh), cv.CV_THRESH_BINARY );
## Distance transform
cvDistTransform( g.edge, g.dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5, [], [] );
## amplify and compress the range for display
cvConvertScale( g.dist, g.dist, 5000.0, 0 );
cvPow( g.dist, g.dist, 0.5 );
cvConvertScale( g.dist, g.dist32s, 1.0, 0.5 );
cvAndS( g.dist32s, cvScalarAll(255), g.dist32s, [] );
cvConvertScale( g.dist32s, g.dist8u1, 1, 0 );
## second channel is the inverted distance
cvConvertScale( g.dist32s, g.dist32s, -1, 0 );
cvAddS( g.dist32s, cvScalarAll(255), g.dist32s, [] );
cvConvertScale( g.dist32s, g.dist8u2, 1, 0 );
cvMerge( g.dist8u1, g.dist8u2, g.dist8u2, [], g.dist8u );
cvShowImage( g.wndname, g.dist8u );
endfunction
edge_thresh = 100;
filename = "../c/stuff.jpg";
if (size(argv, 1) > 1)
filename = argv(){1};
endif
g.gray = cvLoadImage( filename, 0 );
if (!swig_this(g.gray))
printf("Failed to load %s\n",filename);
exit(-1);
endif
## Create the output image
g.dist = cvCreateImage( cvSize(g.gray.width,g.gray.height), IPL_DEPTH_32F, 1 );
g.dist8u1 = cvCloneImage( g.gray );
g.dist8u2 = cvCloneImage( g.gray );
g.dist8u = cvCreateImage( cvSize(g.gray.width,g.gray.height), IPL_DEPTH_8U, 3 );
g.dist32s = cvCreateImage( cvSize(g.gray.width,g.gray.height), IPL_DEPTH_32S, 1 );
## Convert to grayscale
g.edge = cvCloneImage( g.gray );
## Create a window
cvNamedWindow( g.wndname, 1 );
## create a toolbar
cvCreateTrackbar( g.tbarname, g.wndname, edge_thresh, 255, @on_trackbar );
## Show the image
on_trackbar(edge_thresh);
## Wait for a key stroke; the same function arranges events processing
cvWaitKey(0);

View File

@@ -1,161 +0,0 @@
#! /usr/bin/env octave
printf("OpenCV Octave version of drawing\n");
## -------------------------------------------------------------------------
## Drawing demo: exercises the OpenCV drawing API by rendering random
## lines, rectangles, ellipses, polylines, filled polygons, circles and
## text, then fades to an "OpenCV forever!" banner.
## -------------------------------------------------------------------------
## import the necessary things for OpenCV
cv;
highgui;
## return a random fully-saturated BGR color
function ret=random_color ()
ret = CV_RGB(int32(rand()*255), int32(rand()*255), int32(rand()*255));
endfunction
## some "constants"
width = 1000;
height = 700;
window_name = "Drawing Demo";
number = 100;
delay = 5;
line_type = cv.CV_AA; # change it to 8 to see non-antialiased graphics
## create the source image
image = cv.cvCreateImage (cv.cvSize (width, height), 8, 3);
## create window and display the original picture in it
highgui.cvNamedWindow (window_name, 1);
cv.cvSetZero (image);
highgui.cvShowImage (window_name, image);
## draw some lines
## (endpoints range over [-width, width] x [-height, height], so some
## primitives are deliberately clipped by the image border)
for i=0:number-1,
pt1 = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
pt2 = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
cv.cvLine (image, pt1, pt2,
random_color (),
int32(rand() * 10),
line_type, 0);
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## draw some rectangles
for i=0:number-1,
pt1 = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
pt2 = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
cv.cvRectangle (image, pt1, pt2,
random_color (),
int32(rand() * 10 - 1),
line_type, 0);
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## draw some ellipes
for i=0:number-1,
pt1 = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
sz = cv.cvSize (int32(rand() * 200),
int32(rand() * 200));
angle = rand() * 1000 * 0.180;
cv.cvEllipse (image, pt1, sz, angle, angle - 100, angle + 200,
random_color (),
int32(rand() * 10 - 1),
line_type, 0);
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## init the list of polylines
nb_polylines = 2;
polylines_size = 3;
pt = cell(1, nb_polylines);
for a=1:nb_polylines,
pt{a} = cell(1,polylines_size);
endfor
## draw some polylines
for i=0:number-1,
for a=1:nb_polylines,
for b=1:polylines_size,
pt {a}{b} = cv.cvPoint (int32(rand() * 2 * width - width), \
int32(rand() * 2 * height - height));
endfor
endfor
cv.cvPolyLine (image, pt, 1, random_color(), int32(rand() * 8 + 1), line_type, 0);
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## draw some filled polylines
for i=0:number-1,
for a=1:nb_polylines,
for b=1:polylines_size,
pt {a}{b} = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
endfor
endfor
cv.cvFillPoly (image, pt, random_color (), line_type, 0);
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## draw some circles
for i=0:number-1,
pt1 = cv.cvPoint (int32(rand() * 2 * width - width),
int32(rand() * 2 * height - height));
cv.cvCircle (image, pt1, int32(rand() * 300), random_color (), \
int32(rand() * 10 - 1), line_type, 0);
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## draw some text
for i=0:number-1,
pt1 = cv.cvPoint (int32(rand() * 2 * width - width), \
int32(rand() * 2 * height - height));
font = cv.cvInitFont (int32(rand() * 8), \
rand() * 100 * 0.05 + 0.01, \
rand() * 100 * 0.05 + 0.01, \
rand() * 5 * 0.1, \
int32(rand() * 10), \
line_type);
cv.cvPutText (image, "Testing text rendering!", \
pt1, font, \
random_color ());
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## prepare a text, and get it's properties
font = cv.cvInitFont (cv.CV_FONT_HERSHEY_COMPLEX, \
3, 3, 0.0, 5, line_type);
[text_size, ymin] = cv.cvGetTextSize ("OpenCV forever!", font);
## center the banner text
pt1.x = int32((width - text_size.width) / 2);
pt1.y = int32((height + text_size.height) / 2);
image2 = cv.cvCloneImage(image);
## now, draw some OpenCV pub ;-)
## fade the background toward black while the banner brightens
for i=0:255-1,
cv.cvSubS (image2, cv.cvScalarAll (i), image, []);
cv.cvPutText (image, "OpenCV forever!",
pt1, font, cv.cvScalar (255, i, i));
highgui.cvShowImage (window_name, image);
highgui.cvWaitKey (delay);
endfor
## wait some key to end
highgui.cvWaitKey (0);

View File

@@ -1,69 +0,0 @@
#! /usr/bin/env octave
printf("OpenCV Octave version of edge\n");
## -------------------------------------------------------------------------
## Edge demo: loads an image (default ../c/fruits.jpg), runs the Canny
## edge detector with a trackbar-controlled threshold, and shows the
## original colors copied through the edge mask.
## -------------------------------------------------------------------------
global g;
## import the necessary things for OpenCV
cv;
highgui;
## some definitions
g.win_name = "Edge";
g.trackbar_name = "Threshold";
## the callback on the trackbar
function on_trackbar (position)
global g;
global cv;
## NOTE(review): g.edge is overwritten by cvNot and then again by
## cvCanny below, so the cvSmooth result is discarded; in the original
## C sample the blur feeds Canny — worth confirming against it.
cv.cvSmooth (g.gray, g.edge, cv.CV_BLUR, 3, 3, 0);
cv.cvNot (g.gray, g.edge);
## run the edge dector on gray scale
cv.cvCanny (g.gray, g.edge, position, position * 3, 3);
## reset
cv.cvSetZero (g.col_edge);
## copy edge points
cv.cvCopy (g.image, g.col_edge, g.edge);
## show the image
highgui.cvShowImage (g.win_name, g.col_edge);
endfunction
filename = "../c/fruits.jpg";
if (size(argv, 1)>1)
filename = argv(){1};
endif
## load the image gived on the command line
g.image = highgui.cvLoadImage (filename);
if (!swig_this(g.image))
printf("Error loading image '%s'",filename);
exit(-1);
endif
## create the output image
g.col_edge = cv.cvCreateImage (cv.cvSize (g.image.width, g.image.height), 8, 3);
## convert to grayscale
g.gray = cv.cvCreateImage (cv.cvSize (g.image.width, g.image.height), 8, 1);
g.edge = cv.cvCreateImage (cv.cvSize (g.image.width, g.image.height), 8, 1);
cv.cvCvtColor (g.image, g.gray, cv.CV_BGR2GRAY);
## create the window
highgui.cvNamedWindow (g.win_name, highgui.CV_WINDOW_AUTOSIZE);
## create the trackbar
highgui.cvCreateTrackbar (g.trackbar_name, g.win_name, 1, 100, @on_trackbar);
## show the image
on_trackbar (0);
## wait a key pressed to end
highgui.cvWaitKey (0);

View File

@@ -1,143 +0,0 @@
#! /usr/bin/env octave
## This program is demonstration for face and object detection using haar-like features.
## The program finds faces in a camera image or video stream and displays a red box around them.
## Original C implementation by: ?
## Python implementation by: Roman Stanchak
## Octave implementation by: Xavier Delacour
## NOTE(review): hard-coded developer paths below; they will fail on any
## other machine -- confirm whether they should be made configurable.
addpath("/home/x/opencv2/interfaces/swig/octave");
source("/home/x/opencv2/interfaces/swig/octave/PKG_ADD_template");
debug_on_error(true);
debug_on_warning(true);
crash_dumps_octave_core (0)
cv;
highgui;
## Global Variables
global g;
g.cascade = [];
g.storage = cvCreateMemStorage(0);
g.cascade_name = "../../data/haarcascades/haarcascade_frontalface_alt.xml";
g.input_name = "../c/lena.jpg";
## Parameters for haar detection
## From the API:
## The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
## for accurate yet slow object detection. For a faster operation on real video
## images the settings are:
## scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
## min_size=<minimum possible face size
g.min_size = cvSize(20,20);
g.image_scale = 1.3;
g.haar_scale = 1.2;
g.min_neighbors = 2;
g.haar_flags = 0;
## Run the Haar cascade on `img` (downscaled by g.image_scale for speed)
## and draw a red rectangle around every detection, then show the result.
function detect_and_draw( img )
global g;
global cv;
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 );
small_img = cvCreateImage( cvSize( cvRound (img.width/g.image_scale),
cvRound (img.height/g.image_scale)), 8, 1 );
cvCvtColor( img, gray, cv.CV_BGR2GRAY );
cvResize( gray, small_img, cv.CV_INTER_LINEAR );
## improve contrast before detection
cvEqualizeHist( small_img, small_img );
cvClearMemStorage( g.storage );
if( swig_this(g.cascade) )
## tic/toc print the per-frame detection time
tic
faces = cvHaarDetectObjects( small_img, g.cascade, g.storage,
g.haar_scale, g.min_neighbors, g.haar_flags, g.min_size );
toc
if (swig_this(faces))
for r = CvSeq_map(faces),
r = r{1};
## scale the detection rectangle back to the original image size
pt1 = cvPoint( int32(r.x*g.image_scale), int32(r.y*g.image_scale));
pt2 = cvPoint( int32((r.x+r.width)*g.image_scale), int32((r.y+r.height)*g.image_scale) );
cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
endfor
endif
endif
cvShowImage( "result", img );
endfunction
## --- command-line parsing ---
## Fixed: argv() yields a cell array of arguments (one per entry); the old
## code measured size(argv, 2), which is 1 for any non-empty column cell,
## so the "--cascade" branch could never execute. numel() counts the
## arguments regardless of the cell's orientation.
if (numel(argv) > 0 && (strcmp(argv(){1}, "--help") || strcmp(argv(){1}, "-h")))
printf("Usage: facedetect --cascade \"<cascade_path>\" [filename|camera_index]\n");
exit(-1);
endif
if (numel(argv) >= 2)
if (strcmp(argv(){1},"--cascade"))
g.cascade_name = argv(){2};
if (numel(argv) >= 3)
g.input_name = argv(){3};
endif
endif
elseif (numel(argv) == 1)
g.input_name = argv(){1};
endif
## the OpenCV API says this function is obsolete, but we can't
## cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
## the size parameter is ignored
g.cascade = cvLoadHaarClassifierCascade( g.cascade_name, cvSize(1,1) );
if (!swig_this(g.cascade))
printf("ERROR: Could not load classifier cascade\n");
exit(-1);
endif
## echo the input source (no semicolon -> prints the value)
g.input_name
## an all-digit input names a camera index, anything else a video file
if (all(isdigit(g.input_name)))
capture = cvCreateCameraCapture( sscanf(g.input_name, "%i") );
else
capture = cvCreateFileCapture( g.input_name );
endif
capture
cvNamedWindow( "result", 1 );
if( swig_this(capture) )
frame_copy = [];
while (true)
frame = cvQueryFrame( capture );
if( ! swig_this(frame) )
cvWaitKey(0);
endif
## lazily allocate the working copy on the first frame
if( !swig_this(frame_copy) )
frame_copy = cvCreateImage( cvSize(frame.width,frame.height),
IPL_DEPTH_8U, frame.nChannels );
endif
## normalize the frame origin to top-left
if( frame.origin == IPL_ORIGIN_TL )
cvCopy( frame, frame_copy );
else
cvFlip( frame, frame_copy, 0 );
endif
detect_and_draw( frame_copy );
## ESC quits the capture loop
if( cvWaitKey( 10 ) == 27 )
break;
endif
endwhile
else
## no capture source: fall back to detecting on the still image
image = cvLoadImage( g.input_name, 1 );
if( swig_this(image) )
detect_and_draw( image );
cvWaitKey(0);
endif
endif
cvDestroyWindow("result");

View File

@@ -1,179 +0,0 @@
#! /usr/bin/env octave
## Interactive flood-fill demo: click seeds a fill, keys switch modes.
cv;
highgui;
## state shared with the trackbar/mouse callbacks
global g;
g.color_img0 = [];
g.mask = [];
g.color_img = [];
g.gray_img0 = [];
g.gray_img = [];
## 0 = simple fill, 1 = fixed range, 2 = floating range
g.ffill_case = 1;
## NOTE(review): missing semicolon -- this echoes the value at startup;
## confirm whether that is intentional.
g.lo_diff = 20
g.up_diff = 20;
g.connectivity = 4;
g.is_color = 1;
g.is_mask = 0;
g.new_mask_val = 255;
## Uniform pseudo-random integer drawn from [v1, v2] (inclusive bounds).
function ret = randint(v1, v2)
span = v2 - v1;
ret = int32(v1 + span * rand());
end
## Trackbar callback: store the new lower color-difference threshold.
function update_lo( pos )
## Fixed: without this declaration the assignment below created a
## function-local `g`, so moving the trackbar never changed the
## shared state read by on_mouse.
global g;
g.lo_diff = pos;
endfunction
## Trackbar callback: store the new upper color-difference threshold.
function update_up( pos )
## Fixed: without this declaration the assignment below created a
## function-local `g`, so moving the trackbar never changed the
## shared state read by on_mouse.
global g;
g.up_diff = pos;
endfunction
## Mouse callback: a left click flood-fills from the clicked pixel using
## the current mode (g.ffill_case), thresholds (g.lo_diff/g.up_diff),
## connectivity and mask settings, then refreshes the display.
function on_mouse( event, x, y, flags, param )
global g;
global cv;
global highgui;
if( !swig_this(g.color_img) )
return;
endif
if (event == highgui.CV_EVENT_LBUTTONDOWN)
comp = cv.CvConnectedComp();
my_mask = [];
seed = cvPoint(x,y);
if (g.ffill_case==0)
## simple fill: zero tolerances
lo = 0;
up = 0;
flags = g.connectivity + bitshift(g.new_mask_val,8);
else
lo = g.lo_diff;
up = g.up_diff;
## fixed-range flag: compare to the seed pixel, not to neighbors
flags = g.connectivity + bitshift(g.new_mask_val,8) + \
cv.CV_FLOODFILL_FIXED_RANGE;
endif
color = CV_RGB( randint(0,255), randint(0,255), randint(0,255) );
if( g.is_mask )
my_mask = g.mask;
cvThreshold( g.mask, g.mask, 1, 128, cv.CV_THRESH_BINARY );
endif
if( g.is_color )
cv.cvFloodFill( g.color_img, seed, color, cv.CV_RGB( lo, lo, lo ),
CV_RGB( up, up, up ), comp, flags, my_mask );
cvShowImage( "image", g.color_img );
else
## NOTE(review): r, g, b are not defined here (g is the global state
## struct), so this grayscale branch appears broken -- confirm against
## the original C/Python sample before relying on it.
brightness = cvRealScalar((r*2 + g*7 + b + 5)/10);
cvFloodFill( g.gray_img, seed, brightness, cvRealScalar(lo),
cvRealScalar(up), comp, flags, my_mask );
cvShowImage( "image", g.gray_img );
endif
printf("%i pixels were repainted\n", comp.area);
if( g.is_mask )
cvShowImage( "mask", g.mask );
endif
endif
endfunction
## --- script entry: load image, build buffers, run the key-command loop ---
filename = "../c/fruits.jpg";
if (size(argv, 1)>0)
filename=argv(){1};
endif
g.color_img0 = cvLoadImage(filename,1);
if (!swig_this(g.color_img0))
printf("Could not open %s\n",filename);
exit(-1);
endif
printf("Hot keys:\n");
printf("\tESC - quit the program\n");
printf("\tc - switch color/grayscale mode\n");
printf("\tm - switch mask mode\n");
printf("\tr - restore the original image\n");
printf("\ts - use null-range floodfill\n");
printf("\tf - use gradient floodfill with fixed(absolute) range\n");
printf("\tg - use gradient floodfill with floating(relative) range\n");
printf("\t4 - use 4-g.connectivity mode\n");
printf("\t8 - use 8-g.connectivity mode\n");
g.color_img = cvCloneImage( g.color_img0 );
g.gray_img0 = cvCreateImage( cvSize(g.color_img.width, g.color_img.height), 8, 1 );
cvCvtColor( g.color_img, g.gray_img0, CV_BGR2GRAY );
g.gray_img = cvCloneImage( g.gray_img0 );
## flood-fill mask must be 2 pixels larger than the image (API requirement)
g.mask = cvCreateImage( cvSize(g.color_img.width + 2, g.color_img.height + 2), 8, 1 );
cvNamedWindow( "image", 1 );
cvCreateTrackbar( "g.lo_diff", "image", g.lo_diff, 255, @update_lo);
cvCreateTrackbar( "g.up_diff", "image", g.up_diff, 255, @update_up);
cvSetMouseCallback( "image", @on_mouse );
## key-dispatch loop: each iteration redraws, then handles one key press
while (true)
if( g.is_color )
cvShowImage( "image", g.color_img );
else
cvShowImage( "image", g.gray_img );
endif
c = cvWaitKey(0);
if (c==27)
printf("Exiting ...\n");
exit(0)
elseif (c=='c')
if( g.is_color )
print("Grayscale mode is set");
cvCvtColor( g.color_img, g.gray_img, CV_BGR2GRAY );
g.is_color = 0;
else
print("Color mode is set");
cvCopy( g.color_img0, g.color_img, [] );
cvZero( g.mask );
g.is_color = 1;
endif
elseif (c=='m')
if( g.is_mask )
cvDestroyWindow( "mask" );
g.is_mask = 0;
else
cvNamedWindow( "mask", 0 );
cvZero( g.mask );
cvShowImage( "mask", g.mask );
g.is_mask = 1;
endif
elseif (c=='r')
printf("Original image is restored");
cvCopy( g.color_img0, g.color_img, [] );
cvCopy( g.gray_img0, g.gray_img, [] );
cvZero( g.mask );
elseif (c=='s')
printf("Simple floodfill mode is set");
g.ffill_case = 0;
elseif (c=='f')
printf("Fixed Range floodfill mode is set");
g.ffill_case = 1;
elseif (c=='g')
printf("Gradient (floating range) floodfill mode is set");
g.ffill_case = 2;
elseif (c=='4')
printf("4-g.connectivity mode is set");
g.connectivity = 4;
elseif (c=='8')
printf("8-g.connectivity mode is set");
g.connectivity = 8;
endif
endwhile

View File

@@ -1,123 +0,0 @@
#! /usr/bin/env octave
## This program is demonstration for ellipse fitting. Program finds
## contours and approximate it by ellipses.
## Trackbar specify threshold parametr.
## White lines is contours. Red lines is fitting ellipses.
## Original C implementation by: Denis Burenkov.
## Python implementation by: Roman Stanchak
## Octave implementation by: Xavier Delacour
cv;
highgui;
## images shared with the trackbar callback:
## image03 = grayscale source, image02 = thresholded work buffer,
## image04 = output (contours + fitted ellipses)
global g;
g.image02 = [];
g.image03 = [];
g.image04 = [];
## Trackbar callback: threshold the source at `slider_pos`, extract all
## contours, and draw each contour (white) with its fitted ellipse (blue).
function process_image( slider_pos )
global g;
global cv;
global highgui;
## Define trackbar callback functon. This function find contours,
## draw it and approximate it by ellipses.
stor = cv.cvCreateMemStorage(0);
## Threshold the source image. This needful for cv.cvFindContours().
cv.cvThreshold( g.image03, g.image02, slider_pos, 255, cv.CV_THRESH_BINARY );
## Find all contours.
[nb_contours, cont] = cv.cvFindContours (g.image02,stor,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint (0,0));
## Clear images. IPL use.
cv.cvZero(g.image02);
cv.cvZero(g.image04);
## This cycle draw all contours and approximate it by ellipses.
for c = cv.CvSeq_hrange(cont),
c = c{1};
count = c.total; # This is number point in contour
## Number point must be more than or equal to 6 (for cv.cvFitEllipse_32f).
if( count < 6 )
continue;
endif
## Alloc memory for contour point set.
PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2);
PointArray2D32f= cv.cvCreateMat( 1, count, cv.CV_32FC2);
## Get contour point set.
cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX));
## Convert CvPoint set to CvBox2D32f set.
cv.cvConvert( PointArray, PointArray2D32f );
box = cv.CvBox2D();
## Fits ellipse to current contour.
box = cv.cvFitEllipse2(PointArray2D32f);
## Draw current contour.
cv.cvDrawContours(g.image04, c, cv.CV_RGB(255,255,255), cv.CV_RGB(255,255,255),0,1,8,cv.cvPoint(0,0));
## Convert ellipse data from float to integer representation.
center = cv.CvPoint();
size = cv.CvSize();
center.x = cv.cvRound(box.center.x);
center.y = cv.cvRound(box.center.y);
## cvEllipse takes half-axes, hence the 0.5 factor
size.width = cv.cvRound(box.size.width*0.5);
size.height = cv.cvRound(box.size.height*0.5);
box.angle = -box.angle;
## Draw ellipse.
cv.cvEllipse(g.image04, center, size,box.angle, 0, 360,cv.CV_RGB(0,0,255), 1, cv.CV_AA, 0);
endfor
## Show image. HighGUI use.
highgui.cvShowImage( "Result", g.image04 );
endfunction
## --- script entry: load grayscale image, create windows and trackbar ---
argc = size(argv, 1);
filename = "../c/stuff.jpg";
## NOTE(review): argc counts the script's arguments, so `== 2` likely
## never matches a single filename argument -- sibling samples test >= 1;
## confirm intended argv() semantics.
if(argc == 2)
filename = argv(){1};
endif
slider_pos = 70;
## load image and force it to be grayscale
g.image03 = highgui.cvLoadImage(filename, 0);
if (!swig_this( g.image03))
printf("Could not load image %s\n", filename);
exit(-1);
endif
## Create the destination images
g.image02 = cv.cvCloneImage( g.image03 );
g.image04 = cv.cvCloneImage( g.image03 );
## Create windows.
highgui.cvNamedWindow("Source", 1);
highgui.cvNamedWindow("Result", 1);
## Show the image.
highgui.cvShowImage("Source", g.image03);
## Create toolbars. HighGUI use.
highgui.cvCreateTrackbar( "Threshold", "Result", slider_pos, 255, @process_image );
process_image( 1 );
## Wait for a key stroke; the same function arranges events processing
printf("Press any key to exit\n");
highgui.cvWaitKey(0);
highgui.cvDestroyWindow("Source");
highgui.cvDestroyWindow("Result");

View File

@@ -1,62 +0,0 @@
#! /usr/bin/env octave
## This is a standalone program. Pass an image name as a first parameter of the program.
## Hough line detection demo: Canny edges, then either the standard
## (rho/theta) or the probabilistic (segment endpoints) Hough transform.
cv;
highgui;
## toggle between CV_HOUGH_STANDARD and CV_HOUGH_PROBILISTIC
USE_STANDARD=0;
filename = "../../docs/ref/pics/building.jpg"
if (size(argv, 1)>=1)
filename = argv(){1};
endif
## load as grayscale (flag 0)
src=cvLoadImage(filename, 0);
if (!swig_this(src))
printf("Error opening image %s\n",filename);
exit(-1);
endif
dst = cvCreateImage( cvGetSize(src), 8, 1 );
color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
storage = cvCreateMemStorage(0);
lines = 0;
cvCanny( src, dst, 50, 200, 3 );
cvCvtColor( dst, color_dst, CV_GRAY2BGR );
if (USE_STANDARD)
## standard transform returns (rho, theta) pairs; convert each to a
## long segment through the image for display
lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );
for i=0:min(lines.total, 100)-1,
line = lines{i};
rho = line{0};
theta = line{1};
pt1 = CvPoint();
pt2 = CvPoint();
a = cos(theta);
b = sin(theta);
x0 = a*rho;
y0 = b*rho;
## extend 1000 px in both directions along the line
pt1.x = cvRound(x0 + 1000*(-b));
pt1.y = cvRound(y0 + 1000*(a));
pt2.x = cvRound(x0 - 1000*(-b));
pt2.y = cvRound(y0 - 1000*(a));
cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
endfor
else
## probabilistic transform returns finished segments directly
lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 50, 50, 10 );
for line = CvSeq_map(lines),
line = line{1};
cvLine( color_dst, line{0}, line{1}, CV_RGB(255,0,0), 3, 8 );
endfor
endif
cvNamedWindow( "Source", 1 );
cvShowImage( "Source", src );
cvNamedWindow( "Hough", 1 );
cvShowImage( "Hough", color_dst );
cvWaitKey(0);

View File

@@ -1,84 +0,0 @@
#! /usr/bin/env octave
## Inpainting demo: paint over the image with the mouse, then press
## 'i'/ENTER to reconstruct the painted-over region.
cv;
highgui;
global g;
## state shared with the mouse callback
## Fixed: was a stray local `inpaint_mask = []`; every other reference
## (on_mouse and the main script) uses the g.inpaint_mask field, so
## initialize that field instead.
g.inpaint_mask = [];
g.img0 = [];
g.img = [];
g.inpainted = [];
g.prev_pt = cvPoint(-1,-1);
## Mouse callback: while the left button is held, draw a thick white
## stroke on both the image and the inpainting mask.
function on_mouse( event, x, y, flags, param )
global g;
global cv;
global highgui;
if (!swig_this(g.img))
return;
endif
if (event == highgui.CV_EVENT_LBUTTONUP || ! (bitand(flags,highgui.CV_EVENT_FLAG_LBUTTON)))
## stroke finished (or button not held): forget the previous point
g.prev_pt = cvPoint(-1,-1);
elseif (event == highgui.CV_EVENT_LBUTTONDOWN)
g.prev_pt = cvPoint(x,y);
elseif (event == highgui.CV_EVENT_MOUSEMOVE && bitand(flags,highgui.CV_EVENT_FLAG_LBUTTON))
pt = cvPoint(x,y);
if (g.prev_pt.x < 0)
g.prev_pt = pt;
endif
## draw the segment on the mask (for inpainting) and on the image (for display)
cvLine( g.inpaint_mask, g.prev_pt, pt, cvScalarAll(255), 5, 8, 0 );
cvLine( g.img, g.prev_pt, pt, cvScalarAll(255), 5, 8, 0 );
g.prev_pt = pt;
cvShowImage( "image", g.img );
endif
endfunction
## --- script entry: load image, create windows, handle hot keys ---
filename = "../c/fruits.jpg";
if (size(argv, 1)>=1)
filename = argv(){1};
endif
g.img0 = cvLoadImage(filename,-1);
if (!swig_this(g.img0))
printf("Can't open image '%s'\n", filename);
exit(1);
endif
printf("Hot keys:\n");
printf("\tESC - quit the program\n");
printf("\tr - restore the original image\n");
printf("\ti or ENTER - run inpainting algorithm\n");
printf("\t\t(before running it, paint something on the image)\n");
cvNamedWindow( "image", 1 );
g.img = cvCloneImage( g.img0 );
g.inpainted = cvCloneImage( g.img0 );
g.inpaint_mask = cvCreateImage( cvGetSize(g.img), 8, 1 );
cvZero( g.inpaint_mask );
cvZero( g.inpainted );
cvShowImage( "image", g.img );
## NOTE(review): no "watershed transform" window is ever created -- this
## cvShowImage looks like a leftover from the watershed sample; confirm.
cvShowImage( "watershed transform", g.inpainted );
cvSetMouseCallback( "image", @on_mouse, [] );
while (true)
c = cvWaitKey(0);
if( c == 27 || c=='q')
break;
endif
if( c == 'r' )
## restore: clear the mask and redraw the pristine image
cvZero( g.inpaint_mask );
cvCopy( g.img0, g.img );
cvShowImage( "image", g.img );
endif
if( c == 'i' || c == '\012' )
cvNamedWindow( "g.inpainted image", 1 );
cvInpaint( g.img, g.inpaint_mask, g.inpainted, 3, CV_INPAINT_TELEA );
cvShowImage( "g.inpainted image", g.inpainted );
endif
endwhile

View File

@@ -1,102 +0,0 @@
#! /usr/bin/env octave
## Tracking of rotating point.
## Rotation speed is constant.
## Both state and measurements vectors are 1D (a point angle),
## Measurement is the real point angle + gaussian noise.
## The real and the estimated points are connected with yellow line segment,
## the real and the measured points are connected with red line segment.
## (if Kalman filter works correctly,
## the yellow segment should be shorter than the red one).
## Pressing any key (except ESC) will reset the tracking with a different speed.
## Pressing ESC will stop the program.
cv;
highgui;
global img;
## Map an angle (radians) to a point on a circle centered in `img`,
## with radius img.width/3.
function ret=calc_point(angle)
global img;
ret=cvPoint( cvRound(img.width/2 + img.width/3*cos(angle)), \
cvRound(img.height/2 - img.width/3*sin(angle)));
endfunction
## Draw an X-shaped cross of half-size `d` at `center` on the global img.
function draw_cross( center, color, d )
global img;
global CV_AA;
cvLine( img, cvPoint( center.x - d, center.y - d ),
cvPoint( center.x + d, center.y + d ), color, 1, CV_AA, 0);
cvLine( img, cvPoint( center.x + d, center.y - d ),
cvPoint( center.x - d, center.y + d ), \
color, 1, CV_AA, 0 );
endfunction
## constant-velocity transition matrix: angle += delta_phi each step
A = [ 1, 1; 0, 1 ];
img = cvCreateImage( cvSize(500,500), 8, 3 );
## 2 state dims (phi, delta_phi), 1 measurement dim, no control
kalman = cvCreateKalman( 2, 1, 0 );
state = cvCreateMat( 2, 1, CV_32FC1 ); # (phi, delta_phi)
process_noise = cvCreateMat( 2, 1, CV_32FC1 );
measurement = cvCreateMat( 1, 1, CV_32FC1 );
rng = cvRNG(-1);
code = -1;
cvZero( measurement );
cvNamedWindow( "Kalman", 1 );
## outer loop: re-seed the filter with a random initial state/speed
while (true),
cvRandArr( rng, state, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) );
kalman.transition_matrix = mat2cv(A, CV_32FC1);
cvSetIdentity( kalman.measurement_matrix, cvRealScalar(1) );
cvSetIdentity( kalman.process_noise_cov, cvRealScalar(1e-5) );
cvSetIdentity( kalman.measurement_noise_cov, cvRealScalar(1e-1) );
cvSetIdentity( kalman.error_cov_post, cvRealScalar(1));
cvRandArr( rng, kalman.state_post, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) );
## inner loop: one predict/measure/correct cycle per frame
while (true),
state_angle = state(0);
state_pt = calc_point(state_angle);
prediction = cvKalmanPredict( kalman );
predict_angle = prediction(0);
predict_pt = calc_point(predict_angle);
cvRandArr( rng, measurement, CV_RAND_NORMAL, cvRealScalar(0), \
cvRealScalar(sqrt(kalman.measurement_noise_cov(0))) );
## generate measurement
cvMatMulAdd( kalman.measurement_matrix, state, measurement, measurement );
measurement_angle = measurement(0);
measurement_pt = calc_point(measurement_angle);
## plot points
cvZero( img );
draw_cross( state_pt, CV_RGB(255,255,255), 3 );
draw_cross( measurement_pt, CV_RGB(255,0,0), 3 );
draw_cross( predict_pt, CV_RGB(0,255,0), 3 );
## red = truth->measurement error, yellow = truth->prediction error
cvLine( img, state_pt, measurement_pt, CV_RGB(255,0,0), 3, CV_AA, 0 );
cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, CV_AA, 0 );
cvKalmanCorrect( kalman, measurement );
## advance the true state with process noise
cvRandArr( rng, process_noise, CV_RAND_NORMAL, cvRealScalar(0), \
cvRealScalar(sqrt(kalman.process_noise_cov(0)(0))));
cvMatMulAdd( kalman.transition_matrix, state, process_noise, state );
cvShowImage( "Kalman", img );
code = cvWaitKey( 100 );
if( code > 0 )
break;
endif
endwhile
if( code == '\x1b' || code == 'q' || code == 'Q' )
break;
endif
endwhile
cvDestroyWindow("Kalman");

View File

@@ -1,72 +0,0 @@
#! /usr/bin/env octave
## K-means clustering demo on randomly generated 2-D gaussian blobs.
cv;
highgui;
## upper bound on the random number of clusters per round
MAX_CLUSTERS=5;
## Uniform pseudo-random integer in the inclusive range [v1, v2].
function ret = randint(v1, v2)
width = v2 - v1;
ret = int32(v1 + rand() * width);
end
## one display color per possible cluster index
color_tab = { \
CV_RGB(255,0,0), \
CV_RGB(0,255,0), \
CV_RGB(100,100,255), \
CV_RGB(255,0,255), \
CV_RGB(255,255,0)};
img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
rng = cvRNG(-1);
cvNamedWindow( "clusters", 1 );
## each round: synthesize random gaussian blobs, cluster, display
while (true),
cluster_count = randint(2, MAX_CLUSTERS);
sample_count = randint(1, 1000);
points = cvCreateMat( sample_count, 1, CV_32FC2 );
clusters = cvCreateMat( sample_count, 1, CV_32SC1 );
## generate random sample from multigaussian distribution
for k=0:cluster_count-1,
center = CvPoint();
center.x = mod(cvRandInt(rng), img.width);
center.y = mod(cvRandInt(rng), img.height);
first = k*sample_count/cluster_count;
last = sample_count;
## Fixed: condition was `k != cluster_count`, which is always true for
## k in 0..cluster_count-1. The intent (matching the C sample) is that
## every cluster but the last takes an equal share; the values produced
## are unchanged.
if (k != cluster_count-1)
last = (k+1)*sample_count/cluster_count;
endif
point_chunk = cvGetRows(points, first, last);
cvRandArr( rng, point_chunk, CV_RAND_NORMAL, \
cvScalar(center.x,center.y,0,0), \
cvScalar(img.width*0.1,img.height*0.1,0,0));
endfor
## shuffle samples
cvRandShuffle( points, rng );
cvKMeans2( points, cluster_count, clusters, \
cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ));
cvZero( img );
for i=0:sample_count-1,
cluster_idx = clusters(i);
pt = points(i);
## Fixed: this identical cvCircle call was duplicated, drawing every
## sample twice; one draw is sufficient and visually identical.
cvCircle( img, pt, 2, color_tab{cluster_idx + 1}, CV_FILLED, CV_AA, 0 );
endfor
cvShowImage( "clusters", img );
key = cvWaitKey(0);
if( key == 27 || key == 'q' || key == 'Q' )
break;
endif
endwhile
cvDestroyWindow( "clusters" );

View File

@@ -1,65 +0,0 @@
#! /usr/bin/env octave
## Live Laplacian demo: per-channel Laplacian of a camera/video stream.
## NOTE(review): hard-coded developer paths; they will fail elsewhere.
addpath("/home/x/opencv2/interfaces/swig/octave");
source("/home/x/opencv2/interfaces/swig/octave/PKG_ADD_template");
debug_on_error(true);
debug_on_warning(true);
crash_dumps_octave_core (0)
cv;
highgui;
laplace = [];
colorlaplace = [];
planes = { [], [], [] };
capture = [];
## pick capture source: no arg = default camera, digit arg = camera index,
## anything else = video file.
## NOTE(review): uses size(argv, 2) while sibling samples use
## size(argv, 1) -- confirm argv() cell orientation.
if (size(argv, 2)==0)
capture = cvCreateCameraCapture( -1 );
elseif (size(argv, 2)==1 && all(isdigit(argv(){1})))
capture = cvCreateCameraCapture( int32(argv(){1}) );
elseif (size(argv, 2)==1)
capture = cvCreateFileCapture( argv(){1} );
endif
if (!swig_this(capture))
printf("Could not initialize capturing...\n");
exit(-1)
endif
cvNamedWindow( "Laplacian", 1 );
while (true),
frame = cvQueryFrame( capture );
if (!swig_this(frame))
break
endif
## lazily allocate the working buffers on the first frame
if (!swig_this(laplace))
for i=1:size(planes,2),
planes{i} = cvCreateImage( \
cvSize(frame.width,frame.height), \
8, 1 );
endfor
## 16-bit signed buffer: the Laplacian produces negative values
laplace = cvCreateImage( cvSize(frame.width,frame.height), IPL_DEPTH_16S, 1 );
colorlaplace = cvCreateImage( \
cvSize(frame.width,frame.height), \
8, 3 );
endif
## split channels, run the Laplacian on each, then recombine
cvSplit( frame, planes{1}, planes{2}, planes{3}, [] );
for plane = planes,
plane = plane{1};
cvLaplace( plane, laplace, 3 );
cvConvertScaleAbs( laplace, plane, 1, 0 );
endfor
cvMerge( planes{1}, planes{2}, planes{3}, [], colorlaplace );
# colorlaplace.origin = frame.origin;
cvShowImage("Laplacian", colorlaplace );
if (cvWaitKey(10) == 27)
break;
endif
endwhile
cvDestroyWindow("Laplacian");

View File

@@ -1,219 +0,0 @@
#! /usr/bin/env octave
## Lucas-Kanade optical-flow point tracker demo.
printf("OpenCV Octave version of lkdemo\n");
## import the necessary things for OpenCV
cv;
highgui;
#############################################################################
## some "constants"
win_size = 10;
MAX_COUNT = 500;
#############################################################################
## some "global" variables
global g;
g.image = [];
g.pt = [];
g.add_remove_pt = false;
g.flags = 0;
g.night_mode = false;
g.need_to_init = true;
## echo the initial state (no semicolon -> prints)
g
#############################################################################
## the mouse callback
## the callback on the trackbar
## Mouse callback: remember a left-click position so the main loop can
## add (or remove) a tracked feature point there.
function on_mouse (event, x, y, flags, param)
global g;
global cv;
global highgui;
if (swig_this(g.image) == 0)
## not initialized, so skip
return;
endif
if (g.image.origin != 0)
## different origin
y = g.image.height - y;
endif
if (event == highgui.CV_EVENT_LBUTTONDOWN)
## user has click, so memorize it
## Fixed: the point was assigned to a function-local `pt`, so the
## click was discarded when the callback returned; store it in the
## shared state struct instead.
g.pt = cv.cvPoint (x, y);
g.add_remove_pt = true;
endif
endfunction
#############################################################################
## so, here is the main part of the program
## NOTE(review): hard-coded developer path; fails on other machines.
filename = "/home/x/work/sneaker/dvgrab-001.avi";
if (size(argv, 1)>1)
filename=argv(){1};
endif
capture = highgui.cvCreateFileCapture (filename);
## check that capture device is OK
if (!swig_this(capture))
printf("Error opening capture device\n");
exit(1)
endif
## display a small howto use it
printf("Hot keys: \n");
printf("\tESC - quit the program\n");
printf("\tr - auto-initialize tracking\n");
printf("\tc - delete all the points\n");
printf("\tn - switch the \"night\" mode on/off\n");
printf("To add/remove a feature point click it\n");
## first, create the necessary windows
highgui.cvNamedWindow ('LkDemo', 1);
## register the mouse callback
highgui.cvSetMouseCallback ('LkDemo', @on_mouse, []);
## NOTE(review): this loop mixes `g.points`/`points` and `g.pt`/`pt`
## inconsistently (e.g. `pt` and `points` below are never defined in
## this scope) -- compare with the Python lkdemo before relying on the
## add/remove-point paths.
while (1)
## do forever
## 1. capture the current image
frame = highgui.cvQueryFrame (capture);
if (swig_this(frame) == 0)
## no image captured... end the processing
break
endif
if (swig_this(g.image) == 0),
## create the images we need
g.image = cv.cvCreateImage (cv.cvGetSize (frame), 8, 3);
# g.image.origin = frame.origin;
g.grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1);
g.prev_grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1);
g.pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1);
g.prev_pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1);
g.points = {[], []};
endif
## copy the frame, so we can draw on it
cv.cvCopy (frame, g.image)
## create a grey version of the image
cv.cvCvtColor (g.image, g.grey, cv.CV_BGR2GRAY)
if (g.night_mode)
## night mode: only display the points
cv.cvSetZero (g.image);
endif
if (g.need_to_init)
## we want to search all the good points
## create the wanted images
eig = cv.cvCreateImage (cv.cvGetSize (g.grey), 32, 1);
temp = cv.cvCreateImage (cv.cvGetSize (g.grey), 32, 1);
## the default parameters
quality = 0.01;
min_distance = 10;
## search the good points
g.points {1} = cv.cvGoodFeaturesToTrack (g.grey, eig, temp,MAX_COUNT,quality, min_distance, [], 3, 0, 0.04);
## refine the corner locations
cv.cvFindCornerSubPix (g.grey,g.points {1},cv.cvSize (win_size, win_size), cv.cvSize (-1, -1),cv.cvTermCriteria (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,20, 0.03));
elseif (size (g.points {1}, 2) > 0)
## we have points, so display them
## calculate the optical flow
[tmp, status] = cv.cvCalcOpticalFlowPyrLK (g.prev_grey, g.grey, g.prev_pyramid, g.pyramid,g.points {1}, size (g.points {1},2),cv.cvSize (win_size, win_size), 3,size (g.points {1}, 2),[],cv.cvTermCriteria (bitor(cv.CV_TERMCRIT_ITER,cv.CV_TERMCRIT_EPS),20, 0.03),g.flags);
g.points {2} = tmp;
## initializations
point_counter = -1;
new_points = {};
for the_point = g.points {2},
the_point = the_point{1};
## go trough all the points
## increment the counter
point_counter += 1;
if (g.add_remove_pt)
## we have a point to add, so see if it is close to
## another one. If yes, don't use it
dx = pt.x - the_point.x;
dy = pt.y - the_point.y;
if (dx * dx + dy * dy <= 25)
## too close
g.add_remove_pt = 0;
continue;
endif
endif
if (!status {point_counter+1})
## we will disable this point
continue;
endif
## this point is a correct point
new_points{end+1} = the_point;
## draw the current point
cv.cvCircle (g.image, {the_point.x, the_point.y},3, cv.cvScalar (0, 255, 0, 0),-1, 8, 0);
endfor
## set back the points we keep;
points {1} = new_points;
endif
if (g.add_remove_pt)
## we want to add a point
points {1} = append (points {1}, cv.cvPointTo32f (pt));
## refine the corner locations
g.points {1} = cv.cvFindCornerSubPix \
(g.grey, {points {1}}, cv.cvSize (win_size, win_size), cv.cvSize \
(-1, -1), cv.cvTermCriteria (bitor(cv.CV_TERMCRIT_ITER, cv.CV_TERMCRIT_EPS),20, 0.03));
## we are no more in "add_remove_pt" mode
g.add_remove_pt = false
endif
## swapping
## double-buffer swap: previous frame data <-> current frame data
tmp = g.prev_grey; g.prev_grey = g.grey; g.grey = tmp;
tmp = g.prev_pyramid; g.prev_pyramid = g.pyramid; g.pyramid = tmp;
tmp = g.points{1}; g.points{1} = g.points{2}; g.points{2} = tmp;
g.need_to_init = false;
## we can now display the image
highgui.cvShowImage ('LkDemo', g.image)
## handle events
c = highgui.cvWaitKey (10);
if (c == 27)
## user has press the ESC key, so exit
break
endif
## processing depending on the character
if (c == int32('r'))
g.need_to_init = true;
elseif (c == int32('c'))
g.points = {[], []};
elseif (c == int32('n'))
g.night_mode = !g.night_mode;
endif
endwhile

View File

@@ -1,50 +0,0 @@
#! /usr/bin/env octave
## Log-polar transform demo: click a point to use it as the transform
## center; shows both the forward and the inverse mapping.
cv;
highgui;
global g;
g.src=[];
g.dst=[];
g.src2=[];
## Mouse callback: on left click, compute the log-polar transform
## centered at (x, y) and its inverse, and display both.
function on_mouse( event, x, y, flags, param )
global g;
global cv;
global highgui;
if(!swig_this(g.src) )
return;
endif
if (event==highgui.CV_EVENT_LBUTTONDOWN)
cvLogPolar( g.src, g.dst, cvPoint2D32f(x,y), 40, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS );
## inverse mapping reconstructs the source from the log-polar image
cvLogPolar( g.dst, g.src2, cvPoint2D32f(x,y), 40, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS+cv.CV_WARP_INVERSE_MAP );
cvShowImage( "log-polar", g.dst );
cvShowImage( "inverse log-polar", g.src2 );
endif
endfunction
## --- script entry ---
## missing semicolon -> the default filename is echoed at startup
filename = "../c/fruits.jpg"
## NOTE(review): sibling samples test size(argv,1)>0; with >1 a single
## command-line argument is ignored -- confirm argv() semantics.
if (size(argv, 1)>1)
filename=argv(){1};
endif
g.src = cvLoadImage(filename,1);
if (!swig_this(g.src))
printf("Could not open %s",filename);
exit(-1)
endif
cvNamedWindow( "original",1 );
cvNamedWindow( "log-polar", 1 );
cvNamedWindow( "inverse log-polar", 1 );
g.dst = cvCreateImage( cvSize(256,256), 8, 3 );
g.src2 = cvCreateImage( cvGetSize(g.src), 8, 3 );
cvSetMouseCallback( "original", @on_mouse );
## seed the two derived views with the image center before the first click
on_mouse( CV_EVENT_LBUTTONDOWN, g.src.width/2, g.src.height/2, [], []);
cvShowImage( "original", g.src );
cvWaitKey();

View File

@@ -1,85 +0,0 @@
#! /usr/bin/env octave
cv;
highgui;
## Uniform pseudo-random integer in the inclusive interval [a, b].
function ret = randint(a, b)
extent = b - a;
ret = int32(a + extent * rand());
endfunction
## Generate `count` random points (matrix variant), then draw them with
## their minimum-area bounding rectangle and minimum enclosing circle.
function minarea_array(img, count)
global cv;
global highgui;
pointMat = cvCreateMat( count, 1, cv.CV_32SC2 );
for i=0:count-1,
## random point inside the central half of the image
pointMat(i) = cvPoint( randint(img.width/4, img.width*3/4), randint(img.height/4, img.height*3/4) );
endfor
box = cvMinAreaRect2( pointMat );
box_vtx = cvBoxPoints( box );
[success, center, radius] = cvMinEnclosingCircle( pointMat );
cv.cvZero( img );
for i=0:count-1,
cvCircle( img, cvGet1D(pointMat,i), 2, CV_RGB( 255, 0, 0 ), \
cv.CV_FILLED, cv.CV_AA, 0 );
endfor
## convert the 4 float box corners to integer points for drawing
box_vtx = {cvPointFrom32f(box_vtx{1}), \
cvPointFrom32f(box_vtx{2}), \
cvPointFrom32f(box_vtx{3}), \
cvPointFrom32f(box_vtx{4})};
cvCircle( img, cvPointFrom32f(center), cvRound(radius), CV_RGB(255, 255, 0), 1, cv.CV_AA, 0 );
cvPolyLine( img, {box_vtx}, 1, CV_RGB(0,255,255), 1, cv.CV_AA ) ;
endfunction
## Same demo as minarea_array but using a CvSeq of points, exercising the
## sequence-based API; clears `storage` when done.
function minarea_seq(img, count, storage)
global cv;
global highgui;
ptseq = cvCreateSeq( bitor(cv.CV_SEQ_KIND_GENERIC, cv.CV_32SC2), cv.sizeof_CvContour, cv.sizeof_CvPoint, storage );
ptseq = cv.CvSeq_CvPoint.cast( ptseq );
for i=0:count-1,
## random point inside the central half of the image
pt0 = cvPoint( randint(img.width/4, img.width*3/4), randint(img.height/4, img.height*3/4) );
cvSeqPush( ptseq, pt0 );
endfor
box = cvMinAreaRect2( ptseq );
box_vtx = cvBoxPoints( box );
[success, center, radius] = cvMinEnclosingCircle( ptseq );
cv.cvZero( img );
for pt = CvSeq_map(ptseq),
pt = pt{1};
cvCircle( img, pt, 2, CV_RGB( 255, 0, 0 ), cv.CV_FILLED, cv.CV_AA, 0 );
endfor
## convert the 4 float box corners to integer points for drawing
box_vtx = {cvPointFrom32f(box_vtx{1}), \
cvPointFrom32f(box_vtx{2}), \
cvPointFrom32f(box_vtx{3}), \
cvPointFrom32f(box_vtx{4})};
cvCircle( img, cvPointFrom32f(center), cvRound(radius), CV_RGB(255, 255, 0), 1, cv.CV_AA, 0 );
cvPolyLine( img, {box_vtx}, 1, CV_RGB(0,255,255), 1, cv.CV_AA );
cvClearMemStorage( storage );
endfunction
## --- script entry: alternate between the array and sequence variants ---
img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
storage = cvCreateMemStorage(0);
cvNamedWindow( "rect & circle", 1 );
use_seq=false;
while (true),
count = randint(1,100);
if (use_seq)
minarea_seq(img, count, storage);
else
minarea_array(img, count);
endif
cvShowImage("rect & circle", img);
key = cvWaitKey();
## ESC quits
if( key == '\x1b' );
break;
endif
## toggle API variant each round
use_seq = !use_seq;
endwhile

View File

@@ -1,15 +0,0 @@
#! /usr/bin/env octave
## Minimal capture demo: grab one frame from a video file and display it.
## NOTE(review): unlike the sibling samples this script never loads the
## cv/highgui packages and uses a hard-coded developer path -- confirm it
## was runnable at all.
cvNamedWindow("win", CV_WINDOW_AUTOSIZE);
cap = cvCreateFileCapture("/home/x/work/sneaker/dvgrab-001.avi");
img = cvQueryFrame(cap);
printf("Got frame of dimensions (%i x %i)",img.width,img.height);
cvShowImage("win", img);
cvMoveWindow("win", 200, 200);
cvWaitKey(0);
## convert the IplImage to a native Octave matrix
octimg = cv2im(img);

View File

@@ -1,86 +0,0 @@
#! /usr/bin/env octave
## Morphology demo: trackbars drive erode/dilate/open/close with a
## variable-size structuring element.
cv;
highgui;
## state shared by the four trackbar callbacks
global src;
global image
global element
global element_shape
global global_pos;
global dest;
src = 0;
image = 0;
dest = 0;
element = 0;
element_shape = CV_SHAPE_RECT;
global_pos = 0;
## Trackbar callback: morphological opening (erode then dilate) with a
## (2*pos+1)-sized structuring element.
function Opening(pos)
global src;
global image
global element
global element_shape
global global_pos;
global dest;
element = cvCreateStructuringElementEx( pos*2+1, pos*2+1, pos, pos, element_shape, [] );
cvErode(src,image,element,1);
cvDilate(image,dest,element,1);
cvShowImage("Opening&Closing window",dest);
endfunction
## Trackbar callback: morphological closing (dilate then erode) with a
## (2*pos+1)-sized structuring element.
function Closing(pos)
global src;
global image
global element
global element_shape
global global_pos;
global dest;
element = cvCreateStructuringElementEx( pos*2+1, pos*2+1, pos, pos, element_shape, [] );
cvDilate(src,image,element,1);
cvErode(image,dest,element,1);
cvShowImage("Opening&Closing window",dest);
endfunction
## Trackbar callback: plain erosion with a (2*pos+1)-sized element.
function Erosion(pos)
global src;
global image
global element
global element_shape
global global_pos;
global dest;
element = cvCreateStructuringElementEx( pos*2+1, pos*2+1, pos, pos, element_shape, [] );
cvErode(src,dest,element,1);
cvShowImage("Erosion&Dilation window",dest);
endfunction
## Trackbar callback: plain dilation with a (2*pos+1)-sized element.
function Dilation(pos)
global src;
global image
global element
global element_shape
global global_pos;
global dest;
element = cvCreateStructuringElementEx( pos*2+1, pos*2+1, pos, pos, element_shape, [] );
cvDilate(src,dest,element,1);
cvShowImage("Erosion&Dilation window",dest);
endfunction
## --- script entry: load image, create the two windows and trackbars ---
filename = "../c/baboon.jpg";
if (size(argv, 1)==1)
filename = argv(){1};
endif
src = cvLoadImage(filename,1);
if (! swig_this(src))
exit(-1);
endif
image = cvCloneImage(src);
dest = cvCloneImage(src);
cvNamedWindow("Opening&Closing window",1);
cvNamedWindow("Erosion&Dilation window",1);
cvShowImage("Opening&Closing window",src);
cvShowImage("Erosion&Dilation window",src);
## each trackbar routes to one morphological operation
cvCreateTrackbar("Open","Opening&Closing window",global_pos,10,@Opening);
cvCreateTrackbar("Close","Opening&Closing window",global_pos,10,@Closing);
cvCreateTrackbar("Dilate","Erosion&Dilation window",global_pos,10,@Dilation);
cvCreateTrackbar("Erode","Erosion&Dilation window",global_pos,10,@Erosion);
cvWaitKey(0);
cvDestroyWindow("Opening&Closing window");
cvDestroyWindow("Erosion&Dilation window");

View File

@@ -1,126 +0,0 @@
#! /usr/bin/env octave
## Motion-template (MHI) demo: constants and state shared with update_mhi().
cv
highgui
CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1;
MAX_TIME_DELTA = 0.5;
MIN_TIME_DELTA = 0.05;
N = 4;
## NOTE(review): `range(10)` is a leftover from the Python original -- not an
## Octave builtin; this likely needs to be a cell array of N frame buffers.
buf = range(10)
last = 0;
mhi = []; # MHI
orient = []; # orientation
mask = []; # valid orientation mask
segmask = []; # motion segmentation map
storage = []; # temporary storage
function update_mhi( img, dst, diff_threshold )
  ## Update the motion-history image from a new frame and draw detected
  ## motion components (circles + orientation lines) into dst.
  ## NOTE(review): this body still carries several untranslated Python
  ## leftovers (time.clock(), `not`, buf[i] indexing) and the endfor/endif
  ## pairing at the bottom does not match the control-flow openings above;
  ## the code appears garbled by the Python->Octave conversion -- verify
  ## against samples/python/motempl.py before relying on it.
  global last
  global mhi
  global storage
  global mask
  global orient
  global segmask
  timestamp = time.clock()/CLOCKS_PER_SEC; # get current time in seconds
  size = cvSize(img.width,img.height); # get current frame size
  idx1 = last;
  if (! mhi || mhi.width != size.width || mhi.height != size.height)
    for i=0:N-1,
      buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
      cvZero( buf[i] );
      mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
      cvZero( mhi ); # clear MHI at the beginning
      orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
      segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
      mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
      cvCvtColor( img, buf[last], CV_BGR2GRAY ); # convert frame to grayscale
      idx2 = (last + 1) % N; # index of (last - (N-1))th frame
      last = idx2;
      silh = buf[idx2];
      cvAbsDiff( buf[idx1], buf[idx2], silh ); # get difference between frames
      cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); # and threshold it
      cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); # update MHI
      cvCvtScale( mhi, mask, 255./MHI_DURATION,
                  (MHI_DURATION - timestamp)*255./MHI_DURATION );
      cvZero( dst );
      cvMerge( mask, [], [], [], dst );
      cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
      if( not storage )
        storage = cvCreateMemStorage(0);
      else
        cvClearMemStorage(storage);
        seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );
        for i=-1:seq.total-1,
          if( i < 0 ) # case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100.;
          else # i-th motion component
            comp_rect = seq[i].rect
            if( comp_rect.width + comp_rect.height < 100 ) # reject very small components
              continue;
            endif
          endif
          color = CV_RGB(255,0,0);
          magnitude = 30.;
          silh_roi = cvGetSubRect(silh, comp_rect);
          mhi_roi = cvGetSubRect( mhi, comp_rect );
          orient_roi = cvGetSubRect( orient, comp_rect );
          mask_roi = cvGetSubRect( mask, comp_rect );
          angle = cvCalcGlobalOrientation( orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION);
          angle = 360.0 - angle; # adjust for images with top-left origin
          count = cvNorm( silh_roi, [], CV_L1, [] ); # calculate number of points within silhouette ROI
          if( count < comp_rect.width * comp_rect.height * 0.05 )
            continue;
          endif
          center = cvPoint( (comp_rect.x + comp_rect.width/2),
                            (comp_rect.y + comp_rect.height/2) );
          cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
          cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                  cvRound( center.y - magnitude*sin(angle*CV_PI/180))), \
                  color, 3, CV_AA, 0 );
        endfor
      endif
    endfor
  endif
endfunction
## Driver: open a camera (no args / numeric arg) or a video file, then run
## the motion-template visualization until a key is pressed or input ends.
motion = 0;
capture = 0;
if (size(argv, 1)==1)
  capture = cvCreateCameraCapture( 0 )
elseif (size(argv, 1)==2 && all(isdigit(argv(1, :))))
  capture = cvCreateCameraCapture( int32(argv(1, :)) )
elseif (size(argv, 1)==2)
  capture = cvCreateFileCapture( argv(1, :) );
endif
if (!capture)
  ## bug fix: `print "..."` is Python syntax and does not parse in Octave
  printf("Could not initialize capturing...\n");
  exit(-1)
endif
cvNamedWindow( "Motion", 1 );
while (true)
  image = cvQueryFrame( capture );
  if( image )
    if( ! motion )
      ## lazily allocate the visualization buffer to match the frame size
      motion = cvCreateImage( cvSize(image.width,image.height), 8, 3 );
      cvZero( motion );
      motion.origin = image.origin;
    endif
    update_mhi( image, motion, 30 );
    cvShowImage( "Motion", motion );
    if( cvWaitKey(10) != -1 )
      break;
    endif
  else
    break
  endif
endwhile
cvDestroyWindow( "Motion" );

View File

@@ -1,71 +0,0 @@
#! /usr/bin/env octave
## Pyramid-segmentation demo: shared state lives in the global struct g.
cv;
highgui;
global g;
g.image0 = [];
g.image1 = [];
g.threshold1 = 255;
g.threshold2 = 30;
## bug fix: `g.l = g.level = 4;` -- chained assignment is not valid Octave
## syntax (assignment is a statement, not an expression)
g.level = 4;
g.l = g.level;
g.block_size = 1000;
g.filter = CV_GAUSSIAN_5x5;
g.storage = [];
g.min_comp = CvConnectedComp();
function set_thresh1( val )
  ## Trackbar callback: store the first segmentation threshold and re-run.
  global g;
  g.threshold1 = val;
  ON_SEGMENT();
endfunction
function set_thresh2( val )
  ## Trackbar callback: store the second segmentation threshold and re-run.
  global g;
  g.threshold2 = val;
  ON_SEGMENT()
endfunction
function ON_SEGMENT()
  ## Run cvPyrSegmentation on g.image0 with the current thresholds and show
  ## the result in the "Segmentation" window.
  global g;
  global cv;
  ## NOTE(review): the bare expressions below just echo values to the console
  ## (leftover debug output); harmless but noisy.
  g
  swig_this(g.image0)
  swig_this(g.image1)
  swig_this(g.storage)
  g.level
  g.threshold1
  g.threshold2
  comp = cv.cvPyrSegmentation(g.image0, g.image1, g.storage, g.level, g.threshold1+1, g.threshold2+1);
  cvShowImage("Segmentation", g.image1);
endfunction
## Driver: load an image, size it for the pyramid, run one segmentation and
## wire up the two threshold trackbars.
filename = "../c/fruits.jpg";
if (size(argv, 2) >= 1)
  filename = argv(){1};
endif
g.image0 = cvLoadImage( filename, 1);
if (! swig_this(g.image0))
  printf("Error opening %s\n",filename);
  exit(-1);
endif
cvNamedWindow("Source", 0);
cvShowImage("Source", g.image0);
cvNamedWindow("Segmentation", 0);
g.storage = cvCreateMemStorage ( g.block_size );
## NOTE(review): these shifts shrink the dimensions by 2^level and then
## REPLACE the loaded image with freshly allocated (blank) buffers; the C
## original merely crops so width/height are divisible by 2^level -- verify.
new_width = bitshift(g.image0.width, -g.level);
new_height = bitshift(g.image0.height, -g.level);
g.image0 = cvCreateImage( cvSize(new_width,new_height), g.image0.depth, g.image0.nChannels );
g.image1 = cvCreateImage( cvSize(new_width,new_height), g.image0.depth, g.image0.nChannels );
## segmentation of the color image
g.l = 1;
g.threshold1 =255;
g.threshold2 =30;
ON_SEGMENT();
g.sthreshold1 = cvCreateTrackbar("Threshold1", "Segmentation", g.threshold1, 255, @set_thresh1);
g.sthreshold2 = cvCreateTrackbar("Threshold2", "Segmentation", g.threshold2, 255, @set_thresh2);
## bug fix: `image1` was undefined at top level -- the image lives in g.image1
cvShowImage("Segmentation", g.image1);
cvWaitKey(0);
cvDestroyWindow("Segmentation");
cvDestroyWindow("Source");

View File

@@ -1,173 +0,0 @@
#! /usr/bin/env octave
##
## The full "Square Detector" program.
## It loads several images subsequentally and tries to find squares in
## each image
##
cv;
highgui;
## shared state: Canny threshold, current images, contour storage, window name
global g;
g.thresh = 50;
g.img = [];
g.img0 = [];
g.storage = [];
g.wndname = "Square Detection Demo";
function ret = compute_angle( pt1, pt2, pt0 )
  ## Cosine of the angle between vectors (pt1-pt0) and (pt2-pt0).
  ## The tiny epsilon keeps the division well-defined for degenerate points.
  ux = pt1.x - pt0.x;
  uy = pt1.y - pt0.y;
  vx = pt2.x - pt0.x;
  vy = pt2.y - pt0.y;
  ret = (ux*vx + uy*vy) / sqrt((ux*ux + uy*uy) * (vx*vx + vy*vy) + 1e-10);
endfunction
function squares = findSquares4( img, storage )
  ## Find quadrangles in img: per color plane, run Canny (level 1) or a
  ## graded threshold, extract contours, approximate each to a polygon and
  ## keep convex 4-gons with near-right angles. Returns a CvSeq of vertices
  ## (4 consecutive points per square).
  global g;
  global cv;
  N = 11;
  sz = cvSize( img.width, img.height );
  timg = cvCloneImage( img ); # make a copy of input image
  gray = cvCreateImage( sz, 8, 1 );
  pyr = cvCreateImage( cvSize(int32(sz.width/2), int32(sz.height/2)), 8, 3 );
  ## create empty sequence that will contain points -
  ## 4 points per square (the square's vertices)
  squares = cvCreateSeq( 0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, storage );
  squares = cv.CvSeq_CvPoint.cast( squares );
  ## select the maximum ROI in the image
  ## with the width and height divisible by 2
  subimage = cvGetSubRect( timg, cvRect( 0, 0, sz.width, sz.height ));
  ## down-scale and upscale the image to filter out the noise
  cvPyrDown( subimage, pyr, 7 );
  cvPyrUp( pyr, subimage, 7 );
  tgray = cvCreateImage( sz, 8, 1 );
  ## find squares in every color plane of the image
  for c=1:3,
    ## extract the c-th color plane
    channels = {[], [], []};
    channels{c} = tgray;
    cvSplit( subimage, channels{1}, channels{2}, channels{3}, [] ) ;
    for l=1:N,
      ## hack: use Canny instead of zero threshold level.
      ## Canny helps to catch squares with gradient shading
      if( l == 1 )
        ## apply Canny. Take the upper threshold from slider
        ## and set the lower to 0 (which forces edges merging)
        cvCanny( tgray, gray, 0, g.thresh, 5 );
        ## dilate canny output to remove potential
        ## holes between edge segments
        cvDilate( gray, gray, [], 1 );
      else
        ## apply threshold if l!=0
        ## tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
        cvThreshold( tgray, gray, l*255/N, 255, cv.CV_THRESH_BINARY );
      endif
      ## find contours and store them all as a list
      [count, contours] = cvFindContours( gray, storage, cv.sizeof_CvContour, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
      if (!swig_this(contours))
        continue;
      endif
      ## test each contour
      for contour = CvSeq_hrange(contours),
        ## approximate contour with accuracy proportional
        ## to the contour perimeter
        result = cvApproxPoly( contour, cv.sizeof_CvContour, storage, cv.CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
        ## square contours should have 4 vertices after approximation
        ## relatively large area (to filter out noisy contours)
        ## and be convex.
        ## Note: absolute value of an area is used because
        ## area may be positive or negative - in accordance with the
        ## contour orientation
        if( result.total == 4 &&
            abs(cvContourArea(result)) > 1000 &&
            cvCheckContourConvexity(result) )
          s = 0;
          for i=1:5,
            ## find minimum angle between joint
            ## edges (maximum of cosine)
            if( i > 2 )
              t = abs(compute_angle( result{i}, result{i-2}, result{i-1}));
              if (s<t)
                s=t;
              endif
            endif
          endfor
          ## if cosines of all angles are small
          ## (all angles are ~90 degree) then write quandrange
          ## vertices to resultant sequence
          if( s < 0.3 )
            for i=1:4,
              squares.append( result{i} )
            endfor
          endif
        endif
      endfor
    endfor
  endfor
endfunction
## the function draws all the squares in the image
function drawSquares( img, squares )
  ## Draw each detected square (4 consecutive points in `squares`) as a
  ## green closed polyline on a copy of img and display it.
  global g;
  global cv;
  cpy = cvCloneImage( img );
  ## read 4 sequence elements at a time (all vertices of a square)
  i=0;
  while (i<squares.total)
    pt = { squares{i}, squares{i+1}, squares{i+2}, squares{i+3} };
    ## draw the square as a closed polyline
    cvPolyLine( cpy, {pt}, 1, CV_RGB(0,255,0), 3, cv.CV_AA, 0 );
    i+=4;
  endwhile
  ## show the resultant image
  cvShowImage( g.wndname, cpy );
endfunction
function on_trackbar( a )
  ## Trackbar callback: re-detect and re-draw squares on the current image.
  ## (the slider position itself is read via the global g.thresh)
  global g;
  if( swig_this(g.img) )
    drawSquares( g.img, findSquares4( g.img, g.storage ) );
  endif
endfunction
## Driver: iterate over the sample images, detect/draw squares per image,
## advance on any key, quit on ESC.
g.names = {"../c/pic1.png", "../c/pic2.png", "../c/pic3.png", \
           "../c/pic4.png", "../c/pic5.png", "../c/pic6.png" };
## create memory storage that will contain all the dynamic data
g.storage = cvCreateMemStorage(0);
for name = g.names,
  g.img0 = cvLoadImage( name, 1 );
  if (!swig_this(g.img0))
    printf("Couldn't load %s\n",name);
    continue;
  endif
  g.img = cvCloneImage( g.img0 );
  ## create window and a trackbar (slider) with parent "image" and set callback
  ## (the slider regulates upper threshold, passed to Canny edge detector)
  cvNamedWindow( g.wndname, 1 );
  cvCreateTrackbar( "canny thresh", g.wndname, g.thresh, 1000, @on_trackbar );
  ## force the image processing
  on_trackbar(0);
  ## wait for key.
  ## Also the function cvWaitKey takes care of event processing
  c = cvWaitKey(0);
  ## clear memory storage - reset free space position
  cvClearMemStorage( g.storage );
  ## NOTE(review): '\x1b' in single quotes is NOT an escape in Octave; this
  ## compares against the literal 4-char string -- verify ESC handling.
  if( c == '\x1b' )
    break;
  endif
endfor
cvDestroyWindow( g.wndname );

View File

@@ -1,133 +0,0 @@
#! /usr/bin/env octave
## Watershed segmentation demo: the user sketches markers with the mouse,
## then 'w' runs cvWatershed. Shared state lives in the global struct g.
cv;
highgui;
global g;
g.marker_mask = [];
g.markers = [];
g.img0 = []
g.img = []
g.img_gray = []
g.wshed = []
g.prev_pt = cvPoint(-1,-1);
function on_mouse( event, x, y, flags, param )
  ## Mouse callback: freehand-draw marker strokes (width 5, value 255) into
  ## both the marker mask and the displayed image while the left button is
  ## held; reset the stroke anchor on button-up.
  global g;
  global cv;
  global highgui;
  if( !swig_this( g.img) )
    return;
  endif
  if( event == highgui.CV_EVENT_LBUTTONUP || ! bitand(flags,highgui.CV_EVENT_FLAG_LBUTTON) )
    g.prev_pt = cvPoint(-1,-1);
  elseif( event == highgui.CV_EVENT_LBUTTONDOWN )
    g.prev_pt = cvPoint(x,y);
  elseif( event == highgui.CV_EVENT_MOUSEMOVE && bitand(flags,highgui.CV_EVENT_FLAG_LBUTTON) )
    pt = cvPoint(x,y);
    if( g.prev_pt.x < 0 )
      g.prev_pt = pt;
    endif
    cvLine( g.marker_mask, g.prev_pt, pt, cvScalarAll(255), 5, 8, 0 );
    cvLine( g.img, g.prev_pt, pt, cvScalarAll(255), 5, 8, 0 );
    g.prev_pt = pt;
    cvShowImage( "image", g.img );
  endif
endfunction
## Driver setup: pick the input file (arg 1 or fruits.jpg) and load it.
filename = "../c/fruits.jpg";
if (size(argv, 1)>=1)
  filename = argv(){1};
endif
rng = cvRNG(-1);
g.img0 = cvLoadImage(filename,1);
if (!swig_this(g.img0))
  ## bug fix: `print "..." % filename` is Python syntax, invalid in Octave
  printf("Error opening image '%s'\n", filename);
  exit(-1)
endif
## Main interaction loop: 'r' restores the image, 'w' runs the watershed on
## the sketched markers and paints each region a random color, ESC quits.
printf("Hot keys:\n");
printf("\tESC - quit the program\n");
printf("\tr - restore the original image\n");
printf("\tw - run watershed algorithm\n");
printf("\t (before that, roughly outline several g.markers on the image)\n");
cvNamedWindow( "image", 1 );
cvNamedWindow( "watershed transform", 1 );
g.img = cvCloneImage( g.img0 );
g.img_gray = cvCloneImage( g.img0 );
g.wshed = cvCloneImage( g.img0 );
g.marker_mask = cvCreateImage( cvGetSize(g.img), 8, 1 );
g.markers = cvCreateImage( cvGetSize(g.img), IPL_DEPTH_32S, 1 );
cvCvtColor( g.img, g.marker_mask, CV_BGR2GRAY );
cvCvtColor( g.marker_mask, g.img_gray, CV_GRAY2BGR );
cvZero( g.marker_mask );
cvZero( g.wshed );
cvShowImage( "image", g.img );
cvShowImage( "watershed transform", g.wshed );
cvSetMouseCallback( "image", @on_mouse, [] );
while (true)
  c = cvWaitKey(0);
  if (c=='\x1b')
    break;
  endif
  if (c == 'r')
    cvZero( g.marker_mask );
    cvCopy( g.img0, g.img );
    cvShowImage( "image", g.img );
  endif
  if (c == 'w')
    storage = cvCreateMemStorage(0);
    comp_count = 0;
    ##cvSaveImage( "g.wshed_mask.png", g.marker_mask );
    ##g.marker_mask = cvLoadImage( "g.wshed_mask.png", 0 );
    ## each connected marker stroke becomes one numbered seed component
    [nb_cont, contours] = cvFindContours( g.marker_mask, storage, \
                                          sizeof_CvContour, \
                                          CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    cvZero( g.markers );
    swig_this(contours)
    while (swig_this(contours))
      cvDrawContours( g.markers, contours, cvScalarAll(comp_count+1), \
                      cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
      contours=contours.h_next;
      comp_count+=1;
    endwhile
    comp_count
    ## one random (brightish) color per component
    color_tab = cvCreateMat( comp_count, 1, CV_8UC3 );
    for i=0:comp_count-1,
      color_tab(i) = cvScalar( mod(cvRandInt(rng),180) + 50, \
                               mod(cvRandInt(rng),180) + 50, \
                               mod(cvRandInt(rng),180) + 50 );
    endfor
    t = int32(cvGetTickCount());
    cvWatershed( g.img0, g.markers );
    t = int32(cvGetTickCount()) - t;
    ##print "exec time = %f" % t/(cvGetTickFrequency()*1000.)
    cvSet( g.wshed, cvScalarAll(255) );
    ## paint the watershed image
    for j=0:g.markers.height-1,
      for i=0:g.markers.width-1,
        ## NOTE(review): the bare `{j,i}` below just echoes the index cell to
        ## the console every pixel (leftover debug output) -- very slow.
        {j,i}
        idx = g.markers({j,i});
        if (idx==-1)
          continue
        endif
        idx = idx-1;
        g.wshed({j,i}) = color_tab({idx,0});
      endfor
    endfor
    cvAddWeighted( g.wshed, 0.5, g.img_gray, 0.5, 0, g.wshed );
    cvShowImage( "watershed transform", g.wshed );
    cvWaitKey();
  endif
endwhile

View File

@@ -1,13 +0,0 @@
# -------------------------------------------------------------------------
# CMake file for Python samples. See root CMakeLists.txt
# -------------------------------------------------------------------------
file(GLOB PYTHON_SAMPLES *.py)

# Install the sample scripts (skipped on Windows, where the installer
# handles sample layout); scripts are installed world-executable.
if(NOT WIN32)
install(FILES ${PYTHON_SAMPLES}
DESTINATION share/opencv/samples/python
PERMISSIONS OWNER_READ OWNER_EXECUTE
GROUP_READ GROUP_EXECUTE
WORLD_READ WORLD_EXECUTE)
endif()

View File

@@ -1,193 +0,0 @@
#! /usr/bin/env python
# Hue-histogram demo (Python 2, old SWIG bindings): shows a live camera or
# video feed next to a colorized hue histogram of the whole frame.
import sys
# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui

#############################################################################
# definition of some constants

# how many bins we want for the histogram, and their ranges
hdims = 16
hranges = [[0, 180]]

# ranges for the limitation of the histogram
vmin = 10
vmax = 256
smin = 30

# the range we want to monitor
hsv_min = cv.cvScalar (0, smin, vmin, 0)
hsv_max = cv.cvScalar (180, 256, vmax, 0)

#############################################################################
# some useful functions
def hsv2rgb (hue):
    """Convert an HSV hue (OpenCV convention, 0..180) to a BGR cvScalar.

    The hue circle is split into 6 sectors; within a sector the varying
    channel ramps linearly (p), the other two are saturated 255 / 0.
    """
    sector_data = [[0, 2, 1],
                   [1, 2, 0],
                   [1, 0, 2],
                   [2, 0, 1],
                   [2, 1, 0],
                   [0, 1, 2]]
    hue *= 0.1 / 3          # map 0..180 onto 0..6 sectors
    sector = cv.cvFloor (hue)
    p = cv.cvRound (255 * (hue - sector))
    if sector & 1:
        p ^= 255
    # robustness fix: hue == 180 yields sector 6, one past the table end;
    # fold it back onto sector 5 (hue 180 == hue 0 == red) instead of
    # raising IndexError
    idx = min (sector, 5)
    rgb = {}
    rgb [sector_data [idx][0]] = 255
    rgb [sector_data [idx][1]] = 0
    rgb [sector_data [idx][2]] = p
    return cv.cvScalar (rgb [2], rgb [1], rgb [0], 0)
#############################################################################
# so, here is the main part of the program
if __name__ == '__main__':

    # a small welcome
    print "OpenCV Python wrapper test"
    print "OpenCV version: %s (%d, %d, %d)" % (cv.CV_VERSION,
                                               cv.CV_MAJOR_VERSION,
                                               cv.CV_MINOR_VERSION,
                                               cv.CV_SUBMINOR_VERSION)

    # first, create the necessary windows
    highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow ('Histogram', highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow ('Camera', 10, 40)
    highgui.cvMoveWindow ('Histogram', 10, 270)

    try:
        # try to get the device number from the command line
        device = int (sys.argv [1])

        # got it ! so remove it from the arguments
        del sys.argv [1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0

    if len (sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture (device)

        # set the wanted image size from the camera
        highgui.cvSetCaptureProperty (capture,
                                      highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
        highgui.cvSetCaptureProperty (capture,
                                      highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)
    else:
        # we have an argument on the command line,
        # we can assume this is a file name, so open it
        capture = highgui.cvCreateFileCapture (sys.argv [1])

    # check that capture device is OK
    if not capture:
        print "Error opening capture device"
        sys.exit (1)

    # create an image to put in the histogram
    histimg = cv.cvCreateImage (cv.cvSize (320,240), 8, 3)

    # init the image of the histogram to black
    cv.cvSetZero (histimg)

    # capture the 1st frame to get some propertie on it
    frame = highgui.cvQueryFrame (capture)

    # get some properties of the frame
    frame_size = cv.cvGetSize (frame)

    # compute which selection of the frame we want to monitor
    selection = cv.cvRect (0, 0, frame.width, frame.height)

    # create some images usefull later
    hue = cv.cvCreateImage (frame_size, 8, 1)
    mask = cv.cvCreateImage (frame_size, 8, 1)
    hsv = cv.cvCreateImage (frame_size, 8, 3 )

    # create the histogram
    hist = cv.cvCreateHist ([hdims], cv.CV_HIST_ARRAY, hranges, 1)

    while 1:
        # do forever

        # 1. capture the current image
        frame = highgui.cvQueryFrame (capture)
        if frame is None:
            # no image captured... end the processing
            break

        # mirror the captured image
        cv.cvFlip (frame, None, 1)

        # compute the hsv version of the image
        cv.cvCvtColor (frame, hsv, cv.CV_BGR2HSV)

        # compute which pixels are in the wanted range
        cv.cvInRangeS (hsv, hsv_min, hsv_max, mask)

        # extract the hue from the hsv array
        cv.cvSplit (hsv, hue, None, None, None)

        # select the rectangle of interest in the hue/mask arrays
        hue_roi = cv.cvGetSubRect (hue, selection)
        mask_roi = cv.cvGetSubRect (mask, selection)

        # it's time to compute the histogram
        cv.cvCalcHist (hue_roi, hist, 0, mask_roi)

        # extract the min and max value of the histogram
        min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue (hist)

        # compute the scale factor
        if max_val > 0:
            scale = 255. / max_val
        else:
            scale = 0.

        # scale the histograms
        cv.cvConvertScale (hist.bins, hist.bins, scale, 0)

        # clear the histogram image
        cv.cvSetZero (histimg)

        # compute the width for each bin do display
        bin_w = histimg.width / hdims

        for i in range (hdims):
            # for all the bins

            # get the value, and scale to the size of the hist image
            val = cv.cvRound (cv.cvGetReal1D (hist.bins, i)
                              * histimg.height / 255)

            # compute the color
            color = hsv2rgb (i * 180. / hdims)

            # draw the rectangle in the wanted color
            cv.cvRectangle (histimg,
                            cv.cvPoint (i * bin_w, histimg.height),
                            cv.cvPoint ((i + 1) * bin_w, histimg.height - val),
                            color, -1, 8, 0)

        # we can now display the images
        highgui.cvShowImage ('Camera', frame)
        highgui.cvShowImage ('Histogram', histimg)

        # handle events
        k = highgui.cvWaitKey (10)

        if k == '\x1b':
            # user has press the ESC key, so exit
            break

View File

@@ -1,106 +0,0 @@
#! /usr/bin/env python
# Capture-and-record demo (Python 2, old SWIG bindings): grabs frames from a
# camera or video file and writes them to captured.mpg while displaying them.
import sys

# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui

# the codec existing in cvcapp.cpp,
# need to have a better way to specify them in the future
# WARNING: I have see only MPEG1VIDEO working on my computer
# (values are little-endian FOURCC codes packed into ints)
H263 = 0x33363255
H263I = 0x33363249
MSMPEG4V3 = 0x33564944
MPEG4 = 0x58564944
MSMPEG4V2 = 0x3234504D
MJPEG = 0x47504A4D
MPEG1VIDEO = 0x314D4950
AC3 = 0x2000
MP2 = 0x50
FLV1 = 0x31564C46

#############################################################################
# so, here is the main part of the program
if __name__ == '__main__':

    # a small welcome
    print "OpenCV Python capture video"

    # first, create the necessary window
    highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow ('Camera', 10, 10)

    try:
        # try to get the device number from the command line
        device = int (sys.argv [1])

        # got it ! so remove it from the arguments
        del sys.argv [1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0

    if len (sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture (device)
    else:
        # we have an argument on the command line,
        # we can assume this is a file name, so open it
        capture = highgui.cvCreateFileCapture (sys.argv [1])

    # check that capture device is OK
    if not capture:
        print "Error opening capture device"
        sys.exit (1)

    # capture the 1st frame to get some propertie on it
    frame = highgui.cvQueryFrame (capture)

    # get size of the frame
    frame_size = cv.cvGetSize (frame)

    # get the frame rate of the capture device
    fps = highgui.cvGetCaptureProperty (capture, highgui.CV_CAP_PROP_FPS)
    if fps == 0:
        # no fps getted, so set it to 30 by default
        fps = 30

    # create the writer
    writer = highgui.cvCreateVideoWriter ("captured.mpg", MPEG1VIDEO,
                                          fps, frame_size, True)

    # check the writer is OK
    if not writer:
        print "Error opening writer"
        sys.exit (1)

    while 1:
        # do forever

        # 1. capture the current image
        frame = highgui.cvQueryFrame (capture)
        if frame is None:
            # no image captured... end the processing
            break

        # write the frame to the output file
        highgui.cvWriteFrame (writer, frame)

        # display the frames to have a visual output
        highgui.cvShowImage ('Camera', frame)

        # handle events
        k = highgui.cvWaitKey (5)

        if k % 0x100 == 27:
            # user has press the ESC key, so exit
            break

    # end working with the writer
    # not working at this time... Need to implement some typemaps...
    # but exiting without calling it is OK in this simple application
    #highgui.cvReleaseVideoWriter (writer)

View File

@@ -1,18 +0,0 @@
#!/usr/bin/python
# Chessboard-corner demo: detect a 5x6 inner-corner chessboard pattern in the
# image given as argv[1] and draw the found corners over the color image.
from opencv.cv import *
from opencv.highgui import *
import sys

if __name__ == "__main__":
    cvNamedWindow("win")
    # filename is required; IndexError if the argument is missing
    filename = sys.argv[1]
    im = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE)
    im3 = cvLoadImage(filename, CV_LOAD_IMAGE_COLOR)
    chessboard_dim = cvSize( 5, 6 )
    found_all, corners = cvFindChessboardCorners( im, chessboard_dim )
    cvDrawChessboardCorners( im3, chessboard_dim, corners, found_all )
    cvShowImage("win", im3);
    cvWaitKey()

View File

@@ -1,137 +0,0 @@
#! /usr/bin/env python
# Contours demo: draws synthetic "faces", extracts their contour tree and
# lets a trackbar choose how many nesting levels to display.
print "OpenCV Python version of contours"

# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui

# some default constants
_SIZE = 500
_DEFAULT_LEVEL = 3

# definition of some colors
_red = cv.cvScalar (0, 0, 255, 0);
_green = cv.cvScalar (0, 255, 0, 0);
_white = cv.cvRealScalar (255)
_black = cv.cvRealScalar (0)
# the callback on the trackbar, to set the level of contours we want
# to display
def on_trackbar (position):
    """Trackbar callback: redraw the contour tree at depth (position - 3).

    Reads the module-level `contours` found in the main section.
    """
    # create the image for putting in it the founded contours
    contours_image = cv.cvCreateImage (cv.cvSize (_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours

    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next.h_next.h_next

    # first, clear the image where we will draw contours
    cv.cvSetZero (contours_image)

    # draw contours in red and green
    cv.cvDrawContours (contours_image, _contours,
                       _red, _green,
                       levels, 3, cv.CV_AA,
                       cv.cvPoint (0, 0))

    # finally, show the image
    highgui.cvShowImage ("contours", contours_image)
if __name__ == '__main__':

    # create the image where we want to display results
    image = cv.cvCreateImage (cv.cvSize (_SIZE, _SIZE), 8, 1)

    # start with an empty image
    cv.cvSetZero (image)

    # draw the original picture: six cartoon "faces" built from ellipses
    for i in range (6):
        dx = (i % 2) * 250 - 30
        dy = (i / 2) * 150

        cv.cvEllipse (image,
                      cv.cvPoint (dx + 150, dy + 100),
                      cv.cvSize (100, 70),
                      0, 0, 360, _white, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 115, dy + 70),
                      cv.cvSize (30, 20),
                      0, 0, 360, _black, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 185, dy + 70),
                      cv.cvSize (30, 20),
                      0, 0, 360, _black, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 115, dy + 70),
                      cv.cvSize (15, 15),
                      0, 0, 360, _white, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 185, dy + 70),
                      cv.cvSize (15, 15),
                      0, 0, 360, _white, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 115, dy + 70),
                      cv.cvSize (5, 5),
                      0, 0, 360, _black, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 185, dy + 70),
                      cv.cvSize (5, 5),
                      0, 0, 360, _black, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 150, dy + 100),
                      cv.cvSize (10, 5),
                      0, 0, 360, _black, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 150, dy + 150),
                      cv.cvSize (40, 10),
                      0, 0, 360, _black, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 27, dy + 100),
                      cv.cvSize (20, 35),
                      0, 0, 360, _white, -1, 8, 0)
        cv.cvEllipse (image,
                      cv.cvPoint (dx + 273, dy + 100),
                      cv.cvSize (20, 35),
                      0, 0, 360, _white, -1, 8, 0)

    # create window and display the original picture in it
    highgui.cvNamedWindow ("image", 1)
    highgui.cvShowImage ("image", image)

    # create the storage area
    storage = cv.cvCreateMemStorage (0)

    # find the contours
    nb_contours, contours = cv.cvFindContours (image,
                                               storage,
                                               cv.sizeof_CvContour,
                                               cv.CV_RETR_TREE,
                                               cv.CV_CHAIN_APPROX_SIMPLE,
                                               cv.cvPoint (0,0))

    # comment this out if you do not want approximation
    contours = cv.cvApproxPoly (contours, cv.sizeof_CvContour,
                                storage,
                                cv.CV_POLY_APPROX_DP, 3, 1)

    # create the window for the contours
    highgui.cvNamedWindow ("contours", 1)

    # create the trackbar, to enable the change of the displayed level
    highgui.cvCreateTrackbar ("levels+3", "contours", 3, 7, on_trackbar)

    # call one time the callback, so we will have the 1st display done
    on_trackbar (_DEFAULT_LEVEL)

    # wait a key pressed to end
    highgui.cvWaitKey (0)

View File

@@ -1,78 +0,0 @@
#! /usr/bin/env python
# Convex-hull demo: repeatedly scatters random points and draws their hull.
print "OpenCV Python version of convexhull"

# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui

# to generate random values
import random

# how many points we want at max
_MAX_POINTS = 100
if __name__ == '__main__':

    # main object to get random values from
    my_random = random.Random ()

    # create the image where we want to display results
    image = cv.cvCreateImage (cv.cvSize (500, 500), 8, 3)

    # create the window to put the image in
    highgui.cvNamedWindow ('hull', highgui.CV_WINDOW_AUTOSIZE)

    while True:
        # do forever

        # get a random number of points
        count = my_random.randrange (0, _MAX_POINTS) + 1

        # initialisations
        points = []

        for i in range (count):
            # generate a random point in the middle half of the image
            points.append (cv.cvPoint (
                my_random.randrange (0, image.width / 2) + image.width / 4,
                my_random.randrange (0, image.width / 2) + image.width / 4
                ))

        # compute the convex hull (as indices into `points`)
        hull = cv.cvConvexHull2 (points, cv.CV_CLOCKWISE, 0)

        # start with an empty image
        cv.cvSetZero (image)

        for i in range (count):
            # draw all the points
            cv.cvCircle (image, points [i], 2,
                         cv.cvScalar (0, 0, 255, 0),
                         cv.CV_FILLED, cv.CV_AA, 0)

        # start the line from the last point
        pt0 = points [hull [-1]]

        for point_index in hull:
            # connect the previous point to the current one

            # get the current one
            pt1 = points [point_index]

            # draw
            cv.cvLine (image, pt0, pt1,
                       cv.cvScalar (0, 255, 0, 0),
                       1, cv.CV_AA, 0)

            # now, current one will be the previous one for the next iteration
            pt0 = pt1

        # display the final image
        highgui.cvShowImage ('hull', image)

        # handle events, and wait a key pressed
        k = highgui.cvWaitKey (0)
        if k == '\x1b':
            # user has press the ESC key, so exit
            break

View File

@@ -1,149 +0,0 @@
#!/usr/bin/python
"""
the script demostrates iterative construction of
delaunay triangulation and voronoi tesselation

Original Author (C version): ?
Converted to Python by: Roman Stanchak
"""
from opencv.cv import *
from opencv.highgui import *
from random import random,randint
def draw_subdiv_point( img, fp, color ):
    # Draw a subdivision vertex as a filled radius-3 circle at (fp.x, fp.y).
    cvCircle( img, cvPoint(cvRound(fp.x), cvRound(fp.y)), 3, color, CV_FILLED, 8, 0 );
def draw_subdiv_edge( img, edge, color ):
    """Draw one quad-edge as a line segment, if both endpoints exist."""
    org_pt = cvSubdiv2DEdgeOrg(edge);
    dst_pt = cvSubdiv2DEdgeDst(edge);

    if org_pt and dst_pt :
        org = org_pt.pt;
        dst = dst_pt.pt;

        iorg = cvPoint( cvRound( org.x ), cvRound( org.y ));
        idst = cvPoint( cvRound( dst.x ), cvRound( dst.y ));

        cvLine( img, iorg, idst, color, 1, CV_AA, 0 );
def draw_subdiv( img, subdiv, delaunay_color, voronoi_color ):
    """Draw every Delaunay edge plus its rotated (Voronoi) dual edge."""
    total = subdiv.edges.total;
    elem_size = subdiv.edges.elem_size;

    for edge in subdiv.edges:
        edge_rot = cvSubdiv2DRotateEdge( edge, 1 )

        if( CV_IS_SET_ELEM( edge )):
            draw_subdiv_edge( img, edge_rot, voronoi_color );
            draw_subdiv_edge( img, edge, delaunay_color );
def locate_point( subdiv, fp, img, active_color ):
    """Highlight the triangle (edge cycle) containing point fp, plus fp itself."""
    [res, e0, p] = cvSubdiv2DLocate( subdiv, fp );

    if e0:
        e = e0
        # walk the left-face edge cycle back around to the starting edge
        while True:
            draw_subdiv_edge( img, e, active_color );
            e = cvSubdiv2DGetEdge(e,CV_NEXT_AROUND_LEFT);
            if e == e0:
                break
        draw_subdiv_point( img, fp, active_color );
def draw_subdiv_facet( img, edge ):
    """Fill the facet to the left of `edge` with a random color and outline it."""
    t = edge;
    count = 0;

    # count number of edges in facet
    while count == 0 or t != edge:
        count+=1
        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );

    buf = []

    # gather points
    t = edge;
    for i in range(count):
        # NOTE(review): the quad-edge handle doubles as a tagged pointer;
        # values <= 4 appear to mean "invalid edge" here -- confirm.
        assert t>4
        pt = cvSubdiv2DEdgeOrg( t );
        if not pt:
            break;
        buf.append( cvPoint( cvRound(pt.pt.x), cvRound(pt.pt.y) ) );
        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );

    # only draw if every vertex of the facet was collected
    if( len(buf)==count ):
        pt = cvSubdiv2DEdgeDst( cvSubdiv2DRotateEdge( edge, 1 ));
        cvFillConvexPoly( img, buf, CV_RGB(randint(0,255),randint(0,255),randint(0,255)), CV_AA, 0 );
        cvPolyLine( img, [buf], 1, CV_RGB(0,0,0), 1, CV_AA, 0);
        draw_subdiv_point( img, pt.pt, CV_RGB(0,0,0));
def paint_voronoi( subdiv, img ):
    """Fill every Voronoi cell (both duals of each edge) with random colors."""
    total = subdiv.edges.total;
    elem_size = subdiv.edges.elem_size;

    cvCalcSubdivVoronoi2D( subdiv );

    for edge in subdiv.edges:
        if( CV_IS_SET_ELEM( edge )):
            # left
            draw_subdiv_facet( img, cvSubdiv2DRotateEdge( edge, 1 ));
            # right
            draw_subdiv_facet( img, cvSubdiv2DRotateEdge( edge, 3 ));
if __name__ == '__main__':
    win = "source";
    rect = cvRect( 0, 0, 600, 600 );

    active_facet_color = CV_RGB( 255, 0, 0 );
    delaunay_color = CV_RGB( 0,0,0);
    voronoi_color = CV_RGB(0, 180, 0);
    bkgnd_color = CV_RGB(255,255,255);

    img = cvCreateImage( cvSize(rect.width,rect.height), 8, 3 );
    cvSet( img, bkgnd_color );

    cvNamedWindow( win, 1 );

    storage = cvCreateMemStorage(0);
    subdiv = cvCreateSubdivDelaunay2D( rect, storage );

    print "Delaunay triangulation will be build now interactively."
    print "To stop the process, press any key\n";

    # insert up to 200 random points, redrawing after each insertion
    for i in range(200):
        fp = cvPoint2D32f( random()*(rect.width-10)+5, random()*(rect.height-10)+5 )

        locate_point( subdiv, fp, img, active_facet_color );
        cvShowImage( win, img );

        if( cvWaitKey( 100 ) >= 0 ):
            break;

        cvSubdivDelaunay2DInsert( subdiv, fp );
        cvCalcSubdivVoronoi2D( subdiv );
        cvSet( img, bkgnd_color );
        draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
        cvShowImage( win, img );

        if( cvWaitKey( 100 ) >= 0 ):
            break;

    # final display: the Voronoi tessellation of all inserted points
    cvSet( img, bkgnd_color );
    paint_voronoi( subdiv, img );
    cvShowImage( win, img );

    cvWaitKey(0);

    cvDestroyWindow( win );

View File

@@ -1,102 +0,0 @@
#!/usr/bin/python
# Brightness/contrast demo: two trackbars drive a LUT applied to a grayscale
# image; the resulting intensity histogram is drawn alongside.
from opencv.cv import *
from opencv.highgui import *
import sys

file_name = "../c/baboon.jpg";

# initial trackbar positions (100 == neutral, i.e. offset 0)
_brightness = 100
_contrast = 100
Gbrightness = 100
Gcontrast = 100

hist_size = 64
range_0=[0,256]
ranges = [ range_0 ]

src_image=None
dst_image=None
hist_image=None
hist=None
# 256-entry lookup table mapping input gray level -> output gray level
lut=cvCreateMat(256,1,CV_8U)
# brightness/contrast callback function
def update_brightness( val ):
    """Trackbar callback: store the new brightness and refresh the display."""
    global Gbrightness # global tag is required, or we get UnboundLocalError
    Gbrightness = val
    update_brightcont( )
def update_contrast( val ):
    """Trackbar callback: store the new contrast and refresh the display."""
    global Gcontrast # global tag is required, or we get UnboundLocalError
    Gcontrast = val
    update_brightcont( )
def update_brightcont():
    """Rebuild the brightness/contrast LUT, apply it, and redraw histogram."""
    # no global tag required for images ???
    brightness = Gbrightness - 100;
    contrast = Gcontrast - 100;
    max_value = 0;

    # The algorithm is by Werner D. Streidt
    # (http://visca.com/ffactory/archives/5-99/msg00021.html)
    if( contrast > 0 ):
        delta = 127.*contrast/100;
        a = 255./(255. - delta*2);
        b = a*(brightness - delta);
    else:
        delta = -128.*contrast/100;
        a = (256.-delta*2)/255.;
        b = a*brightness + delta;

    # fill the LUT with the clamped linear map v = a*i + b
    for i in range(256):
        v = cvRound(a*i + b);
        if( v < 0 ):
            v = 0;
        if( v > 255 ):
            v = 255;
        lut[i] = v;

    cvLUT( src_image, dst_image, lut );
    cvShowImage( "image", dst_image );

    cvCalcHist( dst_image, hist, 0, None );
    cvZero( dst_image );
    min_value, max_value = cvGetMinMaxHistValue( hist );
    # scale bins so the tallest bar spans the histogram image height
    cvScale( hist.bins, hist.bins, float(hist_image.height)/max_value, 0 );
    #cvNormalizeHist( hist, 1000 );

    cvSet( hist_image, cvScalarAll(255));
    bin_w = cvRound(float(hist_image.width)/hist_size);

    for i in range(hist_size):
        cvRectangle( hist_image, cvPoint(i*bin_w, hist_image.height),
                     cvPoint((i+1)*bin_w, hist_image.height - cvRound(cvGetReal1D(hist.bins,i))),
                     cvScalarAll(0), -1, 8, 0 );

    cvShowImage( "histogram", hist_image );
if __name__ == "__main__":
    # Load the source image. HighGUI use.
    if len(sys.argv)>1:
        file_name = sys.argv[1]

    # 0 = load as grayscale
    src_image = cvLoadImage( file_name, 0 );
    if not src_image:
        print "Image was not loaded.";
        sys.exit(-1)

    dst_image = cvCloneImage(src_image);
    hist_image = cvCreateImage(cvSize(320,200), 8, 1);
    hist = cvCreateHist([hist_size], CV_HIST_ARRAY, ranges, 1);

    cvNamedWindow("image", 0);
    cvNamedWindow("histogram", 0);
    # trackbars drive the callbacks above; 100 is the neutral position
    cvCreateTrackbar("brightness", "image", _brightness, 200, update_brightness);
    cvCreateTrackbar("contrast", "image", _contrast, 200, update_contrast);

    update_brightcont();
    cvWaitKey(0);

View File

@@ -1,107 +0,0 @@
#!/usr/bin/python
from opencv.cv import *
from opencv.highgui import *
import sys
# Rearrange the quadrants of Fourier image so that the origin is at
# the image center
# src & dst arrays of equal size & type
def cvShiftDFT(src_arr, dst_arr ):
    """Swap diagonally-opposite quadrants of a DFT result so the
    zero-frequency component moves to the image center.

    src_arr and dst_arr must have equal size and type; they may be the
    same array, in which case the swap is done through a temporary
    quarter-size buffer.
    """
    size = cvGetSize(src_arr)
    dst_size = cvGetSize(dst_arr)

    if(dst_size.width != size.width or
       dst_size.height != size.height) :
        cvError( CV_StsUnmatchedSizes, "cvShiftDFT", "Source and Destination arrays must have equal sizes", __FILE__, __LINE__ )

    if(src_arr is dst_arr):
        tmp = cvCreateMat(size.height/2, size.width/2, cvGetElemType(src_arr))

    cx = size.width/2
    cy = size.height/2 # image center

    # source quadrants: q1 top-left, q2 top-right, q3 bottom-right, q4 bottom-left
    q1 = cvGetSubRect( src_arr, cvRect(0,0,cx, cy) )
    q2 = cvGetSubRect( src_arr, cvRect(cx,0,cx,cy) )
    q3 = cvGetSubRect( src_arr, cvRect(cx,cy,cx,cy) )
    q4 = cvGetSubRect( src_arr, cvRect(0,cy,cx,cy) )
    # BUG FIX: the destination quadrants were taken from src_arr, so an
    # out-of-place shift never wrote to dst_arr at all, and the type
    # check below compared src_arr with itself.
    d1 = cvGetSubRect( dst_arr, cvRect(0,0,cx,cy) )
    d2 = cvGetSubRect( dst_arr, cvRect(cx,0,cx,cy) )
    d3 = cvGetSubRect( dst_arr, cvRect(cx,cy,cx,cy) )
    d4 = cvGetSubRect( dst_arr, cvRect(0,cy,cx,cy) )

    if(src_arr is not dst_arr):
        if( not CV_ARE_TYPES_EQ( q1, d1 )):
            cvError( CV_StsUnmatchedFormats, "cvShiftDFT", "Source and Destination arrays must have the same format", __FILE__, __LINE__ )

        # out-of-place: copy each quadrant to its diagonal opposite
        cvCopy(q3, d1)
        cvCopy(q4, d2)
        cvCopy(q1, d3)
        cvCopy(q2, d4)
    else:
        # in-place: swap opposite quadrants through the temporary buffer
        cvCopy(q3, tmp)
        cvCopy(q1, q3)
        cvCopy(tmp, q1)
        cvCopy(q4, tmp)
        cvCopy(q2, q4)
        cvCopy(tmp, q2)
if __name__ == "__main__":
    # load the input as grayscale; its pixels become the real plane
    im = cvLoadImage( sys.argv[1], CV_LOAD_IMAGE_GRAYSCALE)

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1)
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1)
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2)

    cvScale(im, realInput, 1.0, 0.0)
    cvZero(imaginaryInput)
    cvMerge(realInput, imaginaryInput, None, None, complexInput)

    # pad up to sizes where the DFT is fastest
    dft_M = cvGetOptimalDFTSize( im.height - 1 )
    dft_N = cvGetOptimalDFTSize( im.width - 1 )

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 )
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1)
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1)

    # copy A to dft_A and pad dft_A with zeros
    tmp = cvGetSubRect( dft_A, cvRect(0,0, im.width, im.height))
    cvCopy( complexInput, tmp, None )
    if(dft_A.width > im.width):
        tmp = cvGetSubRect( dft_A, cvRect(im.width,0, dft_N - im.width, im.height))
        cvZero( tmp )

    # no need to pad bottom part of dft_A with zeros because of
    # use nonzero_rows parameter in cvDFT() call below
    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput.height )

    cvNamedWindow("win", 0)
    cvNamedWindow("magnitude", 0)
    cvShowImage("win", im)

    # Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, None, None )

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0)
    cvPow( image_Im, image_Im, 2.0)
    cvAdd( image_Re, image_Im, image_Re, None)
    cvPow( image_Re, image_Re, 0.5 )

    # Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, None ) # 1 + Mag
    cvLog( image_Re, image_Re ) # log(1 + Mag)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    cvShiftDFT( image_Re, image_Re )

    # NOTE(review): "min"/"max" shadow the Python builtins from here on
    min, max, pt1, pt2 = cvMinMaxLoc(image_Re)
    # normalize the log-magnitude to [0, 1] for display
    cvScale(image_Re, image_Re, 1.0/(max-min), 1.0*(-min)/(max-min))
    cvShowImage("magnitude", image_Re)

    cvWaitKey(0)

View File

@@ -1,71 +0,0 @@
#!/usr/bin/python
import sys
from opencv.cv import *
from opencv.highgui import *
# Distance-transform demo: state shared with the trackbar callback.
wndname = "Distance transform";
tbarname = "Threshold";

# The output images
dist = 0;      # 32F distance map
dist8u1 = 0;   # 8U channels derived from the distance map
dist8u2 = 0;
dist8u = 0;    # 3-channel display image
dist32s = 0;   # 32S intermediate

gray = 0;      # input image (grayscale)
edge = 0;      # thresholded input fed to cvDistTransform
# define a trackbar callback
def on_trackbar( edge_thresh ):
    """Threshold the gray image at edge_thresh, compute the L2 distance
    transform and display a pseudo-colored version of it."""
    cvThreshold( gray, edge, float(edge_thresh), float(edge_thresh), CV_THRESH_BINARY );
    #Distance transform
    cvDistTransform( edge, dist, CV_DIST_L2, CV_DIST_MASK_5, None, None );

    # stretch and gamma-compress the distances for display
    cvConvertScale( dist, dist, 5000.0, 0 );
    cvPow( dist, dist, 0.5 );

    # build two complementary 8-bit channels (value and 255-value)
    cvConvertScale( dist, dist32s, 1.0, 0.5 );
    cvAndS( dist32s, cvScalarAll(255), dist32s, None );
    cvConvertScale( dist32s, dist8u1, 1, 0 );
    cvConvertScale( dist32s, dist32s, -1, 0 );
    cvAddS( dist32s, cvScalarAll(255), dist32s, None );
    cvConvertScale( dist32s, dist8u2, 1, 0 );
    cvMerge( dist8u1, dist8u2, dist8u2, None, dist8u );
    cvShowImage( wndname, dist8u );
if __name__ == "__main__":
    edge_thresh = 100;

    filename = "../c/stuff.jpg"
    if len(sys.argv) > 1:
        filename = sys.argv[1]

    # load the input directly as grayscale
    gray = cvLoadImage( filename, 0 )
    if not gray:
        print "Failed to load %s" % filename
        sys.exit(-1)

    # Create the output image
    dist = cvCreateImage( cvSize(gray.width,gray.height), IPL_DEPTH_32F, 1 );
    dist8u1 = cvCloneImage( gray );
    dist8u2 = cvCloneImage( gray );
    dist8u = cvCreateImage( cvSize(gray.width,gray.height), IPL_DEPTH_8U, 3 );
    dist32s = cvCreateImage( cvSize(gray.width,gray.height), IPL_DEPTH_32S, 1 );

    # Convert to grayscale
    edge = cvCloneImage( gray );

    # Create a window
    cvNamedWindow( wndname, 1 );

    # create a toolbar
    cvCreateTrackbar( tbarname, wndname, edge_thresh, 255, on_trackbar );

    # Show the image
    on_trackbar(edge_thresh);

    # Wait for a key stroke; the same function arranges events processing
    cvWaitKey(0);

View File

@@ -1,164 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of drawing"
# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui
# for making random numbers
from random import Random
def random_color (random):
    """
    Return a random color
    """
    # draw one 24-bit value and unpack it into the three channels
    packed = random.randint (0, 0xFFFFFF)
    blue = packed & 0xff
    green = (packed >> 8) & 0xff
    red = (packed >> 16) & 0xff
    return cv.cvScalar (blue, green, red)
if __name__ == '__main__':

    # Drawing-primitives demo: lines, rectangles, ellipses, polylines,
    # circles and text, all with random geometry and colors.

    # some "constants"
    width = 1000
    height = 700
    window_name = "Drawing Demo"
    number = 100           # primitives drawn per section
    delay = 5              # ms pause between primitives
    line_type = cv.CV_AA # change it to 8 to see non-antialiased graphics

    # create the source image
    image = cv.cvCreateImage (cv.cvSize (width, height), 8, 3)

    # create window and display the original picture in it
    highgui.cvNamedWindow (window_name, 1)
    cv.cvSetZero (image)
    highgui.cvShowImage (window_name, image)

    # create the random number
    random = Random ()

    # draw some lines (endpoints may fall outside the visible canvas)
    for i in range (number):
        pt1 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        pt2 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        cv.cvLine (image, pt1, pt2,
                   random_color (random),
                   random.randrange (0, 10),
                   line_type, 0)

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # draw some rectangles (thickness -1 means filled)
    for i in range (number):
        pt1 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        pt2 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        cv.cvRectangle (image, pt1, pt2,
                        random_color (random),
                        random.randrange (-1, 9),
                        line_type, 0)

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # draw some ellipes
    for i in range (number):
        pt1 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        sz = cv.cvSize (random.randrange (0, 200),
                        random.randrange (0, 200))
        angle = random.randrange (0, 1000) * 0.180
        cv.cvEllipse (image, pt1, sz, angle, angle - 100, angle + 200,
                      random_color (random),
                      random.randrange (-1, 9),
                      line_type, 0)

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # init the list of polylines: nb_polylines polygons of polylines_size points
    nb_polylines = 2
    polylines_size = 3
    pt = [0,] * nb_polylines
    for a in range (nb_polylines):
        pt [a] = [0,] * polylines_size

    # draw some polylines
    for i in range (number):
        for a in range (nb_polylines):
            for b in range (polylines_size):
                pt [a][b] = cv.cvPoint (random.randrange (-width, 2 * width),
                                        random.randrange (-height, 2 * height))
        cv.cvPolyLine (image, pt, 1,
                       random_color (random),
                       random.randrange (1, 9),
                       line_type, 0)

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # draw some filled polylines
    for i in range (number):
        for a in range (nb_polylines):
            for b in range (polylines_size):
                pt [a][b] = cv.cvPoint (random.randrange (-width, 2 * width),
                                        random.randrange (-height, 2 * height))
        cv.cvFillPoly (image, pt,
                       random_color (random),
                       line_type, 0)

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # draw some circles
    for i in range (number):
        pt1 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        cv.cvCircle (image, pt1, random.randrange (0, 300),
                     random_color (random),
                     random.randrange (-1, 9),
                     line_type, 0)

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # draw some text with random fonts, scales and thicknesses
    for i in range (number):
        pt1 = cv.cvPoint (random.randrange (-width, 2 * width),
                          random.randrange (-height, 2 * height))
        font = cv.cvInitFont (random.randrange (0, 8),
                              random.randrange (0, 100) * 0.05 + 0.01,
                              random.randrange (0, 100) * 0.05 + 0.01,
                              random.randrange (0, 5) * 0.1,
                              random.randrange (0, 10),
                              line_type)

        cv.cvPutText (image, "Testing text rendering!",
                      pt1, font,
                      random_color (random))

        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # prepare a text, and get it's properties
    font = cv.cvInitFont (cv.CV_FONT_HERSHEY_COMPLEX,
                          3, 3, 0.0, 5, line_type)
    text_size, ymin = cv.cvGetTextSize ("OpenCV forever!", font)
    # center the banner text
    pt1.x = (width - text_size.width) / 2
    pt1.y = (height + text_size.height) / 2
    image2 = cv.cvCloneImage(image)

    # now, draw some OpenCV pub ;-)
    # fade the scene to black while the banner brightens
    for i in range (255):
        cv.cvSubS (image2, cv.cvScalarAll (i), image, None)
        cv.cvPutText (image, "OpenCV forever!",
                      pt1, font, cv.cvScalar (255, i, i))
        highgui.cvShowImage (window_name, image)
        highgui.cvWaitKey (delay)

    # wait some key to end
    highgui.cvWaitKey (0)

View File

@@ -1,64 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of edge"
import sys
# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui
# some definitions
win_name = "Edge"
trackbar_name = "Threshold"
# the callback on the trackbar
def on_trackbar (position):
    """Trackbar callback: run Canny with thresholds (position, 3*position)
    and show the original colors only where edges were found."""
    cv.cvSmooth (gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot (gray, edge)

    # run the edge dector on gray scale
    cv.cvCanny (gray, edge, position, position * 3, 3)

    # reset
    cv.cvSetZero (col_edge)

    # copy edge points (edge acts as the copy mask)
    cv.cvCopy (image, col_edge, edge)

    # show the image
    highgui.cvShowImage (win_name, col_edge)
if __name__ == '__main__':
    filename = "../c/fruits.jpg"
    if len(sys.argv)>1:
        filename = sys.argv[1]

    # load the image gived on the command line
    image = highgui.cvLoadImage (filename)
    if not image:
        print "Error loading image '%s'" % filename
        sys.exit(-1)

    # create the output image
    col_edge = cv.cvCreateImage (cv.cvSize (image.width, image.height), 8, 3)

    # convert to grayscale
    gray = cv.cvCreateImage (cv.cvSize (image.width, image.height), 8, 1)
    edge = cv.cvCreateImage (cv.cvSize (image.width, image.height), 8, 1)
    cv.cvCvtColor (image, gray, cv.CV_BGR2GRAY)

    # create the window
    highgui.cvNamedWindow (win_name, highgui.CV_WINDOW_AUTOSIZE)

    # create the trackbar
    highgui.cvCreateTrackbar (trackbar_name, win_name, 1, 100, on_trackbar)

    # show the image
    on_trackbar (0)

    # wait a key pressed to end
    highgui.cvWaitKey (0)

View File

@@ -1,128 +0,0 @@
#!/usr/bin/python
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak
"""
import sys
from opencv.cv import *
from opencv.highgui import *
# Global Variables
cascade = None
storage = cvCreateMemStorage(0)
cascade_name = "../../data/haarcascades/haarcascade_frontalface_alt.xml"
input_name = "../c/lena.jpg"

# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3   # input is downscaled by this factor before detection
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0
def detect_and_draw( img ):
    """Run the global Haar cascade on a downscaled grayscale copy of img,
    draw a red rectangle around each detection and show the result."""
    # allocate temporary images
    gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
    small_img = cvCreateImage((cvRound(img.width/image_scale),
                               cvRound (img.height/image_scale)), 8, 1 )

    # convert color input image to grayscale
    cvCvtColor( img, gray, CV_BGR2GRAY )

    # scale input image for faster processing
    cvResize( gray, small_img, CV_INTER_LINEAR )

    cvEqualizeHist( small_img, small_img )

    cvClearMemStorage( storage )

    if( cascade ):
        t = cvGetTickCount()
        faces = cvHaarDetectObjects( small_img, cascade, storage,
                                     haar_scale, min_neighbors, haar_flags, min_size )
        t = cvGetTickCount() - t
        print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
        if faces:
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = cvPoint( int(face_rect.x*image_scale), int(face_rect.y*image_scale))
                pt2 = cvPoint( int((face_rect.x+face_rect.width)*image_scale),
                               int((face_rect.y+face_rect.height)*image_scale) )
                cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 )

    cvShowImage( "result", img )
if __name__ == '__main__':
    # parse the optional --cascade= flag and the input (file or camera index)
    if len(sys.argv) > 1:
        if sys.argv[1].startswith("--cascade="):
            cascade_name = sys.argv[1][ len("--cascade="): ]
            if len(sys.argv) > 2:
                input_name = sys.argv[2]
        elif sys.argv[1] == "--help" or sys.argv[1] == "-h":
            print "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n"
            sys.exit(-1)
        else:
            input_name = sys.argv[1]

    # the OpenCV API says this function is obsolete, but we can't
    # cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
    # the size parameter is ignored
    cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) )
    if not cascade:
        print "ERROR: Could not load classifier cascade"
        sys.exit(-1)

    # numeric argument selects a camera; anything else is a file path
    if input_name.isdigit():
        capture = cvCreateCameraCapture( int(input_name) )
    else:
        capture = cvCreateFileCapture( input_name )

    cvNamedWindow( "result", 1 )

    if capture:
        frame_copy = None
        while True:
            frame = cvQueryFrame( capture )
            if not frame:
                cvWaitKey(0)
                break
            if not frame_copy:
                frame_copy = cvCreateImage( cvSize(frame.width,frame.height),
                                            IPL_DEPTH_8U, frame.nChannels )
            # make the copy top-left origin, flipping bottom-left frames
            if frame.origin == IPL_ORIGIN_TL:
                cvCopy( frame, frame_copy )
            else:
                cvFlip( frame, frame_copy, 0 )

            detect_and_draw( frame_copy )

            if( cvWaitKey( 10 ) >= 0 ):
                break
    else:
        # no capture device: treat the input as a still image
        image = cvLoadImage( input_name, 1 )
        if image:
            detect_and_draw( image )
            cvWaitKey(0)

    cvDestroyWindow("result")

View File

@@ -1,160 +0,0 @@
#!/usr/bin/python
import sys
import random
from opencv.cv import *
from opencv.highgui import *
# Flood-fill demo: state shared between __main__, the trackbar callbacks
# and the mouse callback.
color_img0=None;        # untouched original (for 'r' restore)
mask=None;              # flood-fill mask (image size + 2 in each dimension)
color_img=None;         # working color image
gray_img0 = None;       # grayscale original
gray_img = None;        # working grayscale image
ffill_case = 1;         # 0 simple / 1 fixed range / 2 floating range
lo_diff = 20            # lower/upper color difference thresholds
up_diff = 20;
connectivity = 4;       # 4 or 8, toggled from the keyboard
is_color = 1;
is_mask = 0;
new_mask_val = 255;     # value written into the mask by cvFloodFill
def update_lo( pos ):
    """Trackbar callback: set the flood-fill lower difference threshold."""
    # BUG FIX: without the global declaration the assignment bound a
    # function-local name, so moving the "lo_diff" trackbar had no effect.
    global lo_diff
    lo_diff = pos
def update_up( pos ):
    """Trackbar callback: set the flood-fill upper difference threshold."""
    # BUG FIX: without the global declaration the assignment bound a
    # function-local name, so moving the "up_diff" trackbar had no effect.
    global up_diff
    up_diff = pos
def on_mouse( event, x, y, flags, param ):
if( not color_img ):
return;
if event==CV_EVENT_LBUTTONDOWN:
comp = CvConnectedComp()
my_mask = None
seed = cvPoint(x,y);
if ffill_case==0:
lo = up = 0
flags = connectivity + (new_mask_val << 8)
else:
lo = lo_diff;
up = up_diff;
flags = connectivity + (new_mask_val << 8) + CV_FLOODFILL_FIXED_RANGE
b = random.randint(0,255)
g = random.randint(0,255)
r = random.randint(0,255)
if( is_mask ):
my_mask = mask
cvThreshold( mask, mask, 1, 128, CV_THRESH_BINARY );
if( is_color ):
color = CV_RGB( r, g, b );
cvFloodFill( color_img, seed, color, CV_RGB( lo, lo, lo ),
CV_RGB( up, up, up ), comp, flags, my_mask );
cvShowImage( "image", color_img );
else:
brightness = cvRealScalar((r*2 + g*7 + b + 5)/10);
cvFloodFill( gray_img, seed, brightness, cvRealScalar(lo),
cvRealScalar(up), comp, flags, my_mask );
cvShowImage( "image", gray_img );
print "%g pixels were repainted" % comp.area;
if( is_mask ):
cvShowImage( "mask", mask );
if __name__ == "__main__":
filename = "../c/fruits.jpg"
if len(sys.argv)>1:
filename=argv[1]
color_img0 = cvLoadImage(filename,1)
if not color_img0:
print "Could not open %s" % filename
sys.exit(-1)
print "Hot keys:"
print "\tESC - quit the program"
print "\tc - switch color/grayscale mode"
print "\tm - switch mask mode"
print "\tr - restore the original image"
print "\ts - use null-range floodfill"
print "\tf - use gradient floodfill with fixed(absolute) range"
print "\tg - use gradient floodfill with floating(relative) range"
print "\t4 - use 4-connectivity mode"
print "\t8 - use 8-connectivity mode"
color_img = cvCloneImage( color_img0 );
gray_img0 = cvCreateImage( cvSize(color_img.width, color_img.height), 8, 1 );
cvCvtColor( color_img, gray_img0, CV_BGR2GRAY );
gray_img = cvCloneImage( gray_img0 );
mask = cvCreateImage( cvSize(color_img.width + 2, color_img.height + 2), 8, 1 );
cvNamedWindow( "image", 1 );
cvCreateTrackbar( "lo_diff", "image", lo_diff, 255, update_lo);
cvCreateTrackbar( "up_diff", "image", up_diff, 255, update_up);
cvSetMouseCallback( "image", on_mouse );
while True:
if( is_color ):
cvShowImage( "image", color_img );
else:
cvShowImage( "image", gray_img );
c = cvWaitKey(0);
if c=='\x1b':
print("Exiting ...");
sys.exit(0)
elif c=='c':
if( is_color ):
print("Grayscale mode is set");
cvCvtColor( color_img, gray_img, CV_BGR2GRAY );
is_color = 0;
else:
print("Color mode is set");
cvCopy( color_img0, color_img, None );
cvZero( mask );
is_color = 1;
elif c=='m':
if( is_mask ):
cvDestroyWindow( "mask" );
is_mask = 0;
else:
cvNamedWindow( "mask", 0 );
cvZero( mask );
cvShowImage( "mask", mask );
is_mask = 1;
elif c=='r':
print("Original image is restored");
cvCopy( color_img0, color_img, None );
cvCopy( gray_img0, gray_img, None );
cvZero( mask );
elif c=='s':
print("Simple floodfill mode is set");
ffill_case = 0;
elif c=='f':
print("Fixed Range floodfill mode is set");
ffill_case = 1;
elif c=='g':
print("Gradient (floating range) floodfill mode is set");
ffill_case = 2;
elif c=='4':
print("4-connectivity mode is set");
connectivity = 4;
elif c=='8':
print("8-connectivity mode is set");
connectivity = 8;

View File

@@ -1,125 +0,0 @@
#!/usr/bin/python
"""
This program is demonstration for ellipse fitting. Program finds
contours and approximate it by ellipses.
Trackbar specify threshold parametr.
White lines is contours. Red lines is fitting ellipses.
Original C implementation by: Denis Burenkov.
Python implementation by: Roman Stanchak
"""
import sys
from opencv import cv
from opencv import highgui
image02 = None
image03 = None
image04 = None
def process_image( slider_pos ):
    """
    Trackbar callback: threshold image03 at slider_pos, find all contours,
    draw them into image04 and overlay the ellipse fitted to each contour
    that has at least 6 points.
    """
    stor = cv.cvCreateMemStorage(0);

    # Threshold the source image. This needful for cv.cvFindContours().
    cv.cvThreshold( image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY );

    # Find all contours.
    nb_contours, cont = cv.cvFindContours (image02,
                                           stor,
                                           cv.sizeof_CvContour,
                                           cv.CV_RETR_LIST,
                                           cv.CV_CHAIN_APPROX_NONE,
                                           cv.cvPoint (0,0))

    # Clear images. IPL use.
    cv.cvZero(image02);
    cv.cvZero(image04);

    # This cycle draw all contours and approximate it by ellipses.
    for c in cont.hrange():
        count = c.total; # This is number point in contour
        # Number point must be more than or equal to 6 (for cv.cvFitEllipse_32f).
        if( count < 6 ):
            continue;

        # Alloc memory for contour point set.
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f= cv.cvCreateMat( 1, count, cv.CV_32FC2)

        # Get contour point set.
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX));

        # Convert CvPoint set to CvBox2D32f set.
        cv.cvConvert( PointArray, PointArray2D32f )

        box = cv.CvBox2D()

        # Fits ellipse to current contour.
        box = cv.cvFitEllipse2(PointArray2D32f);

        # Draw current contour.
        cv.cvDrawContours(image04, c, cv.CV_RGB(255,255,255), cv.CV_RGB(255,255,255),0,1,8,cv.cvPoint(0,0));

        # Convert ellipse data from float to integer representation.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x);
        center.y = cv.cvRound(box.center.y);
        # cvEllipse expects half-axes, the box carries full width/height
        size.width = cv.cvRound(box.size.width*0.5);
        size.height = cv.cvRound(box.size.height*0.5);
        box.angle = -box.angle;

        # Draw ellipse.
        cv.cvEllipse(image04, center, size,
                     box.angle, 0, 360,
                     cv.CV_RGB(0,0,255), 1, cv.CV_AA, 0);

    # Show image. HighGUI use.
    highgui.cvShowImage( "Result", image04 );
if __name__ == '__main__':
    argc = len(sys.argv)
    filename = "../c/stuff.jpg"
    if(argc == 2):
        filename = sys.argv[1]

    slider_pos = 70

    # load image and force it to be grayscale
    image03 = highgui.cvLoadImage(filename, 0)
    if not image03:
        print "Could not load image " + filename
        sys.exit(-1)

    # Create the destination images
    image02 = cv.cvCloneImage( image03 );
    image04 = cv.cvCloneImage( image03 );

    # Create windows.
    highgui.cvNamedWindow("Source", 1);
    highgui.cvNamedWindow("Result", 1);

    # Show the image.
    highgui.cvShowImage("Source", image03);

    # Create toolbars. HighGUI use.
    highgui.cvCreateTrackbar( "Threshold", "Result", slider_pos, 255, process_image );

    process_image( 1 );

    #Wait for a key stroke; the same function arranges events processing
    print "Press any key to exit"
    highgui.cvWaitKey(0);

    highgui.cvDestroyWindow("Source");
    highgui.cvDestroyWindow("Result");

View File

@@ -1,59 +0,0 @@
#!/usr/bin/python
# This is a standalone program. Pass an image name as a first parameter of the program.
import sys
from math import sin,cos,sqrt
from opencv.cv import *
from opencv.highgui import *
# toggle between CV_HOUGH_STANDARD and CV_HOUGH_PROBABILISTIC
USE_STANDARD=0

if __name__ == "__main__":
    filename = "../../docs/ref/pics/building.jpg"
    if len(sys.argv)>1:
        filename = sys.argv[1]

    # load as grayscale
    src=cvLoadImage(filename, 0);
    if not src:
        print "Error opening image %s" % filename
        sys.exit(-1)

    dst = cvCreateImage( cvGetSize(src), 8, 1 );
    color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
    storage = cvCreateMemStorage(0);
    lines = 0;
    # edges first, then Hough on the edge map
    cvCanny( src, dst, 50, 200, 3 );
    cvCvtColor( dst, color_dst, CV_GRAY2BGR );

    if USE_STANDARD:
        # standard transform returns (rho, theta) pairs; convert each to
        # a long segment through the image for display
        lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );
        for i in range(min(lines.total, 100)):
            line = lines[i]
            rho = line[0];
            theta = line[1];
            pt1 = CvPoint();
            pt2 = CvPoint();
            a = cos(theta);
            b = sin(theta);
            x0 = a*rho
            y0 = b*rho
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
            cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
    else:
        # probabilistic transform returns finite segments directly
        lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 50, 50, 10 );
        for line in lines:
            cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 3, 8 );

    cvNamedWindow( "Source", 1 );
    cvShowImage( "Source", src );

    cvNamedWindow( "Hough", 1 );
    cvShowImage( "Hough", color_dst );

    cvWaitKey(0);

View File

@@ -1,73 +0,0 @@
#!/usr/bin/python
from opencv.cv import *
from opencv.highgui import *
import sys
# Inpainting demo: images shared between __main__ and the mouse callback.
inpaint_mask = None          # strokes painted by the user (inpainting mask)
img0 = None                  # untouched original (for 'r' restore)
img = None                   # working copy the user paints on
inpainted = None             # output of cvInpaint
prev_pt = cvPoint(-1,-1)     # previous mouse position while dragging
def on_mouse( event, x, y, flags, param ):
    """Mouse callback: while the left button is held, paint a white
    5-px stroke on both the display image and the inpainting mask."""
    global prev_pt
    if not img:
        return

    if event == CV_EVENT_LBUTTONUP or not (flags & CV_EVENT_FLAG_LBUTTON):
        # stroke ended: forget the last position
        prev_pt = cvPoint(-1,-1)
    elif event == CV_EVENT_LBUTTONDOWN:
        prev_pt = cvPoint(x,y)
    elif event == CV_EVENT_MOUSEMOVE and (flags & CV_EVENT_FLAG_LBUTTON) :
        pt = cvPoint(x,y)
        if prev_pt.x < 0:
            prev_pt = pt
        cvLine( inpaint_mask, prev_pt, pt, cvScalarAll(255), 5, 8, 0 )
        cvLine( img, prev_pt, pt, cvScalarAll(255), 5, 8, 0 )
        prev_pt = pt
        cvShowImage( "image", img )
if __name__=="__main__":
    filename = "../c/fruits.jpg"
    if len(sys.argv) >= 2:
        filename = sys.argv[1]

    img0 = cvLoadImage(filename,-1)
    if not img0:
        print "Can't open image '%s'" % filename
        sys.exit(1)

    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tr - restore the original image"
    print "\ti or ENTER - run inpainting algorithm"
    print "\t\t(before running it, paint something on the image)"

    cvNamedWindow( "image", 1 )

    img = cvCloneImage( img0 )
    inpainted = cvCloneImage( img0 )
    inpaint_mask = cvCreateImage( cvGetSize(img), 8, 1 )

    cvZero( inpaint_mask )
    cvZero( inpainted )
    cvShowImage( "image", img )
    # NOTE(review): no "watershed transform" window is ever created in
    # this sample — this call looks like a leftover from watershed.py;
    # verify whether it can simply be removed.
    cvShowImage( "watershed transform", inpainted )
    cvSetMouseCallback( "image", on_mouse, None )

    while True:
        c = cvWaitKey(0)

        if c == '\x1b' or c == 'q':
            break

        if c == 'r':
            # restore the original and clear the painted mask
            cvZero( inpaint_mask )
            cvCopy( img0, img )
            cvShowImage( "image", img )

        if c == 'i' or c == '\012':
            cvNamedWindow( "inpainted image", 1 )
            cvInpaint( img, inpaint_mask, inpainted, 3, CV_INPAINT_TELEA )
            cvShowImage( "inpainted image", inpainted )

View File

@@ -1,92 +0,0 @@
#!/usr/bin/python
"""
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
Measurement is the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
from opencv.cv import *
from opencv.highgui import *
from math import cos, sin, sqrt
if __name__ == "__main__":
    # transition matrix of the constant-angular-speed model
    A = [ [1, 1], [0, 1] ]

    img = cvCreateImage( cvSize(500,500), 8, 3 )
    kalman = cvCreateKalman( 2, 1, 0 )
    state = cvCreateMat( 2, 1, CV_32FC1 )  # (phi, delta_phi)
    process_noise = cvCreateMat( 2, 1, CV_32FC1 )
    measurement = cvCreateMat( 1, 1, CV_32FC1 )
    rng = cvRNG(-1)
    code = -1L

    cvZero( measurement )
    cvNamedWindow( "Kalman", 1 )

    while True:
        # (re)start tracking with a fresh random state and filter setup
        cvRandArr( rng, state, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) )

        kalman.transition_matrix[:] = A
        cvSetIdentity( kalman.measurement_matrix, cvRealScalar(1) )
        cvSetIdentity( kalman.process_noise_cov, cvRealScalar(1e-5) )
        cvSetIdentity( kalman.measurement_noise_cov, cvRealScalar(1e-1) )
        cvSetIdentity( kalman.error_cov_post, cvRealScalar(1))
        cvRandArr( rng, kalman.state_post, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) )

        while True:
            def calc_point(angle):
                # map an angle to a point on a circle around the image center
                return cvPoint( cvRound(img.width/2 + img.width/3*cos(angle)),
                                cvRound(img.height/2 - img.width/3*sin(angle)))

            state_angle = state[0]
            state_pt = calc_point(state_angle)

            prediction = cvKalmanPredict( kalman )
            predict_angle = prediction[0,0]
            predict_pt = calc_point(predict_angle)

            # real angle plus gaussian measurement noise
            cvRandArr( rng, measurement, CV_RAND_NORMAL, cvRealScalar(0),
                       cvRealScalar(sqrt(kalman.measurement_noise_cov[0,0])) )

            # generate measurement
            cvMatMulAdd( kalman.measurement_matrix, state, measurement, measurement )

            measurement_angle = measurement[0,0]
            measurement_pt = calc_point(measurement_angle)

            # plot points
            def draw_cross( center, color, d ):
                cvLine( img, cvPoint( center.x - d, center.y - d ),
                        cvPoint( center.x + d, center.y + d ), color, 1, CV_AA, 0)
                cvLine( img, cvPoint( center.x + d, center.y - d ),
                        cvPoint( center.x - d, center.y + d ), color, 1, CV_AA, 0 )

            # white = real, red = measured, green = predicted
            cvZero( img )
            draw_cross( state_pt, CV_RGB(255,255,255), 3 )
            draw_cross( measurement_pt, CV_RGB(255,0,0), 3 )
            draw_cross( predict_pt, CV_RGB(0,255,0), 3 )
            cvLine( img, state_pt, measurement_pt, CV_RGB(255,0,0), 3, CV_AA, 0 )
            cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, CV_AA, 0 )

            cvKalmanCorrect( kalman, measurement )

            # advance the true state with process noise
            cvRandArr( rng, process_noise, CV_RAND_NORMAL, cvRealScalar(0),
                       cvRealScalar(sqrt(kalman.process_noise_cov[0,0])))
            cvMatMulAdd( kalman.transition_matrix, state, process_noise, state )

            cvShowImage( "Kalman", img )

            code = str(cvWaitKey( 100 ))
            if( code != '-1'):
                break

        if( code == '\x1b' or code == 'q' or code == 'Q' ):
            break

    cvDestroyWindow("Kalman")

View File

@@ -1,66 +0,0 @@
#!/usr/bin/python
from opencv.cv import *
from opencv.highgui import *
from random import randint
MAX_CLUSTERS = 5
if __name__ == "__main__":
    # one display color per cluster (MAX_CLUSTERS entries)
    color_tab = [
        CV_RGB(255,0,0),
        CV_RGB(0,255,0),
        CV_RGB(100,100,255),
        CV_RGB(255,0,255),
        CV_RGB(255,255,0)]
    img = cvCreateImage( cvSize( 500, 500 ), 8, 3 )
    rng = cvRNG(-1)

    cvNamedWindow( "clusters", 1 )

    while True:
        cluster_count = randint(2, MAX_CLUSTERS)
        sample_count = randint(1, 1000)
        points = cvCreateMat( sample_count, 1, CV_32FC2 )
        clusters = cvCreateMat( sample_count, 1, CV_32SC1 )

        # generate random sample from multigaussian distribution
        for k in range(cluster_count):
            center = CvPoint()
            center.x = cvRandInt(rng)%img.width
            center.y = cvRandInt(rng)%img.height
            first = k*sample_count/cluster_count
            last = sample_count
            # NOTE(review): k runs 0..cluster_count-1, so this condition is
            # always true and the "last = sample_count" default above is
            # dead; the final chunk still ends exactly at sample_count.
            if k != cluster_count:
                last = (k+1)*sample_count/cluster_count

            point_chunk = cvGetRows(points, first, last)

            cvRandArr( rng, point_chunk, CV_RAND_NORMAL,
                       cvScalar(center.x,center.y,0,0),
                       cvScalar(img.width*0.1,img.height*0.1,0,0))

        # shuffle samples
        cvRandShuffle( points, rng )

        cvKMeans2( points, cluster_count, clusters,
                   cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ))

        cvZero( img )

        for i in range(sample_count):
            cluster_idx = clusters[i]
            # a multi channel matrix access returns a scalar of
            # dimension 4,0, which is not considerate a cvPoint
            # we have to create a tuple with the first two elements
            pt = (cvRound(points[i][0]), cvRound(points[i][1]))
            cvCircle( img, pt, 2, color_tab[cluster_idx], CV_FILLED, CV_AA, 0 )

        cvShowImage( "clusters", img )

        key = cvWaitKey(0)
        if( key == 27 or key == 'q' or key == 'Q' ): # 'ESC'
            break

    cvDestroyWindow( "clusters" )

View File

@@ -1,49 +0,0 @@
#!/usr/bin/python
from opencv.cv import *
from opencv.highgui import *
import sys
if __name__ == "__main__":
    laplace = None
    colorlaplace = None
    planes = [ None, None, None ]   # one buffer per color channel
    capture = None

    # no args: default camera; digit arg: camera index; else: video file
    if len(sys.argv)==1:
        capture = cvCreateCameraCapture( 0 )
    elif len(sys.argv)==2 and sys.argv[1].isdigit():
        capture = cvCreateCameraCapture( int(sys.argv[1]) )
    elif len(sys.argv)==2:
        capture = cvCreateFileCapture( sys.argv[1] )

    if not capture:
        print "Could not initialize capturing..."
        sys.exit(-1)

    cvNamedWindow( "Laplacian", 1 )

    while True:
        frame = cvQueryFrame( capture )
        if not frame:
            cvWaitKey(0)
            break

        if not laplace:
            # lazily allocate the buffers once the frame size is known
            for i in range( len(planes) ):
                planes[i] = cvCreateImage( cvSize(frame.width,frame.height), 8, 1 )
            laplace = cvCreateImage( cvSize(frame.width,frame.height), IPL_DEPTH_16S, 1 )
            colorlaplace = cvCreateImage( cvSize(frame.width,frame.height), 8, 3 )

        cvSplit( frame, planes[0], planes[1], planes[2], None )
        for plane in planes:
            # 16-bit signed Laplacian, scaled back to 8 bits in place
            cvLaplace( plane, laplace, 3 )
            cvConvertScaleAbs( laplace, plane, 1, 0 )

        cvMerge( planes[0], planes[1], planes[2], None, colorlaplace )

        cvShowImage("Laplacian", colorlaplace )

        if cvWaitKey(10) != -1:
            break

    cvDestroyWindow("Laplacian")

View File

@@ -1,227 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of lkdemo"
import sys
# import the necessary things for OpenCV
from opencv import cv
from opencv import highgui
#############################################################################
# some "constants"
win_size = 10
MAX_COUNT = 500

# some "global" variables
image = None
pt = None                 # last clicked point, set by on_mouse
add_remove_pt = False     # True while a click waits to be processed
flags = 0
night_mode = False
need_to_init = False

# the default parameters
quality = 0.01
min_distance = 10
#############################################################################
# the mouse callback
# the callback on the trackbar
def on_mouse (event, x, y, flags, param):
    """Mouse callback: on a left click, remember the point so the main
    loop can add it to (or remove it from) the tracked feature set."""
    # pt / add_remove_pt are shared with the main loop
    global pt, add_remove_pt
    if image is None:
        # the capture loop has not produced a frame yet -- nothing to do
        return
    if event == highgui.CV_EVENT_LBUTTONDOWN:
        # remember where the user clicked; the main loop consumes it
        pt = cv.cvPoint (x, y)
        add_remove_pt = True
#############################################################################
# so, here is the main part of the program
# NOTE(review): indentation was lost in this copy of the file; the statements
# are kept byte-identical and must be re-indented before the script can run.
if __name__ == '__main__':
try:
# try to get the device number from the command line
device = int (sys.argv [1])
# got it ! so remove it from the arguments
del sys.argv [1]
except (IndexError, ValueError):
# no device number on the command line, assume we want the 1st device
device = 0
if len (sys.argv) == 1:
# no argument on the command line, try to use the camera
capture = highgui.cvCreateCameraCapture (device)
else:
# we have an argument on the command line,
# we can assume this is a file name, so open it
capture = highgui.cvCreateFileCapture (sys.argv [1])
# check that capture device is OK
if not capture:
print "Error opening capture device"
sys.exit (1)
# display a small howto use it
print "Hot keys: \n" \
"\tESC - quit the program\n" \
"\tr - auto-initialize tracking\n" \
"\tc - delete all the points\n" \
"\tn - switch the \"night\" mode on/off\n" \
"To add/remove a feature point click it\n"
# first, create the necessary windows
highgui.cvNamedWindow ('LkDemo', highgui.CV_WINDOW_AUTOSIZE)
# register the mouse callback
highgui.cvSetMouseCallback ('LkDemo', on_mouse, None)
while 1:
# do forever
# 1. capture the current image
frame = highgui.cvQueryFrame (capture)
if frame is None:
# no image captured... end the processing
break
if image is None:
# create the images we need
image = cv.cvCreateImage (cv.cvGetSize (frame), 8, 3)
grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
prev_grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
prev_pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
eig = cv.cvCreateImage (cv.cvGetSize (frame), cv.IPL_DEPTH_32F, 1)
temp = cv.cvCreateImage (cv.cvGetSize (frame), cv.IPL_DEPTH_32F, 1)
points = [[], []]
# copy the frame, so we can draw on it
cv.cvCopy (frame, image)
# create a grey version of the image
cv.cvCvtColor (image, grey, cv.CV_BGR2GRAY)
if night_mode:
# night mode: only display the points
cv.cvSetZero (image)
if need_to_init:
# we want to search all the good points
# create the wanted images
# search the good points
points [1] = cv.cvGoodFeaturesToTrack (
grey, eig, temp,
MAX_COUNT,
quality, min_distance, None, 3, 0, 0.04)
# refine the corner locations
cv.cvFindCornerSubPix (
grey,
points [1],
cv.cvSize (win_size, win_size), cv.cvSize (-1, -1),
cv.cvTermCriteria (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
20, 0.03))
elif len (points [0]) > 0:
# we have points, so display them
# calculate the optical flow
# NOTE(review): the SWIG wrapper appears to return ([new_points, status],
# extra) -- confirm this unpacking against the old opencv bindings
[points [1], status], something = cv.cvCalcOpticalFlowPyrLK (
prev_grey, grey, prev_pyramid, pyramid,
points [0], len (points [0]),
(win_size, win_size), 3,
len (points [0]),
None,
cv.cvTermCriteria (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,
20, 0.03),
flags)
# initializations
point_counter = -1
new_points = []
for the_point in points [1]:
# go trough all the points
# increment the counter
point_counter += 1
if add_remove_pt:
# we have a point to add, so see if it is close to
# another one. If yes, don't use it
dx = pt.x - the_point.x
dy = pt.y - the_point.y
if dx * dx + dy * dy <= 25:
# too close
add_remove_pt = 0
continue
if not status [point_counter]:
# we will disable this point
continue
# this point is a correct point
new_points.append (the_point)
# draw the current point
cv.cvCircle (image,
cv.cvPointFrom32f(the_point),
3, cv.cvScalar (0, 255, 0, 0),
-1, 8, 0)
# set back the points we keep
points [1] = new_points
if add_remove_pt:
# we want to add a point
points [1].append (cv.cvPointTo32f (pt))
# refine the corner locations
points [1][-1] = cv.cvFindCornerSubPix (
grey,
[points [1][-1]],
cv.cvSize (win_size, win_size), cv.cvSize (-1, -1),
cv.cvTermCriteria (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
20, 0.03))[0]
# we are no more in "add_remove_pt" mode
add_remove_pt = False
# swapping
prev_grey, grey = grey, prev_grey
prev_pyramid, pyramid = pyramid, prev_pyramid
points [0], points [1] = points [1], points [0]
need_to_init = False
# we can now display the image
highgui.cvShowImage ('LkDemo', image)
# handle events
c = highgui.cvWaitKey (10)
if c == '\x1b':
# user has press the ESC key, so exit
break
# processing depending on the character
if c in ['r', 'R']:
need_to_init = True
elif c in ['c', 'C']:
points = [[], []]
elif c in ['n', 'N']:
night_mode = not night_mode

View File

@@ -1,44 +0,0 @@
#!/usr/bin/python
import sys
from opencv.cv import *
from opencv.highgui import *
src=None
dst=None
src2=None
def on_mouse( event, x, y, flags, param ):
if( not src ):
return;
if event==CV_EVENT_LBUTTONDOWN:
cvLogPolar( src, dst, cvPoint2D32f(x,y), 40, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
cvLogPolar( dst, src2, cvPoint2D32f(x,y), 40, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS+CV_WARP_INVERSE_MAP );
cvShowImage( "log-polar", dst );
cvShowImage( "inverse log-polar", src2 );
if __name__ == "__main__":
filename = "../c/fruits.jpg"
if len(sys.argv)>1:
filename=argv[1]
src = cvLoadImage(filename,1)
if not src:
print "Could not open %s" % filename
sys.exit(-1)
cvNamedWindow( "original",1 );
cvNamedWindow( "log-polar", 1 );
cvNamedWindow( "inverse log-polar", 1 );
dst = cvCreateImage( cvSize(256,256), 8, 3 );
src2 = cvCreateImage( cvGetSize(src), 8, 3 );
cvSetMouseCallback( "original", on_mouse );
on_mouse( CV_EVENT_LBUTTONDOWN, src.width/2, src.height/2, None, None)
cvShowImage( "original", src );
cvWaitKey();

View File

@@ -1,71 +0,0 @@
#!/usr/bin/python
"""Demo: minimum-area rectangle and minimum enclosing circle of random
points, alternating between a CvMat and a CvSeq point container."""
from opencv.cv import *
from opencv.highgui import *
from random import randint

def minarea_array(img, count):
    # Random points stored in a count x 1 matrix of 2-channel 32-bit ints.
    pointMat = cvCreateMat(count, 1, CV_32SC2)
    for idx in range(count):
        pointMat[idx] = cvPoint(randint(img.width / 4, img.width * 3 / 4),
                                randint(img.height / 4, img.height * 3 / 4))
    box = cvMinAreaRect2(pointMat)
    box_vtx = cvBoxPoints(box)
    success, center, radius = cvMinEnclosingCircle(pointMat)
    cvZero(img)
    for idx in range(count):
        cvCircle(img, cvGet1D(pointMat, idx), 2, CV_RGB(255, 0, 0), CV_FILLED, CV_AA, 0)
    # cvBoxPoints returns float corners; convert to integer points to draw.
    box_vtx = [cvPointFrom32f(v) for v in (box_vtx[0], box_vtx[1], box_vtx[2], box_vtx[3])]
    cvCircle(img, cvPointFrom32f(center), cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0)
    cvPolyLine(img, [box_vtx], 1, CV_RGB(0, 255, 255), 1, CV_AA)

def minarea_seq(img, count, storage):
    # Random points pushed onto a growable CvSeq backed by `storage`.
    ptseq = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2, sizeof_CvContour, sizeof_CvPoint, storage)
    ptseq = CvSeq_CvPoint.cast(ptseq)
    for _ in range(count):
        p = cvPoint(randint(img.width / 4, img.width * 3 / 4),
                    randint(img.height / 4, img.height * 3 / 4))
        cvSeqPush(ptseq, p)
    box = cvMinAreaRect2(ptseq)
    box_vtx = cvBoxPoints(box)
    success, center, radius = cvMinEnclosingCircle(ptseq)
    cvZero(img)
    for p in ptseq:
        cvCircle(img, p, 2, CV_RGB(255, 0, 0), CV_FILLED, CV_AA, 0)
    # cvBoxPoints returns float corners; convert to integer points to draw.
    box_vtx = [cvPointFrom32f(v) for v in (box_vtx[0], box_vtx[1], box_vtx[2], box_vtx[3])]
    cvCircle(img, cvPointFrom32f(center), cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0)
    cvPolyLine(img, [box_vtx], 1, CV_RGB(0, 255, 255), 1, CV_AA)
    # Sequence data lives in `storage`; reset it so the memory is reused.
    cvClearMemStorage(storage)

if __name__ == "__main__":
    img = cvCreateImage(cvSize(500, 500), 8, 3)
    storage = cvCreateMemStorage(0)
    cvNamedWindow("rect & circle", 1)
    use_seq = True
    while True:
        count = randint(1, 100)
        # Alternate between the CvSeq and CvMat variants on every keypress.
        if use_seq:
            minarea_seq(img, count, storage)
        else:
            minarea_array(img, count)
        cvShowImage("rect & circle", img)
        key = cvWaitKey()
        if (key == '\x1b'):
            break;
        use_seq = not use_seq

View File

@@ -1,14 +0,0 @@
#! /usr/bin/env python
import opencv
from opencv import highgui
cap = highgui.cvCreateFileCapture("../c/tree.avi")
img = highgui.cvQueryFrame(cap)
print "Got frame of dimensions (", img.width, " x ", img.height, " )"
highgui.cvNamedWindow("win", highgui.CV_WINDOW_AUTOSIZE)
highgui.cvShowImage("win", img)
highgui.cvMoveWindow("win", 200, 200)
highgui.cvWaitKey(0)

View File

@@ -1,50 +0,0 @@
#!/usr/bin/python
"""Morphology demo: trackbars drive erode/dilate and open/close with a
rectangular structuring element whose radius follows the slider position."""
import sys
from opencv.cv import *
from opencv.highgui import *

src = 0;
image = 0;
dest = 0;
element = 0;
element_shape = CV_SHAPE_RECT;
global_pos = 0;

def _make_element(pos):
    # Square structuring element of side 2*pos+1, anchored at its centre.
    return cvCreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos, element_shape, None)

def Opening(pos):
    # Erode then dilate src into dest via the intermediate `image`.
    kernel = _make_element(pos)
    cvErode(src, image, kernel, 1)
    cvDilate(image, dest, kernel, 1)
    cvShowImage("Opening&Closing window", dest)

def Closing(pos):
    # Dilate then erode src into dest via the intermediate `image`.
    kernel = _make_element(pos)
    cvDilate(src, image, kernel, 1)
    cvErode(image, dest, kernel, 1)
    cvShowImage("Opening&Closing window", dest)

def Erosion(pos):
    kernel = _make_element(pos)
    cvErode(src, dest, kernel, 1)
    cvShowImage("Erosion&Dilation window", dest)

def Dilation(pos):
    kernel = _make_element(pos)
    cvDilate(src, dest, kernel, 1)
    cvShowImage("Erosion&Dilation window", dest)

if __name__ == "__main__":
    filename = "../c/baboon.jpg"
    if len(sys.argv) == 2:
        filename = sys.argv[1]
    src = cvLoadImage(filename, 1)
    if not src:
        sys.exit(-1)
    image = cvCloneImage(src)
    dest = cvCloneImage(src)
    cvNamedWindow("Opening&Closing window", 1)
    cvNamedWindow("Erosion&Dilation window", 1)
    cvShowImage("Opening&Closing window", src)
    cvShowImage("Erosion&Dilation window", src)
    # Each trackbar drives one morphology operation; position = kernel radius.
    cvCreateTrackbar("Open", "Opening&Closing window", global_pos, 10, Opening)
    cvCreateTrackbar("Close", "Opening&Closing window", global_pos, 10, Closing)
    cvCreateTrackbar("Dilate", "Erosion&Dilation window", global_pos, 10, Dilation)
    cvCreateTrackbar("Erode", "Erosion&Dilation window", global_pos, 10, Erosion)
    cvWaitKey(0)
    cvDestroyWindow("Opening&Closing window")
    cvDestroyWindow("Erosion&Dilation window")

View File

@@ -1,113 +0,0 @@
#!/usr/bin/python
# Motion-history-image (MHI) demo: diffs successive frames, accumulates a
# motion history, segments it into moving components and draws a circle plus
# an orientation line for each component.
# NOTE(review): the original indentation of this file was lost in this copy;
# the code lines are kept byte-identical and must be re-indented to run.
from opencv.cv import *
from opencv.highgui import *
import sys
import time
from math import cos,sin
CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
# number of ring-buffer slots actually used for frame differencing
N = 4
# ring buffer of grayscale frames; only the first N entries are ever assigned
buf = range(10)
last = 0
mhi = None # MHI
orient = None # orientation
mask = None # valid orientation mask
segmask = None # motion segmentation map
storage = None # temporary storage
# Update the motion history with frame `img` and draw the result into `dst`.
# diff_threshold is the minimal inter-frame difference treated as motion.
def update_mhi( img, dst, diff_threshold ):
global last
global mhi
global storage
global mask
global orient
global segmask
timestamp = time.clock()/CLOCKS_PER_SEC # get current time in seconds
size = cvSize(img.width,img.height) # get current frame size
idx1 = last
# (re)allocate work images on first call or when the frame size changes
if not mhi or mhi.width != size.width or mhi.height != size.height:
for i in range( N ):
buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 )
cvZero( buf[i] )
mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 )
cvZero( mhi ) # clear MHI at the beginning
orient = cvCreateImage( size, IPL_DEPTH_32F, 1 )
segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 )
mask = cvCreateImage( size, IPL_DEPTH_8U, 1 )
cvCvtColor( img, buf[last], CV_BGR2GRAY ) # convert frame to grayscale
idx2 = (last + 1) % N # index of (last - (N-1))th frame
last = idx2
silh = buf[idx2]
cvAbsDiff( buf[idx1], buf[idx2], silh ) # get difference between frames
cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ) # and threshold it
cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ) # update MHI
# scale the MHI into `mask` and show it in the first (blue) channel of dst
cvCvtScale( mhi, mask, 255./MHI_DURATION,
(MHI_DURATION - timestamp)*255./MHI_DURATION )
cvZero( dst )
cvMerge( mask, None, None, None, dst )
cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 )
if( not storage ):
storage = cvCreateMemStorage(0)
else:
cvClearMemStorage(storage)
# split the overall motion into independently moving components
seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA )
# i == -1 processes the whole image; i >= 0 the i-th motion component
for i in range(-1, seq.total):
if( i < 0 ): # case of the whole image
comp_rect = cvRect( 0, 0, size.width, size.height )
color = CV_RGB(255,255,255)
magnitude = 100.
else: # i-th motion component
comp_rect = seq[i].rect
if( comp_rect.width + comp_rect.height < 100 ): # reject very small components
continue
color = CV_RGB(255,0,0)
magnitude = 30.
silh_roi = cvGetSubRect(silh, comp_rect)
mhi_roi = cvGetSubRect( mhi, comp_rect )
orient_roi = cvGetSubRect( orient, comp_rect )
mask_roi = cvGetSubRect( mask, comp_rect )
angle = cvCalcGlobalOrientation( orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
angle = 360.0 - angle # adjust for images with top-left origin
count = cvNorm( silh_roi, None, CV_L1, None ) # calculate number of points within silhouette ROI
# skip components with too little motion inside their silhouette ROI
if( count < comp_rect.width * comp_rect.height * 0.05 ):
continue
center = cvPoint( (comp_rect.x + comp_rect.width/2),
(comp_rect.y + comp_rect.height/2) )
cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 )
cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 )
if __name__ == "__main__":
motion = 0
capture = 0
# choose the capture source: default camera, camera index, or video file
if len(sys.argv)==1:
capture = cvCreateCameraCapture( 0 )
elif len(sys.argv)==2 and sys.argv[1].isdigit():
capture = cvCreateCameraCapture( int(sys.argv[1]) )
elif len(sys.argv)==2:
capture = cvCreateFileCapture( sys.argv[1] )
if not capture:
print "Could not initialize capturing..."
sys.exit(-1)
cvNamedWindow( "Motion", 1 )
while True:
image = cvQueryFrame( capture )
if( image ):
if( not motion ):
motion = cvCreateImage( cvSize(image.width,image.height), 8, 3 )
cvZero( motion )
#motion.origin = image.origin
update_mhi( image, motion, 30 )
cvShowImage( "Motion", motion )
if( cvWaitKey(10) != -1 ):
break
else:
break
cvDestroyWindow( "Motion" )

View File

@@ -1,59 +0,0 @@
#!/usr/bin/python
# Pyramid-segmentation demo: segments an image with cvPyrSegmentation and
# lets two trackbars control the link/cluster thresholds.
# NOTE(review): the original indentation of this file was lost in this copy;
# the code lines are kept byte-identical and must be re-indented to run.
import sys
from opencv.cv import *
from opencv.highgui import *
image = [None, None]
image0 = None
image1 = None
threshold1 = 255
threshold2 = 30
l = level = 4;
block_size = 1000;
filter = CV_GAUSSIAN_5x5;
storage = None
min_comp = CvConnectedComp()
# trackbar callback: update threshold1 and re-run the segmentation
def set_thresh1( val ):
global threshold1
threshold1 = val
ON_SEGMENT()
# trackbar callback: update threshold2 and re-run the segmentation
def set_thresh2( val ):
global threshold2
threshold2 = val
ON_SEGMENT()
# run cvPyrSegmentation with the current global thresholds and display it
def ON_SEGMENT():
global storage
global min_comp
comp = cvPyrSegmentation(image0, image1, storage, level, threshold1+1, threshold2+1);
cvShowImage("Segmentation", image1);
if __name__ == "__main__":
filename = "../c/fruits.jpg";
if len(sys.argv) == 2:
filename = sys.argv[1]
image[0] = cvLoadImage( filename, 1)
if not image[0]:
print "Error opening %s" % filename
sys.exit(-1)
cvNamedWindow("Source", 0);
cvShowImage("Source", image[0]);
cvNamedWindow("Segmentation", 0);
storage = cvCreateMemStorage ( block_size );
# crop dimensions so they are divisible by 2**level (pyramid requirement)
image[0].width &= -(1<<level);
image[0].height &= -(1<<level);
image0 = cvCloneImage( image[0] );
image1 = cvCloneImage( image[0] );
# segmentation of the color image
# NOTE(review): `l = 1` below has no visible effect -- the segmentation
# uses the global `level`, not `l`; confirm before cleaning up.
l = 1;
threshold1 =255;
threshold2 =30;
ON_SEGMENT();
sthreshold1 = cvCreateTrackbar("Threshold1", "Segmentation", threshold1, 255, set_thresh1);
sthreshold2 = cvCreateTrackbar("Threshold2", "Segmentation", threshold2, 255, set_thresh2);
cvShowImage("Segmentation", image1);
cvWaitKey(0);
cvDestroyWindow("Segmentation");
cvDestroyWindow("Source");

View File

@@ -1,153 +0,0 @@
#!/usr/bin/python
#
# The full "Square Detector" program.
# It loads several images subsequentally and tries to find squares in
# each image
#
from opencv.cv import *
from opencv.highgui import *
from math import sqrt
# upper Canny threshold; adjustable via the "canny thresh" trackbar
thresh = 50;
img = None;          # working copy currently being processed
img0 = None;         # pristine loaded image
storage = None;      # memory storage shared by all contour operations
wndname = "Square Detection Demo";
def angle( pt1, pt2, pt0 ):
    """Cosine of the angle formed at pt0 by the segments pt0->pt1 and
    pt0->pt2 (the 1e-10 term guards against division by zero)."""
    ax, ay = pt1.x - pt0.x, pt1.y - pt0.y
    bx, by = pt2.x - pt0.x, pt2.y - pt0.y
    dot = ax * bx + ay * by
    norm_product = sqrt((ax * ax + ay * ay) * (bx * bx + by * by) + 1e-10)
    return dot / norm_product
def findSquares4( img, storage ):
    """Find quadrilateral contours ("squares") in `img`.

    Scans each colour plane at several threshold levels (Canny at level 0),
    approximates every contour and keeps large convex quads whose corner
    cosines are all below 0.3 (angles close to 90 degrees).  Returns a
    CvSeq of points with four consecutive entries per detected square.
    """
    N = 11
    sz = cvSize( img.width & -2, img.height & -2 )
    timg = cvCloneImage( img ) # make a copy of input image
    gray = cvCreateImage( sz, 8, 1 )
    pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 )
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cvCreateSeq( 0, sizeof_CvSeq, sizeof_CvPoint, storage )
    squares = CvSeq_CvPoint.cast( squares )
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cvGetSubRect( timg, cvRect( 0, 0, sz.width, sz.height ))
    # down-scale and upscale the image to filter out the noise
    cvPyrDown( subimage, pyr, 7 )
    cvPyrUp( pyr, subimage, 7 )
    tgray = cvCreateImage( sz, 8, 1 )
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cvSplit( subimage, channels[0], channels[1], channels[2], None )
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if( l == 0 ):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 )
                # dilate canny output to remove potential
                # holes between edge segments
                cvDilate( gray, gray, None, 1 )
            else:
                # apply threshold if l!=0:
                # tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY )
            # find contours and store them all as a list
            count, contours = cvFindContours( gray, storage, sizeof_CvContour,
                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) )
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate the contour with accuracy proportional to
                # THIS contour's perimeter.
                # BUGFIX: was cvContourPerimeter(contours), which reused the
                # first contour's perimeter for every contour in the list.
                result = cvApproxPoly( contour, sizeof_CvContour, storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contour)*0.02, 0 )
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if( result.total == 4 and
                    abs(cvContourArea(result)) > 1000 and
                    cvCheckContourConvexity(result) ):
                    s = 0
                    # NOTE(review): i runs to 4 on a 4-point sequence, matching
                    # the original C sample; relies on the CvSeq wrapper's
                    # index handling -- confirm against the old bindings.
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if( i >= 2 ):
                            t = abs(angle( result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quandrange
                    # vertices to resultant sequence
                    if( s < 0.3 ):
                        for i in range(4):
                            squares.append( result[i] )
    return squares
# the function draws all the squares in the image
# (`squares` holds 4 consecutive vertices per detected square)
def drawSquares( img, squares ):
cpy = cvCloneImage( img );
# read 4 sequence elements at a time (all vertices of a square)
i=0
while i<squares.total:
pt = []
# read 4 vertices
pt.append( squares[i] )
pt.append( squares[i+1] )
pt.append( squares[i+2] )
pt.append( squares[i+3] )
# draw the square as a closed polyline
cvPolyLine( cpy, [pt], 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
i+=4
# show the resultant image
cvShowImage( wndname, cpy );
# trackbar callback: re-detect and redraw with the current threshold
def on_trackbar( a ):
if( img ):
drawSquares( img, findSquares4( img, storage ) );
# sample images the demo iterates over
names = ["../c/pic1.png", "../c/pic2.png", "../c/pic3.png",
"../c/pic4.png", "../c/pic5.png", "../c/pic6.png" ];
if __name__ == "__main__":
# create memory storage that will contain all the dynamic data
storage = cvCreateMemStorage(0);
for name in names:
img0 = cvLoadImage( name, 1 );
if not img0:
print "Couldn't load %s" % name
continue;
# keep a pristine copy (img0) and a working copy (img)
img = cvCloneImage( img0 );
# create window and a trackbar (slider) with parent "image" and set callback
# (the slider regulates upper threshold, passed to Canny edge detector)
cvNamedWindow( wndname, 1 );
cvCreateTrackbar( "canny thresh", wndname, thresh, 1000, on_trackbar );
# force the image processing
on_trackbar(0);
# wait for key.
# Also the function cvWaitKey takes care of event processing
c = cvWaitKey(0);
# clear memory storage - reset free space position
cvClearMemStorage( storage );
# ESC while viewing any image quits the whole demo
if( c == '\x1b' ):
break;
cvDestroyWindow( wndname );

View File

@@ -1,111 +0,0 @@
#!/usr/bin/python
# Watershed segmentation demo: scribble marker strokes on the image with the
# mouse, then press 'w' to run cvWatershed on the marked regions.
from opencv.cv import *
from opencv.highgui import *
import sys
marker_mask = None; # single-channel image the user's marker strokes are drawn into
markers = None; # 32-bit label image passed to cvWatershed
img0 = None # pristine loaded image
img = None # working copy shown to the user (strokes drawn on it)
img_gray = None # grayscale-as-BGR backdrop blended with the watershed result
wshed = None # visualised watershed output
prev_pt = cvPoint(-1,-1) # previous mouse position while a stroke is drawn
def on_mouse( event, x, y, flags, param ):
    """Mouse callback: lets the user scribble marker strokes onto both the
    displayed image and the marker mask while the left button is held."""
    global prev_pt
    if( not img ):
        # image not loaded yet -- ignore the event
        return
    if( event == CV_EVENT_LBUTTONUP or not (flags & CV_EVENT_FLAG_LBUTTON) ):
        # stroke finished: forget the previous point
        prev_pt = cvPoint(-1, -1)
    elif( event == CV_EVENT_LBUTTONDOWN ):
        # stroke started at the click position
        prev_pt = cvPoint(x, y)
    elif( event == CV_EVENT_MOUSEMOVE and (flags & CV_EVENT_FLAG_LBUTTON) ):
        current = cvPoint(x, y)
        if( prev_pt.x < 0 ):
            # first move event of the stroke
            prev_pt = current
        # draw the segment on both the marker mask and the visible image
        cvLine( marker_mask, prev_pt, current, cvScalarAll(255), 5, 8, 0 )
        cvLine( img, prev_pt, current, cvScalarAll(255), 5, 8, 0 )
        prev_pt = current
        cvShowImage( "image", img )
if __name__ == "__main__":
filename = "../c/fruits.jpg"
if len(sys.argv)>1:
filename = sys.argv[1]
rng = cvRNG(-1);
img0 = cvLoadImage(filename,1)
if not img0:
print "Error opening image '%s'" % filename
sys.exit(-1)
print "Hot keys:"
print "\tESC - quit the program"
print "\tr - restore the original image"
print "\tw - run watershed algorithm"
print "\t (before that, roughly outline several markers on the image)"
cvNamedWindow( "image", 1 );
cvNamedWindow( "watershed transform", 1 );
img = cvCloneImage( img0 );
img_gray = cvCloneImage( img0 );
wshed = cvCloneImage( img0 );
marker_mask = cvCreateImage( cvGetSize(img), 8, 1 );
markers = cvCreateImage( cvGetSize(img), IPL_DEPTH_32S, 1 );
cvCvtColor( img, marker_mask, CV_BGR2GRAY );
cvCvtColor( marker_mask, img_gray, CV_GRAY2BGR );
cvZero( marker_mask );
cvZero( wshed );
cvShowImage( "image", img );
cvShowImage( "watershed transform", wshed );
cvSetMouseCallback( "image", on_mouse, None );
while True:
c = cvWaitKey(0);
if c=='\x1b':
break;
if c == 'r':
cvZero( marker_mask );
cvCopy( img0, img );
cvShowImage( "image", img );
if c == 'w':
storage = cvCreateMemStorage(0);
comp_count = 0;
#cvSaveImage( "wshed_mask.png", marker_mask );
#marker_mask = cvLoadImage( "wshed_mask.png", 0 );
nb_cont, contours = cvFindContours( marker_mask, storage, sizeof_CvContour,
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
cvZero( markers );
while contours:
cvDrawContours( markers, contours, cvScalarAll(comp_count+1),
cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
contours=contours.h_next
comp_count+=1
color_tab = cvCreateMat( comp_count, 1, CV_8UC3 );
for i in range(comp_count):
color_tab[i] = cvScalar( cvRandInt(rng)%180 + 50,
cvRandInt(rng)%180 + 50,
cvRandInt(rng)%180 + 50 );
t = cvGetTickCount();
cvWatershed( img0, markers );
t = cvGetTickCount() - t;
#print "exec time = %f" % t/(cvGetTickFrequency()*1000.)
cvSet( wshed, cvScalarAll(255) );
# paint the watershed image
for j in range(markers.height):
for i in range(markers.width):
idx = markers[j,i]
if idx==-1:
continue
idx = idx-1
wshed[j,i] = color_tab[idx,0]
cvAddWeighted( wshed, 0.5, img_gray, 0.5, 0, wshed );
cvShowImage( "watershed transform", wshed );
cvWaitKey();