Normalize line endings and whitespace

Authored by OpenCV Buildbot on 2012-10-17 03:18:30 +04:00; committed by Andrey Kamaev
parent 69020da607
commit 04384a71e4
1516 changed files with 258846 additions and 258162 deletions
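The commit title indicates that only line endings, trailing whitespace, and file modes change across these files; the sample code itself is otherwise untouched. As a rough illustration of that kind of normalization (a minimal sketch under the assumption that CRLF/CR endings become LF and trailing whitespace is stripped; this is not the actual buildbot tooling):

import sys

def normalize_file(path):
    # Read raw bytes so existing CRLF/CR endings stay visible.
    with open(path, 'rb') as f:
        data = f.read()
    # Convert CRLF and lone CR line endings to LF.
    data = data.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
    # Strip trailing whitespace from each line and ensure a final newline.
    lines = [line.rstrip() for line in data.split(b'\n')]
    normalized = b'\n'.join(lines)
    if not normalized.endswith(b'\n'):
        normalized += b'\n'
    with open(path, 'wb') as f:
        f.write(normalized)

if __name__ == '__main__':
    for path in sys.argv[1:]:
        normalize_file(path)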

samples/python/camera.py: Normal file → Executable file, 0 lines changed

samples/python/camshift.py: Normal file → Executable file, 232 lines changed

@@ -1,116 +1,116 @@
#!/usr/bin/env python

import cv2.cv as cv

def is_rect_nonzero(r):
    (_,_,w,h) = r
    return (w > 0) and (h > 0)

class CamShiftDemo:

    def __init__(self):
        self.capture = cv.CaptureFromCAM(0)
        cv.NamedWindow( "CamShiftDemo", 1 )
        cv.NamedWindow( "Histogram", 1 )
        cv.SetMouseCallback( "CamShiftDemo", self.on_mouse)

        self.drag_start = None      # Set to (x,y) when mouse starts drag
        self.track_window = None    # Set to rect when the mouse drag finishes

        print( "Keys:\n"
            "    ESC - quit the program\n"
            "    b - switch to/from backprojection view\n"
            "To initialize tracking, drag across the object with the mouse\n" )

    def hue_histogram_as_image(self, hist):
        """ Returns a nice representation of a hue histogram """

        histimg_hsv = cv.CreateImage( (320,200), 8, 3)

        mybins = cv.CloneMatND(hist.bins)
        cv.Log(mybins, mybins)
        (_, hi, _, _) = cv.MinMaxLoc(mybins)
        cv.ConvertScale(mybins, mybins, 255. / hi)

        w,h = cv.GetSize(histimg_hsv)
        hdims = cv.GetDims(mybins)[0]
        for x in range(w):
            xh = (180 * x) / (w - 1)  # hue sweeps from 0-180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
            cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)

        histimg = cv.CreateImage( (320,200), 8, 3)
        cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
        return histimg

    def on_mouse(self, event, x, y, flags, param):
        if event == cv.CV_EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if event == cv.CV_EVENT_LBUTTONUP:
            self.drag_start = None
            self.track_window = self.selection
        if self.drag_start:
            xmin = min(x, self.drag_start[0])
            ymin = min(y, self.drag_start[1])
            xmax = max(x, self.drag_start[0])
            ymax = max(y, self.drag_start[1])
            self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)

    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
        backproject_mode = False
        while True:
            frame = cv.QueryFrame( self.capture )

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject( [self.hue], backproject, hist )
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x,y,w,h = self.selection
                cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255))

                sel = cv.GetSubRect(self.hue, self.selection )
                cv.CalcArrHist( [sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

            if not backproject_mode:
                cv.ShowImage( "CamShiftDemo", frame )
            else:
                cv.ShowImage( "CamShiftDemo", backproject)
            cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode

if __name__=="__main__":
    demo = CamShiftDemo()
    demo.run()
    cv.DestroyAllWindows()


@@ -27,15 +27,15 @@ def on_trackbar (position):
# initialisation
_contours = contours
if levels <= 0:
# zero or negative value
# => get to the nearest face to make it look more funny
_contours = contours.h_next().h_next().h_next()
# first, clear the image where we will draw contours
cv.SetZero (contours_image)
# draw contours in red and green
cv.DrawContours (contours_image, _contours,
_red, _green,
@@ -57,7 +57,7 @@ if __name__ == '__main__':
for i in range (6):
dx = (i % 2) * 250 - 30
dy = (i / 2) * 150
cv.Ellipse (image,
(dx + 150, dy + 100),
(100, 70),
@@ -109,7 +109,7 @@ if __name__ == '__main__':
# create the storage area
storage = cv.CreateMemStorage (0)
# find the contours
contours = cv.FindContours(image,
storage,
@@ -118,10 +118,10 @@ if __name__ == '__main__':
(0,0))
# comment this out if you do not want approximation
contours = cv.ApproxPoly (contours,
storage,
cv.CV_POLY_APPROX_DP, 3, 1)
# create the window for the contours
cv.NamedWindow ("contours", 1)


@@ -30,7 +30,7 @@ if __name__ == '__main__':
# initialisations
points = []
for i in range (count):
# generate a random point
points.append ( (

samples/python/cv20squares.py: Normal file → Executable file, 18 lines changed

@@ -1,7 +1,7 @@
"""
Find Squares in image by finding contours and filtering
"""
#Results slightly different from C version on same images, but is
#otherwise ok
import math
@@ -24,7 +24,7 @@ def is_square(contour):
Squareness checker
Square contours should:
-have 4 vertices after approximation,
-have relatively large area (to filter out noisy contours)
-be convex.
-have angles between sides close to 90deg (cos(ang) ~0 )
@@ -45,11 +45,11 @@ def is_square(contour):
t = math.fabs(angle(pt0, pt1, pt2))
if s <= t:s = t
# if cosines of all angles are small (all angles are ~90 degree)
# then its a square
if s < 0.3:return True
return False
def find_squares_from_binary( gray ):
"""
@@ -58,7 +58,7 @@ def find_squares_from_binary( gray ):
"""
squares = []
storage = cv.CreateMemStorage(0)
contours = cv.FindContours(gray, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))
storage = cv.CreateMemStorage(0)
while contours:
#approximate contour with accuracy proportional to the contour perimeter
@@ -79,7 +79,7 @@ def find_squares4(color_img):
the edge segments.
-Threshold the result to binary edge tokens
-Use cv.FindContours: returns a cv.CvSequence of cv.CvContours
-Filter each candidate: use Approx poly, keep only contours with 4 vertices,
enough area, and ~90deg angles.
Return all squares contours in one flat list of arrays, 4 x,y points each.
@@ -137,7 +137,7 @@ def draw_squares( color_img, squares ):
cv.ShowImage(WNDNAME, color_img)
WNDNAME = "Squares Demo"
def main():
"""Open test color images, create display window, start the search"""
@@ -157,11 +157,11 @@ def main():
# force the image processing
draw_squares( img, find_squares4( img ) )
# wait for key.
if cv.WaitKey(-1) % 0x100 == 27:
break
if __name__ == "__main__":
main()
cv.DestroyAllWindows()

samples/python/cvutils.py: Normal file → Executable file, 0 lines changed


@@ -17,7 +17,7 @@ def draw_subdiv_edge( img, edge, color ):
dst_pt = cv.Subdiv2DEdgeDst(edge);
if org_pt and dst_pt :
org = org_pt.pt;
dst = dst_pt.pt;
@@ -28,7 +28,7 @@ def draw_subdiv_edge( img, edge, color ):
def draw_subdiv( img, subdiv, delaunay_color, voronoi_color ):
for edge in subdiv.edges:
edge_rot = cv.Subdiv2DRotateEdge( edge, 1 )
@@ -68,7 +68,7 @@ def draw_subdiv_facet( img, edge ):
for i in range(count):
assert t>4
pt = cv.Subdiv2DEdgeOrg( t );
if not pt:
break;
buf.append( ( cv.Round(pt.pt[0]), cv.Round(pt.pt[1]) ) );
t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT );
@@ -128,7 +128,7 @@ if __name__ == '__main__':
if( cv.WaitKey( 100 ) >= 0 ):
break;
cv.Set( img, bkgnd_color );
paint_voronoi( subdiv, img );

samples/python/dmtx.py: Normal file → Executable file, 0 lines changed


@@ -22,7 +22,7 @@ if __name__ == '__main__':
number = 100
delay = 5
line_type = cv.CV_AA # change it to 8 to see non-antialiased graphics
# create the source image
image = cv.CreateImage( (width, height), 8, 3)
@@ -44,7 +44,7 @@ if __name__ == '__main__':
random_color(random),
random.randrange(0, 10),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
@@ -58,7 +58,7 @@ if __name__ == '__main__':
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
@@ -73,7 +73,7 @@ if __name__ == '__main__':
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
@@ -119,7 +119,7 @@ if __name__ == '__main__':
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
@@ -137,7 +137,7 @@ if __name__ == '__main__':
cv.PutText(image, "Testing text rendering!",
pt1, font,
random_color(random))
cv.ShowImage(window_name, image)
cv.WaitKey(delay)


@@ -12,10 +12,10 @@ from optparse import OptionParser
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = (20, 20)
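# Illustrative sketch, not part of the diff above: the faster settings described
# in the preceding comment, assuming an input image 'img', a loaded 'cascade' and
# a cv.CreateMemStorage(0) 'storage' as used elsewhere in this sample (old cv
# binding argument order: image, cascade, storage, scale_factor, min_neighbors,
# flags, min_size).
faces = cv.HaarDetectObjects(img, cascade, storage,
                             1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (20, 20))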
@@ -28,7 +28,7 @@ def detect_and_draw(img, cascade):
# allocate temporary images
gray = cv.CreateImage((img.width,img.height), 8, 1)
small_img = cv.CreateImage((cv.Round(img.width / image_scale),
cv.Round (img.height / image_scale)), 8, 1)
# convert color input image to grayscale
cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
@@ -46,7 +46,7 @@ def detect_and_draw(img, cascade):
print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
if faces:
for ((x, y, w, h), n) in faces:
# the input to cv.HaarDetectObjects was resized, so scale the
# bounding box of each face and convert it to two CvPoints
pt1 = (int(x * image_scale), int(y * image_scale))
pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
@@ -61,7 +61,7 @@ if __name__ == '__main__':
(options, args) = parser.parse_args()
cascade = cv.Load(options.cascade)
if len(args) != 1:
parser.print_help()
sys.exit(1)
@@ -88,7 +88,7 @@ if __name__ == '__main__':
cv.Copy(frame, frame_copy)
else:
cv.Flip(frame, frame_copy, 0)
detect_and_draw(frame_copy, cascade)
if cv.WaitKey(10) >= 0:

samples/python/fback.py: Normal file → Executable file, 8 lines changed

@@ -10,7 +10,7 @@ class FBackDemo:
self.mv_color = (0, 255, 0)
self.cflow = None
self.flow = None
NamedWindow( "Optical Flow", 1 )
print( "Press ESC - quit the program\n" )
@@ -28,7 +28,7 @@ class FBackDemo:
def run(self):
first_frame = True
while True:
frame = QueryFrame( self.capture )
@@ -37,7 +37,7 @@ class FBackDemo:
prev_gray = CreateImage(GetSize(frame), 8, 1)
flow = CreateImage(GetSize(frame), 32, 2)
self.cflow = CreateImage(GetSize(frame), 8, 3)
CvtColor(frame, gray, CV_BGR2GRAY)
if not first_frame:
CalcOpticalFlowFarneback(prev_gray, gray, flow,
@@ -47,7 +47,7 @@ class FBackDemo:
c = WaitKey(7)
if c in [27, ord('q'), ord('Q')]:
break
prev_gray, gray = gray, prev_gray
first_frame = False
if __name__=="__main__":


@@ -1,5 +1,5 @@
#!/usr/bin/python
"""
"""
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
@@ -18,7 +18,7 @@ import sys
if __name__ == "__main__":
A = [ [1, 1], [0, 1] ]
img = cv.CreateImage((500, 500), 8, 3)
kalman = cv.CreateKalman(2, 1, 0)
state = cv.CreateMat(2, 1, cv.CV_32FC1) # (phi, delta_phi)
@@ -32,7 +32,7 @@ if __name__ == "__main__":
while True:
cv.RandArr(rng, state, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1))
kalman.transition_matrix[0,0] = 1
kalman.transition_matrix[0,1] = 1
kalman.transition_matrix[1,0] = 0
@@ -43,34 +43,34 @@ if __name__ == "__main__":
cv.SetIdentity(kalman.measurement_noise_cov, cv.RealScalar(1e-1))
cv.SetIdentity(kalman.error_cov_post, cv.RealScalar(1))
cv.RandArr(rng, kalman.state_post, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1))
while True:
def calc_point(angle):
return (cv.Round(img.width/2 + img.width/3*cos(angle)),
cv.Round(img.height/2 - img.width/3*sin(angle)))
state_angle = state[0,0]
state_pt = calc_point(state_angle)
prediction = cv.KalmanPredict(kalman)
predict_angle = prediction[0, 0]
predict_pt = calc_point(predict_angle)
cv.RandArr(rng, measurement, cv.CV_RAND_NORMAL, cv.RealScalar(0),
cv.RealScalar(sqrt(kalman.measurement_noise_cov[0, 0])))
# generate measurement
cv.MatMulAdd(kalman.measurement_matrix, state, measurement, measurement)
measurement_angle = measurement[0, 0]
measurement_pt = calc_point(measurement_angle)
# plot points
def draw_cross(center, color, d):
cv.Line(img, (center[0] - d, center[1] - d),
(center[0] + d, center[1] + d), color, 1, cv.CV_AA, 0)
cv.Line(img, (center[0] + d, center[1] - d),
(center[0] - d, center[1] + d), color, 1, cv.CV_AA, 0)
cv.Zero(img)
@@ -79,7 +79,7 @@ if __name__ == "__main__":
draw_cross(predict_pt, cv.CV_RGB(0, 255, 0), 3)
cv.Line(img, state_pt, measurement_pt, cv.CV_RGB(255, 0,0), 3, cv. CV_AA, 0)
cv.Line(img, state_pt, predict_pt, cv.CV_RGB(255, 255, 0), 3, cv. CV_AA, 0)
cv.KalmanCorrect(kalman, measurement)
cv.RandArr(rng, process_noise, cv.CV_RAND_NORMAL, cv.RealScalar(0),
@@ -87,12 +87,12 @@ if __name__ == "__main__":
cv.MatMulAdd(kalman.transition_matrix, state, process_noise, state)
cv.ShowImage("Kalman", img)
code = cv.WaitKey(100) % 0x100
if code != -1:
break
if code in [27, ord('q'), ord('Q')]:
break
cv.DestroyWindow("Kalman")


@@ -16,13 +16,13 @@ if __name__ == "__main__":
rng = cv.RNG(-1)
cv.NamedWindow("clusters", 1)
while True:
cluster_count = randint(2, MAX_CLUSTERS)
sample_count = randint(1, 1000)
points = cv.CreateMat(sample_count, 1, cv.CV_32FC2)
clusters = cv.CreateMat(sample_count, 1, cv.CV_32SC1)
# generate random sample from multigaussian distribution
for k in range(cluster_count):
center = (cv.RandInt(rng)%img.width, cv.RandInt(rng)%img.height)
@@ -32,13 +32,13 @@ if __name__ == "__main__":
last = (k+1)*sample_count/cluster_count
point_chunk = cv.GetRows(points, first, last)
cv.RandArr(rng, point_chunk, cv.CV_RAND_NORMAL,
cv.Scalar(center[0], center[1], 0, 0),
cv.Scalar(img.width*0.1, img.height*0.1, 0, 0))
# shuffle samples
cv.RandShuffle(points, rng)
cv.KMeans2(points, cluster_count, clusters,
@@ -56,5 +56,5 @@ if __name__ == "__main__":
key = cv.WaitKey(0) % 0x100
if key in [27, ord('q'), ord('Q')]:
break
cv.DestroyWindow("clusters")


@@ -8,18 +8,18 @@ if __name__ == "__main__":
colorlaplace = None
planes = [ None, None, None ]
capture = None
if len(sys.argv) == 1:
capture = cv.CreateCameraCapture(0)
elif len(sys.argv) == 2 and sys.argv[1].isdigit():
capture = cv.CreateCameraCapture(int(sys.argv[1]))
elif len(sys.argv) == 2:
capture = cv.CreateFileCapture(sys.argv[1])
if not capture:
print "Could not initialize capturing..."
sys.exit(-1)
cv.NamedWindow("Laplacian", 1)
while True:


@@ -32,7 +32,7 @@ def on_mouse (event, x, y, flags, param):
# we will use the global pt and add_remove_pt
global pt
global add_remove_pt
if image is None:
# not initialized, so skip
return
@@ -149,7 +149,7 @@ if __name__ == '__main__':
# draw the points as green circles
for the_point in features:
cv.Circle (image, (int(the_point[0]), int(the_point[1])), 3, (0, 255, 0, 0), -1, 8, 0)
if add_remove_pt:
# we want to add a point
# refine this corner location and append it to 'features'
@@ -167,7 +167,7 @@ if __name__ == '__main__':
prev_grey, grey = grey, prev_grey
prev_pyramid, pyramid = pyramid, prev_pyramid
need_to_init = False
# we can now display the image
cv.ShowImage ('LkDemo', image)


@@ -1,4 +1,4 @@
#!/usr/bin/python
import urllib2
import cv2.cv as cv
@@ -14,7 +14,7 @@ def draw_common(points):
box = cv.MinAreaRect2(points)
box_vtx = [roundxy(p) for p in cv.BoxPoints(box)]
cv.PolyLine(img, [box_vtx], 1, cv.CV_RGB(0, 255, 255), 1, cv. CV_AA)
def minarea_array(img, count):
pointMat = cv.CreateMat(count, 1, cv.CV_32SC2)
@@ -43,10 +43,10 @@ if __name__ == "__main__":
storage = cv.CreateMemStorage()
cv.NamedWindow("rect & circle", 1)
use_seq = True
while True:
count = randint(1, 100)
if use_seq:
minarea_seq(img, count, storage)


@@ -10,7 +10,7 @@ MHI_DURATION = 1
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4
buf = range(10)
last = 0
mhi = None # MHI
orient = None # orientation
@@ -37,7 +37,7 @@ def update_mhi(img, dst, diff_threshold):
orient = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
segmask = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
mask = cv.CreateImage(size,cv. IPL_DEPTH_8U, 1)
cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
idx2 = (last + 1) % N # index of (last - (N-1))th frame
last = idx2
@@ -87,12 +87,12 @@ if __name__ == "__main__":
elif len(sys.argv)==2 and sys.argv[1].isdigit():
capture = cv.CreateCameraCapture(int(sys.argv[1]))
elif len(sys.argv)==2:
capture = cv.CreateFileCapture(sys.argv[1])
if not capture:
print "Could not initialize capturing..."
sys.exit(-1)
cv.NamedWindow("Motion", 1)
while True:
image = cv.QueryFrame(capture)

samples/python/numpy_array.py: Normal file → Executable file, 0 lines changed

samples/python/numpy_warhol.py: Normal file → Executable file, 0 lines changed

samples/python/peopledetect.py: Normal file → Executable file, 4 lines changed

@@ -28,7 +28,7 @@ for name in imglist:
img = LoadImage(n)
except:
continue
#ClearMemStorage(storage)
found = list(HOGDetectMultiScale(img, storage, win_stride=(8,8),
padding=(32,32), scale=1.05, group_threshold=2))
@@ -46,7 +46,7 @@ for name in imglist:
tl = (rx + int(rw*0.1), ry + int(rh*0.07))
br = (rx + int(rw*0.9), ry + int(rh*0.87))
Rectangle(img, tl, br, (0, 255, 0), 3)
ShowImage("people detection demo", img)
c = WaitKey(0)
if c == ord('q'):


@@ -28,7 +28,7 @@ class PyrSegmentation:
comp = cv.PyrSegmentation(self.image0, self.image1, self.storage, \
self.level, self.thresh1+1, self.thresh2+1)
cv.ShowImage("Segmentation", self.image1)
def run(self):
self.on_segment()
cv.WaitKey(0)


@@ -46,7 +46,7 @@ def findSquares4(img, storage):
# extract the c-th color plane
channels = [None, None, None]
channels[c] = tgray
cv.Split(subimage, channels[0], channels[1], channels[2], None)
for l in range(N):
# hack: use Canny instead of zero threshold level.
# Canny helps to catch squares with gradient shading
@@ -68,7 +68,7 @@ def findSquares4(img, storage):
if not contours:
continue
# test each contour
for contour in contours.hrange():
# approximate contour with accuracy proportional
@@ -81,8 +81,8 @@ def findSquares4(img, storage):
# Note: absolute value of an area is used because
# area may be positive or negative - in accordance with the
# contour orientation
if(result.total == 4 and
abs(cv.ContourArea(result)) > 1000 and
cv.CheckContourConvexity(result)):
s = 0
for i in range(5):