From 5b00763bb0fcb41bf3c34cfaab1f82415ca38ebb Mon Sep 17 00:00:00 2001
From: Alexander Mordvintsev
Date: Sat, 20 Aug 2011 19:54:03 +0000
Subject: [PATCH] work on demo.py shell

---
 samples/python2/browse.py          |  63 ++++++++------
 samples/python2/color_histogram.py |  72 ++++++++--------
 samples/python2/demo.py            | 132 +++++++++++++++++++++++++++++
 samples/python2/edge.py            |  43 +++++-----
 samples/python2/turing.py          |  78 ++++++++---------
 5 files changed, 264 insertions(+), 124 deletions(-)
 create mode 100644 samples/python2/demo.py

diff --git a/samples/python2/browse.py b/samples/python2/browse.py
index 64f23b8d5..9ce45992b 100644
--- a/samples/python2/browse.py
+++ b/samples/python2/browse.py
@@ -1,38 +1,47 @@
 '''
-browse.py shows how to implement a simple hi resolution image navigation
+browse.py
+=========
+
+Sample shows how to implement a simple hi resolution image navigation
+
+Usage
+-----
+browse.py [image filename]
+
 '''
 
 import numpy as np
 import cv2
 import sys
 
-print 'This sample shows how to implement a simple hi resolution image navigation.'
-print 'USAGE: browse.py [image filename]'
-print
+if __name__ == '__main__':
+    print 'This sample shows how to implement a simple hi resolution image navigation.'
+    print 'USAGE: browse.py [image filename]'
+    print
 
-if len(sys.argv) > 1:
-    fn = sys.argv[1]
-    print 'loading %s ...' % fn
-    img = cv2.imread(fn)
-else:
-    sz = 4096
-    print 'generating %dx%d procedural image ...' % (sz, sz)
-    img = np.zeros((sz, sz), np.uint8)
-    track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
-    track = np.int32(track*10 + (sz/2, sz/2))
-    cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA)
+    if len(sys.argv) > 1:
+        fn = sys.argv[1]
+        print 'loading %s ...' % fn
+        img = cv2.imread(fn)
+    else:
+        sz = 4096
+        print 'generating %dx%d procedural image ...' % (sz, sz)
+        img = np.zeros((sz, sz), np.uint8)
+        track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
+        track = np.int32(track*10 + (sz/2, sz/2))
+        cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA)
 
-small = img
-for i in xrange(3):
-    small = cv2.pyrDown(small)
+    small = img
+    for i in xrange(3):
+        small = cv2.pyrDown(small)
 
-def onmouse(event, x, y, flags, param):
-    h, w = img.shape[:2]
-    h1, w1 = small.shape[:2]
-    x, y = 1.0*x*h/h1, 1.0*y*h/h1
-    zoom = cv2.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
-    cv2.imshow('zoom', zoom)
+    def onmouse(event, x, y, flags, param):
+        h, w = img.shape[:2]
+        h1, w1 = small.shape[:2]
+        x, y = 1.0*x*h/h1, 1.0*y*h/h1
+        zoom = cv2.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
+        cv2.imshow('zoom', zoom)
 
-cv2.imshow('preview', small)
-cv2.setMouseCallback('preview', onmouse)
-cv2.waitKey()
+    cv2.imshow('preview', small)
+    cv2.setMouseCallback('preview', onmouse)
+    cv2.waitKey()
diff --git a/samples/python2/color_histogram.py b/samples/python2/color_histogram.py
index 87041fcc5..7038dc6f8 100644
--- a/samples/python2/color_histogram.py
+++ b/samples/python2/color_histogram.py
@@ -5,47 +5,43 @@ import sys
 import video
 
-hsv_map = np.zeros((180, 256, 3), np.uint8)
-h, s = np.indices(hsv_map.shape[:2])
-hsv_map[:,:,0] = h
-hsv_map[:,:,1] = s
-hsv_map[:,:,2] = 255
-hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
-cv2.imshow('hsv_map', hsv_map)
+if __name__ == '__main__':
 
-cv2.namedWindow('hist', 0)
-hist_scale = 10
-def set_scale(val):
-    global hist_scale
-    hist_scale = val
-cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
+    hsv_map = np.zeros((180, 256, 3), np.uint8)
+    h, s = np.indices(hsv_map.shape[:2])
+    hsv_map[:,:,0] = h
+    hsv_map[:,:,1] = s
+    hsv_map[:,:,2] = 255
+    hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
+    cv2.imshow('hsv_map', hsv_map)
 
-try: fn = sys.argv[1]
-except: fn = 'synth:bg=../cpp/baboon.jpg:class=chess:noise=0.05'
-cam = video.create_capture(fn)
+    cv2.namedWindow('hist', 0)
+    hist_scale = 10
+    def set_scale(val):
+        global hist_scale
+        hist_scale = val
+    cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
 
-t = clock()
-while True:
-    flag, frame = cam.read()
-    cv2.imshow('camera', frame)
-
-    small = cv2.pyrDown(frame)
+    try: fn = sys.argv[1]
+    except: fn = 'synth:bg=../cpp/baboon.jpg:class=chess:noise=0.05'
+    cam = video.create_capture(fn)
 
-    hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
-    dark = hsv[...,2] < 32
-    hsv[dark] = 0
-    h = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
+    while True:
+        flag, frame = cam.read()
+        cv2.imshow('camera', frame)
+
+        small = cv2.pyrDown(frame)
+
+        hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
+        dark = hsv[...,2] < 32
+        hsv[dark] = 0
+        h = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
 
-    h = np.clip(h*0.005*hist_scale, 0, 1)
-    vis = hsv_map*h[:,:,np.newaxis] / 255.0
-    cv2.imshow('hist', vis)
-
-
-    t1 = clock()
-    #print (t1-t)*1000
-    t = t1
-
-    ch = cv2.waitKey(1)
-    if ch == 27:
-        break
+        h = np.clip(h*0.005*hist_scale, 0, 1)
+        vis = hsv_map*h[:,:,np.newaxis] / 255.0
+        cv2.imshow('hist', vis)
+
+        ch = cv2.waitKey(1)
+        if ch == 27:
+            break
diff --git a/samples/python2/demo.py b/samples/python2/demo.py
new file mode 100644
index 000000000..4365d7c91
--- /dev/null
+++ b/samples/python2/demo.py
@@ -0,0 +1,132 @@
+import Tkinter as tk
+from ScrolledText import ScrolledText
+from glob import glob
+from common import splitfn
+
+#from IPython.Shell import IPShellEmbed
+#ipshell = IPShellEmbed()
+
+exclude_list = ['demo', 'common']
+
+class LinkManager:
+    def __init__(self, text, url_callback = None):
+        self.text = text
+        self.text.tag_config("link", foreground="blue", underline=1)
+        self.text.tag_bind("link", "<Enter>", self._enter)
+        self.text.tag_bind("link", "<Leave>", self._leave)
+        self.text.tag_bind("link", "<Button-1>", self._click)
+
+        self.url_callback = url_callback
+        self.reset()
+
+    def reset(self):
+        self.links = {}
+    def add(self, action):
+        # add an action to the manager. returns tags to use in
+        # associated text widget
+        tag = "link-%d" % len(self.links)
+        self.links[tag] = action
+        return "link", tag
+
+    def _enter(self, event):
+        self.text.config(cursor="hand2")
+    def _leave(self, event):
+        self.text.config(cursor="")
+    def _click(self, event):
+        for tag in self.text.tag_names(tk.CURRENT):
+            if tag.startswith("link-"):
+                proc = self.links[tag]
+                if callable(proc):
+                    proc()
+                else:
+                    if self.url_callback:
+                        self.url_callback(proc)
+
+class App:
+    def __init__(self):
+        root = tk.Tk()
+        root.title('OpenCV Demo')
+
+        self.win = win = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashrelief=tk.RAISED, sashwidth=4)
+        self.win.pack(fill=tk.BOTH, expand=1)
+
+        left = tk.Frame(win)
+        right = tk.Frame(win)
+        win.add(left)
+        win.add(right)
+
+        scrollbar = tk.Scrollbar(left, orient=tk.VERTICAL)
+        self.demos_lb = demos_lb = tk.Listbox(left, yscrollcommand=scrollbar.set)
+        scrollbar.config(command=demos_lb.yview)
+        scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
+        demos_lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
+
+        self.samples = {}
+        for fn in glob('*.py'):
+            name = splitfn(fn)[1]
+            if fn[0] != '_' and name not in exclude_list:
+                demos_lb.insert(tk.END, name)
+                self.samples[name] = fn
+        demos_lb.bind('<<ListboxSelect>>', self.on_demo_select)
+
+        self.text = text = ScrolledText(right, font=('arial', 12, 'normal'), width = 30, wrap='word')
+        text.pack(fill='both', expand=1)
+        self.linker = linker = LinkManager(text, self.on_link)
+
+        self.text.tag_config("header1", font=('arial', 14, 'bold'))
+        self.text.tag_config("header2", font=('arial', 12, 'bold'))
+
+        text.config(state='disabled')
+
+    def on_link(self, url):
+        print url
+
+    def on_demo_select(self, evt):
+        name = self.demos_lb.get( self.demos_lb.curselection()[0] )
+        fn = self.samples[name]
+        loc = {}
+        execfile(fn, loc)
+        descr = loc.get('__doc__', 'no-description')
+
+        self.linker.reset()
+        self.text.config(state='normal')
+        self.text.delete(1.0, tk.END)
+        self.format_text(descr)
+        self.text.config(state='disabled')
+
+    def format_text(self, s):
+        text = self.text
+        lines = s.splitlines()
+        for i, s in enumerate(lines):
+            s = s.rstrip()
+            if i == 0 and not s:
+                continue
+            sn = len(s)
+            if s and s == '='*sn:
+                text.tag_add('header1', 'end-2l', 'end-1l')
+            elif s and s == '-'*sn:
+                text.tag_add('header2', 'end-2l', 'end-1l')
+            else:
+                text.insert('end', s+'\n')
+
+    def format_line(self, s):
+        text = self.text
+        pos, n = 0, len(s)
+        while pos < n:
+            next = s.find('http://', pos)
+            if next < 0:
+                next = n
+            text.insert(tk.END, s[pos:next])
+            pos = next
+
+            #text.insert(tk.END, "click here!", linker.add('http://asdfsdaf'))
+
+
+    def run(self):
+        tk.mainloop()
+
+
+if __name__ == '__main__':
+    App().run()
diff --git a/samples/python2/edge.py b/samples/python2/edge.py
index 9b72dd80f..cd5d21679 100644
--- a/samples/python2/edge.py
+++ b/samples/python2/edge.py
@@ -2,28 +2,29 @@ import cv2
 import video
 import sys
 
-try: fn = sys.argv[1]
-except: fn = video.presets['chess']
+if __name__ == '__main__':
+    try: fn = sys.argv[1]
+    except: fn = video.presets['chess']
 
-def nothing(*arg):
-    pass
+    def nothing(*arg):
+        pass
 
-cv2.namedWindow('edge')
-cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
-cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
+    cv2.namedWindow('edge')
+    cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
+    cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
 
-cap = video.create_capture(fn)
-while True:
-    flag, img = cap.read()
-    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
-    thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
-    edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
-    vis = img.copy()
-    vis /= 2
-    vis[edge != 0] = (0, 255, 0)
-    cv2.imshow('edge', vis)
-    ch = cv2.waitKey(5)
-    if ch == 27:
-        break
+    cap = video.create_capture(fn)
+    while True:
+        flag, img = cap.read()
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
+        thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
+        edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
+        vis = img.copy()
+        vis /= 2
+        vis[edge != 0] = (0, 255, 0)
+        cv2.imshow('edge', vis)
+        ch = cv2.waitKey(5)
+        if ch == 27:
+            break
diff --git a/samples/python2/turing.py b/samples/python2/turing.py
index f04c5a1ef..af9b9dec5 100644
--- a/samples/python2/turing.py
+++ b/samples/python2/turing.py
@@ -15,46 +15,48 @@ USAGE: turing.py [-o <output.avi>]
 Press ESC to stop.
 '''
 
-print help_message
-w, h = 512, 512
+if __name__ == '__main__':
+    print help_message
 
-args, args_list = getopt.getopt(sys.argv[1:], 'o:', [])
-args = dict(args)
-out = None
-if '-o' in args:
-    fn = args['-o']
-    out = cv2.VideoWriter(args['-o'], cv.CV_FOURCC(*'DIB '), 30.0, (w, h), False)
-    print 'writing %s ...' % fn
+    w, h = 512, 512
 
-a = np.zeros((h, w), np.float32)
-cv2.randu(a, np.array([0]), np.array([1]))
+    args, args_list = getopt.getopt(sys.argv[1:], 'o:', [])
+    args = dict(args)
+    out = None
+    if '-o' in args:
+        fn = args['-o']
+        out = cv2.VideoWriter(args['-o'], cv.CV_FOURCC(*'DIB '), 30.0, (w, h), False)
+        print 'writing %s ...' % fn
 
-def process_scale(a_lods, lod):
-    d = a_lods[lod] - cv2.pyrUp(a_lods[lod+1])
-    for i in xrange(lod):
-        d = cv2.pyrUp(d)
-    v = cv2.GaussianBlur(d*d, (3, 3), 0)
-    return np.sign(d), v
-
-scale_num = 6
-for frame_i in count():
-    a_lods = [a]
-    for i in xrange(scale_num):
-        a_lods.append(cv2.pyrDown(a_lods[-1]))
-    ms, vs = [], []
-    for i in xrange(1, scale_num):
-        m, v = process_scale(a_lods, i)
-        ms.append(m)
-        vs.append(v)
-    mi = np.argmin(vs, 0)
-    a += np.choose(mi, ms) * 0.025
-    a = (a-a.min()) / a.ptp()
+    a = np.zeros((h, w), np.float32)
+    cv2.randu(a, np.array([0]), np.array([1]))
 
-    if out:
-        out.write(a)
-    vis = a.copy()
-    draw_str(vis, (20, 20), 'frame %d' % frame_i)
-    cv2.imshow('a', vis)
-    if cv2.waitKey(5) == 27:
-        break
+    def process_scale(a_lods, lod):
+        d = a_lods[lod] - cv2.pyrUp(a_lods[lod+1])
+        for i in xrange(lod):
+            d = cv2.pyrUp(d)
+        v = cv2.GaussianBlur(d*d, (3, 3), 0)
+        return np.sign(d), v
+
+    scale_num = 6
+    for frame_i in count():
+        a_lods = [a]
+        for i in xrange(scale_num):
+            a_lods.append(cv2.pyrDown(a_lods[-1]))
+        ms, vs = [], []
+        for i in xrange(1, scale_num):
+            m, v = process_scale(a_lods, i)
+            ms.append(m)
+            vs.append(v)
+        mi = np.argmin(vs, 0)
+        a += np.choose(mi, ms) * 0.025
+        a = (a-a.min()) / a.ptp()
+
+        if out:
+            out.write(a)
+        vis = a.copy()
+        draw_str(vis, (20, 20), 'frame %d' % frame_i)
+        cv2.imshow('a', vis)
+        if cv2.waitKey(5) == 27:
+            break
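
The restructuring this patch applies to every sample is what demo.py relies on: the description lives in the module docstring, with '=====' and '-----' underlines that format_text styles as headers, and everything with side effects sits under an 'if __name__ == "__main__":' guard, so execfile-ing a file just to read its __doc__ opens no windows. Below is a minimal sketch of a new sample written to that convention; the file name hello.py and its body are only an illustration of the pattern, not part of this patch.

    '''
    hello.py
    ========

    Minimal sample skeleton in the layout demo.py expects: the docstring
    first, all real work behind the __main__ guard.

    Usage
    -----
    hello.py [image filename]
    '''

    import sys
    import cv2

    if __name__ == '__main__':
        print __doc__
        # use a bundled test image when no filename is given
        # (the path is just an example, borrowed from color_histogram.py)
        fn = sys.argv[1] if len(sys.argv) > 1 else '../cpp/baboon.jpg'
        img = cv2.imread(fn)
        cv2.imshow('hello', img)
        cv2.waitKey()

Dropped into samples/python2, such a file appears in the demo.py listbox as hello; selecting it shows the docstring with 'hello.py' rendered in the header1 style, while the guarded block keeps the execfile call from opening any windows.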