Merged the trunk r8852:8880
@@ -17,6 +17,7 @@ from common import clock, mosaic

SZ = 20 # size of each digit is SZ x SZ
CLASS_N = 10
DIGITS_FN = 'digits.png'

def load_digits(fn):
    print 'loading "%s" ...' % fn

@@ -95,7 +96,7 @@ def evaluate_model(model, digits, samples, labels):

if __name__ == '__main__':
    print __doc__

    digits, labels = load_digits('digits.png')
    digits, labels = load_digits(DIGITS_FN)

    print 'preprocessing...'
    # shuffle digits
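Side note, not part of the commit: the new DIGITS_FN constant points at a digit mosaic that load_digits slices into SZ x SZ cells. A rough, hypothetical sketch of that idea is below; the grid layout, the helper name split_cells and the equal-class assumption are illustrative guesses, not code from this changeset.

import numpy as np
import cv2

SZ = 20       # cell size, as above
CLASS_N = 10  # ten digit classes

def split_cells(fn):
    # assumes the mosaic height and width are exact multiples of SZ
    img = cv2.imread(fn, 0)
    h, w = img.shape
    cells = [np.hsplit(row, w//SZ) for row in np.vsplit(img, h//SZ)]
    cells = np.array(cells).reshape(-1, SZ, SZ)
    # assumes the digits are stored class by class, equally many per class
    labels = np.repeat(np.arange(CLASS_N), len(cells)/CLASS_N)
    return cells, labels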
@@ -11,11 +11,10 @@ Usage:

  digits_adjust.py [--model {svm|knearest}] [--cloud] [--env <PiCloud environment>]

  --model {svm|knearest} - select the classifier (SVM is the default)
  --cloud - use PiCloud computing platform (for SVM only)
  --cloud - use PiCloud computing platform
  --env - cloud environment name

'''
# TODO dataset preprocessing in cloud
# TODO cloud env setup tutorial

import numpy as np
@@ -24,6 +23,14 @@ from multiprocessing.pool import ThreadPool

from digits import *

try:
    import cloud
    have_cloud = True
except ImportError:
    have_cloud = False


def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):
    n = len(samples)
    folds = np.array_split(np.arange(n), kfold)
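For reference, a minimal standalone sketch of the k-fold loop that a cross_validate like the one declared above could run. The train()/predict() interface of model_class is an assumption here; only the np.array_split fold bookkeeping mirrors the signature shown in the diff.

import numpy as np

def kfold_error(model_class, params, samples, labels, kfold=3):
    n = len(samples)
    folds = np.array_split(np.arange(n), kfold)
    errs = []
    for i in range(kfold):
        test_idx = folds[i]
        train_idx = np.hstack(folds[:i] + folds[i+1:])
        model = model_class(**params)
        model.train(samples[train_idx], labels[train_idx])
        resp = model.predict(samples[test_idx])
        errs.append((resp != labels[test_idx]).mean())
    return np.mean(errs)   # mean error over the held-out folds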
@@ -46,66 +53,88 @@ def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None)
        scores = pool.map(f, xrange(kfold))
    return np.mean(scores)

def adjust_KNearest(samples, labels):
    print 'adjusting KNearest ...'
    best_err, best_k = np.inf, -1
    for k in xrange(1, 9):
        err = cross_validate(KNearest, dict(k=k), samples, labels)
        if err < best_err:
            best_err, best_k = err, k
        print 'k = %d, error: %.2f %%' % (k, err*100)
    best_params = dict(k=best_k)
    print 'best params:', best_params
    return best_params

def adjust_SVM(samples, labels, usecloud=False, cloud_env=''):
    Cs = np.logspace(0, 5, 10, base=2)
    gammas = np.logspace(-7, -2, 10, base=2)
    scores = np.zeros((len(Cs), len(gammas)))
    scores[:] = np.nan

    if usecloud:
        try:
            import cloud
        except ImportError:
            print 'cloud module is not installed'
class App(object):
    def __init__(self, usecloud=False, cloud_env=''):
        if usecloud and not have_cloud:
            print 'warning: cloud module is not installed, running locally'
            usecloud = False
    if usecloud:
        print 'uploading dataset to cloud...'
        np.savez('train.npz', samples=samples, labels=labels)
        cloud.files.put('train.npz')
        self.usecloud = usecloud
        self.cloud_env = cloud_env

    print 'adjusting SVM (may take a long time) ...'
    def f(job):
        i, j = job
        params = dict(C = Cs[i], gamma=gammas[j])
        score = cross_validate(SVM, params, samples, labels)
        return i, j, score
    def fcloud(job):
        i, j = job
        cloud.files.get('train.npz')
        npz = np.load('train.npz')
        params = dict(C = Cs[i], gamma=gammas[j])
        score = cross_validate(SVM, params, npz['samples'], npz['labels'])
        return i, j, score

    if usecloud:
        jids = cloud.map(fcloud, np.ndindex(*scores.shape), _env=cloud_env, _profile=True)
        ires = cloud.iresult(jids)
    else:
        pool = ThreadPool(processes=cv2.getNumberOfCPUs())
        ires = pool.imap_unordered(f, np.ndindex(*scores.shape))
        if self.usecloud:
            print 'uploading dataset to cloud...'
            cloud.files.put(DIGITS_FN)
            self.preprocess_job = cloud.call(self.preprocess, _env=self.cloud_env)
        else:
            self._samples, self._labels = self.preprocess()

    for count, (i, j, score) in enumerate(ires):
        scores[i, j] = score
        print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)
    print scores
    def preprocess(self):
        if self.usecloud:
            cloud.files.get(DIGITS_FN)
        digits, labels = load_digits(DIGITS_FN)
        shuffle = np.random.permutation(len(digits))
        digits, labels = digits[shuffle], labels[shuffle]
        digits2 = map(deskew, digits)
        samples = np.float32(digits2).reshape(-1, SZ*SZ) / 255.0
        return samples, labels

    def get_dataset(self):
        if self.usecloud:
            return cloud.result(self.preprocess_job)
        else:
            return self._samples, self._labels

    def run_jobs(self, f, jobs):
        if self.usecloud:
            jids = cloud.map(f, jobs, _env=self.cloud_env, _profile=True, _depends_on=self.preprocess_job)
            ires = cloud.iresult(jids)
        else:
            pool = ThreadPool(processes=cv2.getNumberOfCPUs())
            ires = pool.imap_unordered(f, jobs)
        return ires

    def adjust_SVM(self):
        Cs = np.logspace(0, 5, 10, base=2)
        gammas = np.logspace(-7, -2, 10, base=2)
        scores = np.zeros((len(Cs), len(gammas)))
        scores[:] = np.nan

        print 'adjusting SVM (may take a long time) ...'
        def f(job):
            i, j = job
            samples, labels = self.get_dataset()
            params = dict(C = Cs[i], gamma=gammas[j])
            score = cross_validate(SVM, params, samples, labels)
            return i, j, score

        ires = self.run_jobs(f, np.ndindex(*scores.shape))
        for count, (i, j, score) in enumerate(ires):
            scores[i, j] = score
            print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)
        print scores

        i, j = np.unravel_index(scores.argmin(), scores.shape)
        best_params = dict(C = Cs[i], gamma=gammas[j])
        print 'best params:', best_params
        print 'best error: %.2f %%' % (scores.min()*100)
        return best_params

    def adjust_KNearest(self):
        print 'adjusting KNearest ...'
        def f(k):
            samples, labels = self.get_dataset()
            err = cross_validate(KNearest, dict(k=k), samples, labels)
            return k, err
        best_err, best_k = np.inf, -1
        for k, err in self.run_jobs(f, xrange(1, 9)):
            if err < best_err:
                best_err, best_k = err, k
            print 'k = %d, error: %.2f %%' % (k, err*100)
        best_params = dict(k=best_k)
        print 'best params:', best_params, 'err: %.2f' % (best_err*100)
        return best_params

    i, j = np.unravel_index(scores.argmin(), scores.shape)
    best_params = dict(C = Cs[i], gamma=gammas[j])
    print 'best params:', best_params
    print 'best error: %.2f %%' % (scores.min()*100)
    return best_params


if __name__ == '__main__':
    import getopt
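The new App.adjust_SVM above walks a logarithmic C/gamma grid and then maps the flat argmin back to grid coordinates. A tiny self-contained sketch of just that indexing pattern follows, with a made-up score in place of the real cross-validation error.

import numpy as np

Cs = np.logspace(0, 5, 10, base=2)
gammas = np.logspace(-7, -2, 10, base=2)
scores = np.zeros((len(Cs), len(gammas)))

for i, j in np.ndindex(*scores.shape):
    scores[i, j] = abs(Cs[i] - 4.0) + abs(gammas[j] - 0.02)   # dummy error surface

i, j = np.unravel_index(scores.argmin(), scores.shape)
print 'best params:', dict(C=Cs[i], gamma=gammas[j])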
@@ -113,6 +142,7 @@ if __name__ == '__main__':

    print __doc__

    args, _ = getopt.getopt(sys.argv[1:], '', ['model=', 'cloud', 'env='])
    args = dict(args)
    args.setdefault('--model', 'svm')
@@ -121,16 +151,10 @@ if __name__ == '__main__':
        print 'unknown model "%s"' % args['--model']
        sys.exit(1)

    digits, labels = load_digits('digits.png')
    shuffle = np.random.permutation(len(digits))
    digits, labels = digits[shuffle], labels[shuffle]
    digits2 = map(deskew, digits)
    samples = np.float32(digits2).reshape(-1, SZ*SZ) / 255.0

    t = clock()
    app = App(usecloud='--cloud' in args, cloud_env = args['--env'])
    if args['--model'] == 'knearest':
        adjust_KNearest(samples, labels)
        app.adjust_KNearest()
    else:
        adjust_SVM(samples, labels, usecloud='--cloud' in args, cloud_env = args['--env'])
        app.adjust_SVM()
    print 'work time: %f s' % (clock() - t)
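The local branch of run_jobs relies on ThreadPool.imap_unordered, so partial results stream back as soon as each job finishes. A toy sketch of that pattern is below; cpu_count() stands in for cv2.getNumberOfCPUs() and the job body is a placeholder, not the real cross-validation run.

from multiprocessing.pool import ThreadPool
from multiprocessing import cpu_count

def job(k):
    return k, k*k   # pretend this is a cross-validation run

pool = ThreadPool(processes=cpu_count())
for k, err in pool.imap_unordered(job, xrange(1, 9)):
    print 'job %d finished, result %d' % (k, err)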
78  samples/python2/fitline.py  Normal file
@@ -0,0 +1,78 @@
'''
Robust line fitting.
==================

Example of using cv2.fitLine function for fitting a line to points in presence of outliers.

Usage
-----
fitline.py

Switch through different M-estimator functions and see how well the robust functions
fit the line even in case of ~50% of outliers.

Keys
----
SPACE - generate random points
f     - change distance function
ESC   - exit
'''

import numpy as np
import cv2
import itertools as it
from common import draw_str


w, h = 512, 256

def toint(p):
    return tuple(map(int, p))

def sample_line(p1, p2, n, noise=0.0):
    p1 = np.float32(p1)
    t = np.random.rand(n,1)
    return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise

dist_func_names = it.cycle('CV_DIST_L2 CV_DIST_L1 CV_DIST_L12 CV_DIST_FAIR CV_DIST_WELSCH CV_DIST_HUBER'.split())
cur_func_name = dist_func_names.next()

def update(_=None):
    noise = cv2.getTrackbarPos('noise', 'fit line')
    n = cv2.getTrackbarPos('point n', 'fit line')
    r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0
    outn = int(n*r)

    p0, p1 = (90, 80), (w-90, h-80)
    img = np.zeros((h, w, 3), np.uint8)
    cv2.line(img, toint(p0), toint(p1), (0, 255, 0))

    if n > 0:
        line_points = sample_line(p0, p1, n-outn, noise)
        outliers = np.random.rand(outn, 2) * (w, h)
        points = np.vstack([line_points, outliers])
        for p in line_points:
            cv2.circle(img, toint(p), 2, (255, 255, 255), -1)
        for p in outliers:
            cv2.circle(img, toint(p), 2, (64, 64, 255), -1)
        func = getattr(cv2.cv, cur_func_name)
        vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
        cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))

    draw_str(img, (20, 20), cur_func_name)
    cv2.imshow('fit line', img)

if __name__ == '__main__':
    print __doc__

    cv2.namedWindow('fit line')
    cv2.createTrackbar('noise', 'fit line', 3, 50, update)
    cv2.createTrackbar('point n', 'fit line', 100, 500, update)
    cv2.createTrackbar('outlier %', 'fit line', 30, 100, update)
    while True:
        update()
        ch = cv2.waitKey(0)
        if ch == ord('f'):
            cur_func_name = dist_func_names.next()
        if ch == 27:
            break
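Stripped of the GUI, the call at the heart of fitline.py is cv2.fitLine, which returns a unit direction vector (vx, vy) and a point (cx, cy) on the fitted line. A headless sketch using the same OpenCV 2.x constants follows; the sample points and noise level here are made up.

import numpy as np
import cv2

p1, p2 = np.float32([90, 80]), np.float32([422, 176])
t = np.random.rand(50, 1)
points = p1 + (p2 - p1)*t + np.random.normal(size=(50, 2))*3.0   # noisy samples of a line

vx, vy, cx, cy = cv2.fitLine(np.float32(points), cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
print 'direction: (%.3f, %.3f), point: (%.1f, %.1f)' % (float(vx), float(vy), float(cx), float(cy))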
@@ -1,3 +1,32 @@
'''
Video capture sample.

Sample shows how VideoCapture class can be used to acquire video
frames from a camera or a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).

'create_capture' is a convenience function for capture creation,
falling back to procedural video in case of error.

Usage:
    video.py [--shotdir <shot path>] [source0] [source1] ...

    sourceN is an
     - integer number for camera capture
     - name of video file
     - synth:<params> for procedural video

Synth examples:
    synth:bg=../cpp/lena.jpg:noise=0.1
    synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480

Keys:
    ESC   - exit
    SPACE - save current frame to <shot path> directory

'''
import numpy as np
import cv2
from time import clock
@@ -100,8 +129,7 @@ presets = dict(


def create_capture(source = 0, fallback = presets['chess']):
    '''
    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
    '''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
    '''
    source = str(source).strip()
    chunks = source.split(':')
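A hypothetical caller of create_capture as documented above: the synth parameters come straight from the module docstring, and the read()/imshow loop assumes the standard VideoCapture-style interface the sample mimics. This is illustrative usage, not part of the commit.

import cv2
from video import create_capture   # assumes this sample module is importable

cap = create_capture('synth:bg=../cpp/lena.jpg:noise=0.1')
while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(30) == 27:   # ESC
        break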
@@ -136,9 +164,7 @@ if __name__ == '__main__':
    import sys
    import getopt

    print 'USAGE: video.py [--shotdir <dir>] [source0] [source1] ...'
    print "source: '<int>' or '<filename>' or 'synth:<params>'"
    print
    print __doc__

    args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
    args = dict(args)
@@ -146,8 +172,6 @@ if __name__ == '__main__':
    if len(sources) == 0:
        sources = [ 0 ]

    print 'Press SPACE to save current frame'

    caps = map(create_capture, sources)
    shot_idx = 0
    while True:
@@ -1,18 +1,31 @@
import numpy as np
import cv2
from common import Sketcher
'''
Watershed segmentation
=========

help_message = '''
USAGE: watershed.py [<image>]
This program demonstrates the watershed segmentation algorithm
in OpenCV: watershed().

Use keys 1 - 7 to switch marker color
Usage
-----
watershed.py [image filename]

Keys
----
  1-7   - switch marker color
  SPACE - update segmentation
  r     - reset
  a     - switch autoupdate
  a     - toggle autoupdate
  ESC   - exit

'''


import numpy as np
import cv2
from common import Sketcher


class App:
    def __init__(self, fn):
        self.img = cv2.imread(fn)

@@ -60,5 +73,5 @@ if __name__ == '__main__':
    import sys
    try: fn = sys.argv[1]
    except: fn = '../cpp/fruits.jpg'
    print help_message
    print __doc__
    App(fn).run()
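For completeness, a minimal headless sketch of the cv2.watershed call the sample above wraps in a GUI: markers is an int32 image whose positive seeds are grown over the color image, and boundary pixels come back as -1. The seed positions are arbitrary; only the fruits.jpg path is taken from the sample's fallback.

import numpy as np
import cv2

img = cv2.imread('../cpp/fruits.jpg')
markers = np.zeros(img.shape[:2], np.int32)
markers[10, 10] = 1                              # seed for one region
markers[img.shape[0]//2, img.shape[1]//2] = 2    # seed for another

cv2.watershed(img, markers)                      # markers is filled in place
print 'boundary pixels:', (markers == -1).sum()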