diff --git a/samples/python2/edge.py b/samples/python2/edge.py
index 998c0c401..a55c51dad 100644
--- a/samples/python2/edge.py
+++ b/samples/python2/edge.py
@@ -3,7 +3,7 @@ import video
 import sys
 
 try: fn = sys.argv[1]
-except: fn = 'synth:bg=../cpp/lena.jpg:noise=0.1'
+except: fn = video.presets['lena']
 
 def nothing(*arg):
     pass
diff --git a/samples/python2/letter_recog.py b/samples/python2/letter_recog.py
index e0fabc538..c25809b57 100644
--- a/samples/python2/letter_recog.py
+++ b/samples/python2/letter_recog.py
@@ -91,31 +91,34 @@ class SVM(LetterStatModel):
 
 
 if __name__ == '__main__':
-    import argparse
+    import getopt
+    import sys
 
     models = [RTrees, KNearest, Boost, SVM] # MLP, NBayes
     models = dict( [(cls.__name__.lower(), cls) for cls in models] )
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-model', default='rtrees', choices=models.keys())
-    parser.add_argument('-data', nargs=1, default='../cpp/letter-recognition.data')
-    parser.add_argument('-load', nargs=1)
-    parser.add_argument('-save', nargs=1)
-    args = parser.parse_args()
-    print 'loading data %s ...' % args.data
-    samples, responses = load_base(args.data)
-    Model = models[args.model]
+    print 'USAGE: letter_recog.py [--model <model>] [--data <data fn>] [--load <model fn>] [--save <model fn>]'
+    print 'Models: ', ', '.join(models)
+    print
+
+    args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save='])
+    args = dict(args)
+    args.setdefault('--model', 'rtrees')
+    args.setdefault('--data', '../cpp/letter-recognition.data')
+
+    print 'loading data %s ...' % args['--data']
+    samples, responses = load_base(args['--data'])
+    Model = models[args['--model']]
 
     model = Model()
 
     train_n = int(len(samples)*model.train_ratio)
-    if args.load is None:
-        print 'training %s ...' % Model.__name__
-        model.train(samples[:train_n], responses[:train_n])
-    else:
-        fn = args.load[0]
+    if '--load' in args:
+        fn = args['--load']
         print 'loading model from %s ...' % fn
         model.load(fn)
+    else:
+        print 'training %s ...' % Model.__name__
+        model.train(samples[:train_n], responses[:train_n])
 
     print 'testing...'
     train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n])
@@ -123,7 +126,7 @@ if __name__ == '__main__':
 
     print 'train rate: %f test rate: %f' % (train_rate*100, test_rate*100)
 
-    if args.save is not None:
-        fn = args.save[0]
+    if '--save' in args:
+        fn = args['--save']
         print 'saving model to %s ...' % fn
         model.save(fn)
diff --git a/samples/python2/video.py b/samples/python2/video.py
index 3b5a98e79..b681f03b8 100644
--- a/samples/python2/video.py
+++ b/samples/python2/video.py
@@ -47,19 +47,27 @@ def create_capture(source):
 
     return cv2.VideoCapture(source)
 
+presets = dict(
+    lena = 'synth:bg=../cpp/lena.jpg:noise=0.1'
+)
+
 if __name__ == '__main__':
     import sys
-    import argparse
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('sources', nargs='*', default=['synth:bg=../cpp/lena.jpg:noise=0.1'])
-    parser.add_argument('-shotdir', nargs=1, default='.')
-    args = parser.parse_args()
-    print args
+    import getopt
+
+    print 'USAGE: video.py [--shotdir <dir>] [source0] [source1] ...'
+    print "source: '<int>' or '<filename>' or 'synth:<params>'"
+    print
+
+    args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
+    args = dict(args)
+    shotdir = args.get('--shotdir', '.')
+    if len(sources) == 0:
+        sources = [ presets['lena'] ]
 
     print 'Press SPACE to save current frame'
 
-    caps = map(create_capture, args.sources)
+    caps = map(create_capture, sources)
     shot_idx = 0
     while True:
         imgs = []
@@ -72,7 +80,7 @@ if __name__ == '__main__':
             break
         if ch == ord(' '):
            for i, img in enumerate(imgs):
-                fn = '%s/shot_%d_%03d.bmp' % (args.shotdir[0], i, shot_idx)
+                fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
                 cv2.imwrite(fn, img)
                 print fn, 'saved'
             shot_idx += 1