From ab4d3753491c4ebf36a4709ecdbe56d0b7b4c370 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Thu, 28 Jan 2016 15:43:08 +0300 Subject: [PATCH 01/16] Add new tests on python --- modules/python/test/test.py | 55 ++----- modules/python/test/test_calibration.py | 69 ++++++++ modules/python/test/test_digits.py | 197 +++++++++++++++++++++++ modules/python/test/test_facedetect.py | 102 ++++++++++++ modules/python/test/test_fitline.py | 66 ++++++++ modules/python/test/test_gaussian_mix.py | 60 +++++++ modules/python/test/test_houghcircles.py | 57 +++++++ modules/python/test/test_houghlines.py | 65 ++++++++ modules/python/test/test_squares.py | 96 +++++++++++ modules/python/test/test_texture_flow.py | 52 ++++++ modules/python/test/tests_common.py | 56 +++++++ samples/data/kate.jpg | Bin 0 -> 40791 bytes samples/python/demo.py | 5 +- 13 files changed, 839 insertions(+), 41 deletions(-) mode change 100644 => 100755 modules/python/test/test.py create mode 100644 modules/python/test/test_calibration.py create mode 100644 modules/python/test/test_digits.py create mode 100644 modules/python/test/test_facedetect.py create mode 100644 modules/python/test/test_fitline.py create mode 100644 modules/python/test/test_gaussian_mix.py create mode 100644 modules/python/test/test_houghcircles.py create mode 100644 modules/python/test/test_houghlines.py create mode 100644 modules/python/test/test_squares.py create mode 100644 modules/python/test/test_texture_flow.py create mode 100644 modules/python/test/tests_common.py create mode 100644 samples/data/kate.jpg diff --git a/modules/python/test/test.py b/modules/python/test/test.py old mode 100644 new mode 100755 index 093979aba..074b6edad --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -1,6 +1,8 @@ #!/usr/bin/env python + from __future__ import print_function + import unittest import random import time @@ -17,51 +19,24 @@ import numpy as np import cv2 import argparse +# local test modules +from test_digits import digits_test +from test_calibration import calibration_test +from test_squares import squares_test +from test_texture_flow import texture_flow_test +from test_fitline import fitline_test +from test_houghcircles import houghcircles_test +from test_houghlines import houghlines_test +from test_gaussian_mix import gaussian_mix_test +from test_facedetect import facedetect_test + # Python 3 moved urlopen to urllib.requests try: from urllib.request import urlopen except ImportError: from urllib import urlopen -class NewOpenCVTests(unittest.TestCase): - - # path to local repository folder containing 'samples' folder - repoPath = None - # github repository url - repoUrl = 'https://raw.github.com/Itseez/opencv/master' - - def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR): - if not filename in self.image_cache: - filedata = None - if NewOpenCVTests.repoPath is not None: - candidate = NewOpenCVTests.repoPath + '/' + filename - if os.path.isfile(candidate): - with open(candidate, 'rb') as f: - filedata = f.read() - if filedata is None: - filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read() - self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) - return self.image_cache[filename] - - def setUp(self): - self.image_cache = {} - - def hashimg(self, im): - """ Compute a hash for an image, useful for image comparisons """ - return hashlib.md5(im.tostring()).digest() - - if sys.version_info[:2] == (2, 6): - def assertLess(self, a, b, msg=None): - if not a < b: - self.fail('%s not less 
than %s' % (repr(a), repr(b))) - - def assertLessEqual(self, a, b, msg=None): - if not a <= b: - self.fail('%s not less than or equal to %s' % (repr(a), repr(b))) - - def assertGreater(self, a, b, msg=None): - if not a > b: - self.fail('%s not greater than %s' % (repr(a), repr(b))) +from tests_common import NewOpenCVTests # Tests to run first; check the handful of basic operations that the later tests rely on @@ -167,4 +142,4 @@ if __name__ == '__main__': NewOpenCVTests.repoPath = args.repo random.seed(0) unit_argv = [sys.argv[0]] + other; - unittest.main(argv=unit_argv) + unittest.main(argv=unit_argv) \ No newline at end of file diff --git a/modules/python/test/test_calibration.py b/modules/python/test/test_calibration.py new file mode 100644 index 000000000..af8d0fcea --- /dev/null +++ b/modules/python/test/test_calibration.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +''' +camera calibration for distorted images with chess board samples +reads distorted images, calculates the calibration and write undistorted images +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + + +from tests_common import NewOpenCVTests + +class calibration_test(NewOpenCVTests): + + def test_calibration(self): + + from glob import glob + + img_mask = '../../../samples/data/left*.jpg' # default + img_names = glob(img_mask) + + square_size = 1.0 + pattern_size = (9, 6) + pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32) + pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2) + pattern_points *= square_size + + obj_points = [] + img_points = [] + h, w = 0, 0 + img_names_undistort = [] + for fn in img_names: + img = cv2.imread(fn, 0) + if img is None: + continue + + h, w = img.shape[:2] + found, corners = cv2.findChessboardCorners(img, pattern_size) + if found: + term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1) + cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term) + + if not found: + continue + + img_points.append(corners.reshape(-1, 2)) + obj_points.append(pattern_points) + + # calculate camera distortion + rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0) + + eps = 0.01 + normCamEps = 10.0 + normDistEps = 0.01 + + cameraMatrixTest = [[ 532.80992189, 0., 342.4952186 ], + [ 0., 532.93346422, 233.8879292 ], + [ 0., 0., 1. ]] + + distCoeffsTest = [ -2.81325576e-01, 2.91130406e-02, + 1.21234330e-03, -1.40825372e-04, 1.54865844e-01] + + self.assertLess(abs(rms - 0.196334638034), eps) + self.assertLess(cv2.norm(camera_matrix - cameraMatrixTest, cv2.NORM_L1), normCamEps) + self.assertLess(cv2.norm(dist_coefs - distCoeffsTest, cv2.NORM_L1), normDistEps) \ No newline at end of file diff --git a/modules/python/test/test_digits.py b/modules/python/test/test_digits.py new file mode 100644 index 000000000..9d1d2557c --- /dev/null +++ b/modules/python/test/test_digits.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python + +''' +SVM and KNearest digit recognition. + +Sample loads a dataset of handwritten digits from '../data/digits.png'. +Then it trains a SVM and KNearest classifiers on it and evaluates +their accuracy. + +Following preprocessing is applied to the dataset: + - Moment-based image deskew (see deskew()) + - Digit images are split into 4 10x10 cells and 16-bin + histogram of oriented gradients is computed for each + cell + - Transform histograms to space with Hellinger metric (see [1] (RootSIFT)) + + +[1] R. Arandjelovic, A. 
Zisserman + "Three things everyone should know to improve object retrieval" + http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf + +''' + + +# Python 2/3 compatibility +from __future__ import print_function + +# built-in modules +from multiprocessing.pool import ThreadPool + +import cv2 + +import numpy as np +from numpy.linalg import norm + + +SZ = 20 # size of each digit is SZ x SZ +CLASS_N = 10 +DIGITS_FN = '../../../samples/data/digits.png' + +def split2d(img, cell_size, flatten=True): + h, w = img.shape[:2] + sx, sy = cell_size + cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)] + cells = np.array(cells) + if flatten: + cells = cells.reshape(-1, sy, sx) + return cells + +def load_digits(fn): + digits_img = cv2.imread(fn, 0) + digits = split2d(digits_img, (SZ, SZ)) + labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) + return digits, labels + +def deskew(img): + m = cv2.moments(img) + if abs(m['mu02']) < 1e-2: + return img.copy() + skew = m['mu11']/m['mu02'] + M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) + img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR) + return img + +class StatModel(object): + def load(self, fn): + self.model.load(fn) # Known bug: https://github.com/Itseez/opencv/issues/4969 + def save(self, fn): + self.model.save(fn) + +class KNearest(StatModel): + def __init__(self, k = 3): + self.k = k + self.model = cv2.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + retval, results, neigh_resp, dists = self.model.findNearest(samples, self.k) + return results.ravel() + +class SVM(StatModel): + def __init__(self, C = 1, gamma = 0.5): + self.model = cv2.ml.SVM_create() + self.model.setGamma(gamma) + self.model.setC(C) + self.model.setKernel(cv2.ml.SVM_RBF) + self.model.setType(cv2.ml.SVM_C_SVC) + + def train(self, samples, responses): + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + return self.model.predict(samples)[1].ravel() + + +def evaluate_model(model, digits, samples, labels): + resp = model.predict(samples) + err = (labels != resp).mean() + + confusion = np.zeros((10, 10), np.int32) + for i, j in zip(labels, resp): + confusion[i, j] += 1 + + return err, confusion + +def preprocess_simple(digits): + return np.float32(digits).reshape(-1, SZ*SZ) / 255.0 + +def preprocess_hog(digits): + samples = [] + for img in digits: + gx = cv2.Sobel(img, cv2.CV_32F, 1, 0) + gy = cv2.Sobel(img, cv2.CV_32F, 0, 1) + mag, ang = cv2.cartToPolar(gx, gy) + bin_n = 16 + bin = np.int32(bin_n*ang/(2*np.pi)) + bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:] + mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:] + hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)] + hist = np.hstack(hists) + + # transform to Hellinger kernel + eps = 1e-7 + hist /= hist.sum() + eps + hist = np.sqrt(hist) + hist /= norm(hist) + eps + + samples.append(hist) + return np.float32(samples) + +from tests_common import NewOpenCVTests + +class digits_test(NewOpenCVTests): + + def test_digits(self): + + digits, labels = load_digits(DIGITS_FN) + + # shuffle digits + rand = np.random.RandomState(321) + shuffle = rand.permutation(len(digits)) + digits, labels = digits[shuffle], labels[shuffle] + + digits2 = list(map(deskew, digits)) + samples = preprocess_hog(digits2) + + train_n = int(0.9*len(samples)) + 
digits_train, digits_test = np.split(digits2, [train_n]) + samples_train, samples_test = np.split(samples, [train_n]) + labels_train, labels_test = np.split(labels, [train_n]) + errors = list() + confusionMatrixes = list() + + model = KNearest(k=4) + model.train(samples_train, labels_train) + error, confusion = evaluate_model(model, digits_test, samples_test, labels_test) + errors.append(error) + confusionMatrixes.append(confusion) + + model = SVM(C=2.67, gamma=5.383) + model.train(samples_train, labels_train) + error, confusion = evaluate_model(model, digits_test, samples_test, labels_test) + errors.append(error) + confusionMatrixes.append(confusion) + + eps = 0.001 + normEps = len(samples_test) * 0.02 + + confusionKNN = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 57, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 59, 1, 0, 0, 0, 0, 1, 0], + [ 0, 0, 0, 43, 0, 0, 0, 1, 0, 0], + [ 0, 0, 0, 0, 38, 0, 2, 0, 0, 0], + [ 0, 0, 0, 2, 0, 48, 0, 0, 1, 0], + [ 0, 1, 0, 0, 0, 0, 51, 0, 0, 0], + [ 0, 0, 1, 0, 0, 0, 0, 54, 0, 0], + [ 0, 0, 0, 0, 0, 1, 0, 0, 46, 0], + [ 1, 1, 0, 1, 1, 0, 0, 0, 2, 42]] + + confusionSVM = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 57, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 59, 2, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 43, 0, 0, 0, 1, 0, 0], + [ 0, 0, 0, 0, 40, 0, 0, 0, 0, 0], + [ 0, 0, 0, 1, 0, 50, 0, 0, 0, 0], + [ 0, 0, 0, 0, 1, 0, 51, 0, 0, 0], + [ 0, 0, 1, 0, 0, 0, 0, 54, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0, 0, 47, 0], + [ 0, 1, 0, 1, 0, 0, 0, 0, 1, 45]] + + self.assertLess(cv2.norm(confusionMatrixes[0] - confusionKNN, cv2.NORM_L1), normEps) + self.assertLess(cv2.norm(confusionMatrixes[1] - confusionSVM, cv2.NORM_L1), normEps) + + self.assertLess(errors[0] - 0.034, eps) + self.assertLess(errors[1] - 0.018, eps) \ No newline at end of file diff --git a/modules/python/test/test_facedetect.py b/modules/python/test/test_facedetect.py new file mode 100644 index 000000000..7fe64e207 --- /dev/null +++ b/modules/python/test/test_facedetect.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +''' +face detection using haar cascades +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + +def intersectionRate(s1, s2): + + x1, y1, x2, y2 = s1 + s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + + x1, y1, x2, y2 = s2 + s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + + area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) + return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) + +def detect(img, cascade): + rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), + flags=cv2.CASCADE_SCALE_IMAGE) + if len(rects) == 0: + return [] + rects[:,2:] += rects[:,:2] + return rects + +from tests_common import NewOpenCVTests + +class facedetect_test(NewOpenCVTests): + + def test_facedetect(self): + import sys, getopt + + cascade_fn = "../../../data/haarcascades/haarcascade_frontalface_alt.xml" + nested_fn = "../../../data/haarcascades/haarcascade_eye.xml" + + cascade = cv2.CascadeClassifier(cascade_fn) + nested = cv2.CascadeClassifier(nested_fn) + + dirPath = '../../../samples/data/' + samples = ['lena.jpg', 'kate.jpg'] + + faces = [] + eyes = [] + + testFaces = [ + #lena + [[218, 200, 389, 371], + [ 244, 240, 294, 290], + [ 309, 246, 352, 289]], + + #kate + [[207, 89, 436, 318], + [245, 161, 294, 210], + [343, 139, 389, 185]] + ] + + for sample in samples: + + img = cv2.imread(dirPath + sample) + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + gray = cv2.GaussianBlur(gray, (3, 3), 1.1) + + rects = 
detect(gray, cascade)
+            faces.append(rects)
+
+            if not nested.empty():
+                for x1, y1, x2, y2 in rects:
+                    roi = gray[y1:y2, x1:x2]
+                    subrects = detect(roi.copy(), nested)
+
+                    for rect in subrects:
+                        rect[0] += x1
+                        rect[2] += x1
+                        rect[1] += y1
+                        rect[3] += y1
+
+                    eyes.append(subrects)
+
+        faces_matches = 0
+        eyes_matches = 0
+
+        eps = 0.8
+
+        for i in range(len(faces)):
+            for j in range(len(testFaces)):
+                if intersectionRate(faces[i][0], testFaces[j][0]) > eps:
+                    faces_matches += 1
+                # check eyes
+                if len(eyes[i]) == 2:
+                    if intersectionRate(eyes[i][0], testFaces[j][1]) > eps and intersectionRate(eyes[i][1], testFaces[j][2]) > eps:
+                        eyes_matches += 1
+                    elif intersectionRate(eyes[i][1], testFaces[j][1]) > eps and intersectionRate(eyes[i][0], testFaces[j][2]) > eps:
+                        eyes_matches += 1
+
+        self.assertEqual(faces_matches, 2)
+        self.assertEqual(eyes_matches, 2)
\ No newline at end of file
diff --git a/modules/python/test/test_fitline.py b/modules/python/test/test_fitline.py
new file mode 100644
index 000000000..7de957338
--- /dev/null
+++ b/modules/python/test/test_fitline.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+'''
+Robust line fitting.
+==================
+
+Example of using the cv2.fitLine function for fitting a line
+to points in the presence of outliers.
+
+Switch through different M-estimator functions and see
+how well the robust functions fit the line even
+in the case of ~50% outliers.
+
+'''
+
+# Python 2/3 compatibility
+from __future__ import print_function
+import sys
+PY3 = sys.version_info[0] == 3
+
+import numpy as np
+import cv2
+
+from tests_common import NewOpenCVTests
+
+w, h = 512, 256
+
+def toint(p):
+    return tuple(map(int, p))
+
+def sample_line(p1, p2, n, noise=0.0):
+    np.random.seed(10)
+    p1 = np.float32(p1)
+    t = np.random.rand(n,1)
+    return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise
+
+dist_func_names = ['DIST_L2', 'DIST_L1', 'DIST_L12', 'DIST_FAIR', 'DIST_WELSCH', 'DIST_HUBER']
+
+class fitline_test(NewOpenCVTests):
+
+    def test_fitline(self):
+
+        noise = 5
+        n = 200
+        r = 5 / 100.0
+        outn = int(n*r)
+
+        p0, p1 = (90, 80), (w-90, h-80)
+        line_points = sample_line(p0, p1, n-outn, noise)
+        outliers = np.random.rand(outn, 2) * (w, h)
+        points = np.vstack([line_points, outliers])
+
+        lines = []
+
+        for name in dist_func_names:
+            func = getattr(cv2, name)
+            vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
+            line = [float(vx), float(vy), float(cx), float(cy)]
+            lines.append(line)
+
+        eps = 0.05
+
+        refVec = (np.float32(p1) - p0) / cv2.norm(np.float32(p1) - p0)
+
+        for i in range(len(lines)):
+            self.assertLessEqual(cv2.norm(refVec - lines[i][0:2], cv2.NORM_L2), eps)
\ No newline at end of file
diff --git a/modules/python/test/test_gaussian_mix.py b/modules/python/test/test_gaussian_mix.py
new file mode 100644
index 000000000..58802d4c1
--- /dev/null
+++ b/modules/python/test/test_gaussian_mix.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# Python 2/3 compatibility
+from __future__ import print_function
+import sys
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    xrange = range
+
+import numpy as np
+from numpy import random
+import cv2
+
+def make_gaussians(cluster_n, img_size):
+    points = []
+    ref_distrs = []
+    for i in xrange(cluster_n):
+        mean = (0.1 + 0.8*random.rand(2)) * img_size
+        a = (random.rand(2, 2)-0.5)*img_size*0.1
+        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
+        n = 100 + random.randint(900)
+        pts = random.multivariate_normal(mean, cov, n)
+        points.append( pts )
+        ref_distrs.append( (mean, cov) )
+    points = np.float32( np.vstack(points) )
return points, ref_distrs + +from tests_common import NewOpenCVTests + +class gaussian_mix_test(NewOpenCVTests): + + def test_gaussian_mix(self): + + np.random.seed(10) + cluster_n = 5 + img_size = 512 + + points, ref_distrs = make_gaussians(cluster_n, img_size) + + em = cv2.ml.EM_create() + em.setClustersNumber(cluster_n) + em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC) + em.trainEM(points) + means = em.getMeans() + covs = em.getCovs() # Known bug: https://github.com/Itseez/opencv/pull/4232 + found_distrs = zip(means, covs) + + matches_count = 0 + + meanEps = 0.05 + covEps = 0.1 + + for i in range(cluster_n): + for j in range(cluster_n): + if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and + cv2.norm(covs[i] - ref_distrs[j][1], cv2.NORM_L2) / cv2.norm(ref_distrs[j][1], cv2.NORM_L2) < covEps): + matches_count += 1 + + self.assertEqual(matches_count, cluster_n) \ No newline at end of file diff --git a/modules/python/test/test_houghcircles.py b/modules/python/test/test_houghcircles.py new file mode 100644 index 000000000..dc4284a41 --- /dev/null +++ b/modules/python/test/test_houghcircles.py @@ -0,0 +1,57 @@ +#!/usr/bin/python + +''' +This example illustrates how to use cv2.HoughCircles() function. +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import cv2 +import numpy as np +import sys + +from tests_common import NewOpenCVTests + +class houghcircles_test(NewOpenCVTests): + + def test_houghcircles(self): + + fn = "../../../samples/data/board.jpg" + + src = cv2.imread(fn, 1) + img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) + img = cv2.medianBlur(img, 5) + + circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0] + + testCircles = [[38, 181, 17.6], + [99.7, 166, 13.12], + [142.7, 160, 13.52], + [223.6, 110, 8.62], + [79.1, 206.7, 8.62], + [47.5, 351.6, 11.64], + [189.5, 354.4, 11.64], + [189.8, 298.9, 10.64], + [189.5, 252.4, 14.62], + [252.5, 393.4, 15.62], + [602.9, 467.5, 11.42], + [222, 210.4, 9.12], + [263.1, 216.7, 9.12], + [359.8, 222.6, 9.12], + [518.9, 120.9, 9.12], + [413.8, 113.4, 9.12], + [489, 127.2, 9.12], + [448.4, 121.3, 9.12], + [384.6, 128.9, 8.62]] + + eps = 7 + matches_counter = 0 + + for i in range(len(testCircles)): + for j in range(len(circles)): + if cv2.norm(testCircles[i] - circles[j], cv2.NORM_L2) < eps: + matches_counter += 1 + + self.assertGreater(float(matches_counter) / len(testCircles), .5) + self.assertLess(float(len(circles) - matches_counter) / len(circles), .7) \ No newline at end of file diff --git a/modules/python/test/test_houghlines.py b/modules/python/test/test_houghlines.py new file mode 100644 index 000000000..b77912979 --- /dev/null +++ b/modules/python/test/test_houghlines.py @@ -0,0 +1,65 @@ +#!/usr/bin/python + +''' +This example illustrates how to use Hough Transform to find lines +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import cv2 +import numpy as np +import sys +import math + +from tests_common import NewOpenCVTests + +def linesDiff(line1, line2): + + norm1 = cv2.norm(line1 - line2, cv2.NORM_L2) + line3 = line1[2:4] + line1[0:2] + norm2 = cv2.norm(line3 - line2, cv2.NORM_L2) + + return min(norm1, norm2) + +class houghlines_test(NewOpenCVTests): + + def test_houghlines(self): + + fn = "../../../samples/data/pic1.png" + + src = cv2.imread(fn) + dst = cv2.Canny(src, 50, 200) + + lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:] + + eps = 5 + testLines = [ 
+ #rect1 + [ 232, 25, 43, 25], + [ 43, 129, 232, 129], + [ 43, 129, 43, 25], + [232, 129, 232, 25], + #rect2 + [251, 86, 314, 183], + [252, 86, 323, 40], + [315, 183, 386, 137], + [324, 40, 386, 136], + #triangle + [245, 205, 377, 205], + [244, 206, 305, 278], + [306, 279, 377, 205], + #rect3 + [153, 177, 196, 177], + [153, 277, 153, 179], + [153, 277, 196, 277], + [196, 177, 196, 277]] + + matches_counter = 0 + + for i in range(len(testLines)): + for j in range(len(lines)): + if linesDiff(testLines[i], lines[j]) < eps: + matches_counter += 1 + + self.assertGreater(float(matches_counter) / len(testLines), .7) \ No newline at end of file diff --git a/modules/python/test/test_squares.py b/modules/python/test/test_squares.py new file mode 100644 index 000000000..937b526b0 --- /dev/null +++ b/modules/python/test/test_squares.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +''' +Simple "Square Detector" program. + +Loads several images sequentially and tries to find squares in each image. +''' + +# Python 2/3 compatibility +import sys +PY3 = sys.version_info[0] == 3 + +if PY3: + xrange = range + +import numpy as np +import cv2 + + +def angle_cos(p0, p1, p2): + d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float') + return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) ) + +def find_squares(img): + img = cv2.GaussianBlur(img, (5, 5), 0) + squares = [] + for gray in cv2.split(img): + for thrs in xrange(0, 255, 26): + if thrs == 0: + bin = cv2.Canny(gray, 0, 50, apertureSize=5) + bin = cv2.dilate(bin, None) + else: + retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY) + bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) + for cnt in contours: + cnt_len = cv2.arcLength(cnt, True) + cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True) + if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt): + cnt = cnt.reshape(-1, 2) + max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)]) + if max_cos < 0.1 and filterSquares(squares, cnt): + squares.append(cnt) + + return squares + +def intersectionRate(s1, s2): + area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) + return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) + +def filterSquares(squares, square): + + for i in range(len(squares)): + if intersectionRate(squares[i], square) > 0.95: + return False + + return True + +from tests_common import NewOpenCVTests + +class squares_test(NewOpenCVTests): + + def test_squares(self): + + img = cv2.imread('../../../samples/data/pic1.png') + squares = find_squares(img) + + testSquares = [ + [[43, 25], + [43, 129], + [232, 129], + [232, 25]], + + [[252, 87], + [324, 40], + [387, 137], + [315, 184]], + + [[154, 178], + [196, 180], + [198, 278], + [154, 278]], + + [[0, 0], + [400, 0], + [400, 300], + [0, 300]] + ] + + matches_counter = 0 + for i in range(len(squares)): + for j in range(len(testSquares)): + if intersectionRate(squares[i], testSquares[j]) > 0.9: + matches_counter += 1 + + self.assertGreater(matches_counter / len(testSquares), 0.9) + self.assertLess( (len(squares) - matches_counter) / len(squares), 0.2) \ No newline at end of file diff --git a/modules/python/test/test_texture_flow.py b/modules/python/test/test_texture_flow.py new file mode 100644 index 000000000..46d680a7f --- /dev/null +++ b/modules/python/test/test_texture_flow.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +''' +Texture flow direction estimation. 
+ +Sample shows how cv2.cornerEigenValsAndVecs function can be used +to estimate image texture flow direction. +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 +import sys + +from tests_common import NewOpenCVTests + + +class texture_flow_test(NewOpenCVTests): + + def test_texture_flow(self): + + fn = '../../../samples/data/pic6.png' + img = cv2.imread(fn) + + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + h, w = img.shape[:2] + + eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3) + eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2] + flow = eigen[:,:,2] + + vis = img.copy() + vis[:] = (192 + np.uint32(vis)) / 2 + d = 80 + points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2) + + textureVectors = [] + + for x, y in np.int32(points): + textureVectors.append(np.int32(flow[y, x]*d)) + + eps = 0.05 + + testTextureVectors = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], + [-38, 70], [-79, 3], [0, 0], [0, 0], [-39, 69], [-79, -1], + [0, 0], [0, 0], [0, -79], [17, -78], [-48, -63], [65, -46], + [-69, -39], [-48, -63], [-45, 66]] + + for i in range(len(textureVectors)): + self.assertLessEqual(cv2.norm(textureVectors[i] - testTextureVectors[i], cv2.NORM_L2), eps) \ No newline at end of file diff --git a/modules/python/test/tests_common.py b/modules/python/test/tests_common.py new file mode 100644 index 000000000..6ab26050b --- /dev/null +++ b/modules/python/test/tests_common.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +from __future__ import print_function + +import unittest +import sys +import hashlib +import os +import numpy as np +import cv2 + +# Python 3 moved urlopen to urllib.requests +try: + from urllib.request import urlopen +except ImportError: + from urllib import urlopen + +class NewOpenCVTests(unittest.TestCase): + + # path to local repository folder containing 'samples' folder + repoPath = None + # github repository url + repoUrl = 'https://raw.github.com/Itseez/opencv/master' + + def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR): + if not filename in self.image_cache: + filedata = None + if NewOpenCVTests.repoPath is not None: + candidate = NewOpenCVTests.repoPath + '/' + filename + if os.path.isfile(candidate): + with open(candidate, 'rb') as f: + filedata = f.read() + if filedata is None: + filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read() + self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) + return self.image_cache[filename] + + def setUp(self): + self.image_cache = {} + + def hashimg(self, im): + """ Compute a hash for an image, useful for image comparisons """ + return hashlib.md5(im.tostring()).digest() + + if sys.version_info[:2] == (2, 6): + def assertLess(self, a, b, msg=None): + if not a < b: + self.fail('%s not less than %s' % (repr(a), repr(b))) + + def assertLessEqual(self, a, b, msg=None): + if not a <= b: + self.fail('%s not less than or equal to %s' % (repr(a), repr(b))) + + def assertGreater(self, a, b, msg=None): + if not a > b: + self.fail('%s not greater than %s' % (repr(a), repr(b))) \ No newline at end of file diff --git a/samples/data/kate.jpg b/samples/data/kate.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40ab433bfde29689600f3d2a9308e0745f7fe988 GIT binary patch literal 40791 zcmb5VXIN8B*fqLS2!T*S?+_pqQ99B~LPvU6njnZET|k;ZLIA@9B2{V9s|X?nDHZ~P z6hY}tdha3~MErcO@4VMJf6u+=&&-~=*ZkP~-gB+B_TP!W^8oUOu7NH90)YVie;4rg z2cQK&A&~!RPzaO)3Z;NiQ^NiQEfqDKnw}Pkq^G5)V_;%uW?*Dvq@!oyW?|#t;Ns#! 
zhD*k#G>RJ9sl!SA4M&)5G0)jfrYJmFReR!69d0tza3esrd^d@v|Hf$%)G_#OLiU`2fl#$?sze)?*)&AXuhdB zOMC}7Q!ENG$FB{?f2q?0rGyXe_7bmo_h{Blj zFoJihES_0qyZWvSsc_6RdZ&A4gOcQD7kR>Xh>r=F zEgA}|)C7#^Z4n~QF^$t?j<2=xJ(BTzLAa>&LHUswgEvzKe6o!hM4NpnjE<30(nNiL zBk%(nz)y!ES(V2bxU7bX5dISN79Ec|Y17*l$XCmJAjG8EXhOzj)fxL{ze=-aNS7bm zlzSi(+e(}FEVYK@&j(647{xKfqkDZcZnpU~5y7W*0llyA^S1dWD0ytN@SN050$}lR zK7zV9^>w?(1b#?t_WT_i^^h{>C;njHj{KHhuq|x?>ejleaw8VSDbZKRhZI1(M@t~Z3ckF8p<1@g}xHdE$OsH zK>XM=2ufX*q_17iz~8lb-kGQ{fVq*OhL+QmPPDY5G^iDxvK;5Oy$SIGbUq)}yQMZ( zcp^Nrv7sTX4AkRxQY7=*cdvI(>+^VePN(%yvYODXngyP+Y^2^5+Zo+pC9&bevo8zp z)P-p|uL5OVC^I8QfqgNlcTYHQgk34#-9SXsn}kh&!y7z-z^&Z7FmZ-Vx*mUuv%h?S zCw=s@O>K!kkk2HCrFz@V`{VC=ztI8xLoi{5%G4~Tn>@Z2+M!>Qj=!{+^9f59iSZqq zt2-y$+YVLBkMz8g`N9O@mewOBXm;(XnnQD4Yb*yyzboZ5)R_1wXfe{&FWFqj5AqYp z!t1o=js=QGi&YF}S!Er)5wwUqATsB`R(;Y>IPQF_Lv)%H<{)nN`Ij317Qpo8AoB8p zbu40dOvOcVPBcVtRau7v+%X6Atu=y5_G97 z@Grur7hs?IZXN?jntk%P4n+?S(1}cGoV5r7JR?CLk!0y={F`;5@cpSSftR-HQVPGy zb?2dv%6tLWKSBC)+LB_hESuj#Cc}W%vbeU4spXhVWdNTRxlOgIpG6{_)64hBQ;(ZzBot&z}uV+AOI_9@p;X?5%t{4eQp=k-j*de&%TB~G?5 zUJ%isPDViCt?pSnTVE-kTZ)WLwjD=AvczgjTDf|?@tXBXT_-!2qF0{fa{es6#Dgv+ z+;Hf$AY;BKoPYIUYpz+D`h#S_-~FP*S5c1vcg-oQ@nboM@Z>tA4}xG=q?WF}<6XR{ zNJJ@dUiGZ5Qi_^$kjsGxg4>KeKydG*E8Zs*spvNEl`Wkmm-|sN%r6)v#_yNpmK6Nc zq0~39_Fmc*Uny?aY47j-A>}0WXJi2(GH=o;f_?8*)Y~P82Paa*lVrUW@Q2$>81M^# zo@B*~&Qy7QWE1<~Zgl!ZGcV zKwGf^@N~x<%*KWB0Vbw-L(%QIRFi}F)~(hYwwLxK1MJBR*Lf&?Lr$})<28KqF32Bx z;)3k!y6D#AznRP`zvTv2> z255gI%hSsCfQ#tza}VunU6|WGpsT(^#KEJ|DZMj}fqIay3lc=YcvG;f3XfGt$uE!w z%0${=kcytiTuVy7``RGLjiM=XMG_dS7ZC>{8uMb1gJm`Uv&zg&o@HXDZ4mnBcxYiW zgO#qM%B%B1dF8)6f&VvLp8X2Av(kCUBCjOKyT%bj973D-9uEx`2%ZCO&P0wmGqZKa zHQRWPE4zasepLO|A?hv>Cjls`E0*G|YOW|anZJF;ds6;N$*{)rv!#8wnMexeb?_h2 z@rV$-3~y_>emnvq#-3ys7xI49d8-r;*3w8iXTUd~DbR?5AFW{tx%^Dyhyl>', self.on_demo_select) self.cmd_entry = cmd_entry = tk.Entry(right) From 5625d79508c1ec7d55383ac0a1f8b13014042b37 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Thu, 28 Jan 2016 17:13:58 +0300 Subject: [PATCH 02/16] Fix loading images in python tests --- modules/python/test/test.py | 2 -- modules/python/test/test_calibration.py | 11 +++++++---- modules/python/test/test_digits.py | 16 ++++++++-------- modules/python/test/test_facedetect.py | 8 ++++---- modules/python/test/test_gaussian_mix.py | 4 ++-- modules/python/test/test_houghcircles.py | 4 ++-- modules/python/test/test_houghlines.py | 4 ++-- modules/python/test/test_squares.py | 2 +- modules/python/test/test_texture_flow.py | 5 ++--- 9 files changed, 28 insertions(+), 28 deletions(-) diff --git a/modules/python/test/test.py b/modules/python/test/test.py index 074b6edad..e0b674187 100755 --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -1,8 +1,6 @@ #!/usr/bin/env python - from __future__ import print_function - import unittest import random import time diff --git a/modules/python/test/test_calibration.py b/modules/python/test/test_calibration.py index af8d0fcea..6a1240c63 100644 --- a/modules/python/test/test_calibration.py +++ b/modules/python/test/test_calibration.py @@ -19,9 +19,12 @@ class calibration_test(NewOpenCVTests): def test_calibration(self): from glob import glob - - img_mask = '../../../samples/data/left*.jpg' # default - img_names = glob(img_mask) + img_names = [] + for i in range(1, 15): + if i < 10: + img_names.append('samples/data/left0{}.jpg'.format(str(i))) + else: + img_names.append('samples/data/left{}.jpg'.format(str(i))) square_size = 1.0 pattern_size = (9, 6) @@ -34,7 +37,7 @@ class calibration_test(NewOpenCVTests): h, w = 0, 0 
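# Editor's sketch (not part of the patch): the left01..left14 name loop in the
# hunk above can be written with zero-padded formatting, dropping the if/else
# on the index:
#
#   img_names = ['samples/data/left{:02d}.jpg'.format(i) for i in range(1, 15)]
#
# (PATCH 03 later excludes left10.jpg with an extra condition.)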
img_names_undistort = [] for fn in img_names: - img = cv2.imread(fn, 0) + img = self.get_sample(fn, 0) if img is None: continue diff --git a/modules/python/test/test_digits.py b/modules/python/test/test_digits.py index 9d1d2557c..2aa32df4c 100644 --- a/modules/python/test/test_digits.py +++ b/modules/python/test/test_digits.py @@ -36,7 +36,7 @@ from numpy.linalg import norm SZ = 20 # size of each digit is SZ x SZ CLASS_N = 10 -DIGITS_FN = '../../../samples/data/digits.png' +DIGITS_FN = 'samples/data/digits.png' def split2d(img, cell_size, flatten=True): h, w = img.shape[:2] @@ -47,12 +47,6 @@ def split2d(img, cell_size, flatten=True): cells = cells.reshape(-1, sy, sx) return cells -def load_digits(fn): - digits_img = cv2.imread(fn, 0) - digits = split2d(digits_img, (SZ, SZ)) - labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) - return digits, labels - def deskew(img): m = cv2.moments(img) if abs(m['mu02']) < 1e-2: @@ -134,9 +128,15 @@ from tests_common import NewOpenCVTests class digits_test(NewOpenCVTests): + def load_digits(self, fn): + digits_img = self.get_sample(fn, 0) + digits = split2d(digits_img, (SZ, SZ)) + labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) + return digits, labels + def test_digits(self): - digits, labels = load_digits(DIGITS_FN) + digits, labels = self.load_digits(DIGITS_FN) # shuffle digits rand = np.random.RandomState(321) diff --git a/modules/python/test/test_facedetect.py b/modules/python/test/test_facedetect.py index 7fe64e207..e5c2f5943 100644 --- a/modules/python/test/test_facedetect.py +++ b/modules/python/test/test_facedetect.py @@ -36,13 +36,13 @@ class facedetect_test(NewOpenCVTests): def test_facedetect(self): import sys, getopt - cascade_fn = "../../../data/haarcascades/haarcascade_frontalface_alt.xml" - nested_fn = "../../../data/haarcascades/haarcascade_eye.xml" + cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml' + nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml' cascade = cv2.CascadeClassifier(cascade_fn) nested = cv2.CascadeClassifier(nested_fn) - dirPath = '../../../samples/data/' + dirPath = 'samples/data/' samples = ['lena.jpg', 'kate.jpg'] faces = [] @@ -62,7 +62,7 @@ class facedetect_test(NewOpenCVTests): for sample in samples: - img = cv2.imread(dirPath + sample) + img = self.get_sample(dirPath + sample) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (3, 3), 1.1) diff --git a/modules/python/test/test_gaussian_mix.py b/modules/python/test/test_gaussian_mix.py index 58802d4c1..cfd33ece0 100644 --- a/modules/python/test/test_gaussian_mix.py +++ b/modules/python/test/test_gaussian_mix.py @@ -31,7 +31,7 @@ from tests_common import NewOpenCVTests class gaussian_mix_test(NewOpenCVTests): def test_gaussian_mix(self): - + np.random.seed(10) cluster_n = 5 img_size = 512 @@ -53,7 +53,7 @@ class gaussian_mix_test(NewOpenCVTests): for i in range(cluster_n): for j in range(cluster_n): - if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and + if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and cv2.norm(covs[i] - ref_distrs[j][1], cv2.NORM_L2) / cv2.norm(ref_distrs[j][1], cv2.NORM_L2) < covEps): matches_count += 1 diff --git a/modules/python/test/test_houghcircles.py b/modules/python/test/test_houghcircles.py index dc4284a41..c012d30b4 100644 --- a/modules/python/test/test_houghcircles.py +++ b/modules/python/test/test_houghcircles.py @@ -17,9 
+17,9 @@ class houghcircles_test(NewOpenCVTests): def test_houghcircles(self): - fn = "../../../samples/data/board.jpg" + fn = "samples/data/board.jpg" - src = cv2.imread(fn, 1) + src = self.get_sample(fn, 1) img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) img = cv2.medianBlur(img, 5) diff --git a/modules/python/test/test_houghlines.py b/modules/python/test/test_houghlines.py index b77912979..9f056ce3e 100644 --- a/modules/python/test/test_houghlines.py +++ b/modules/python/test/test_houghlines.py @@ -26,9 +26,9 @@ class houghlines_test(NewOpenCVTests): def test_houghlines(self): - fn = "../../../samples/data/pic1.png" + fn = "/samples/data/pic1.png" - src = cv2.imread(fn) + src = self.get_sample(fn) dst = cv2.Canny(src, 50, 200) lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:] diff --git a/modules/python/test/test_squares.py b/modules/python/test/test_squares.py index 937b526b0..214c64bee 100644 --- a/modules/python/test/test_squares.py +++ b/modules/python/test/test_squares.py @@ -61,7 +61,7 @@ class squares_test(NewOpenCVTests): def test_squares(self): - img = cv2.imread('../../../samples/data/pic1.png') + img = self.get_sample('samples/data/pic1.png') squares = find_squares(img) testSquares = [ diff --git a/modules/python/test/test_texture_flow.py b/modules/python/test/test_texture_flow.py index 46d680a7f..50d1e692a 100644 --- a/modules/python/test/test_texture_flow.py +++ b/modules/python/test/test_texture_flow.py @@ -21,8 +21,7 @@ class texture_flow_test(NewOpenCVTests): def test_texture_flow(self): - fn = '../../../samples/data/pic6.png' - img = cv2.imread(fn) + img = self.get_sample('samples/data/pic6.png') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) h, w = img.shape[:2] @@ -43,7 +42,7 @@ class texture_flow_test(NewOpenCVTests): eps = 0.05 - testTextureVectors = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], + testTextureVectors = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [-38, 70], [-79, 3], [0, 0], [0, 0], [-39, 69], [-79, -1], [0, 0], [0, 0], [0, -79], [17, -78], [-48, -63], [65, -46], [-69, -39], [-48, -63], [-45, 66]] From 56571561b48efdbdb3644cd185fc653d73afc3f9 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Thu, 28 Jan 2016 18:50:09 +0300 Subject: [PATCH 03/16] Add k-means python test, fix loading images in calibration test --- modules/python/test/test.py | 1 + modules/python/test/test_calibration.py | 2 +- modules/python/test/test_kmeans.py | 73 ++++++++++++++++++++++++ modules/python/test/test_texture_flow.py | 2 +- 4 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 modules/python/test/test_kmeans.py diff --git a/modules/python/test/test.py b/modules/python/test/test.py index e0b674187..d5fc533b7 100755 --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -27,6 +27,7 @@ from test_houghcircles import houghcircles_test from test_houghlines import houghlines_test from test_gaussian_mix import gaussian_mix_test from test_facedetect import facedetect_test +from test_kmeans import kmeans_test # Python 3 moved urlopen to urllib.requests try: diff --git a/modules/python/test/test_calibration.py b/modules/python/test/test_calibration.py index 6a1240c63..4c275a0d2 100644 --- a/modules/python/test/test_calibration.py +++ b/modules/python/test/test_calibration.py @@ -23,7 +23,7 @@ class calibration_test(NewOpenCVTests): for i in range(1, 15): if i < 10: img_names.append('samples/data/left0{}.jpg'.format(str(i))) - else: + elif i != 10: img_names.append('samples/data/left{}.jpg'.format(str(i))) square_size = 1.0 diff 
--git a/modules/python/test/test_kmeans.py b/modules/python/test/test_kmeans.py
new file mode 100644
index 000000000..2420cce1a
--- /dev/null
+++ b/modules/python/test/test_kmeans.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+'''
+K-means clustering test
+'''
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import numpy as np
+import cv2
+from numpy import random
+
+from tests_common import NewOpenCVTests
+
+
+def make_gaussians(cluster_n, img_size):
+    points = []
+    ref_distrs = []
+    sizes = []
+    for i in xrange(cluster_n):
+        mean = (0.1 + 0.8*random.rand(2)) * img_size
+        a = (random.rand(2, 2)-0.5)*img_size*0.1
+        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
+        n = 100 + random.randint(900)
+        pts = random.multivariate_normal(mean, cov, n)
+        points.append( pts )
+        ref_distrs.append( (mean, cov) )
+        sizes.append(n)
+    points = np.float32( np.vstack(points) )
+    return points, ref_distrs, sizes
+
+def getMainLabelConfidence(labels, nLabels):
+
+    n = len(labels)
+    labelsDict = dict.fromkeys(range(nLabels), 0)
+    labelsConfDict = dict.fromkeys(range(nLabels))
+
+    for i in range(n):
+        labelsDict[labels[i][0]] += 1
+
+    for i in range(nLabels):
+        labelsConfDict[i] = float(labelsDict[i]) / n
+
+    return max(labelsConfDict.values())
+
+class kmeans_test(NewOpenCVTests):
+
+    def test_kmeans(self):
+
+        np.random.seed(10)
+
+        cluster_n = 5
+        img_size = 512
+
+        # generating bright palette
+        colors = np.zeros((1, cluster_n, 3), np.uint8)
+        colors[0,:] = 255
+        colors[0,:,0] = np.arange(0, 180, 180.0/cluster_n)
+        colors = cv2.cvtColor(colors, cv2.COLOR_HSV2BGR)[0]
+
+        points, _, clusterSizes = make_gaussians(cluster_n, img_size)
+
+        term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
+        ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0)
+
+        self.assertEqual(len(centers), cluster_n)
+
+        offset = 0
+        for i in range(cluster_n):
+            confidence = getMainLabelConfidence(labels[offset : (offset + clusterSizes[i])], cluster_n)
+            offset += clusterSizes[i]
+            self.assertGreater(confidence, 0.9)
\ No newline at end of file
diff --git a/modules/python/test/test_texture_flow.py b/modules/python/test/test_texture_flow.py
index 50d1e692a..7dc3b0704 100644
--- a/modules/python/test/test_texture_flow.py
+++ b/modules/python/test/test_texture_flow.py
@@ -36,7 +36,7 @@ class texture_flow_test(NewOpenCVTests):
     points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
 
     textureVectors = []
-
+
     for x, y in np.int32(points):
         textureVectors.append(np.int32(flow[y, x]*d))
 

From aaa43dc84f22a2653f9519a5dfce844c74f9318e Mon Sep 17 00:00:00 2001
From: Vladislav Sovrasov
Date: Fri, 29 Jan 2016 13:07:08 +0300
Subject: [PATCH 04/16] Add morphology python test, fix python3 compatibility in kmeans test

---
 modules/python/test/test.py            |  1 +
 modules/python/test/test_kmeans.py     | 11 ++----
 modules/python/test/test_morphology.py | 51 ++++++++++++++++++++++++
 modules/python/test/tests_common.py    |  2 +-
 4 files changed, 57 insertions(+), 8 deletions(-)
 create mode 100644 modules/python/test/test_morphology.py

diff --git a/modules/python/test/test.py b/modules/python/test/test.py
index d5fc533b7..f9213cffb 100755
--- a/modules/python/test/test.py
+++ b/modules/python/test/test.py
@@ -28,6 +28,7 @@ from test_houghlines import houghlines_test
 from test_gaussian_mix import gaussian_mix_test
 from test_facedetect import facedetect_test
 from test_kmeans import kmeans_test
+from test_morphology import morphology_test
 
 # Python 3 moved urlopen to urllib.requests
 try:
diff --git a/modules/python/test/test_kmeans.py b/modules/python/test/test_kmeans.py
index 2420cce1a..4f886d9d8 100644
--- a/modules/python/test/test_kmeans.py
+++ b/modules/python/test/test_kmeans.py
@@ -10,10 +10,13 @@ from __future__ import print_function
 import numpy as np
 import cv2
 from numpy import random
+import sys
 
+PY3 = sys.version_info[0] == 3
+if PY3:
+    xrange = range
 from tests_common import NewOpenCVTests
 
-
 def make_gaussians(cluster_n, img_size):
     points = []
     ref_distrs = []
@@ -53,12 +56,6 @@ class kmeans_test(NewOpenCVTests):
         cluster_n = 5
         img_size = 512
 
-        # generating bright palette
-        colors = np.zeros((1, cluster_n, 3), np.uint8)
-        colors[0,:] = 255
-        colors[0,:,0] = np.arange(0, 180, 180.0/cluster_n)
-        colors = cv2.cvtColor(colors, cv2.COLOR_HSV2BGR)[0]
-
         points, _, clusterSizes = make_gaussians(cluster_n, img_size)
 
         term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
diff --git a/modules/python/test/test_morphology.py b/modules/python/test/test_morphology.py
new file mode 100644
index 000000000..d7abda4ed
--- /dev/null
+++ b/modules/python/test/test_morphology.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+'''
+Morphology operations.
+'''
+
+# Python 2/3 compatibility
+from __future__ import print_function
+import sys
+PY3 = sys.version_info[0] == 3
+
+import numpy as np
+import cv2
+
+from tests_common import NewOpenCVTests
+
+class morphology_test(NewOpenCVTests):
+
+    def test_morphology(self):
+
+        fn = 'samples/data/baboon.jpg'
+        img = self.get_sample(fn)
+
+        modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
+        str_modes = ['ellipse', 'rect', 'cross']
+
+        referenceHashes = { modes[0]: '1bd14fc814e41b80ce7816bc04f60b65', modes[1] : '1bd14fc814e41b80ce7816bc04f60b65',
+            modes[2] : 'cb18a5d28e77522dfec6a6255bc3847e', modes[3] : '84909517e4866aa079f4b2e2906bf47b'}
+
+        def update(cur_mode):
+            cur_str_mode = str_modes[0]
+            sz = 10
+            iters = 1
+            opers = cur_mode.split('/')
+            if len(opers) > 1:
+                sz = sz - 10
+                op = opers[sz > 0]
+                sz = abs(sz)
+            else:
+                op = opers[0]
+            sz = sz*2+1
+
+            str_name = 'MORPH_' + cur_str_mode.upper()
+            oper_name = 'MORPH_' + op.upper()
+
+            st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
+            return cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)
+
+        for mode in modes:
+            res = update(mode)
+            self.assertEqual(self.hashimg(res), referenceHashes[mode])
\ No newline at end of file
diff --git a/modules/python/test/tests_common.py b/modules/python/test/tests_common.py
index 6ab26050b..c1cc12d6e 100644
--- a/modules/python/test/tests_common.py
+++ b/modules/python/test/tests_common.py
@@ -40,7 +40,7 @@ class NewOpenCVTests(unittest.TestCase):
 
     def hashimg(self, im):
         """ Compute a hash for an image, useful for image comparisons """
-        return hashlib.md5(im.tostring()).digest()
+        return hashlib.md5(im.tostring()).hexdigest()

From cb7cc816537b1a1f5cd404c97908381e3c613979 Mon Sep 17 00:00:00 2001
From: Vladislav Sovrasov
Date: Fri, 29 Jan 2016 18:00:18 +0300
Subject: [PATCH 05/16] Add dft python test, fix platform-dependent result in morphology test

---
 modules/python/test/test.py             | 19 ++------
 modules/python/test/test_calibration.py |  1 -
 modules/python/test/test_dft.py         | 46 +++++++++++++++++++++++
 modules/python/test/test_morphology.py  |  6 ++--
 4 files changed, 55 insertions(+), 17 deletions(-)
 create mode 100644 modules/python/test/test_dft.py

diff --git a/modules/python/test/test.py b/modules/python/test/test.py
index f9213cffb..aa66e9234 100755
--- a/modules/python/test/test.py
+++ b/modules/python/test/test.py
@@ -17,19 +17,6 @@ import numpy as np
 import cv2
 import argparse
 
-# local test modules
-from test_digits import digits_test
-from test_calibration import calibration_test
-from test_squares import squares_test
-from test_texture_flow import texture_flow_test
-from test_fitline import fitline_test
-from test_houghcircles import houghcircles_test
-from test_houghlines import houghlines_test
-from test_gaussian_mix import gaussian_mix_test
-from test_facedetect import facedetect_test
-from test_kmeans import kmeans_test
-from test_morphology import morphology_test
-
 # Python 3 moved urlopen to urllib.requests
 try:
     from urllib.request import urlopen
@@ -40,6 +27,12 @@ from tests_common import NewOpenCVTests
 
 # Tests to run first; check the handful of basic operations that the later tests rely on
 
+basedir = os.path.abspath(os.path.dirname(__file__))
+
+def load_tests(loader, tests, pattern):
+    tests.addTests(loader.discover(basedir, pattern='test_*.py'))
+    return tests
+
 class Hackathon244Tests(NewOpenCVTests):
 
     def test_int_array(self):
diff --git a/modules/python/test/test_calibration.py b/modules/python/test/test_calibration.py
index 4c275a0d2..48b53ff9b 100644
--- a/modules/python/test/test_calibration.py
+++ b/modules/python/test/test_calibration.py
@@ -11,7 +11,6 @@ from __future__ import print_function
 
 import numpy as np
 import cv2
-
 from tests_common import NewOpenCVTests
 
 class calibration_test(NewOpenCVTests):
diff --git a/modules/python/test/test_dft.py b/modules/python/test/test_dft.py
new file mode 100644
index 000000000..f79693997
--- /dev/null
+++ b/modules/python/test/test_dft.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+'''
+Test for discrete Fourier transform (dft)
+'''
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import cv2
+import numpy as np
+import sys
+
+from tests_common import NewOpenCVTests
+
+class dft_test(NewOpenCVTests):
+    def test_dft(self):
+
+        img = self.get_sample('samples/data/rubberwhale1.png', 0)
+        eps = 0.001
+
+        #test direct transform
+        refDft = np.fft.fft2(img)
+        refDftShift = np.fft.fftshift(refDft)
+        refMagnitude = np.log(1.0 + np.abs(refDftShift))
+
+        testDft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
+        testDftShift = np.fft.fftshift(testDft)
+        testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))
+
+        refMagnitude = cv2.normalize(refMagnitude, 0.0, 1.0, cv2.NORM_MINMAX)
+        testMagnitude = cv2.normalize(testMagnitude, 0.0, 1.0, cv2.NORM_MINMAX)
+
+        self.assertLess(cv2.norm(refMagnitude - testMagnitude), eps)
+
+        #test inverse transform
+        img_back = np.fft.ifft2(refDft)
+        img_back = np.abs(img_back)
+
+        img_backTest = cv2.idft(testDft)
+        img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])
+
+        img_backTest = cv2.normalize(img_backTest, 0.0, 1.0, cv2.NORM_MINMAX)
+        img_back = cv2.normalize(img_back, 0.0, 1.0, cv2.NORM_MINMAX)
+
+        self.assertLess(cv2.norm(img_back - img_backTest), eps)
\ No newline at end of file
diff --git a/modules/python/test/test_morphology.py b/modules/python/test/test_morphology.py
index d7abda4ed..309c80cfd 100644
--- a/modules/python/test/test_morphology.py
+++ b/modules/python/test/test_morphology.py
@@ -18,14 +18,14 @@ class morphology_test(NewOpenCVTests):
 
     def test_morphology(self):
 
-        fn = 'samples/data/baboon.jpg'
+        fn = 'samples/data/rubberwhale1.png'
         img = self.get_sample(fn)
 
         modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
        str_modes = ['ellipse',
'rect', 'cross'] - referenceHashes = { modes[0]: '1bd14fc814e41b80ce7816bc04f60b65', modes[1] : '1bd14fc814e41b80ce7816bc04f60b65', - modes[2] : 'cb18a5d28e77522dfec6a6255bc3847e', modes[3] : '84909517e4866aa079f4b2e2906bf47b'} + referenceHashes = { modes[0]: '071a526425b79e45b4d0d71ef51b0562', modes[1] : '071a526425b79e45b4d0d71ef51b0562', + modes[2] : '427e89f581b7df1b60a831b1ed4c8618', modes[3] : '0dd8ad251088a63d0dd022bcdc57361c'} def update(cur_mode): cur_str_mode = str_modes[0] From 3a51cae2081fe5e0c1d562cbb48abfe62b83280c Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Tue, 2 Feb 2016 14:16:49 +0300 Subject: [PATCH 06/16] Fix rectangle selection in some of python samples --- samples/python/common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/python/common.py b/samples/python/common.py index 785fb6c8f..09159bbe8 100755 --- a/samples/python/common.py +++ b/samples/python/common.py @@ -173,6 +173,7 @@ class RectSelector: x, y = np.int16([x, y]) # BUG if event == cv2.EVENT_LBUTTONDOWN: self.drag_start = (x, y) + return if self.drag_start: if flags & cv2.EVENT_FLAG_LBUTTON: xo, yo = self.drag_start From d579f080936308a9f1aa10463eb1c8d991676cfc Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Wed, 3 Feb 2016 11:20:06 +0300 Subject: [PATCH 07/16] Fix mouse control in asift and find_obj samples --- samples/python/asift.py | 2 +- samples/python/find_obj.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/python/asift.py b/samples/python/asift.py index 8d2774a72..ec74930c5 100755 --- a/samples/python/asift.py +++ b/samples/python/asift.py @@ -112,7 +112,7 @@ if __name__ == '__main__': import sys, getopt opts, args = getopt.getopt(sys.argv[1:], '', ['feature=']) opts = dict(opts) - feature_name = opts.get('--feature', 'sift-flann') + feature_name = opts.get('--feature', 'brisk-flann') try: fn1, fn2 = args except: diff --git a/samples/python/find_obj.py b/samples/python/find_obj.py index d8d3d4133..05709c1ff 100755 --- a/samples/python/find_obj.py +++ b/samples/python/find_obj.py @@ -119,7 +119,7 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None): if flags & cv2.EVENT_FLAG_LBUTTON: cur_vis = vis0.copy() r = 8 - m = (anorm(p1 - (x, y)) < r) | (anorm(p2 - (x, y)) < r) + m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r) idxs = np.where(m)[0] kp1s, kp2s = [], [] for i in idxs: @@ -143,7 +143,7 @@ if __name__ == '__main__': import sys, getopt opts, args = getopt.getopt(sys.argv[1:], '', ['feature=']) opts = dict(opts) - feature_name = opts.get('--feature', 'sift') + feature_name = opts.get('--feature', 'brisk') try: fn1, fn2 = args except: From e90dc20361fdf104c829fca4d16440c712e3ed1a Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Wed, 3 Feb 2016 11:22:32 +0300 Subject: [PATCH 08/16] Update letter_recog sample to current version of opencv interfaces --- samples/python/letter_recog.py | 59 +++++++++++++++++----------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/samples/python/letter_recog.py b/samples/python/letter_recog.py index e68c095bc..4e166cbbd 100755 --- a/samples/python/letter_recog.py +++ b/samples/python/letter_recog.py @@ -65,13 +65,12 @@ class RTrees(LetterStatModel): def train(self, samples, responses): sample_n, var_n = samples.shape - var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL], np.uint8) - #CvRTParams(10,10,0,false,15,0,true,4,100,0.01f,CV_TERMCRIT_ITER)); - params = dict(max_depth=10 ) - 
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses, varType = var_types, params = params) + self.model.setMaxDepth(20) + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): - return [self.model.predict(s) for s in samples] + ret, resp = self.model.predict(samples) + return resp.ravel() class KNearest(LetterStatModel): @@ -79,10 +78,10 @@ class KNearest(LetterStatModel): self.model = cv2.ml.KNearest_create() def train(self, samples, responses): - self.model.train(samples, responses) + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) def predict(self, samples): - retval, results, neigh_resp, dists = self.model.find_nearest(samples, k = 10) + retval, results, neigh_resp, dists = self.model.findNearest(samples, k = 10) return results.ravel() @@ -95,15 +94,15 @@ class Boost(LetterStatModel): new_samples = self.unroll_samples(samples) new_responses = self.unroll_responses(responses) var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8) - #CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0 ) - params = dict(max_depth=5) #, use_surrogates=False) - self.model.train(new_samples, cv2.ml.ROW_SAMPLE, new_responses, varType = var_types, params=params) + + self.model.setMaxDepth(5) + self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) def predict(self, samples): new_samples = self.unroll_samples(samples) - pred = np.array( [self.model.predict(s, returnSum = True) for s in new_samples] ) - pred = pred.reshape(-1, self.class_n).argmax(1) - return pred + ret, resp = self.model.predict(new_samples) + + return resp.ravel().reshape(-1, self.class_n).argmax(1) class SVM(LetterStatModel): @@ -111,13 +110,14 @@ class SVM(LetterStatModel): self.model = cv2.ml.SVM_create() def train(self, samples, responses): - params = dict( kernel_type = cv2.ml.SVM_LINEAR, - svm_type = cv2.ml.SVM_C_SVC, - C = 1 ) - self.model.train(samples, responses, params = params) + self.model.setType(cv2.ml.SVM_C_SVC) + self.model.setC(1) + self.model.setKernel(cv2.ml.SVM_LINEAR) + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): - return self.model.predict_all(samples).ravel() + ret, resp = self.model.predict(samples) + return resp.ravel() class MLP(LetterStatModel): @@ -127,22 +127,23 @@ class MLP(LetterStatModel): def train(self, samples, responses): sample_n, var_n = samples.shape new_responses = self.unroll_responses(responses).reshape(-1, self.class_n) - layer_sizes = np.int32([var_n, 100, 100, self.class_n]) - self.model.create(layer_sizes) - # CvANN_MLP_TrainParams::BACKPROP,0.001 - params = dict( term_crit = (cv2.TERM_CRITERIA_COUNT, 300, 0.01), - train_method = cv2.ml.ANN_MLP_TRAIN_PARAMS_BACKPROP, - bp_dw_scale = 0.001, - bp_moment_scale = 0.0 ) - self.model.train(samples, np.float32(new_responses), None, params = params) + self.model.setLayerSizes(layer_sizes) + self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP) + self.model.setBackpropMomentumScale(0) + self.model.setBackpropWeightScale(0.001) + self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 300, 0.01)) + self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM) + + self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses)) def predict(self, samples): ret, resp = self.model.predict(samples) return resp.argmax(-1) + if __name__ == '__main__': import getopt import sys @@ -155,7 +156,7 @@ if __name__ == '__main__': args, dummy = 
getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save='])
     args = dict(args)
-    args.setdefault('--model', 'rtrees')
+    args.setdefault('--model', 'svm')
     args.setdefault('--data', '../data/letter-recognition.data')
 
     print('loading data %s ...' % args['--data'])
@@ -173,8 +174,8 @@ if __name__ == '__main__':
     model.train(samples[:train_n], responses[:train_n])
 
     print('testing...')
-    train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n])
-    test_rate  = np.mean(model.predict(samples[train_n:]) == responses[train_n:])
+    train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n].astype(int))
+    test_rate  = np.mean(model.predict(samples[train_n:]) == responses[train_n:].astype(int))
 
     print('train rate: %f  test rate: %f' % (train_rate*100, test_rate*100))
 

From 86868176a21c382cd7f1c21879d1ccba76a0f321 Mon Sep 17 00:00:00 2001
From: Vladislav Sovrasov
Date: Wed, 3 Feb 2016 13:23:43 +0300
Subject: [PATCH 09/16] Fix py3 compatibility

---
 samples/python/find_obj.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/python/find_obj.py b/samples/python/find_obj.py
index 05709c1ff..09457b80b 100755
--- a/samples/python/find_obj.py
+++ b/samples/python/find_obj.py
@@ -68,7 +68,7 @@ def filter_matches(kp1, kp2, matches, ratio = 0.75):
     p1 = np.float32([kp.pt for kp in mkp1])
     p2 = np.float32([kp.pt for kp in mkp2])
     kp_pairs = zip(mkp1, mkp2)
-    return p1, p2, kp_pairs
+    return p1, p2, list(kp_pairs)
 
 def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
     h1, w1 = img1.shape[:2]

From 54c07ba0ffeb4adb0fdf89b84a288ded14c51bf4 Mon Sep 17 00:00:00 2001
From: Vladislav Sovrasov
Date: Thu, 4 Feb 2016 17:12:32 +0300
Subject: [PATCH 10/16] Add 2 new tests, bug fixes in old tests

---
 modules/python/test/test_camshift.py     | 112 +++++++++++++++
 modules/python/test/test_facedetect.py   |   4 +-
 modules/python/test/test_letter_recog.py | 167 +++++++++++++++++++++++
 modules/python/test/test_peopledetect.py |  73 ++++++++++
 modules/python/test/tst_scene_render.py  |  67 +++++++++
 samples/python/letter_recog.py           |  12 +-
 6 files changed, 428 insertions(+), 7 deletions(-)
 create mode 100644 modules/python/test/test_camshift.py
 create mode 100644 modules/python/test/test_letter_recog.py
 create mode 100644 modules/python/test/test_peopledetect.py
 create mode 100644 modules/python/test/tst_scene_render.py

diff --git a/modules/python/test/test_camshift.py b/modules/python/test/test_camshift.py
new file mode 100644
index 000000000..11164f9f0
--- /dev/null
+++ b/modules/python/test/test_camshift.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+
+'''
+Camshift tracker
+================
+
+This is a demo that shows mean-shift based tracking.
+You select a colored object such as your face and it tracks it.
+This reads from video camera (0 by default, or the camera number the user enters) + +http://www.robinhewitt.com/research/track/camshift.html + +''' + +# Python 2/3 compatibility +from __future__ import print_function +import sys +PY3 = sys.version_info[0] == 3 + +if PY3: + xrange = range + +import numpy as np +import cv2 +from tst_scene_render import TestSceneRender + +def intersectionRate(s1, s2): + + x1, y1, x2, y2 = s1 + s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + + x1, y1, x2, y2 = s2 + s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + + area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) + return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) + + +from tests_common import NewOpenCVTests + +class camshift_test(NewOpenCVTests): + + frame = None + selection = None + drag_start = None + show_backproj = False + track_window = None + render = None + + def prepareRender(self): + + cv2.namedWindow('camshift') + self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg')) + + def runTracker(self): + + framesCounter = 0 + self.selection = True + + xmin, ymin, xmax, ymax = self.render.getCurrentRect() + + self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin) + + while True: + framesCounter += 1 + self.frame = self.render.getNextFrame() + vis = self.frame.copy() + hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) + mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) + + if self.selection: + x0, y0, x1, y1 = self.render.getCurrentRect() + 50 + x0 -= 100 + y0 -= 100 + + hsv_roi = hsv[y0:y1, x0:x1] + mask_roi = mask[y0:y1, x0:x1] + hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) + cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX) + self.hist = hist.reshape(-1) + + vis_roi = vis[y0:y1, x0:x1] + cv2.bitwise_not(vis_roi, vis_roi) + vis[mask == 0] = 0 + + self.selection = False + + if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0: + self.selection = None + prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1) + prob &= mask + term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ) + track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit) + + if self.show_backproj: + vis[:] = prob[...,np.newaxis] + + cv2.rectangle(vis, (self.track_window[0], self.track_window[1]), (self.track_window[0] + self.track_window[2], self.track_window[1] + self.track_window[3]), (0, 255, 0), 2) + + trackingRect = np.array(self.track_window) + trackingRect[2] += trackingRect[0] + trackingRect[3] += trackingRect[1] + + print(intersectionRate((self.render.getCurrentRect()), trackingRect)) + self.assertGreater(intersectionRate((self.render.getCurrentRect()), trackingRect), 0.5) + + if framesCounter > 300: + break + + def test_camshift(self): + self.prepareRender() + self.runTracker() \ No newline at end of file diff --git a/modules/python/test/test_facedetect.py b/modules/python/test/test_facedetect.py index e5c2f5943..fad1cf97d 100644 --- a/modules/python/test/test_facedetect.py +++ b/modules/python/test/test_facedetect.py @@ -93,9 +93,9 @@ class facedetect_test(NewOpenCVTests): faces_matches += 1 #check eyes if len(eyes[i]) == 2: - if intersectionRate(eyes[i][0], testFaces[j][1]) > eps and intersectionRate(eyes[i][1], testFaces[j][2]): + if intersectionRate(eyes[i][0], testFaces[j][1]) > eps and intersectionRate(eyes[i][1] , testFaces[j][2]) > eps: eyes_matches += 1 - elif intersectionRate(eyes[i][1], testFaces[j][1]) > eps and 
intersectionRate(eyes[i][0], testFaces[j][2]): + elif intersectionRate(eyes[i][1], testFaces[j][1]) > eps and intersectionRate(eyes[i][0], testFaces[j][2]) > eps: eyes_matches += 1 self.assertEqual(faces_matches, 2) diff --git a/modules/python/test/test_letter_recog.py b/modules/python/test/test_letter_recog.py new file mode 100644 index 000000000..574741f44 --- /dev/null +++ b/modules/python/test/test_letter_recog.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python + +''' +The sample demonstrates how to train Random Trees classifier +(or Boosting classifier, or MLP, or Knearest, or Support Vector Machines) using the provided dataset. + +We use the sample database letter-recognition.data +from UCI Repository, here is the link: + +Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998). +UCI Repository of machine learning databases +[http://www.ics.uci.edu/~mlearn/MLRepository.html]. +Irvine, CA: University of California, Department of Information and Computer Science. + +The dataset consists of 20000 feature vectors along with the +responses - capital latin letters A..Z. +The first 10000 samples are used for training +and the remaining 10000 - to test the classifier. +====================================================== + Models: RTrees, KNearest, Boost, SVM, MLP +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + +def load_base(fn): + a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') }) + samples, responses = a[:,1:], a[:,0] + return samples, responses + +class LetterStatModel(object): + class_n = 26 + train_ratio = 0.5 + + def load(self, fn): + self.model.load(fn) + def save(self, fn): + self.model.save(fn) + + def unroll_samples(self, samples): + sample_n, var_n = samples.shape + new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32) + new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0) + new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n) + return new_samples + + def unroll_responses(self, responses): + sample_n = len(responses) + new_responses = np.zeros(sample_n*self.class_n, np.int32) + resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n ) + new_responses[resp_idx] = 1 + return new_responses + +class RTrees(LetterStatModel): + def __init__(self): + self.model = cv2.ml.RTrees_create() + + def train(self, samples, responses): + sample_n, var_n = samples.shape + self.model.setMaxDepth(20) + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + ret, resp = self.model.predict(samples) + return resp.ravel() + + +class KNearest(LetterStatModel): + def __init__(self): + self.model = cv2.ml.KNearest_create() + + def train(self, samples, responses): + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + + def predict(self, samples): + retval, results, neigh_resp, dists = self.model.findNearest(samples, k = 10) + return results.ravel() + + +class Boost(LetterStatModel): + def __init__(self): + self.model = cv2.ml.Boost_create() + + def train(self, samples, responses): + sample_n, var_n = samples.shape + new_samples = self.unroll_samples(samples) + new_responses = self.unroll_responses(responses) + var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8) + + self.model.setWeakCount(15) + self.model.setMaxDepth(10) + self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) + + 
def predict(self, samples): + new_samples = self.unroll_samples(samples) + ret, resp = self.model.predict(new_samples) + + return resp.ravel().reshape(-1, self.class_n).argmax(1) + + +class SVM(LetterStatModel): + def __init__(self): + self.model = cv2.ml.SVM_create() + + def train(self, samples, responses): + self.model.setType(cv2.ml.SVM_C_SVC) + self.model.setC(1) + self.model.setKernel(cv2.ml.SVM_RBF) + self.model.setGamma(.1) + self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) + + def predict(self, samples): + ret, resp = self.model.predict(samples) + return resp.ravel() + + +class MLP(LetterStatModel): + def __init__(self): + self.model = cv2.ml.ANN_MLP_create() + + def train(self, samples, responses): + sample_n, var_n = samples.shape + new_responses = self.unroll_responses(responses).reshape(-1, self.class_n) + layer_sizes = np.int32([var_n, 100, 100, self.class_n]) + + self.model.setLayerSizes(layer_sizes) + self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP) + self.model.setBackpropMomentumScale(0) + self.model.setBackpropWeightScale(0.001) + self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1) + + self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses)) + + def predict(self, samples): + ret, resp = self.model.predict(samples) + return resp.argmax(-1) + +from tests_common import NewOpenCVTests + +class letter_recog_test(NewOpenCVTests): + + def test_letter_recog(self): + + eps = 0.01 + + models = [RTrees, KNearest, Boost, SVM, MLP] + models = dict( [(cls.__name__.lower(), cls) for cls in models] ) + testErrors = {RTrees: (98.930000, 92.390000), KNearest: (94.960000, 92.010000), + Boost: (85.970000, 74.920000), SVM: (99.780000, 95.680000), MLP: (90.060000, 87.410000)} + + for model in models: + Model = models[model] + classifier = Model() + + samples, responses = load_base(self.repoPath + '/samples/data/letter-recognition.data') + train_n = int(len(samples)*classifier.train_ratio) + + classifier.train(samples[:train_n], responses[:train_n]) + train_rate = np.mean(classifier.predict(samples[:train_n]) == responses[:train_n].astype(int)) + test_rate = np.mean(classifier.predict(samples[train_n:]) == responses[train_n:].astype(int)) + + self.assertLess(train_rate - testErrors[Model][0], eps) + self.assertLess(test_rate - testErrors[Model][1], eps) \ No newline at end of file diff --git a/modules/python/test/test_peopledetect.py b/modules/python/test/test_peopledetect.py new file mode 100644 index 000000000..9aed0ed0a --- /dev/null +++ b/modules/python/test/test_peopledetect.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +''' +example to detect upright people in images using HOG features +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + + +def inside(r, q): + rx, ry, rw, rh = r + qx, qy, qw, qh = q + return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh + +def intersectionRate(s1, s2): + + x1, y1, x2, y2 = s1 + s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + + x1, y1, x2, y2 = s2 + s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) + + return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) + +from tests_common import NewOpenCVTests + +class peopledetect_test(NewOpenCVTests): + def test_peopledetect(self): + + hog = cv2.HOGDescriptor() + hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() ) + 
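# Editor's sketch (not part of the patch): the HOG detector configured above
# can be driven standalone; 'pedestrians.png' is a hypothetical local file.
#
#   img = cv2.imread('pedestrians.png', 0)
#   found, weights = hog.detectMultiScale(img, winStride=(8, 8),
#                                         padding=(32, 32), scale=1.05)
#   # 'found' holds (x, y, w, h) rectangles; 'weights' the SVM confidences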
+ dirPath = 'samples/data/' + samples = ['basketball1.png', 'basketball2.png'] + + testPeople = [ + [[23, 76, 164, 477], [440, 22, 637, 478]], + [[23, 76, 164, 477], [440, 22, 637, 478]] + ] + + eps = 0.5 + + for sample in samples: + + img = self.get_sample(dirPath + sample, 0) + + found, w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05) + found_filtered = [] + for ri, r in enumerate(found): + for qi, q in enumerate(found): + if ri != qi and inside(r, q): + break + else: + found_filtered.append(r) + + matches = 0 + + for i in range(len(found_filtered)): + for j in range(len(testPeople)): + + found_rect = (found_filtered[i][0], found_filtered[i][1], + found_filtered[i][0] + found_filtered[i][2], + found_filtered[i][1] + found_filtered[i][3]) + + if intersectionRate(found_rect, testPeople[j][0]) > eps or intersectionRate(found_rect, testPeople[j][1]) > eps: + matches += 1 + + self.assertGreater(matches, 0) \ No newline at end of file diff --git a/modules/python/test/tst_scene_render.py b/modules/python/test/tst_scene_render.py new file mode 100644 index 000000000..e952c4236 --- /dev/null +++ b/modules/python/test/tst_scene_render.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +from numpy import pi, sin, cos + +import cv2 + +defaultSize = 512 + +class TestSceneRender(): + + def __init__(self, bgImg = None, **params): + self.time = 0.0 + self.timeStep = 1.0 / 30.0 + + if bgImg != None: + self.sceneBg = bgImg.copy() + else: + self.sceneBg = np.zeros((defaultSize, defaultSize, 3), np.uint8) + + self.w = self.sceneBg.shape[0] + self.h = self.sceneBg.shape[1] + + self.initialRect = np.array([ (self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10), + (self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]) + self.currentRect = self.initialRect + + def setInitialRect(self, rect): + self.initialRect = rect + + def getCurrentRect(self): + x0, y0 = self.currentRect[0] + x1, y1 = self.currentRect[2] + return np.array([x0, y0, x1, y1]) + + def getNextFrame(self): + self.time += self.timeStep + img = self.sceneBg.copy() + + self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3)) + cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) + + return img + + def resetTime(self): + self.time = 0.0 + + +if __name__ == '__main__': + + backGr = cv2.imread('../../../samples/data/lena.jpg') + + render = TestSceneRender(backGr) + + while True: + + img = render.getNextFrame() + cv2.imshow('img', img) + + ch = 0xFF & cv2.waitKey(3) + if ch == 27: + break + cv2.destroyAllWindows() \ No newline at end of file diff --git a/samples/python/letter_recog.py b/samples/python/letter_recog.py index 4e166cbbd..7d0c43764 100755 --- a/samples/python/letter_recog.py +++ b/samples/python/letter_recog.py @@ -95,7 +95,8 @@ class Boost(LetterStatModel): new_responses = self.unroll_responses(responses) var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8) - self.model.setMaxDepth(5) + self.model.setWeakCount(15) + self.model.setMaxDepth(10) self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) def predict(self, samples): @@ -112,7 +113,8 @@ class SVM(LetterStatModel): def train(self, samples, responses): self.model.setType(cv2.ml.SVM_C_SVC) self.model.setC(1) - self.model.setKernel(cv2.ml.SVM_LINEAR) + self.model.setKernel(cv2.ml.SVM_RBF) + 
self.model.setGamma(.1) self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): @@ -131,10 +133,10 @@ class MLP(LetterStatModel): self.model.setLayerSizes(layer_sizes) self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP) - self.model.setBackpropMomentumScale(0) + self.model.setBackpropMomentumScale(0.0) self.model.setBackpropWeightScale(0.001) - self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 300, 0.01)) - self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM) + self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1) self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses)) From 4e3a6328ba06ab1069d07e3f785fe9b340d996d3 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Thu, 4 Feb 2016 17:38:05 +0300 Subject: [PATCH 11/16] Fixes in calibration and camshift tests --- modules/python/test/test_calibration.py | 2 +- modules/python/test/test_camshift.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/python/test/test_calibration.py b/modules/python/test/test_calibration.py index 48b53ff9b..665521862 100644 --- a/modules/python/test/test_calibration.py +++ b/modules/python/test/test_calibration.py @@ -57,7 +57,7 @@ class calibration_test(NewOpenCVTests): eps = 0.01 normCamEps = 10.0 - normDistEps = 0.01 + normDistEps = 0.001 cameraMatrixTest = [[ 532.80992189, 0., 342.4952186 ], [ 0., 532.93346422, 233.8879292 ], diff --git a/modules/python/test/test_camshift.py b/modules/python/test/test_camshift.py index 11164f9f0..064206edb 100644 --- a/modules/python/test/test_camshift.py +++ b/modules/python/test/test_camshift.py @@ -49,7 +49,6 @@ class camshift_test(NewOpenCVTests): def prepareRender(self): - cv2.namedWindow('camshift') self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg')) def runTracker(self): @@ -95,13 +94,10 @@ class camshift_test(NewOpenCVTests): if self.show_backproj: vis[:] = prob[...,np.newaxis] - cv2.rectangle(vis, (self.track_window[0], self.track_window[1]), (self.track_window[0] + self.track_window[2], self.track_window[1] + self.track_window[3]), (0, 255, 0), 2) - trackingRect = np.array(self.track_window) trackingRect[2] += trackingRect[0] trackingRect[3] += trackingRect[1] - print(intersectionRate((self.render.getCurrentRect()), trackingRect)) self.assertGreater(intersectionRate((self.render.getCurrentRect()), trackingRect), 0.5) if framesCounter > 300: From 87fc75c6d74eea5accf4c2d7c69d0fa3a91a8c00 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Fri, 5 Feb 2016 17:46:52 +0300 Subject: [PATCH 12/16] Improvements in tests, bugfix in digits_video sample --- modules/python/test/test_camshift.py | 26 +++++++------------- modules/python/test/test_digits.py | 2 +- modules/python/test/test_facedetect.py | 13 +--------- modules/python/test/test_houghcircles.py | 30 +++++++++++++++++++++--- modules/python/test/test_peopledetect.py | 13 +--------- modules/python/test/tests_common.py | 13 +++++++++- modules/python/test/tst_scene_render.py | 8 +++++-- samples/python/digits_video.py | 7 ++++-- 8 files changed, 62 insertions(+), 50 deletions(-) diff --git a/modules/python/test/test_camshift.py b/modules/python/test/test_camshift.py index 064206edb..766e0c4bb 100644 --- a/modules/python/test/test_camshift.py +++ b/modules/python/test/test_camshift.py @@ -24,32 +24,22 @@ import numpy as np import cv2 from tst_scene_render import TestSceneRender -def intersectionRate(s1, s2): - - x1, y1, x2, 
y2 = s1 - s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - - x1, y1, x2, y2 = s2 - s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - - area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) - return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) - - -from tests_common import NewOpenCVTests +from tests_common import NewOpenCVTests, intersectionRate class camshift_test(NewOpenCVTests): + framesNum = 300 frame = None selection = None drag_start = None show_backproj = False track_window = None render = None + errors = 0 def prepareRender(self): - self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg')) + self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), True) def runTracker(self): @@ -93,16 +83,18 @@ class camshift_test(NewOpenCVTests): if self.show_backproj: vis[:] = prob[...,np.newaxis] - trackingRect = np.array(self.track_window) trackingRect[2] += trackingRect[0] trackingRect[3] += trackingRect[1] - self.assertGreater(intersectionRate((self.render.getCurrentRect()), trackingRect), 0.5) + if intersectionRate(self.render.getCurrentRect(), trackingRect) < 0.4: + self.errors += 1 - if framesCounter > 300: + if framesCounter > self.framesNum: break + self.assertLess(float(self.errors) / self.framesNum, 0.4) + def test_camshift(self): self.prepareRender() self.runTracker() \ No newline at end of file diff --git a/modules/python/test/test_digits.py b/modules/python/test/test_digits.py index 2aa32df4c..2d5c90043 100644 --- a/modules/python/test/test_digits.py +++ b/modules/python/test/test_digits.py @@ -95,7 +95,7 @@ def evaluate_model(model, digits, samples, labels): confusion = np.zeros((10, 10), np.int32) for i, j in zip(labels, resp): - confusion[i, j] += 1 + confusion[int(i), int(j)] += 1 return err, confusion diff --git a/modules/python/test/test_facedetect.py b/modules/python/test/test_facedetect.py index fad1cf97d..50c108e05 100644 --- a/modules/python/test/test_facedetect.py +++ b/modules/python/test/test_facedetect.py @@ -10,17 +10,6 @@ from __future__ import print_function import numpy as np import cv2 -def intersectionRate(s1, s2): - - x1, y1, x2, y2 = s1 - s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - - x1, y1, x2, y2 = s2 - s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - - area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) - return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) - def detect(img, cascade): rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE) @@ -29,7 +18,7 @@ def detect(img, cascade): rects[:,2:] += rects[:,:2] return rects -from tests_common import NewOpenCVTests +from tests_common import NewOpenCVTests, intersectionRate class facedetect_test(NewOpenCVTests): diff --git a/modules/python/test/test_houghcircles.py b/modules/python/test/test_houghcircles.py index c012d30b4..318153ab7 100644 --- a/modules/python/test/test_houghcircles.py +++ b/modules/python/test/test_houghcircles.py @@ -10,9 +10,31 @@ from __future__ import print_function import cv2 import numpy as np import sys +from numpy import pi, sin, cos from tests_common import NewOpenCVTests +def circleApproximation(circle): + + nPoints = 30 + phi = 0 + dPhi = 2*pi / nPoints + contour = [] + for i in range(nPoints): + contour.append(([circle[0] + circle[2]*cos(i*dPhi), + circle[1] + circle[2]*sin(i*dPhi)])) + + return np.array(contour).astype(int) + +def convContoursIntersectiponRate(c1, c2): + + s1 = 
cv2.contourArea(c1) + s2 = cv2.contourArea(c2) + + s, _ = cv2.intersectConvexConvex(c1, c2) + + return 2*s/(s1+s2) + class houghcircles_test(NewOpenCVTests): def test_houghcircles(self): @@ -45,13 +67,15 @@ class houghcircles_test(NewOpenCVTests): [448.4, 121.3, 9.12], [384.6, 128.9, 8.62]] - eps = 7 matches_counter = 0 for i in range(len(testCircles)): for j in range(len(circles)): - if cv2.norm(testCircles[i] - circles[j], cv2.NORM_L2) < eps: + + tstCircle = circleApproximation(testCircles[i]) + circle = circleApproximation(circles[j]) + if convContoursIntersectiponRate(tstCircle, circle) > 0.6: matches_counter += 1 self.assertGreater(float(matches_counter) / len(testCircles), .5) - self.assertLess(float(len(circles) - matches_counter) / len(circles), .7) \ No newline at end of file + self.assertLess(float(len(circles) - matches_counter) / len(circles), .75) \ No newline at end of file diff --git a/modules/python/test/test_peopledetect.py b/modules/python/test/test_peopledetect.py index 9aed0ed0a..fb0a9e9ca 100644 --- a/modules/python/test/test_peopledetect.py +++ b/modules/python/test/test_peopledetect.py @@ -16,18 +16,7 @@ def inside(r, q): qx, qy, qw, qh = q return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh -def intersectionRate(s1, s2): - - x1, y1, x2, y2 = s1 - s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - - x1, y1, x2, y2 = s2 - s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) - - return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) - -from tests_common import NewOpenCVTests +from tests_common import NewOpenCVTests, intersectionRate class peopledetect_test(NewOpenCVTests): def test_peopledetect(self): diff --git a/modules/python/test/tests_common.py b/modules/python/test/tests_common.py index c1cc12d6e..d76346bb4 100644 --- a/modules/python/test/tests_common.py +++ b/modules/python/test/tests_common.py @@ -53,4 +53,15 @@ class NewOpenCVTests(unittest.TestCase): def assertGreater(self, a, b, msg=None): if not a > b: - self.fail('%s not greater than %s' % (repr(a), repr(b))) \ No newline at end of file + self.fail('%s not greater than %s' % (repr(a), repr(b))) + +def intersectionRate(s1, s2): + + x1, y1, x2, y2 = s1 + s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + + x1, y1, x2, y2 = s2 + s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + #print(np.array(s2)) + area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) + return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) \ No newline at end of file diff --git a/modules/python/test/tst_scene_render.py b/modules/python/test/tst_scene_render.py index e952c4236..adda5bf24 100644 --- a/modules/python/test/tst_scene_render.py +++ b/modules/python/test/tst_scene_render.py @@ -13,9 +13,10 @@ defaultSize = 512 class TestSceneRender(): - def __init__(self, bgImg = None, **params): + def __init__(self, bgImg = None, deformation = False, **params): self.time = 0.0 self.timeStep = 1.0 / 30.0 + self.deformation = deformation if bgImg != None: self.sceneBg = bgImg.copy() @@ -26,7 +27,7 @@ class TestSceneRender(): self.h = self.sceneBg.shape[1] self.initialRect = np.array([ (self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10), - (self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]) + (self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]).astype(int) self.currentRect = self.initialRect def setInitialRect(self, rect): @@ -42,6 
+43,9 @@ class TestSceneRender(): img = self.sceneBg.copy() self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3)) + if(self.deformation): + self.currentRect[1:3] += np.int(self.h/20*cos(self.time)) + cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) return img diff --git a/samples/python/digits_video.py b/samples/python/digits_video.py index 5f57cb8f9..c85deb6d0 100755 --- a/samples/python/digits_video.py +++ b/samples/python/digits_video.py @@ -27,9 +27,12 @@ def main(): if not os.path.exists(classifier_fn): print('"%s" not found, run digits.py first' % classifier_fn) return - model = SVM() - model.load(classifier_fn) + if True: + model = cv2.ml.SVM_load(classifier_fn) + else: + model = cv2.ml.SVM_create() + model.load_(classifier_fn) #Known bug: https://github.com/Itseez/opencv/issues/4969 while True: ret, frame = cap.read() From 0c6e09f060aea333b1f75317e46172e85f51961f Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Fri, 5 Feb 2016 18:46:43 +0300 Subject: [PATCH 13/16] Changes in facedetect test --- modules/python/test/test.py | 4 ++++ modules/python/test/test_facedetect.py | 15 +++++++-------- modules/python/test/tests_common.py | 6 ++++++ samples/data/kate.jpg | Bin 40791 -> 0 bytes 4 files changed, 17 insertions(+), 8 deletions(-) delete mode 100644 samples/data/kate.jpg diff --git a/modules/python/test/test.py b/modules/python/test/test.py index aa66e9234..bbd30389f 100755 --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -133,6 +133,10 @@ if __name__ == '__main__': print("Testing OpenCV", cv2.__version__) print("Local repo path:", args.repo) NewOpenCVTests.repoPath = args.repo + try: + NewOpenCVTests.extraTestDataPath = os.environ['OPENCV_TEST_DATA_PATH'] + except KeyError: + pass random.seed(0) unit_argv = [sys.argv[0]] + other; unittest.main(argv=unit_argv) \ No newline at end of file diff --git a/modules/python/test/test_facedetect.py b/modules/python/test/test_facedetect.py index 50c108e05..8d64fde10 100644 --- a/modules/python/test/test_facedetect.py +++ b/modules/python/test/test_facedetect.py @@ -31,8 +31,7 @@ class facedetect_test(NewOpenCVTests): cascade = cv2.CascadeClassifier(cascade_fn) nested = cv2.CascadeClassifier(nested_fn) - dirPath = 'samples/data/' - samples = ['lena.jpg', 'kate.jpg'] + samples = ['samples/data/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png'] faces = [] eyes = [] @@ -43,17 +42,17 @@ class facedetect_test(NewOpenCVTests): [ 244, 240, 294, 290], [ 309, 246, 352, 289]], - #kate - [[207, 89, 436, 318], - [245, 161, 294, 210], - [343, 139, 389, 185]] + #lisa + [[167, 119, 307, 259], + [188, 153, 229, 194], + [236, 153, 277, 194]] ] for sample in samples: - img = self.get_sample(dirPath + sample) + img = self.get_sample( sample) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - gray = cv2.GaussianBlur(gray, (3, 3), 1.1) + gray = cv2.GaussianBlur(gray, (5, 5), 5.1) rects = detect(gray, cascade) faces.append(rects) diff --git a/modules/python/test/tests_common.py b/modules/python/test/tests_common.py index d76346bb4..17473cd6e 100644 --- a/modules/python/test/tests_common.py +++ b/modules/python/test/tests_common.py @@ -19,6 +19,7 @@ class NewOpenCVTests(unittest.TestCase): # path to local repository folder containing 'samples' folder repoPath = None + extraTestDataPath = None # github repository url repoUrl = 'https://raw.github.com/Itseez/opencv/master' @@ -30,6 +31,11 @@ class NewOpenCVTests(unittest.TestCase): if os.path.isfile(candidate): with open(candidate, 'rb') as f: filedata = 
f.read()
+        if NewOpenCVTests.extraTestDataPath is not None:
+            candidate = NewOpenCVTests.extraTestDataPath + '/' + filename
+            if os.path.isfile(candidate):
+                with open(candidate, 'rb') as f:
+                    filedata = f.read()
         if filedata is None:
             filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read()
         self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
diff --git a/samples/data/kate.jpg b/samples/data/kate.jpg
deleted file mode 100644
index 40ab433bfde29689600f3d2a9308e0745f7fe988..0000000000000000000000000000000000000000
Binary files a/samples/data/kate.jpg and /dev/null differ
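Note on the get_sample() lookup order introduced in this patch: samples are now resolved from the local repository checkout (repoPath) first, then from the OpenCV extra test data tree (extraTestDataPath, populated from the OPENCV_TEST_DATA_PATH environment variable), and only then downloaded from repoUrl. A minimal standalone sketch of that order follows (resolve_sample is a hypothetical helper name used for illustration; the real method additionally caches the decoded image per filename and returns the cv2.imdecode result rather than raw bytes):

    import os

    def resolve_sample(filename, repoPath, extraTestDataPath, repoUrl):
        # Prefer local copies: the repo checkout first, then the extra test data tree.
        for base in (repoPath, extraTestDataPath):
            if base is not None:
                candidate = base + '/' + filename
                if os.path.isfile(candidate):
                    with open(candidate, 'rb') as f:
                        return f.read()
        # Last resort: fetch the raw file from the github mirror.
        try:
            from urllib.request import urlopen  # Python 3
        except ImportError:
            from urllib import urlopen  # Python 2
        return urlopen(repoUrl + '/' + filename).read()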
zhD*k#G>RJ9sl!SA4M&)5G0)jfrYJmFReR!69d0tza3esrd^d@v|Hf$%)G_#OLiU`2fl#$?sze)?*)&AXuhdB zOMC}7Q!ENG$FB{?f2q?0rGyXe_7bmo_h{Blj zFoJihES_0qyZWvSsc_6RdZ&A4gOcQD7kR>Xh>r=F zEgA}|)C7#^Z4n~QF^$t?j<2=xJ(BTzLAa>&LHUswgEvzKe6o!hM4NpnjE<30(nNiL zBk%(nz)y!ES(V2bxU7bX5dISN79Ec|Y17*l$XCmJAjG8EXhOzj)fxL{ze=-aNS7bm zlzSi(+e(}FEVYK@&j(647{xKfqkDZcZnpU~5y7W*0llyA^S1dWD0ytN@SN050$}lR zK7zV9^>w?(1b#?t_WT_i^^h{>C;njHj{KHhuq|x?>ejleaw8VSDbZKRhZI1(M@t~Z3ckF8p<1@g}xHdE$OsH zK>XM=2ufX*q_17iz~8lb-kGQ{fVq*OhL+QmPPDY5G^iDxvK;5Oy$SIGbUq)}yQMZ( zcp^Nrv7sTX4AkRxQY7=*cdvI(>+^VePN(%yvYODXngyP+Y^2^5+Zo+pC9&bevo8zp z)P-p|uL5OVC^I8QfqgNlcTYHQgk34#-9SXsn}kh&!y7z-z^&Z7FmZ-Vx*mUuv%h?S zCw=s@O>K!kkk2HCrFz@V`{VC=ztI8xLoi{5%G4~Tn>@Z2+M!>Qj=!{+^9f59iSZqq zt2-y$+YVLBkMz8g`N9O@mewOBXm;(XnnQD4Yb*yyzboZ5)R_1wXfe{&FWFqj5AqYp z!t1o=js=QGi&YF}S!Er)5wwUqATsB`R(;Y>IPQF_Lv)%H<{)nN`Ij317Qpo8AoB8p zbu40dOvOcVPBcVtRau7v+%X6Atu=y5_G97 z@Grur7hs?IZXN?jntk%P4n+?S(1}cGoV5r7JR?CLk!0y={F`;5@cpSSftR-HQVPGy zb?2dv%6tLWKSBC)+LB_hESuj#Cc}W%vbeU4spXhVWdNTRxlOgIpG6{_)64hBQ;(ZzBot&z}uV+AOI_9@p;X?5%t{4eQp=k-j*de&%TB~G?5 zUJ%isPDViCt?pSnTVE-kTZ)WLwjD=AvczgjTDf|?@tXBXT_-!2qF0{fa{es6#Dgv+ z+;Hf$AY;BKoPYIUYpz+D`h#S_-~FP*S5c1vcg-oQ@nboM@Z>tA4}xG=q?WF}<6XR{ zNJJ@dUiGZ5Qi_^$kjsGxg4>KeKydG*E8Zs*spvNEl`Wkmm-|sN%r6)v#_yNpmK6Nc zq0~39_Fmc*Uny?aY47j-A>}0WXJi2(GH=o;f_?8*)Y~P82Paa*lVrUW@Q2$>81M^# zo@B*~&Qy7QWE1<~Zgl!ZGcV zKwGf^@N~x<%*KWB0Vbw-L(%QIRFi}F)~(hYwwLxK1MJBR*Lf&?Lr$})<28KqF32Bx z;)3k!y6D#AznRP`zvTv2> z255gI%hSsCfQ#tza}VunU6|WGpsT(^#KEJ|DZMj}fqIay3lc=YcvG;f3XfGt$uE!w z%0${=kcytiTuVy7``RGLjiM=XMG_dS7ZC>{8uMb1gJm`Uv&zg&o@HXDZ4mnBcxYiW zgO#qM%B%B1dF8)6f&VvLp8X2Av(kCUBCjOKyT%bj973D-9uEx`2%ZCO&P0wmGqZKa zHQRWPE4zasepLO|A?hv>Cjls`E0*G|YOW|anZJF;ds6;N$*{)rv!#8wnMexeb?_h2 z@rV$-3~y_>emnvq#-3ys7xI49d8-r;*3w8iXTUd~DbR?5AFW{tx%^Dyhyl Date: Fri, 12 Feb 2016 15:55:06 +0300 Subject: [PATCH 14/16] Add segmentation and object tracking python tests --- .../features2d/include/opencv2/features2d.hpp | 2 +- modules/python/test/test_camshift.py | 10 +- .../python/test/test_feature_homography.py | 160 ++++++++++++++++++ modules/python/test/test_grabcut.py | 67 ++++++++ modules/python/test/test_lk_homography.py | 96 +++++++++++ modules/python/test/test_lk_track.py | 111 ++++++++++++ modules/python/test/test_mser.py | 69 ++++++++ modules/python/test/test_watershed.py | 51 ++++++ modules/python/test/tests_common.py | 18 +- modules/python/test/tst_scene_render.py | 70 ++++++-- 10 files changed, 627 insertions(+), 27 deletions(-) create mode 100644 modules/python/test/test_feature_homography.py create mode 100644 modules/python/test/test_grabcut.py create mode 100644 modules/python/test/test_lk_homography.py create mode 100644 modules/python/test/test_lk_track.py create mode 100644 modules/python/test/test_mser.py create mode 100644 modules/python/test/test_watershed.py diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 692d3d9fd..32fdabd8a 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -361,7 +361,7 @@ public: */ CV_WRAP virtual void detectRegions( InputArray image, CV_OUT std::vector >& msers, - std::vector& bboxes ) = 0; + CV_OUT std::vector& bboxes ) = 0; CV_WRAP virtual void setDelta(int delta) = 0; CV_WRAP virtual int getDelta() const = 0; diff --git a/modules/python/test/test_camshift.py b/modules/python/test/test_camshift.py index 766e0c4bb..a824320ef 100644 --- a/modules/python/test/test_camshift.py +++ 
b/modules/python/test/test_camshift.py @@ -39,7 +39,7 @@ class camshift_test(NewOpenCVTests): def prepareRender(self): - self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), True) + self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation = True) def runTracker(self): @@ -53,7 +53,6 @@ class camshift_test(NewOpenCVTests): while True: framesCounter += 1 self.frame = self.render.getNextFrame() - vis = self.frame.copy() hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) @@ -67,11 +66,6 @@ class camshift_test(NewOpenCVTests): hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX) self.hist = hist.reshape(-1) - - vis_roi = vis[y0:y1, x0:x1] - cv2.bitwise_not(vis_roi, vis_roi) - vis[mask == 0] = 0 - self.selection = False if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0: @@ -81,8 +75,6 @@ class camshift_test(NewOpenCVTests): term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ) track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit) - if self.show_backproj: - vis[:] = prob[...,np.newaxis] trackingRect = np.array(self.track_window) trackingRect[2] += trackingRect[0] trackingRect[3] += trackingRect[1] diff --git a/modules/python/test/test_feature_homography.py b/modules/python/test/test_feature_homography.py new file mode 100644 index 000000000..9fbdc027a --- /dev/null +++ b/modules/python/test/test_feature_homography.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python + +''' +Feature homography +================== + +Example of using features2d framework for interactive video homography matching. +ORB features and FLANN matcher are used. 
The actual tracking is implemented by +PlaneTracker class in plane_tracker.py +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 +import sys +PY3 = sys.version_info[0] == 3 + +if PY3: + xrange = range + +# local modules +from tst_scene_render import TestSceneRender + +def intersectionRate(s1, s2): + + x1, y1, x2, y2 = s1 + s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]]) + + area, intersection = cv2.intersectConvexConvex(s1, np.array(s2)) + return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2))) + +from tests_common import NewOpenCVTests + +class feature_homography_test(NewOpenCVTests): + + render = None + tracker = None + framesCounter = 0 + frame = None + + def test_feature_homography(self): + + self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), + self.get_sample('samples/data/box.png'), noise = 0.5, speed = 0.5) + self.frame = self.render.getNextFrame() + self.tracker = PlaneTracker() + self.tracker.clear() + self.tracker.add_target(self.frame, self.render.getCurrentRect()) + + while self.framesCounter < 100: + self.framesCounter += 1 + tracked = self.tracker.track(self.frame) + if len(tracked) > 0: + tracked = tracked[0] + self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6) + else: + self.assertEqual(0, 1, 'Tracking error') + self.frame = self.render.getNextFrame() + + +# built-in modules +from collections import namedtuple + +FLANN_INDEX_KDTREE = 1 +FLANN_INDEX_LSH = 6 +flann_params= dict(algorithm = FLANN_INDEX_LSH, + table_number = 6, # 12 + key_size = 12, # 20 + multi_probe_level = 1) #2 + +MIN_MATCH_COUNT = 10 + +''' + image - image to track + rect - tracked rectangle (x1, y1, x2, y2) + keypoints - keypoints detected inside rect + descrs - their descriptors + data - some user-provided data +''' +PlanarTarget = namedtuple('PlaneTarget', 'image, rect, keypoints, descrs, data') + +''' + target - reference to PlanarTarget + p0 - matched points coords in target image + p1 - matched points coords in input frame + H - homography matrix from p0 to p1 + quad - target bounary quad in input frame +''' +TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad') + +class PlaneTracker: + def __init__(self): + self.detector = cv2.ORB_create( nfeatures = 1000 ) + self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) + self.targets = [] + self.frame_points = [] + + def add_target(self, image, rect, data=None): + '''Add a new tracking target.''' + x0, y0, x1, y1 = rect + raw_points, raw_descrs = self.detect_features(image) + points, descs = [], [] + for kp, desc in zip(raw_points, raw_descrs): + x, y = kp.pt + if x0 <= x <= x1 and y0 <= y <= y1: + points.append(kp) + descs.append(desc) + descs = np.uint8(descs) + self.matcher.add([descs]) + target = PlanarTarget(image = image, rect=rect, keypoints = points, descrs=descs, data=data) + self.targets.append(target) + + def clear(self): + '''Remove all targets''' + self.targets = [] + self.matcher.clear() + + def track(self, frame): + '''Returns a list of detected TrackedTarget objects''' + self.frame_points, frame_descrs = self.detect_features(frame) + if len(self.frame_points) < MIN_MATCH_COUNT: + return [] + matches = self.matcher.knnMatch(frame_descrs, k = 2) + matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75] + if len(matches) < MIN_MATCH_COUNT: + return [] + matches_by_id = [[] for _ in xrange(len(self.targets))] 
+ for m in matches: + matches_by_id[m.imgIdx].append(m) + tracked = [] + for imgIdx, matches in enumerate(matches_by_id): + if len(matches) < MIN_MATCH_COUNT: + continue + target = self.targets[imgIdx] + p0 = [target.keypoints[m.trainIdx].pt for m in matches] + p1 = [self.frame_points[m.queryIdx].pt for m in matches] + p0, p1 = np.float32((p0, p1)) + H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0) + status = status.ravel() != 0 + if status.sum() < MIN_MATCH_COUNT: + continue + p0, p1 = p0[status], p1[status] + + x0, y0, x1, y1 = target.rect + quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]) + quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2) + + track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad) + tracked.append(track) + tracked.sort(key = lambda t: len(t.p0), reverse=True) + return tracked + + def detect_features(self, frame): + '''detect_features(self, frame) -> keypoints, descrs''' + keypoints, descrs = self.detector.detectAndCompute(frame, None) + if descrs is None: # detectAndCompute returns descs=None if not keypoints found + descrs = [] + return keypoints, descrs \ No newline at end of file diff --git a/modules/python/test/test_grabcut.py b/modules/python/test/test_grabcut.py new file mode 100644 index 000000000..1e1eb7548 --- /dev/null +++ b/modules/python/test/test_grabcut.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +''' +=============================================================================== +Interactive Image Segmentation using GrabCut algorithm. +=============================================================================== +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 +import sys + +from tests_common import NewOpenCVTests + +class grabcut_test(NewOpenCVTests): + + def verify(self, mask, exp): + + maxDiffRatio = 0.02 + expArea = np.count_nonzero(exp) + nonIntersectArea = np.count_nonzero(mask != exp) + curRatio = float(nonIntersectArea) / expArea + return curRatio < maxDiffRatio + + def scaleMask(self, mask): + + return np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD),255,0).astype('uint8') + + def test_grabcut(self): + + img = self.get_sample('cv/shared/airplane.png') + mask_prob = self.get_sample("cv/grabcut/mask_probpy.png", 0) + exp_mask1 = self.get_sample("cv/grabcut/exp_mask1py.png", 0) + exp_mask2 = self.get_sample("cv/grabcut/exp_mask2py.png", 0) + + if img == None: + self.assertEqual(0, 1, 'Missing test data') + + rect = (24, 126, 459, 168) + mask = np.zeros(img.shape[:2], dtype = np.uint8) + bgdModel = np.zeros((1,65),np.float64) + fgdModel = np.zeros((1,65),np.float64) + cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_RECT) + cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv2.GC_EVAL) + + if mask_prob == None: + mask_prob = mask.copy() + cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob) + if exp_mask1 == None: + exp_mask1 = self.scaleMask(mask) + cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1) + + self.assertEqual(self.verify(self.scaleMask(mask), exp_mask1), True) + + mask = mask_prob + bgdModel = np.zeros((1,65),np.float64) + fgdModel = np.zeros((1,65),np.float64) + cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_MASK) + cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv2.GC_EVAL) + + if exp_mask2 == None: + exp_mask2 = self.scaleMask(mask) + cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2) + + 
self.assertEqual(self.verify(self.scaleMask(mask), exp_mask2), True) \ No newline at end of file diff --git a/modules/python/test/test_lk_homography.py b/modules/python/test/test_lk_homography.py new file mode 100644 index 000000000..8e526d0de --- /dev/null +++ b/modules/python/test/test_lk_homography.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +''' +Lucas-Kanade homography tracker test +=============================== +Uses goodFeaturesToTrack for track initialization and back-tracking for match verification +between frames. Finds homography between reference and current views. +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + +#local modules +from tst_scene_render import TestSceneRender +from tests_common import NewOpenCVTests, isPointInRect + +lk_params = dict( winSize = (19, 19), + maxLevel = 2, + criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + +feature_params = dict( maxCorners = 1000, + qualityLevel = 0.01, + minDistance = 8, + blockSize = 19 ) + +def checkedTrace(img0, img1, p0, back_threshold = 1.0): + p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + d = abs(p0-p0r).reshape(-1, 2).max(-1) + status = d < back_threshold + return p1, status + +class lk_homography_test(NewOpenCVTests): + + render = None + framesCounter = 0 + frame = frame0 = None + p0 = None + p1 = None + gray0 = gray1 = None + numFeaturesInRectOnStart = 0 + + def test_lk_homography(self): + self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), + self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0) + + frame = self.render.getNextFrame() + frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + self.frame0 = frame.copy() + self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params) + + isForegroundHomographyFound = False + + if self.p0 is not None: + self.p1 = self.p0 + self.gray0 = frame_gray + self.gray1 = frame_gray + currRect = self.render.getCurrentRect() + for (x,y) in self.p0[:,0]: + if isPointInRect((x,y), currRect): + self.numFeaturesInRectOnStart += 1 + + while self.framesCounter < 200: + self.framesCounter += 1 + frame = self.render.getNextFrame() + frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + if self.p0 is not None: + p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1) + + self.p1 = p2[trace_status].copy() + self.p0 = self.p0[trace_status].copy() + self.gray1 = frame_gray + + if len(self.p0) < 4: + self.p0 = None + continue + H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0) + + goodPointsInRect = 0 + goodPointsOutsideRect = 0 + for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]): + if good: + if isPointInRect((x1,y1), self.render.getCurrentRect()): + goodPointsInRect += 1 + else: goodPointsOutsideRect += 1 + + if goodPointsOutsideRect < goodPointsInRect: + isForegroundHomographyFound = True + self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6) + else: + p = cv2.goodFeaturesToTrack(frame_gray, **feature_params) + + self.assertEqual(isForegroundHomographyFound, True) \ No newline at end of file diff --git a/modules/python/test/test_lk_track.py b/modules/python/test/test_lk_track.py new file mode 100644 index 000000000..ccc67a512 --- /dev/null +++ b/modules/python/test/test_lk_track.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +''' +Lucas-Kanade tracker +==================== + 
+Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack +for track initialization and back-tracking for match verification +between frames. +''' + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + +#local modules +from tst_scene_render import TestSceneRender +from tests_common import NewOpenCVTests, intersectionRate, isPointInRect + +lk_params = dict( winSize = (15, 15), + maxLevel = 2, + criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + +feature_params = dict( maxCorners = 500, + qualityLevel = 0.3, + minDistance = 7, + blockSize = 7 ) + +def getRectFromPoints(points): + + distances = [] + for point in points: + distances.append(cv2.norm(point, cv2.NORM_L2)) + + x0, y0 = points[np.argmin(distances)] + x1, y1 = points[np.argmax(distances)] + + return np.array([x0, y0, x1, y1]) + + +class lk_track_test(NewOpenCVTests): + + track_len = 10 + detect_interval = 5 + tracks = [] + frame_idx = 0 + render = None + + def test_lk_track(self): + + self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), self.get_sample('samples/data/box.png')) + self.runTracker() + + def runTracker(self): + foregroundPointsNum = 0 + + while True: + frame = self.render.getNextFrame() + frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + + if len(self.tracks) > 0: + img0, img1 = self.prev_gray, frame_gray + p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2) + p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + d = abs(p0-p0r).reshape(-1, 2).max(-1) + good = d < 1 + new_tracks = [] + for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good): + if not good_flag: + continue + tr.append([(x, y), self.frame_idx]) + if len(tr) > self.track_len: + del tr[0] + new_tracks.append(tr) + self.tracks = new_tracks + + if self.frame_idx % self.detect_interval == 0: + goodTracksCount = 0 + for tr in self.tracks: + oldRect = self.render.getRectInTime(self.render.timeStep * tr[0][1]) + newRect = self.render.getRectInTime(self.render.timeStep * tr[-1][1]) + if isPointInRect(tr[0][0], oldRect) and isPointInRect(tr[-1][0], newRect): + goodTracksCount += 1 + + if self.frame_idx == self.detect_interval: + foregroundPointsNum = goodTracksCount + + fgIndex = float(foregroundPointsNum) / (foregroundPointsNum + 1) + fgRate = float(goodTracksCount) / (len(self.tracks) + 1) + + if self.frame_idx > 0: + self.assertGreater(fgIndex, 0.9) + self.assertGreater(fgRate, 0.2) + + mask = np.zeros_like(frame_gray) + mask[:] = 255 + for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]: + cv2.circle(mask, (x, y), 5, 0, -1) + p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) + if p is not None: + for x, y in np.float32(p).reshape(-1, 2): + self.tracks.append([[(x, y), self.frame_idx]]) + + self.frame_idx += 1 + self.prev_gray = frame_gray + + if self.frame_idx > 300: + break \ No newline at end of file diff --git a/modules/python/test/test_mser.py b/modules/python/test/test_mser.py new file mode 100644 index 000000000..619300b86 --- /dev/null +++ b/modules/python/test/test_mser.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +''' +MSER detector test +''' +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + +from tests_common import NewOpenCVTests + +class mser_test(NewOpenCVTests): + def test_mser(self): + + img = self.get_sample('cv/mser/puzzle.png', 0) + 
smallImg = [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255] + ] + thresharr = [ 0, 70, 120, 180, 255 ] + kDelta = 5 + mserExtractor = cv2.MSER_create() + mserExtractor.setDelta(kDelta) + np.random.seed(10) + + for i in range(100): + + use_big_image = int(np.random.rand(1,1)*7) != 0 + invert = int(np.random.rand(1,1)*2) != 0 + binarize = int(np.random.rand(1,1)*5) != 0 if use_big_image else False + blur = int(np.random.rand(1,1)*2) != 0 + thresh = thresharr[int(np.random.rand(1,1)*5)] + src0 = img if use_big_image else np.array(smallImg).astype('uint8') + src = src0.copy() + + kMinArea = 256 if use_big_image else 10 + kMaxArea = int(src.shape[0]*src.shape[1]/4) + + mserExtractor.setMinArea(kMinArea) + mserExtractor.setMaxArea(kMaxArea) + if invert: + cv2.bitwise_not(src, src) + if binarize: + _, src = cv2.threshold(src, thresh, 255, cv2.THRESH_BINARY) + if blur: + src = cv2.GaussianBlur(src, (5, 5), 1.5, 1.5) + minRegs = 7 if use_big_image else 2 + maxRegs = 1000 if use_big_image else 15 + if binarize and (thresh == 0 or thresh == 255): + minRegs = maxRegs = 0 + msers, boxes = mserExtractor.detectRegions(src) + nmsers = len(msers) + self.assertEqual(nmsers, len(boxes)) + self.assertLessEqual(minRegs, nmsers) + self.assertGreaterEqual(maxRegs, nmsers) \ No newline at end of file diff --git a/modules/python/test/test_watershed.py b/modules/python/test/test_watershed.py new file mode 100644 index 000000000..7c640f67b --- /dev/null +++ b/modules/python/test/test_watershed.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +''' +Watershed segmentation +========= + +This program demonstrates the watershed segmentation algorithm +in OpenCV: watershed(). 
+ +Usage +----- +watershed.py [image filename] + +Keys +---- + 1-7 - switch marker color + SPACE - update segmentation + r - reset + a - toggle autoupdate + ESC - exit + +''' + + +# Python 2/3 compatibility +from __future__ import print_function + +import numpy as np +import cv2 + +from tests_common import NewOpenCVTests + +class watershed_test(NewOpenCVTests): + def test_watershed(self): + + img = self.get_sample('cv/inpaint/orig.png') + markers = self.get_sample('cv/watershed/wshed_exp.png', 0) + refSegments = self.get_sample('cv/watershed/wshed_segments.png') + + if img == None or markers == None: + self.assertEqual(0, 1, 'Missing test data') + + colors = np.int32( list(np.ndindex(3, 3, 3)) ) * 122 + cv2.watershed(img, np.int32(markers)) + segments = colors[np.maximum(markers, 0)] + + if refSegments == None: + refSegments = segments.copy() + cv2.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments) + + self.assertLess(cv2.norm(segments - refSegments, cv2.NORM_L1) / 255.0, 50) \ No newline at end of file diff --git a/modules/python/test/tests_common.py b/modules/python/test/tests_common.py index 17473cd6e..3a636b255 100644 --- a/modules/python/test/tests_common.py +++ b/modules/python/test/tests_common.py @@ -37,7 +37,7 @@ class NewOpenCVTests(unittest.TestCase): with open(candidate, 'rb') as f: filedata = f.read() if filedata is None: - filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read() + return None#filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read() self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) return self.image_cache[filename] @@ -64,10 +64,16 @@ class NewOpenCVTests(unittest.TestCase): def intersectionRate(s1, s2): x1, y1, x2, y2 = s1 - s1 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] + s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]]) x1, y1, x2, y2 = s2 - s2 = [[x1, y1], [x2,y1], [x2, y2], [x1, y2] ] - #print(np.array(s2)) - area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) - return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) \ No newline at end of file + s2 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]]) + + area, intersection = cv2.intersectConvexConvex(s1, s2) + return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(s2)) + +def isPointInRect(p, rect): + if rect[0] <= p[0] and rect[1] <=p[1] and p[0] <= rect[2] and p[1] <= rect[3]: + return True + else: + return False \ No newline at end of file diff --git a/modules/python/test/tst_scene_render.py b/modules/python/test/tst_scene_render.py index adda5bf24..80dd95abf 100644 --- a/modules/python/test/tst_scene_render.py +++ b/modules/python/test/tst_scene_render.py @@ -13,41 +13,89 @@ defaultSize = 512 class TestSceneRender(): - def __init__(self, bgImg = None, deformation = False, **params): + def __init__(self, bgImg = None, fgImg = None, deformation = False, noise = 0.0, speed = 0.25, **params): self.time = 0.0 self.timeStep = 1.0 / 30.0 + self.foreground = fgImg self.deformation = deformation + self.noise = noise + self.speed = speed if bgImg != None: self.sceneBg = bgImg.copy() else: - self.sceneBg = np.zeros((defaultSize, defaultSize, 3), np.uint8) + self.sceneBg = np.zeros(defaultSize, defaultSize, np.uint8) self.w = self.sceneBg.shape[0] self.h = self.sceneBg.shape[1] + if fgImg != None: + self.foreground = fgImg.copy() + self.center = self.currentCenter = (int(self.w/2 - fgImg.shape[0]/2), int(self.h/2 - fgImg.shape[1]/2)) + + self.xAmpl = 
self.sceneBg.shape[0] - (self.center[0] + fgImg.shape[0]) + self.yAmpl = self.sceneBg.shape[1] - (self.center[1] + fgImg.shape[1]) + self.initialRect = np.array([ (self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]).astype(int) self.currentRect = self.initialRect + np.random.seed(10) + + def getXOffset(self, time): + return int(self.xAmpl*cos(time*self.speed)) + + + def getYOffset(self, time): + return int(self.yAmpl*sin(time*self.speed)) def setInitialRect(self, rect): self.initialRect = rect + def getRectInTime(self, time): + + if self.foreground != None: + tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time))) + x0, y0 = tmp + x1, y1 = tmp + self.foreground.shape[0:2] + return np.array([y0, x0, y1, x1]) + else: + x0, y0 = self.initialRect[0] + np.array((self.getXOffset(time), self.getYOffset(time))) + x1, y1 = self.initialRect[2] + np.array((self.getXOffset(time), self.getYOffset(time))) + return np.array([y0, x0, y1, x1]) + def getCurrentRect(self): - x0, y0 = self.currentRect[0] - x1, y1 = self.currentRect[2] - return np.array([x0, y0, x1, y1]) + + if self.foreground != None: + + x0 = self.currentCenter[0] + y0 = self.currentCenter[1] + x1 = self.currentCenter[0] + self.foreground.shape[0] + y1 = self.currentCenter[1] + self.foreground.shape[1] + return np.array([y0, x0, y1, x1]) + else: + x0, y0 = self.currentRect[0] + x1, y1 = self.currentRect[2] + return np.array([x0, y0, x1, y1]) def getNextFrame(self): - self.time += self.timeStep img = self.sceneBg.copy() - self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3)) - if(self.deformation): - self.currentRect[1:3] += np.int(self.h/20*cos(self.time)) + if self.foreground != None: + self.currentCenter = (self.center[0] + self.getXOffset(self.time), self.center[1] + self.getYOffset(self.time)) + img[self.currentCenter[0]:self.currentCenter[0]+self.foreground.shape[0], + self.currentCenter[1]:self.currentCenter[1]+self.foreground.shape[1]] = self.foreground + else: + self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3)) + if self.deformation: + self.currentRect[1:3] += int(self.h/20*cos(self.time)) + cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) - cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) + self.time += self.timeStep + if self.noise: + noise = np.zeros(self.sceneBg.shape, np.int8) + cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + img = cv2.add(img, noise, dtype=cv2.CV_8UC3) return img def resetTime(self): @@ -58,7 +106,7 @@ if __name__ == '__main__': backGr = cv2.imread('../../../samples/data/lena.jpg') - render = TestSceneRender(backGr) + render = TestSceneRender(backGr, noise = 0.5) while True: From 22b028b64b4a3f869caa8af9e4a83eb657387e1d Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Wed, 24 Feb 2016 13:09:42 +0300 Subject: [PATCH 15/16] Fix warnings in python3 --- modules/python/test/test.py | 2 +- modules/python/test/test_grabcut.py | 10 +++++----- modules/python/test/test_watershed.py | 24 +++--------------------- modules/python/test/tst_scene_render.py | 10 +++++----- 4 files changed, 14 insertions(+), 32 deletions(-) diff --git a/modules/python/test/test.py b/modules/python/test/test.py index bbd30389f..a2d36989b 100755 --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -136,7 +136,7 @@ if __name__ == '__main__': try: NewOpenCVTests.extraTestDataPath = os.environ['OPENCV_TEST_DATA_PATH'] 
    except KeyError:
-        pass
+        print('Missing opencv extra repository. Some tests may fail.')
     random.seed(0)
     unit_argv = [sys.argv[0]] + other;
     unittest.main(argv=unit_argv)
\ No newline at end of file
diff --git a/modules/python/test/test_grabcut.py b/modules/python/test/test_grabcut.py
index 1e1eb7548..38211f7d8 100644
--- a/modules/python/test/test_grabcut.py
+++ b/modules/python/test/test_grabcut.py
@@ -35,8 +35,8 @@ class grabcut_test(NewOpenCVTests):
         exp_mask1 = self.get_sample("cv/grabcut/exp_mask1py.png", 0)
         exp_mask2 = self.get_sample("cv/grabcut/exp_mask2py.png", 0)
 
-        if img == None:
-            self.assertEqual(0, 1, 'Missing test data')
+        if img is None:
+            self.assertTrue(False, 'Missing test data')
 
         rect = (24, 126, 459, 168)
         mask = np.zeros(img.shape[:2], dtype = np.uint8)
@@ -45,10 +45,10 @@ class grabcut_test(NewOpenCVTests):
         cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_RECT)
         cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv2.GC_EVAL)
 
-        if mask_prob == None:
+        if mask_prob is None:
             mask_prob = mask.copy()
             cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob)
-        if exp_mask1 == None:
+        if exp_mask1 is None:
             exp_mask1 = self.scaleMask(mask)
             cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1)
 
@@ -60,7 +60,7 @@ class grabcut_test(NewOpenCVTests):
         cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_MASK)
         cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv2.GC_EVAL)
 
-        if exp_mask2 == None:
+        if exp_mask2 is None:
             exp_mask2 = self.scaleMask(mask)
             cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2)
 
diff --git a/modules/python/test/test_watershed.py b/modules/python/test/test_watershed.py
index 7c640f67b..0a1d222f4 100644
--- a/modules/python/test/test_watershed.py
+++ b/modules/python/test/test_watershed.py
@@ -1,27 +1,9 @@
 #!/usr/bin/env python
 
 '''
-Watershed segmentation
-=========
-
-This program demonstrates the watershed segmentation algorithm
-in OpenCV: watershed().
-
-Usage
------
-watershed.py [image filename]
-
-Keys
-----
-  1-7   - switch marker color
-  SPACE - update segmentation
-  r     - reset
-  a     - toggle autoupdate
-  ESC   - exit
-
+Watershed segmentation test
 '''
 
-
 # Python 2/3 compatibility
 from __future__ import print_function
 
@@ -37,14 +19,14 @@ class watershed_test(NewOpenCVTests):
         markers = self.get_sample('cv/watershed/wshed_exp.png', 0)
         refSegments = self.get_sample('cv/watershed/wshed_segments.png')
 
-        if img == None or markers == None:
+        if img is None or markers is None:
             self.assertEqual(0, 1, 'Missing test data')
 
         colors = np.int32( list(np.ndindex(3, 3, 3)) ) * 122
         cv2.watershed(img, np.int32(markers))
         segments = colors[np.maximum(markers, 0)]
 
-        if refSegments == None:
+        if refSegments is None:
             refSegments = segments.copy()
             cv2.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments)
 
diff --git a/modules/python/test/tst_scene_render.py b/modules/python/test/tst_scene_render.py
index 80dd95abf..49cde80d2 100644
--- a/modules/python/test/tst_scene_render.py
+++ b/modules/python/test/tst_scene_render.py
@@ -21,7 +21,7 @@ class TestSceneRender():
         self.noise = noise
         self.speed = speed
 
-        if bgImg != None:
+        if bgImg is not None:
             self.sceneBg = bgImg.copy()
         else:
             self.sceneBg = np.zeros((defaultSize, defaultSize, 3), np.uint8)
@@ -29,7 +29,7 @@
         self.w = self.sceneBg.shape[0]
         self.h = self.sceneBg.shape[1]
 
-        if fgImg != None:
+        if fgImg is not None:
             self.foreground = fgImg.copy()
             self.center = self.currentCenter = (int(self.w/2 - fgImg.shape[0]/2), int(self.h/2 - fgImg.shape[1]/2))
 
@@ -53,7 +53,7 @@ class TestSceneRender():
 
     def getRectInTime(self, time):
 
-        if self.foreground != None:
+        if self.foreground is not None:
             tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time)))
             x0, y0 = tmp
             x1, y1 = tmp + self.foreground.shape[0:2]
@@ -65,7 +65,7 @@ class TestSceneRender():
 
     def getCurrentRect(self):
 
-        if self.foreground != None:
+        if self.foreground is not None:
 
             x0 = self.currentCenter[0]
             y0 = self.currentCenter[1]
@@ -80,7 +80,7 @@ class TestSceneRender():
     def getNextFrame(self):
         img = self.sceneBg.copy()
 
-        if self.foreground != None:
+        if self.foreground is not None:
             self.currentCenter = (self.center[0] + self.getXOffset(self.time), self.center[1] + self.getYOffset(self.time))
             img[self.currentCenter[0]:self.currentCenter[0]+self.foreground.shape[0],
                 self.currentCenter[1]:self.currentCenter[1]+self.foreground.shape[1]] = self.foreground
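Note: some background on the '== None' to 'is None' changes in the patch
above. On NumPy arrays the '==' operator is elementwise, so guarding a
loaded sample with '== None' yields an array (plus a FutureWarning on
Python 3 / newer NumPy) whose truth value is ambiguous inside 'if'. A small
standalone illustration, with a made-up array standing in for a loaded
image:

    import numpy as np

    img = np.zeros((2, 2), np.uint8)

    # if img == None: ...  # elementwise comparison: warns, and its truth
    #                      # value inside 'if' is ambiguous
    if img is None:          # identity test: always a plain bool
        print('no image')
    else:
        print('image loaded, shape:', img.shape)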
From e2434ab7c3a6622c29d4b53334e4f053065442dd Mon Sep 17 00:00:00 2001
From: Vladislav Sovrasov
Date: Wed, 9 Mar 2016 11:37:25 +0300
Subject: [PATCH 16/16] Use AKAZE detector instead of ORB in feature_homography test

---
 modules/python/test/test_feature_homography.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/python/test/test_feature_homography.py b/modules/python/test/test_feature_homography.py
index 9fbdc027a..861ff9232 100644
--- a/modules/python/test/test_feature_homography.py
+++ b/modules/python/test/test_feature_homography.py
@@ -92,7 +92,7 @@ TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
 
 class PlaneTracker:
     def __init__(self):
-        self.detector = cv2.ORB_create( nfeatures = 1000 )
+        self.detector = cv2.AKAZE_create(threshold = 0.003)
         self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
         self.targets = []
         self.frame_points = []
@@ -155,6 +155,6 @@ class PlaneTracker:
     def detect_features(self, frame):
         '''detect_features(self, frame) -> keypoints, descrs'''
         keypoints, descrs = self.detector.detectAndCompute(frame, None)
-        if descrs is None: # detectAndCompute returns descs=None if not keypoints found
+        if descrs is None: # detectAndCompute returns descs=None if no keypoints found
             descrs = []
         return keypoints, descrs
\ No newline at end of file
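Note: a closing illustration of the pipeline the last patch touches. AKAZE,
like ORB, produces binary (MLDB) descriptors, so the LSH-based FLANN matcher
that test_feature_homography.py already sets up keeps working unchanged. The
sketch below is standalone and illustrative; the image path, the synthetic
warp and the 0.75 ratio threshold are assumptions, not values taken from the
test:

    import numpy as np
    import cv2

    FLANN_INDEX_LSH = 6
    flann_params = dict(algorithm=FLANN_INDEX_LSH,
                        table_number=6, key_size=12, multi_probe_level=1)

    detector = cv2.AKAZE_create(threshold=0.003)       # binary descriptors
    matcher = cv2.FlannBasedMatcher(flann_params, {})  # empty dict: see #1329

    img = cv2.imread('../../../samples/data/lena.jpg', 0)
    h, w = img.shape
    # synthesize a second view with a small affine warp
    M = np.float32([[1, 0.1, 5], [0, 1, 10]])
    warped = cv2.warpAffine(img, M, (w, h))

    kp1, desc1 = detector.detectAndCompute(img, None)
    kp2, desc2 = detector.detectAndCompute(warped, None)
    if desc1 is None or desc2 is None:  # None when no keypoints were found
        raise SystemExit('no features detected')

    # Lowe ratio test keeps only distinctive correspondences
    matches = matcher.knnMatch(desc1, desc2, k=2)
    good = [p[0] for p in matches
            if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]
    print('%d good matches' % len(good))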