From 1ad1b737c2a3f37571282f93cb9983d5ad284e59 Mon Sep 17 00:00:00 2001 From: Ana Huaman Quispe Date: Tue, 16 Dec 2014 19:45:50 -0500 Subject: [PATCH 01/98] Fixed registration bug: It was ALWAYS OFF - even after changing the property --- modules/videoio/src/cap_openni2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp index 8fea1bdb2..cf847fce9 100644 --- a/modules/videoio/src/cap_openni2.cpp +++ b/modules/videoio/src/cap_openni2.cpp @@ -566,7 +566,7 @@ bool CvCapture_OpenNI2::setDepthGeneratorProperty( int propIdx, double propValue // then the property isn't avaliable if ( color.isValid() ) { - openni::ImageRegistrationMode mode = propValue < 1.0 ? openni::IMAGE_REGISTRATION_OFF : openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR; + openni::ImageRegistrationMode mode = propValue < 1.0 ? openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR : openni::IMAGE_REGISTRATION_OFF; if( !device.getImageRegistrationMode() == mode ) { if (device.isImageRegistrationModeSupported(mode)) From 75c99d65a5508283cbf85c77ea355f87d0087434 Mon Sep 17 00:00:00 2001 From: Josep Bosch Date: Fri, 9 Jan 2015 10:05:36 +0100 Subject: [PATCH 02/98] Fisheye calibration methods available now in python --- modules/calib3d/include/opencv2/calib3d.hpp | 20 ++++++++++---------- modules/calib3d/src/fisheye.cpp | 16 ++++++++++++++-- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index 7b6a4db7c..d059eed94 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -1651,7 +1651,7 @@ namespace fisheye InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); /** @overload */ - CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, + CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); /** @brief Distorts 2D points using fisheye model. @@ -1663,7 +1663,7 @@ namespace fisheye @param alpha The skew coefficient. @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\ . */ - CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); + CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); /** @brief Undistorts 2D points using fisheye model @@ -1676,7 +1676,7 @@ namespace fisheye @param P New camera matrix (3x3) or new projection matrix (3x4) @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\ . */ - CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted, + CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray()); /** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty zero @@ -1693,7 +1693,7 @@ namespace fisheye @param map1 The first output map. @param map2 The second output map. 
*/ - CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, + CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, const cv::Size& size, int m1type, OutputArray map1, OutputArray map2); /** @brief Transforms an image to compensate for fisheye lens distortion. @@ -1724,7 +1724,7 @@ namespace fisheye ![image](pics/fisheye_undistorted.jpg) */ - CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted, + CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size()); /** @brief Estimates new camera matrix for undistortion or rectification. @@ -1740,7 +1740,7 @@ namespace fisheye @param new_size @param fov_scale Divisor for new focal length. */ - CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, + CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0); /** @brief Performs camera calibaration @@ -1774,7 +1774,7 @@ namespace fisheye zero. @param criteria Termination criteria for the iterative optimization algorithm. */ - CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, + CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); @@ -1808,7 +1808,7 @@ namespace fisheye length. Balance is in range of [0, 1]. @param fov_scale Divisor for new focal length. */ - CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, + CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0); @@ -1844,9 +1844,9 @@ namespace fisheye zero. @param criteria Termination criteria for the iterative optimization algorithm. */ - CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, + CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, - OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC, + OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); //! @} calib3d_fisheye diff --git a/modules/calib3d/src/fisheye.cpp b/modules/calib3d/src/fisheye.cpp index ce4144bbb..b0e92a3ad 100644 --- a/modules/calib3d/src/fisheye.cpp +++ b/modules/calib3d/src/fisheye.cpp @@ -794,8 +794,20 @@ double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArray if (K.needed()) cv::Mat(_K).convertTo(K, K.empty() ? 
CV_64FC1 : K.type()); if (D.needed()) cv::Mat(finalParam.k).convertTo(D, D.empty() ? CV_64FC1 : D.type()); - if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type()); - if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type()); + if (rvecs.kind()==_InputArray::STD_VECTOR_MAT) + { + int i; + for( i = 0; i < (int)objectPoints.total(); i++ ) + { + rvecs.getMat(i)=omc[i]; + tvecs.getMat(i)=Tc[i]; + } + } + else + { + if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type()); + if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type()); + } return rms; } From 8d48632ebef60e7c4d92b5c9d6549f8e1623010a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:28 +0300 Subject: [PATCH 03/98] avx2 --- CMakeLists.txt | 1 + cmake/OpenCVCompilerOptions.cmake | 3 + modules/core/include/opencv2/core/cvdef.h | 11 +++- modules/core/src/convert.cpp | 72 +++++++++++------------ modules/core/src/precomp.hpp | 1 + modules/core/src/system.cpp | 53 ++++++++++++++++- modules/ts/src/ts_func.cpp | 3 + 7 files changed, 105 insertions(+), 39 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 75fcf9659..7b5648efd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -221,6 +221,7 @@ OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF ) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 2f9068c60..831026fb5 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -140,6 +140,9 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_AVX) add_extra_compiler_option(-mavx) endif() + if(ENABLE_AVX2) + add_extra_compiler_option(-mavx2) + endif() # GCC depresses SSEx instructions when -mavx is used. Instead, it generates new AVX instructions or AVX equivalence for all SSEx instructions when needed. if(NOT OPENCV_EXTRA_CXX_FLAGS MATCHES "-mavx") diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 06894d7a5..c52cb021c 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -114,7 +114,8 @@ #define CV_CPU_SSE4_2 7 #define CV_CPU_POPCNT 8 #define CV_CPU_AVX 10 -#define CV_CPU_NEON 11 +#define CV_CPU_AVX2 11 +#define CV_CPU_NEON 12 // when adding to this list remember to update the enum in core/utility.cpp #define CV_HARDWARE_MAX_FEATURE 255 @@ -141,7 +142,7 @@ # include # define CV_SSE4_2 1 # endif -# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +# if defined __AVX__ || defined __AVX2__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) // MS Visual Studio 2010 (2012?) 
has no macro pre-defined to identify the use of /arch:AVX // See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 # include @@ -150,6 +151,9 @@ # define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK) # else # define __xgetbv() 0 +# ifdef __AVX2__ +# define CV_AVX2 1 +# endif # endif # endif #endif @@ -187,6 +191,9 @@ #ifndef CV_AVX # define CV_AVX 0 #endif +#ifndef CV_AVX2 +# define CV_AVX2 0 +#endif #ifndef CV_NEON # define CV_NEON 0 #endif diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 829b984c9..55f08f1bd 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -2294,26 +2294,44 @@ cvtScale_( const short* src, size_t sstep, { int x = 0; - #if CV_SSE2 - if(USE_SSE2)//~5X + #if CV_AVX2 + if (USE_AVX2) + { + __m256 scale256 = _mm256_set1_ps (scale); + __m256 shift256 = _mm256_set1_ps (shift); + __m256i zero = _mm256_setzero_si256(); + for ( ; x <= size.width - 16; x += 16) { - __m128 scale128 = _mm_set1_ps (scale); - __m128 shift128 = _mm_set1_ps (shift); - for(; x <= size.width - 8; x += 8 ) - { - __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x)); - __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4)); - __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16)); - __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16)); - rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128); - rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128); - r0 = _mm_cvtps_epi32(rf0); - r1 = _mm_cvtps_epi32(rf1); - - _mm_storeu_si128((__m128i*)(dst + x), r0); - _mm_storeu_si128((__m128i*)(dst + x + 4), r1); - } + __m256i v_src = _mm256_loadu_si256((__m256i const *)(src + x)); + __m256i v_src_lo = _mm256_unpacklo_epi16(v_src, zero); + __m256i v_src_hi = _mm256_unpackhi_epi16(v_src, zero); + __m256 v_dst0 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (v_src_lo), scale256), shift256); + __m256 v_dst1 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (v_src_hi), scale256), shift256); + _mm256_storeu_si256 ((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0)); + _mm256_storeu_si256 ((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1)); } + } + #endif + #if CV_SSE2 + if (USE_SSE2)//~5X + { + __m128 scale128 = _mm_set1_ps (scale); + __m128 shift128 = _mm_set1_ps (shift); + for(; x <= size.width - 8; x += 8 ) + { + __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x)); + __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4)); + __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16)); + __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16)); + rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128); + rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128); + r0 = _mm_cvtps_epi32(rf0); + r1 = _mm_cvtps_epi32(rf1); + + _mm_storeu_si128((__m128i*)(dst + x), r0); + _mm_storeu_si128((__m128i*)(dst + x + 4), r1); + } + } #elif CV_NEON float32x4_t v_shift = vdupq_n_f32(shift); for(; x <= size.width - 8; x += 8 ) @@ -2330,24 +2348,6 @@ cvtScale_( const short* src, size_t sstep, } #endif - //We will wait Haswell - /* - #if CV_AVX - if(USE_AVX)//2X - bad variant - { - ////TODO:AVX implementation (optimization?) 
required - __m256 scale256 = _mm256_set1_ps (scale); - __m256 shift256 = _mm256_set1_ps (shift); - for(; x <= size.width - 8; x += 8 ) - { - __m256i buf = _mm256_set_epi32((int)(*(src+x+7)),(int)(*(src+x+6)),(int)(*(src+x+5)),(int)(*(src+x+4)),(int)(*(src+x+3)),(int)(*(src+x+2)),(int)(*(src+x+1)),(int)(*(src+x))); - __m256 r0 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (buf), scale256), shift256); - __m256i res = _mm256_cvtps_epi32(r0); - _mm256_storeu_si256 ((__m256i*)(dst+x), res); - } - } - #endif*/ - for(; x < size.width; x++ ) dst[x] = saturate_cast(src[x]*scale + shift); } diff --git a/modules/core/src/precomp.hpp b/modules/core/src/precomp.hpp index ef154400e..0f85cc556 100644 --- a/modules/core/src/precomp.hpp +++ b/modules/core/src/precomp.hpp @@ -192,6 +192,7 @@ struct NoVec extern volatile bool USE_SSE2; extern volatile bool USE_SSE4_2; extern volatile bool USE_AVX; +extern volatile bool USE_AVX2; enum { BLOCK_SIZE = 1024 }; diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index d9a20873f..11bbab3a2 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -82,6 +82,22 @@ pop ebx } } + static void __cpuidex(int* cpuid_data, int, int) + { + __asm + { + push edi + mov edi, cpuid_data + mov eax, 7 + mov ecx, 0 + cpuid + mov [edi], eax + mov [edi + 4], ebx + mov [edi + 8], ecx + mov [edi + 12], edx + pop edi + } + } #endif #endif @@ -203,7 +219,7 @@ struct HWFeatures enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE }; HWFeatures(void) - { + { memset( have, 0, sizeof(have) ); x86_family = 0; } @@ -251,6 +267,40 @@ struct HWFeatures f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0; f.have[CV_CPU_POPCNT] = (cpuid_data[2] & (1<<23)) != 0; f.have[CV_CPU_AVX] = (((cpuid_data[2] & (1<<28)) != 0)&&((cpuid_data[2] & (1<<27)) != 0));//OS uses XSAVE_XRSTORE and CPU support AVX + + // make the second call to the cpuid command in order to get + // information about extended features like AVX2 + #if defined _MSC_VER && (defined _M_IX86 || defined _M_X64) + __cpuidex(cpuid_data, 7, 0); + #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) + #ifdef __x86_64__ + asm __volatile__ + ( + "movl $7, %%eax\n\t" + "movl $0, %%ecx\n\t" + "cpuid\n\t" + :[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3]) + : + : "cc" + ); + #else + asm volatile + ( + "pushl %%eax\n\t" + "pushl %%edx\n\t" + "movl $7,%%eax\n\t" + "movl $0,%%ecx\n\t" + "cpuid\n\t" + "popl %%edx\n\t" + "popl %%eax\n\t" + : "=b"(cpuid_data[1]), "=c"(cpuid_data[2]) + : + : "cc" + ); + #endif + #endif + f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0; + } return f; @@ -290,6 +340,7 @@ IPPInitializer ippInitializer; volatile bool USE_SSE2 = featuresEnabled.have[CV_CPU_SSE2]; volatile bool USE_SSE4_2 = featuresEnabled.have[CV_CPU_SSE4_2]; volatile bool USE_AVX = featuresEnabled.have[CV_CPU_AVX]; +volatile bool USE_AVX2 = featuresEnabled.have[CV_CPU_AVX2]; void setUseOptimized( bool flag ) { diff --git a/modules/ts/src/ts_func.cpp b/modules/ts/src/ts_func.cpp index 7745c86c5..53b62e74d 100644 --- a/modules/ts/src/ts_func.cpp +++ b/modules/ts/src/ts_func.cpp @@ -3019,6 +3019,9 @@ void printVersionInfo(bool useStdOut) #if CV_AVX if (checkHardwareSupport(CV_CPU_AVX)) cpu_features += " avx"; #endif +#if CV_AVX2 + if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2"; +#endif #if CV_NEON cpu_features += " neon"; // NEON is currently not checked at runtime #endif From a2a8ba17fcba6d33fec1f78d53d3e7d2f3531181 Mon Sep 17 00:00:00 2001 
From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:28 +0300 Subject: [PATCH 04/98] compare --- modules/core/src/arithm.cpp | 124 ++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 68c8979a8..a9bf3d7e7 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -3268,6 +3268,130 @@ struct Cmp_SIMD uint8x8_t v_mask; }; +#elif CV_SSE2 + +template <> +struct Cmp_SIMD +{ + explicit Cmp_SIMD(int code_) : + code(code_) + { + CV_Assert(code == CMP_GT || code == CMP_LE || + code == CMP_EQ || code == CMP_NE); + + haveSSE = checkHardwareSupport(CV_CPU_SSE2); + + v_mask = _mm_set1_epi8(0xff); + } + + int operator () (const schar * src1, const schar * src2, uchar * dst, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + if (code == CMP_GT) + for ( ; x <= width - 16; x += 16) + _mm_storeu_si128((__m128i *)(dst + x), _mm_cmpgt_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x)))); + else if (code == CMP_LE) + for ( ; x <= width - 16; x += 16) + { + __m128i v_gt = _mm_cmpgt_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + _mm_storeu_si128((__m128i *)(dst + x), _mm_xor_si128(v_mask, v_gt)); + } + else if (code == CMP_EQ) + for ( ; x <= width - 16; x += 16) + _mm_storeu_si128((__m128i *)(dst + x), _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x)))); + else if (code == CMP_NE) + for ( ; x <= width - 16; x += 16) + { + __m128i v_eq = _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + _mm_storeu_si128((__m128i *)(dst + x), _mm_xor_si128(v_mask, v_eq)); + } + + return x; + } + + int code; + __m128i v_mask; + bool haveSSE; +}; + +template <> +struct Cmp_SIMD +{ + explicit Cmp_SIMD(int code_) : + code(code_) + { + CV_Assert(code == CMP_GT || code == CMP_LE || + code == CMP_EQ || code == CMP_NE); + + haveSSE = checkHardwareSupport(CV_CPU_SSE2); + + v_mask = _mm_set1_epi32(0xffffffff); + } + + int operator () (const int * src1, const int * src2, uchar * dst, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + if (code == CMP_GT) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask)); + } + else if (code == CMP_LE) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_xor_si128(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask), v_mask)); + } + else if (code == CMP_EQ) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), 
_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask)); + } + else if (code == CMP_NE) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_xor_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask))); + } + + return x; + } + + int code; + __m128i v_mask; + bool haveSSE; +}; + #endif template static void From 6ab928fb396d095ce459d106eded6cf8979d91ef Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:28 +0300 Subject: [PATCH 05/98] phase 64f --- modules/core/src/mathfuncs.cpp | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index 1c045f3fa..b22526ccb 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -593,14 +593,40 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre { const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1]; double *angle = (double*)ptrs[2]; - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + for ( ; k <= len - 4; k += 4) + { + __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), + _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); + __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), + _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); + + _mm_storeu_ps(buf[0] + k, v_dst0); + _mm_storeu_ps(buf[1] + k, v_dst1); + } +#endif + + for( ; k < len; k++ ) { buf[0][k] = (float)x[k]; buf[1][k] = (float)y[k]; } FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees ); - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + for ( ; k <= len - 4; k += 4) + { + __m128 v_src = _mm_loadu_ps(buf[0] + k); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + } +#endif + + for( ; k < len; k++ ) angle[k] = buf[0][k]; } ptrs[0] += len*esz1; From 0a5c9cf145707669b49adeff50a9d6f335a8de1e Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:28 +0300 Subject: [PATCH 06/98] magnitude 64f --- modules/core/src/mathfuncs.cpp | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index b22526ccb..5c83e62ea 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -724,14 +724,40 @@ void cartToPolar( InputArray src1, InputArray src2, double *angle = (double*)ptrs[3]; Magnitude_64f(x, y, (double*)ptrs[2], len); - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + for ( ; k <= len - 4; k += 4) + { + __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), + _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); + __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), + _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); + + _mm_storeu_ps(buf[0] + k, v_dst0); + _mm_storeu_ps(buf[1] + k, v_dst1); + } +#endif + + for( ; k < len; k++ ) { buf[0][k] = (float)x[k]; buf[1][k] = (float)y[k]; } FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees ); - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + for ( ; k <= len - 4; k += 4) + { + __m128 v_src = _mm_loadu_ps(buf[0] + k); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); + 
_mm_storeu_pd(angle + k, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + } +#endif + + for( ; k < len; k++ ) angle[k] = buf[0][k]; } ptrs[0] += len*esz1; From 972ff1d0c438ac83f4ef06cbc5b80f86fbdb2ae7 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:28 +0300 Subject: [PATCH 07/98] polarToCart --- modules/core/src/mathfuncs.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index 5c83e62ea..cb574e6cc 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -966,6 +966,13 @@ void polarToCart( InputArray src1, InputArray src2, vst1q_f32(x + k, vmulq_f32(vld1q_f32(x + k), v_m)); vst1q_f32(y + k, vmulq_f32(vld1q_f32(y + k), v_m)); } + #elif CV_SSE2 + for( ; k <= len - 4; k += 4 ) + { + __m128 v_m = _mm_loadu_ps(mag + k); + _mm_storeu_ps(x + k, _mm_mul_ps(_mm_loadu_ps(x + k), v_m)); + _mm_storeu_ps(y + k, _mm_mul_ps(_mm_loadu_ps(y + k), v_m)); + } #endif for( ; k < len; k++ ) From 55780889834f765f83271241f402272dd406143b Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:28 +0300 Subject: [PATCH 08/98] countNonZero --- modules/core/src/stat.cpp | 125 ++++++++++++++++++++++++++++++++------ 1 file changed, 106 insertions(+), 19 deletions(-) diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index ca56a7c96..530e3205b 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -396,6 +396,27 @@ static int countNonZero_(const T* src, int len ) return nz; } +static const uchar * initPopcountTable() +{ + static uchar tab[256]; + static volatile bool initialized = false; + if( !initialized ) + { + // we compute inverse popcount table, + // since we pass (img[x] == 0) mask as index in the table. + for( int j = 0; j < 256; j++ ) + { + int val = 0; + for( int mask = 1; mask < 256; mask += mask ) + val += (j & mask) == 0; + tab[j] = (uchar)val; + } + initialized = true; + } + + return tab; +} + static int countNonZero8u( const uchar* src, int len ) { int i=0, nz = 0; @@ -403,21 +424,7 @@ static int countNonZero8u( const uchar* src, int len ) if(USE_SSE2)//5x-6x { __m128i pattern = _mm_setzero_si128 (); - static uchar tab[256]; - static volatile bool initialized = false; - if( !initialized ) - { - // we compute inverse popcount table, - // since we pass (img[x] == 0) mask as index in the table. 
- for( int j = 0; j < 256; j++ ) - { - int val = 0; - for( int mask = 1; mask < 256; mask += mask ) - val += (j & mask) == 0; - tab[j] = (uchar)val; - } - initialized = true; - } + static const uchar * tab = initPopcountTable(); for (; i<=len-16; i+=16) { @@ -467,7 +474,22 @@ static int countNonZero8u( const uchar* src, int len ) static int countNonZero16u( const ushort* src, int len ) { int i = 0, nz = 0; -#if CV_NEON +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128 (); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((const __m128i*)(src + i)); + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_cmpeq_epi16(v_src, v_zero), v_zero)); + nz += tab[val]; + } + + src += i; + } +#elif CV_NEON int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6; uint32x4_t v_nz = vdupq_n_u32(0u); uint16x8_t v_zero = vdupq_n_u16(0), v_1 = vdupq_n_u16(1); @@ -503,7 +525,27 @@ static int countNonZero16u( const ushort* src, int len ) static int countNonZero32s( const int* src, int len ) { int i = 0, nz = 0; -#if CV_NEON +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128 (); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((const __m128i*)(src + i)); + __m128i v_dst0 = _mm_cmpeq_epi32(v_src, v_zero); + + v_src = _mm_loadu_si128((const __m128i*)(src + i + 4)); + __m128i v_dst1 = _mm_cmpeq_epi32(v_src, v_zero); + + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero)); + nz += tab[val]; + } + + src += i; + } +#elif CV_NEON int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6; uint32x4_t v_nz = vdupq_n_u32(0u); int32x4_t v_zero = vdupq_n_s32(0.0f); @@ -541,7 +583,25 @@ static int countNonZero32s( const int* src, int len ) static int countNonZero32f( const float* src, int len ) { int i = 0, nz = 0; -#if CV_NEON +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero_i = _mm_setzero_si128(); + __m128 v_zero_f = _mm_setzero_ps(); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_dst0 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i), v_zero_f)); + __m128i v_dst1 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i + 4), v_zero_f)); + + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i)); + nz += tab[val]; + } + + src += i; + } +#elif CV_NEON int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6; uint32x4_t v_nz = vdupq_n_u32(0u); float32x4_t v_zero = vdupq_n_f32(0.0f); @@ -577,7 +637,34 @@ static int countNonZero32f( const float* src, int len ) } static int countNonZero64f( const double* src, int len ) -{ return countNonZero_(src, len); } +{ + int i = 0, nz = 0; +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero_i = _mm_setzero_si128(); + __m128d v_zero_d = _mm_setzero_pd(); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_dst0 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i), v_zero_d)); + __m128i v_dst1 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 2), v_zero_d)); + __m128i v_dst2 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 4), v_zero_d)); + __m128i v_dst3 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 6), v_zero_d)); + + v_dst0 = _mm_packs_epi32(v_dst0, v_dst1); + v_dst1 = _mm_packs_epi32(v_dst2, v_dst3); + + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, 
v_dst1), v_zero_i)); + nz += tab[val]; + } + + src += i; + } +#endif + return nz + countNonZero_(src, len - i); +} typedef int (*CountNonZeroFunc)(const uchar*, int); From 3a78a2273331eb8ac319b55fdda760e886bfd574 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 09/98] convertScaleAbs for s8, f64 --- modules/core/src/convert.cpp | 80 +++++++++++++++++++++++++++ modules/core/src/mathfuncs.cpp | 4 +- modules/core/test/ocl/test_arithm.cpp | 2 +- 3 files changed, 83 insertions(+), 3 deletions(-) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 55f08f1bd..865ec3dc2 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -1123,6 +1123,48 @@ struct cvtScaleAbs_SIMD } }; +template <> +struct cvtScaleAbs_SIMD +{ + int operator () (const schar * src, uchar * dst, int width, + float scale, float shift) const + { + int x = 0; + + if (USE_SSE2) + { + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift), + v_zero_f = _mm_setzero_ps(); + __m128i v_zero_i = _mm_setzero_si128(); + + for ( ; x <= width - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x)); + __m128i v_src_12 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero_i, v_src), 8), + v_src_34 = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero_i, v_src), 8); + __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift); + v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1); + __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift); + v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2); + __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift); + v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3); + __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift); + v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4); + + __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), + _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4))); + _mm_storeu_si128((__m128i *)(dst + x), v_dst_i); + } + } + + return x; + } +}; + template <> struct cvtScaleAbs_SIMD { @@ -1242,6 +1284,44 @@ struct cvtScaleAbs_SIMD } }; +template <> +struct cvtScaleAbs_SIMD +{ + int operator () (const double * src, uchar * dst, int width, + float scale, float shift) const + { + int x = 0; + + if (USE_SSE2) + { + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift), + v_zero_f = _mm_setzero_ps(); + __m128i v_zero_i = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_src2 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + + __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(v_src1, v_scale), v_shift); + v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1); + + __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(v_src2, v_scale), v_shift); + v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2); + + __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst1), + _mm_cvtps_epi32(v_dst2)); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i)); + } + } + + return x; + } +}; + 
#elif CV_NEON template <> diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index cb574e6cc..d3d09c338 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -622,7 +622,7 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre { __m128 v_src = _mm_loadu_ps(buf[0] + k); _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); - _mm_storeu_pd(angle + k, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); } #endif @@ -753,7 +753,7 @@ void cartToPolar( InputArray src1, InputArray src2, { __m128 v_src = _mm_loadu_ps(buf[0] + k); _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); - _mm_storeu_pd(angle + k, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); } #endif diff --git a/modules/core/test/ocl/test_arithm.cpp b/modules/core/test/ocl/test_arithm.cpp index d0d3847be..0541819f8 100644 --- a/modules/core/test/ocl/test_arithm.cpp +++ b/modules/core/test/ocl/test_arithm.cpp @@ -1577,7 +1577,7 @@ PARAM_TEST_CASE(ConvertScaleAbs, MatDepth, Channels, bool) Size roiSize = randomSize(1, MAX_VALUE); Border srcBorder = randomBorder(0, use_roi ? MAX_VALUE : 0); - randomSubMat(src, src_roi, roiSize, srcBorder, stype, 2, 11); // FIXIT: Test with minV, maxV + randomSubMat(src, src_roi, roiSize, srcBorder, stype, -11, 11); // FIXIT: Test with minV, maxV Border dstBorder = randomBorder(0, use_roi ? MAX_VALUE : 0); randomSubMat(dst, dst_roi, roiSize, dstBorder, dtype, 5, 16); From b758dbd38493045b8c72cd1d1c6346f4e9edf5d4 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 10/98] convertTo AVX2 --- modules/core/src/convert.cpp | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 865ec3dc2..ffc2d744e 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -2377,18 +2377,20 @@ cvtScale_( const short* src, size_t sstep, #if CV_AVX2 if (USE_AVX2) { - __m256 scale256 = _mm256_set1_ps (scale); - __m256 shift256 = _mm256_set1_ps (shift); - __m256i zero = _mm256_setzero_si256(); + __m256 scale256 = _mm256_set1_ps(scale); + __m256 shift256 = _mm256_set1_ps(shift); + int shuffle = 0xD8; + for ( ; x <= size.width - 16; x += 16) { - __m256i v_src = _mm256_loadu_si256((__m256i const *)(src + x)); - __m256i v_src_lo = _mm256_unpacklo_epi16(v_src, zero); - __m256i v_src_hi = _mm256_unpackhi_epi16(v_src, zero); - __m256 v_dst0 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (v_src_lo), scale256), shift256); - __m256 v_dst1 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (v_src_hi), scale256), shift256); - _mm256_storeu_si256 ((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0)); - _mm256_storeu_si256 ((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1)); + __m256i v_src = _mm256_loadu_si256((const __m256i *)(src + x)); + v_src = _mm256_permute4x64_epi64(v_src, shuffle); + __m256i v_src_lo = _mm256_srai_epi32(_mm256_unpacklo_epi16(v_src, v_src), 16); + __m256i v_src_hi = _mm256_srai_epi32(_mm256_unpackhi_epi16(v_src, v_src), 16); + __m256 v_dst0 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_lo), scale256), shift256); + __m256 v_dst1 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_hi), 
scale256), shift256); + _mm256_storeu_si256((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0)); + _mm256_storeu_si256((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1)); } } #endif @@ -2399,17 +2401,15 @@ cvtScale_( const short* src, size_t sstep, __m128 shift128 = _mm_set1_ps (shift); for(; x <= size.width - 8; x += 8 ) { - __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x)); - __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4)); + __m128i r0 = _mm_loadu_si128((const __m128i*)(src + x)); + __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16)); - __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16)); + __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(r0, r0), 16)); rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128); rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128); - r0 = _mm_cvtps_epi32(rf0); - r1 = _mm_cvtps_epi32(rf1); - _mm_storeu_si128((__m128i*)(dst + x), r0); - _mm_storeu_si128((__m128i*)(dst + x + 4), r1); + _mm_storeu_si128((__m128i*)(dst + x), _mm_cvtps_epi32(rf0)); + _mm_storeu_si128((__m128i*)(dst + x + 4), _mm_cvtps_epi32(rf1)); } } #elif CV_NEON From 19e77e47876c2e04086841f1d79c1d46acde009b Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 11/98] convertTo from 8u --- modules/core/perf/perf_convertTo.cpp | 4 +- modules/core/src/convert.cpp | 234 ++++++++++++++++++++++++++- 2 files changed, 235 insertions(+), 3 deletions(-) diff --git a/modules/core/perf/perf_convertTo.cpp b/modules/core/perf/perf_convertTo.cpp index 800736122..7892a7642 100644 --- a/modules/core/perf/perf_convertTo.cpp +++ b/modules/core/perf/perf_convertTo.cpp @@ -13,8 +13,8 @@ PERF_TEST_P( Size_DepthSrc_DepthDst_Channels_alpha, convertTo, testing::Combine ( testing::Values(szVGA, sz1080p), - testing::Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F), - testing::Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F), + testing::Values(CV_8U), + testing::Values(CV_16U), testing::Values(1, 4), testing::Values(1.0, 1./255) ) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index ffc2d744e..a92ff47e1 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -1569,7 +1569,239 @@ struct cvtScale_SIMD } }; -#if CV_NEON +#if CV_SSE2 + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 
8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), 
v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, double * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); + _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + + _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); + _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + } + + return x; + } +}; + +#elif CV_NEON // from uchar From 116fb275a8b0ca268dc7a1bab5b3abb116507bc8 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 12/98] convertTo from 8s --- modules/core/src/convert.cpp | 234 +++++++++++++++++++++++++++++++++++ 1 file changed, 234 insertions(+) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index a92ff47e1..5dae935cc 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -1571,6 +1571,8 @@ struct cvtScale_SIMD #if CV_SSE2 +// from uchar + template <> struct cvtScale_SIMD { @@ -1801,6 +1803,238 @@ struct cvtScale_SIMD } }; +// from schar + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i 
v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int 
operator () (const schar * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, double * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); + _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + + _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); + _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + } + + return x; + } +}; + #elif CV_NEON // from uchar From 8870ef4159462fde2f733bfd90eb61ebc025b5d1 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 13/98] convertTo from 16u, 16s, 32s, 32f --- modules/core/src/convert.cpp | 904 +++++++++++++++++++++++++++++++++++ 1 file changed, 904 insertions(+) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 5dae935cc..5ef238e46 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -2035,6 +2035,910 @@ struct cvtScale_SIMD } }; +// from ushort + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + 
__m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 
v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, double * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); + _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + + _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); + _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + } + + return x; + } +}; + +// from short + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () 
(const short * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, double * dst, int width, float scale, float shift) 
const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); + _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + + _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); + _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + } + + return x; + } +}; + +// from int + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const int * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, short * dst, 
int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, double * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); + _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + + _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); + _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + } + + return x; + } +}; + +// from float + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 
v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const float * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = 
_mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, double * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); + _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + + _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); + _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( + _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + } + + return x; + } +}; + #elif CV_NEON // from uchar From 63fc6ef316d35ae3ac851aba5b615f4170cbcc98 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 14/98] convertTo from 64f --- modules/core/perf/perf_convertTo.cpp | 4 +- modules/core/src/convert.cpp | 419 ++++++++++++++++++++------- 2 files changed, 314 insertions(+), 109 deletions(-) diff --git a/modules/core/perf/perf_convertTo.cpp b/modules/core/perf/perf_convertTo.cpp index 7892a7642..800736122 100644 --- a/modules/core/perf/perf_convertTo.cpp +++ b/modules/core/perf/perf_convertTo.cpp @@ -13,8 +13,8 @@ PERF_TEST_P( Size_DepthSrc_DepthDst_Channels_alpha, convertTo, testing::Combine ( testing::Values(szVGA, sz1080p), - testing::Values(CV_8U), - testing::Values(CV_16U), + testing::Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F), + testing::Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F), testing::Values(1, 4), testing::Values(1.0, 1./255) ) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 5ef238e46..ef8edee6a 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -1769,9 +1769,9 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const uchar * src, double * dst, int width, float scale, float shift) const + int operator () (const uchar * src, double * dst, int width, double scale, double shift) const { int x = 0; @@ -1779,24 +1779,23 @@ struct cvtScale_SIMD return x; __m128i v_zero = _mm_setzero_si128(); - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); for ( ; x <= width - 8; x += 8) { __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); - __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + __m128i v_src_s32 = _mm_unpacklo_epi16(v_src, v_zero); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); - _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); - _mm_storeu_pd(dst + x + 4, 
_mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); - - _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); - _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + v_src_s32 = _mm_unpackhi_epi16(v_src, v_zero); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); } return x; @@ -2001,9 +2000,9 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const schar * src, double * dst, int width, float scale, float shift) const + int operator () (const schar * src, double * dst, int width, double scale, double shift) const { int x = 0; @@ -2011,24 +2010,24 @@ struct cvtScale_SIMD return x; __m128i v_zero = _mm_setzero_si128(); - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); for ( ; x <= width - 8; x += 8) { - __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); - __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + __m128i v_src = _mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))); + v_src = _mm_srai_epi16(v_src, 8); - v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + __m128i v_src_s32 = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); - _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); - _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); - - _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); - _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + v_src_s32 = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); } return x; @@ -2233,9 +2232,9 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const ushort * src, double * dst, int width, float scale, float shift) const + int operator () (const ushort * src, double * dst, int width, double scale, double shift) const { int x = 0; @@ -2243,24 +2242,23 @@ struct cvtScale_SIMD return x; __m128i v_zero = _mm_setzero_si128(); - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); for ( ; x <= width - 8; x += 8) { __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); - __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, 
v_zero)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + __m128i v_src_s32 = _mm_unpacklo_epi16(v_src, v_zero); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); - _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); - _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); - - _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); - _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + v_src_s32 = _mm_unpackhi_epi16(v_src, v_zero); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); } return x; @@ -2465,9 +2463,9 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const short * src, double * dst, int width, float scale, float shift) const + int operator () (const short * src, double * dst, int width, double scale, double shift) const { int x = 0; @@ -2475,24 +2473,23 @@ struct cvtScale_SIMD return x; __m128i v_zero = _mm_setzero_si128(); - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); for ( ; x <= width - 8; x += 8) { __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); - __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + __m128i v_src_s32 = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); - _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); - _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); - - _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); - _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + v_src_s32 = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); } return x; @@ -2631,27 +2628,29 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const int * src, int * dst, int width, float scale, float shift) const + int operator () (const int * src, int * dst, int width, double scale, double shift) const { int x = 0; if (!USE_SSE2) return x; - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); - for ( ; x <= width - 8; x += 8) + for ( ; x <= width - 
4; x += 4) { __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); - v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + v_src = _mm_srli_si128(v_src, 8); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); - _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); - _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + __m128 v_dst = _mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_dst_0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_dst_1))); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_castps_si128(v_dst)); } return x; @@ -2659,27 +2658,27 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const int * src, float * dst, int width, float scale, float shift) const + int operator () (const int * src, float * dst, int width, double scale, double shift) const { int x = 0; if (!USE_SSE2) return x; - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); - for ( ; x <= width - 8; x += 8) + for ( ; x <= width - 4; x += 4) { __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); - v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + v_src = _mm_srli_si128(v_src, 8); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); - _mm_storeu_ps(dst + x, v_dst_0); - _mm_storeu_ps(dst + x + 4, v_dst_1); + _mm_storeu_ps(dst + x, _mm_movelh_ps(_mm_cvtpd_ps(v_dst_0), + _mm_cvtpd_ps(v_dst_1))); } return x; @@ -2687,32 +2686,27 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const int * src, double * dst, int width, float scale, float shift) const + int operator () (const int * src, double * dst, int width, double scale, double shift) const { int x = 0; if (!USE_SSE2) return x; - __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); - for ( ; x <= width - 8; x += 8) + for ( ; x <= width - 4; x += 4) { __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); - v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + v_src = _mm_srli_si128(v_src, 8); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); - _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); - _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); - - _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); - _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); } return x; @@ -2890,16 +2884,11 @@ struct 
cvtScale_SIMD __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); - for ( ; x <= width - 8; x += 8) + for ( ; x <= width - 4; x += 4) { __m128 v_src = _mm_loadu_ps(src + x); - __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); - - v_src = _mm_loadu_ps(src + x + 4); - __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); - - _mm_storeu_ps(dst + x, v_dst_0); - _mm_storeu_ps(dst + x + 4, v_dst_1); + __m128 v_dst = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + _mm_storeu_ps(dst + x, v_dst); } return x; @@ -2907,9 +2896,144 @@ struct cvtScale_SIMD }; template <> -struct cvtScale_SIMD +struct cvtScale_SIMD { - int operator () (const float * src, double * dst, int width, float scale, float shift) const + int operator () (const float * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtps_pd(v_src), v_scale), v_shift); + v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtps_pd(v_src), v_scale), v_shift); + + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + } + + return x; + } +}; + +// from double + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const double * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x 
+= 8) + { + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, short * dst, int width, float scale, float shift) const { int x = 0; @@ -2920,19 +3044,100 @@ struct cvtScale_SIMD for ( ; x <= width - 8; x += 8) { - __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); - v_src = _mm_loadu_ps(src + x + 4); + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); - _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0)); - _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_0), 16)))); + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } - _mm_storeu_pd(dst + x + 8, _mm_cvtps_pd(v_dst_1)); - _mm_storeu_pd(dst + x + 12, _mm_cvtps_pd(_mm_castsi128_ps( - _mm_srli_si128(_mm_castps_si128(v_dst_1), 16)))); + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, int * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128d v_src = _mm_loadu_pd(src + x); + __m128d v_dst0 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + v_src = _mm_loadu_pd(src + x + 2); + __m128d v_dst1 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + __m128 v_dst = _mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_dst0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_dst1))); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_castps_si128(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, float * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128d v_src = _mm_loadu_pd(src + x); + __m128d v_dst0 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + v_src = _mm_loadu_pd(src + x + 2); + __m128d v_dst1 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + __m128 v_dst = _mm_movelh_ps(_mm_cvtpd_ps(v_dst0), + _mm_cvtpd_ps(v_dst1)); + + _mm_storeu_ps(dst + x, v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 2; x += 2) + { + __m128d v_src = _mm_loadu_pd(src + x); + __m128d v_dst = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst); } return x; From 
56f3c92737de6a7f55c128e88fc4362d59845586 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 15/98] pyrUp and pyrDown --- modules/imgproc/src/pyramids.cpp | 328 ++++++++++++++++++++++++++++++- 1 file changed, 322 insertions(+), 6 deletions(-) diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index e510530af..5425de11c 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -183,13 +183,329 @@ struct PyrDownVec_32f } }; -typedef PyrDownNoVec PyrDownVec_32s16u; -typedef PyrDownNoVec PyrDownVec_32s16s; +#if CV_SSE4_1 -typedef PyrUpNoVec PyrUpVec_32s8u; -typedef PyrUpNoVec PyrUpVec_32s16s; -typedef PyrUpNoVec PyrUpVec_32s16u; -typedef PyrUpNoVec PyrUpVec_32f; +struct PyrDownVec_32s16u +{ + PyrDownVec_32s16u() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator()(int** src, ushort* dst, int, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + __m128i v_delta = _mm_set1_epi32(128); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r00 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r01 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + __m128i v_r10 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r11 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + __m128i v_r20 = _mm_loadu_si128((__m128i const *)(row2 + x)), + v_r21 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + __m128i v_r30 = _mm_loadu_si128((__m128i const *)(row3 + x)), + v_r31 = _mm_loadu_si128((__m128i const *)(row3 + x + 4)); + __m128i v_r40 = _mm_loadu_si128((__m128i const *)(row4 + x)), + v_r41 = _mm_loadu_si128((__m128i const *)(row4 + x + 4)); + + v_r00 = _mm_add_epi32(_mm_add_epi32(v_r00, v_r40), _mm_add_epi32(v_r20, v_r20)); + v_r10 = _mm_add_epi32(_mm_add_epi32(v_r10, v_r20), v_r30); + + v_r10 = _mm_slli_epi32(v_r10, 2); + __m128i v_dst0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(v_r00, v_r10), v_delta), 8); + + v_r01 = _mm_add_epi32(_mm_add_epi32(v_r01, v_r41), _mm_add_epi32(v_r21, v_r21)); + v_r11 = _mm_add_epi32(_mm_add_epi32(v_r11, v_r21), v_r31); + v_r11 = _mm_slli_epi32(v_r11, 2); + __m128i v_dst1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(v_r01, v_r11), v_delta), 8); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_packus_epi32(v_dst0, v_dst1)); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +struct PyrDownVec_32s16s +{ + PyrDownVec_32s16s() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE2); + } + + int operator()(int** src, short* dst, int, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + __m128i v_delta = _mm_set1_epi32(128); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r00 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r01 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + __m128i v_r10 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r11 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + __m128i v_r20 = _mm_loadu_si128((__m128i const *)(row2 + x)), + v_r21 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + __m128i v_r30 = _mm_loadu_si128((__m128i const *)(row3 + x)), + v_r31 = _mm_loadu_si128((__m128i const *)(row3 + x + 4)); + __m128i v_r40 = _mm_loadu_si128((__m128i const *)(row4 + x)), + v_r41 = _mm_loadu_si128((__m128i const *)(row4 + x + 4)); + + v_r00 = _mm_add_epi32(_mm_add_epi32(v_r00, v_r40), 
_mm_add_epi32(v_r20, v_r20)); + v_r10 = _mm_add_epi32(_mm_add_epi32(v_r10, v_r20), v_r30); + + v_r10 = _mm_slli_epi32(v_r10, 2); + __m128i v_dst0 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(v_r00, v_r10), v_delta), 8); + + v_r01 = _mm_add_epi32(_mm_add_epi32(v_r01, v_r41), _mm_add_epi32(v_r21, v_r21)); + v_r11 = _mm_add_epi32(_mm_add_epi32(v_r11, v_r21), v_r31); + v_r11 = _mm_slli_epi32(v_r11, 2); + __m128i v_dst1 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(v_r01, v_r11), v_delta), 8); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_packs_epi32(v_dst0, v_dst1)); + } + + return x; + } + + bool haveSSE; +}; + + +struct PyrUpVec_32s8u +{ + int operator()(int** src, uchar** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE2)) + return x; + + uchar *dst0 = dst[0], *dst1 = dst[1]; + const uint *row0 = (uint *)src[0], *row1 = (uint *)src[1], *row2 = (uint *)src[2]; + __m128i v_delta = _mm_set1_epi16(32), v_zero = _mm_setzero_si128(); + + for( ; x <= width - 16; x += 16 ) + { + __m128i v_r0 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row0 + x)), + _mm_loadu_si128((__m128i const *)(row0 + x + 4))); + __m128i v_r1 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row1 + x)), + _mm_loadu_si128((__m128i const *)(row1 + x + 4))); + __m128i v_r2 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row2 + x)), + _mm_loadu_si128((__m128i const *)(row2 + x + 4))); + + __m128i v_2r1 = _mm_adds_epu16(v_r1, v_r1), v_4r1 = _mm_adds_epu16(v_2r1, v_2r1); + __m128i v_dst00 = _mm_adds_epu16(_mm_adds_epu16(v_r0, v_r2), _mm_adds_epu16(v_2r1, v_4r1)); + __m128i v_dst10 = _mm_slli_epi16(_mm_adds_epu16(v_r1, v_r2), 2); + + v_r0 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row0 + x + 8)), + _mm_loadu_si128((__m128i const *)(row0 + x + 12))); + v_r1 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row1 + x + 8)), + _mm_loadu_si128((__m128i const *)(row1 + x + 12))); + v_r2 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row2 + x + 8)), + _mm_loadu_si128((__m128i const *)(row2 + x + 12))); + + v_2r1 = _mm_adds_epu16(v_r1, v_r1), v_4r1 = _mm_adds_epu16(v_2r1, v_2r1); + __m128i v_dst01 = _mm_adds_epu16(_mm_adds_epu16(v_r0, v_r2), _mm_adds_epu16(v_2r1, v_4r1)); + __m128i v_dst11 = _mm_slli_epi16(_mm_adds_epu16(v_r1, v_r2), 2); + + _mm_storeu_si128((__m128i *)(dst0 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst00, v_delta), 6), + _mm_srli_epi16(_mm_adds_epu16(v_dst01, v_delta), 6))); + _mm_storeu_si128((__m128i *)(dst1 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst10, v_delta), 6), + _mm_srli_epi16(_mm_adds_epu16(v_dst11, v_delta), 6))); + } + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r0 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row0 + x)), + _mm_loadu_si128((__m128i const *)(row0 + x + 4))); + __m128i v_r1 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row1 + x)), + _mm_loadu_si128((__m128i const *)(row1 + x + 4))); + __m128i v_r2 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row2 + x)), + _mm_loadu_si128((__m128i const *)(row2 + x + 4))); + + __m128i v_2r1 = _mm_adds_epu16(v_r1, v_r1), v_4r1 = _mm_adds_epu16(v_2r1, v_2r1); + __m128i v_dst0 = _mm_adds_epu16(_mm_adds_epu16(v_r0, v_r2), _mm_adds_epu16(v_2r1, v_4r1)); + __m128i v_dst1 = _mm_slli_epi16(_mm_adds_epu16(v_r1, v_r2), 2); + + _mm_storel_epi64((__m128i *)(dst0 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst0, v_delta), 6), v_zero)); + _mm_storel_epi64((__m128i *)(dst1 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst1, v_delta), 6), 
v_zero)); + } + + return x; + } +}; + +struct PyrUpVec_32s16s +{ + int operator()(int** src, short** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE2)) + return x; + + short *dst0 = dst[0], *dst1 = dst[1]; + const uint *row0 = (uint *)src[0], *row1 = (uint *)src[1], *row2 = (uint *)src[2]; + __m128i v_delta = _mm_set1_epi32(32), v_zero = _mm_setzero_si128(); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst00 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst10 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + v_2r1 = _mm_slli_epi32(v_r1, 1); + v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst01 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst11 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storeu_si128((__m128i *)(dst0 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst00, v_delta), 6), + _mm_srai_epi32(_mm_add_epi32(v_dst01, v_delta), 6))); + _mm_storeu_si128((__m128i *)(dst1 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst10, v_delta), 6), + _mm_srai_epi32(_mm_add_epi32(v_dst11, v_delta), 6))); + } + + for( ; x <= width - 4; x += 4 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + + __m128i v_dst0 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst1 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storel_epi64((__m128i *)(dst0 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst0, v_delta), 6), v_zero)); + _mm_storel_epi64((__m128i *)(dst1 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst1, v_delta), 6), v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +struct PyrUpVec_32s16u +{ + int operator()(int** src, ushort** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE4_1)) + return x; + + ushort *dst0 = dst[0], *dst1 = dst[1]; + const uint *row0 = (uint *)src[0], *row1 = (uint *)src[1], *row2 = (uint *)src[2]; + __m128i v_delta = _mm_set1_epi32(32), v_zero = _mm_setzero_si128(); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst00 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst10 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + v_2r1 = _mm_slli_epi32(v_r1, 1); + v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst01 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst11 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + 
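+            // v_dst00/v_dst01 now hold the even-row vertical taps r0 + 6*r1 + r2 and
+            // v_dst10/v_dst11 the odd-row taps 4*(r1 + r2) of the pyrUp kernel.
+            // The stores below add the rounding constant 32, shift right by 6
+            // (the kernel's fixed-point normalization) and saturate-pack to ushort
+            // with _mm_packus_epi32, which is why this branch requires SSE4.1.
+            // Illustrative scalar equivalent for one lane:
+            //   dst0[x] = saturate_cast<ushort>((row0[x] + 6*row1[x] + row2[x] + 32) >> 6);
+            //   dst1[x] = saturate_cast<ushort>((4*(row1[x] + row2[x]) + 32) >> 6);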
_mm_storeu_si128((__m128i *)(dst0 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst00, v_delta), 6), + _mm_srli_epi32(_mm_add_epi32(v_dst01, v_delta), 6))); + _mm_storeu_si128((__m128i *)(dst1 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst10, v_delta), 6), + _mm_srli_epi32(_mm_add_epi32(v_dst11, v_delta), 6))); + } + + for( ; x <= width - 4; x += 4 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + + __m128i v_dst0 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst1 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storel_epi64((__m128i *)(dst0 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst0, v_delta), 6), v_zero)); + _mm_storel_epi64((__m128i *)(dst1 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst1, v_delta), 6), v_zero)); + } + + return x; + } +}; + +#endif + +struct PyrUpVec_32f +{ + int operator()(float** src, float** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE2)) + return x; + + const float *row0 = src[0], *row1 = src[1], *row2 = src[2]; + float *dst0 = dst[0], *dst1 = dst[1]; + __m128 v_6 = _mm_set1_ps(6.0f), v_scale = _mm_set1_ps(1.f/64.0f), + v_scale4 = _mm_mul_ps(v_scale, _mm_set1_ps(4.0f)); + + for( ; x <= width - 8; x += 8 ) + { + __m128 v_r0 = _mm_loadu_ps(row0 + x); + __m128 v_r1 = _mm_loadu_ps(row1 + x); + __m128 v_r2 = _mm_loadu_ps(row2 + x); + + _mm_storeu_ps(dst1 + x, _mm_mul_ps(v_scale4, _mm_add_ps(v_r1, v_r2))); + _mm_storeu_ps(dst0 + x, _mm_mul_ps(v_scale, _mm_add_ps(_mm_add_ps(v_r0, _mm_mul_ps(v_6, v_r1)), v_r2))); + + v_r0 = _mm_loadu_ps(row0 + x + 4); + v_r1 = _mm_loadu_ps(row1 + x + 4); + v_r2 = _mm_loadu_ps(row2 + x + 4); + + _mm_storeu_ps(dst1 + x + 4, _mm_mul_ps(v_scale4, _mm_add_ps(v_r1, v_r2))); + _mm_storeu_ps(dst0 + x + 4, _mm_mul_ps(v_scale, _mm_add_ps(_mm_add_ps(v_r0, _mm_mul_ps(v_6, v_r1)), v_r2))); + } + + return x; + } +}; #elif CV_NEON From 1ca35b74248f457122a1cdcf3d2a0dbc8d4de299 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 16/98] resize are fast --- modules/imgproc/src/imgwarp.cpp | 142 +++++++++++++++++++++++++- modules/imgproc/test/test_imgwarp.cpp | 5 +- 2 files changed, 144 insertions(+), 3 deletions(-) diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index c4bb3baa9..1fa4557ca 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -2199,8 +2199,146 @@ private: bool use_simd; }; -typedef ResizeAreaFastNoVec ResizeAreaFastVec_SIMD_16s; -typedef ResizeAreaFastNoVec ResizeAreaFastVec_SIMD_32f; +class ResizeAreaFastVec_SIMD_16s +{ +public: + ResizeAreaFastVec_SIMD_16s(int _cn, int _step) : + cn(_cn), step(_step) + { + use_simd = checkHardwareSupport(CV_CPU_SSE2); + } + + int operator() (const short* S, short* D, int w) const + { + if (!use_simd) + return 0; + + int dx = 0; + const short* S0 = (const short*)S; + const short* S1 = (const short*)((const uchar*)(S) + step); + __m128i masklow = _mm_set1_epi32(0x0000ffff); + __m128i zero = _mm_setzero_si128(); + __m128i delta2 = _mm_set1_epi32(2); + + if (cn == 1) + { + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)S0); + __m128i r1 = _mm_loadu_si128((const __m128i*)S1); + + __m128i s0 = 
_mm_add_epi32(_mm_srai_epi32(r0, 16), + _mm_srai_epi32(_mm_slli_epi32(_mm_and_si128(r0, masklow), 16), 16)); + __m128i s1 = _mm_add_epi32(_mm_srai_epi32(r1, 16), + _mm_srai_epi32(_mm_slli_epi32(_mm_and_si128(r1, masklow), 16), 16)); + s0 = _mm_add_epi32(_mm_add_epi32(s0, s1), delta2); + s0 = _mm_srai_epi32(s0, 2); + s0 = _mm_packs_epi32(s0, zero); + + _mm_storel_epi64((__m128i*)D, s0); + } + } + else if (cn == 3) + for ( ; dx <= w - 4; dx += 3, S0 += 6, S1 += 6, D += 3) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)S0); + __m128i r1 = _mm_loadu_si128((const __m128i*)S1); + + __m128i r0_16l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r0), 16); + __m128i r0_16h = _mm_srai_epi32(_mm_unpacklo_epi16(zero, _mm_srli_si128(r0, 6)), 16); + __m128i r1_16l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r1), 16); + __m128i r1_16h = _mm_srai_epi32(_mm_unpacklo_epi16(zero, _mm_srli_si128(r1, 6)), 16); + + __m128i s0 = _mm_add_epi32(r0_16l, r0_16h); + __m128i s1 = _mm_add_epi32(r1_16l, r1_16h); + s0 = _mm_add_epi32(delta2, _mm_add_epi32(s0, s1)); + s0 = _mm_packs_epi32(_mm_srai_epi32(s0, 2), zero); + _mm_storel_epi64((__m128i*)D, s0); + } + else + { + CV_Assert(cn == 4); + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)S0); + __m128i r1 = _mm_loadu_si128((const __m128i*)S1); + + __m128i r0_32l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r0), 16); + __m128i r0_32h = _mm_srai_epi32(_mm_unpackhi_epi16(zero, r0), 16); + __m128i r1_32l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r1), 16); + __m128i r1_32h = _mm_srai_epi32(_mm_unpackhi_epi16(zero, r1), 16); + + __m128i s0 = _mm_add_epi32(r0_32l, r0_32h); + __m128i s1 = _mm_add_epi32(r1_32l, r1_32h); + s0 = _mm_add_epi32(s1, _mm_add_epi32(s0, delta2)); + s0 = _mm_packs_epi32(_mm_srai_epi32(s0, 2), zero); + _mm_storel_epi64((__m128i*)D, s0); + } + } + + return dx; + } + +private: + int cn; + int step; + bool use_simd; +}; + +struct ResizeAreaFastVec_SIMD_32f +{ + ResizeAreaFastVec_SIMD_32f(int _scale_x, int _scale_y, int _cn, int _step) : + scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step) + { + fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4); + } + + int operator() (const float * S, float * D, int w) const + { + if (!fast_mode) + return 0; + + const float * S0 = S, * S1 = (const float *)((const uchar *)(S0) + step); + int dx = 0; + + __m128 v_025 = _mm_set1_ps(0.25f); + + if (cn == 1) + { + int shuffle_lo = _MM_SHUFFLE(2, 0, 2, 0), shuffle_hi = _MM_SHUFFLE(3, 1, 3, 1); + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128 v_row00 = _mm_loadu_ps(S0), v_row01 = _mm_loadu_ps(S0 + 4), + v_row10 = _mm_loadu_ps(S1), v_row11 = _mm_loadu_ps(S1 + 4); + + __m128 v_dst0 = _mm_add_ps(_mm_shuffle_ps(v_row00, v_row01, shuffle_lo), + _mm_shuffle_ps(v_row00, v_row01, shuffle_hi)); + __m128 v_dst1 = _mm_add_ps(_mm_shuffle_ps(v_row10, v_row11, shuffle_lo), + _mm_shuffle_ps(v_row10, v_row11, shuffle_hi)); + + _mm_storeu_ps(D, _mm_mul_ps(_mm_add_ps(v_dst0, v_dst1), v_025)); + } + } + else if (cn == 4) + { + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128 v_dst0 = _mm_add_ps(_mm_loadu_ps(S0), _mm_loadu_ps(S0 + 4)); + __m128 v_dst1 = _mm_add_ps(_mm_loadu_ps(S1), _mm_loadu_ps(S1 + 4)); + + _mm_storeu_ps(D, _mm_mul_ps(_mm_add_ps(v_dst0, v_dst1), v_025)); + } + } + + return dx; + } + +private: + int scale_x, scale_y; + int cn; + bool fast_mode; + int step; +}; #else diff --git a/modules/imgproc/test/test_imgwarp.cpp b/modules/imgproc/test/test_imgwarp.cpp 
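For reference, the single-channel case of the new ResizeAreaFastVec_SIMD_16s path above reduces every 2x2 block of the source to one output sample with rounding; a minimal scalar sketch under that assumption (the helper name is illustrative, not part of the patch):

    static inline void areaHalf16sC1(const short* S0, const short* S1, short* D, int dw)
    {
        // S0 and S1 are two consecutive source rows; each output pixel is the
        // rounded mean of a 2x2 block, matching the (sum + 2) >> 2 in the SSE code.
        for (int dx = 0; dx < dw; dx++, S0 += 2, S1 += 2)
            D[dx] = (short)((S0[0] + S0[1] + S1[0] + S1[1] + 2) >> 2);
    }

The test change below adds CV_16SC3 so that the new three-channel short path is exercised as well.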
index 34505c4ca..176c9907f 100644
--- a/modules/imgproc/test/test_imgwarp.cpp
+++ b/modules/imgproc/test/test_imgwarp.cpp
@@ -1595,7 +1595,10 @@ void resizeArea(const cv::Mat & src, cv::Mat & dst)
 TEST(Resize, Area_half)
 {
     const int size = 1000;
-    int types[] = { CV_8UC1, CV_8UC4, CV_16UC1, CV_16UC4, CV_16SC1, CV_16SC4, CV_32FC1, CV_32FC4 };
+    int types[] = { CV_8UC1, CV_8UC4,
+                    CV_16UC1, CV_16UC4,
+                    CV_16SC1, CV_16SC3, CV_16SC4,
+                    CV_32FC1, CV_32FC4 };
 
     cv::RNG rng(17);
 

From bfb45b27e6d5f3b11b4e579fb8d77b3bef7ff1cf Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Mon, 12 Jan 2015 10:59:29 +0300
Subject: [PATCH 17/98] column sum 32s

---
 modules/imgproc/src/smooth.cpp | 150 +++++++++++++++++++++++++++++++++
 1 file changed, 150 insertions(+)

diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp
index 5ab70d9a2..b7c300403 100644
--- a/modules/imgproc/src/smooth.cpp
+++ b/modules/imgproc/src/smooth.cpp
@@ -713,6 +713,156 @@ struct ColumnSum :
     std::vector sum;
 };
 
+template<>
+struct ColumnSum<int, int> :
+    public BaseColumnFilter
+{
+    ColumnSum( int _ksize, int _anchor, double _scale ) :
+        BaseColumnFilter()
+    {
+        ksize = _ksize;
+        anchor = _anchor;
+        scale = _scale;
+        sumCount = 0;
+    }
+
+    virtual void reset() { sumCount = 0; }
+
+    virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
+    {
+        int i;
+        int* SUM;
+        bool haveScale = scale != 1;
+        double _scale = scale;
+
+        #if CV_SSE2
+        bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
+        #endif
+
+        if( width != (int)sum.size() )
+        {
+            sum.resize(width);
+            sumCount = 0;
+        }
+        SUM = &sum[0];
+        if( sumCount == 0 )
+        {
+            memset((void*)SUM, 0, width*sizeof(int));
+            for( ; sumCount < ksize - 1; sumCount++, src++ )
+            {
+                const int* Sp = (const int*)src[0];
+                i = 0;
+                #if CV_SSE2
+                if(haveSSE2)
+                {
+                    for( ; i <= width-4; i+=4 )
+                    {
+                        __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
+                        __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
+                        _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
+                    }
+                }
+                #elif CV_NEON
+                for( ; i <= width - 4; i+=4 )
+                    vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
+                #endif
+                for( ; i < width; i++ )
+                    SUM[i] += Sp[i];
+            }
+        }
+        else
+        {
+            CV_Assert( sumCount == ksize-1 );
+            src += ksize-1;
+        }
+
+        for( ; count--; src++ )
+        {
+            const int* Sp = (const int*)src[0];
+            const int* Sm = (const int*)src[1-ksize];
+            int* D = (int*)dst;
+            if( haveScale )
+            {
+                i = 0;
+                #if CV_SSE2
+                if(haveSSE2)
+                {
+                    const __m128 scale4 = _mm_set1_ps((float)_scale);
+                    for( ; i <= width-4; i+=4 )
+                    {
+                        __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
+
+                        __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
+                                                    _mm_loadu_si128((const __m128i*)(Sp+i)));
+
+                        __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
+
+                        _mm_storeu_si128((__m128i*)(D+i), _s0T);
+                        _mm_storeu_si128((__m128i*)(SUM+i),_mm_sub_epi32(_s0,_sm));
+                    }
+                }
+                #elif CV_NEON
+                float32x4_t v_scale = vdupq_n_f32((float)_scale);
+                for( ; i <= width-4; i+=4 )
+                {
+                    int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
+
+                    int32x4_t v_s0d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
+                    vst1q_s32(D + i, v_s0d);
+
+                    vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
+                }
+                #endif
+                for( ; i < width; i++ )
+                {
+                    int s0 = SUM[i] + Sp[i];
+                    D[i] = saturate_cast<int>(s0*_scale);
+                    SUM[i] = s0 - Sm[i];
+                }
+            }
+            else
+            {
+                i = 0;
+                #if CV_SSE2
+                if(haveSSE2)
+                {
+                    for( ; i <= width-4; i+=4 )
+                    {
+                        __m128i _sm =
_mm_loadu_si128((const __m128i*)(Sm+i)); + __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)), + _mm_loadu_si128((const __m128i*)(Sp+i))); + + _mm_storeu_si128((__m128i*)(D+i), _s0); + _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm)); + } + } + #elif CV_NEON + for( ; i <= width-4; i+=4 ) + { + int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)); + + vst1q_s32(D + i, v_s01); + vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i))); + } + #endif + + for( ; i < width; i++ ) + { + int s0 = SUM[i] + Sp[i]; + D[i] = s0; + SUM[i] = s0 - Sm[i]; + } + } + dst += dststep; + } + } + + double scale; + int sumCount; + std::vector sum; +}; + + template<> struct ColumnSum : public BaseColumnFilter From 5f2135695e9b7d155097bb84abf6c6011ada3652 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 18/98] cvtColor rgb 2 YCrCb --- modules/imgproc/perf/perf_cvt_color.cpp | 168 +----------------- modules/imgproc/src/color.cpp | 225 ++++++++++++++++++++++++ 2 files changed, 234 insertions(+), 159 deletions(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 02622ea80..4bcb698ae 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -56,50 +56,10 @@ enum }; CV_ENUM(CvtMode, - COLOR_BGR2BGR555, COLOR_BGR2BGR565, COLOR_BGR2BGRA, COLOR_BGR2GRAY, - COLOR_BGR2HLS, COLOR_BGR2HLS_FULL, COLOR_BGR2HSV, COLOR_BGR2HSV_FULL, - COLOR_BGR2Lab, COLOR_BGR2Luv, COLOR_BGR2RGB, COLOR_BGR2RGBA, COLOR_BGR2XYZ, - COLOR_BGR2YCrCb, COLOR_BGR2YUV, COLOR_BGR5552BGR, COLOR_BGR5552BGRA, - - COLOR_BGR5552GRAY, COLOR_BGR5552RGB, COLOR_BGR5552RGBA, COLOR_BGR5652BGR, - COLOR_BGR5652BGRA, COLOR_BGR5652GRAY, COLOR_BGR5652RGB, COLOR_BGR5652RGBA, - - COLOR_BGRA2BGR, COLOR_BGRA2BGR555, COLOR_BGRA2BGR565, COLOR_BGRA2GRAY, COLOR_BGRA2RGBA, - CX_BGRA2HLS, CX_BGRA2HLS_FULL, CX_BGRA2HSV, CX_BGRA2HSV_FULL, CX_BGRA2Lab, CX_BGRA2Luv, CX_BGRA2XYZ, CX_BGRA2YCrCb, CX_BGRA2YUV, - - COLOR_GRAY2BGR, COLOR_GRAY2BGR555, COLOR_GRAY2BGR565, COLOR_GRAY2BGRA, - - COLOR_HLS2BGR, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB, COLOR_HLS2RGB_FULL, - CX_HLS2BGRA, CX_HLS2BGRA_FULL, CX_HLS2RGBA, CX_HLS2RGBA_FULL, - - COLOR_HSV2BGR, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB, COLOR_HSV2RGB_FULL, - CX_HSV2BGRA, CX_HSV2BGRA_FULL, CX_HSV2RGBA, CX_HSV2RGBA_FULL, - - COLOR_Lab2BGR, COLOR_Lab2LBGR, COLOR_Lab2LRGB, COLOR_Lab2RGB, - CX_Lab2BGRA, CX_Lab2LBGRA, CX_Lab2LRGBA, CX_Lab2RGBA, - - COLOR_LBGR2Lab, COLOR_LBGR2Luv, COLOR_LRGB2Lab, COLOR_LRGB2Luv, - CX_LBGRA2Lab, CX_LBGRA2Luv, CX_LRGBA2Lab, CX_LRGBA2Luv, - - COLOR_Luv2BGR, COLOR_Luv2LBGR, COLOR_Luv2LRGB, COLOR_Luv2RGB, - CX_Luv2BGRA, CX_Luv2LBGRA, CX_Luv2LRGBA, CX_Luv2RGBA, - - COLOR_RGB2BGR555, COLOR_RGB2BGR565, COLOR_RGB2GRAY, - COLOR_RGB2HLS, COLOR_RGB2HLS_FULL, COLOR_RGB2HSV, COLOR_RGB2HSV_FULL, COLOR_RGB2Lab, COLOR_RGB2Luv, COLOR_RGB2XYZ, COLOR_RGB2YCrCb, COLOR_RGB2YUV, - - COLOR_RGBA2BGR, COLOR_RGBA2BGR555, COLOR_RGBA2BGR565, COLOR_RGBA2GRAY, - CX_RGBA2HLS, CX_RGBA2HLS_FULL, CX_RGBA2HSV, CX_RGBA2HSV_FULL, - CX_RGBA2Lab, CX_RGBA2Luv, CX_RGBA2XYZ, - CX_RGBA2YCrCb, CX_RGBA2YUV, - - COLOR_XYZ2BGR, COLOR_XYZ2RGB, CX_XYZ2BGRA, CX_XYZ2RGBA, - - COLOR_YCrCb2BGR, COLOR_YCrCb2RGB, CX_YCrCb2BGRA, CX_YCrCb2RGBA, - COLOR_YUV2BGR, COLOR_YUV2RGB, CX_YUV2BGRA, CX_YUV2RGBA - ) + CX_RGBA2YCrCb, CX_RGBA2YUV) CV_ENUM(CvtModeBayer, @@ -237,135 +197,25 @@ ChPair getConversionInfo(int cvtMode) return ChPair(0,0); } -typedef std::tr1::tuple Size_CvtMode_t; -typedef 
perf::TestBaseWithParam Size_CvtMode; +typedef perf::TestBaseWithParam Size_CvtMode; PERF_TEST_P(Size_CvtMode, cvtColor8u, - testing::Combine( - testing::Values(::perf::szODD, ::perf::szVGA, ::perf::sz1080p), - CvtMode::all() - ) + testing::Values(::perf::szODD, ::perf::szVGA, ::perf::sz1080p) ) { - Size sz = get<0>(GetParam()); - int _mode = get<1>(GetParam()), mode = _mode; + Size sz = GetParam(); + int mode = COLOR_RGB2YCrCb; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; - Mat src(sz, CV_8UC(ch.scn)); - Mat dst(sz, CV_8UC(ch.dcn)); + Mat src(sz, CV_8UC(3)); + Mat dst(sz, CV_8UC(3)); declare.time(100); declare.in(src, WARMUP_RNG).out(dst); int runs = sz.width <= 320 ? 100 : 5; - TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); + TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, 3); -#if defined(__APPLE__) && defined(HAVE_IPP) - SANITY_CHECK(dst, _mode == CX_BGRA2HLS_FULL ? 2 : 1); -#else - SANITY_CHECK(dst, 1); -#endif -} - -typedef std::tr1::tuple Size_CvtMode_Bayer_t; -typedef perf::TestBaseWithParam Size_CvtMode_Bayer; - -PERF_TEST_P(Size_CvtMode_Bayer, cvtColorBayer8u, - testing::Combine( - testing::Values(::perf::szODD, ::perf::szVGA), - CvtModeBayer::all() - ) - ) -{ - Size sz = get<0>(GetParam()); - int mode = get<1>(GetParam()); - ChPair ch = getConversionInfo(mode); - mode %= COLOR_COLORCVT_MAX; - - Mat src(sz, CV_8UC(ch.scn)); - Mat dst(sz, CV_8UC(ch.dcn)); - - declare.time(100); - declare.in(src, WARMUP_RNG).out(dst); - - TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn); - - SANITY_CHECK(dst, 1); -} - -typedef std::tr1::tuple Size_CvtMode2_t; -typedef perf::TestBaseWithParam Size_CvtMode2; - -PERF_TEST_P(Size_CvtMode2, cvtColorYUV420, - testing::Combine( - testing::Values(szVGA, sz1080p, Size(130, 60)), - CvtMode2::all() - ) - ) -{ - Size sz = get<0>(GetParam()); - int mode = get<1>(GetParam()); - ChPair ch = getConversionInfo(mode); - - Mat src(sz.height + sz.height / 2, sz.width, CV_8UC(ch.scn)); - Mat dst(sz, CV_8UC(ch.dcn)); - - declare.in(src, WARMUP_RNG).out(dst); - - int runs = (sz.width <= 640) ? 8 : 1; - TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); - - SANITY_CHECK(dst, 1); -} - -typedef std::tr1::tuple Size_CvtMode3_t; -typedef perf::TestBaseWithParam Size_CvtMode3; - -PERF_TEST_P(Size_CvtMode3, cvtColorRGB2YUV420p, - testing::Combine( - testing::Values(szVGA, sz720p, sz1080p, Size(130, 60)), - CvtMode3::all() - ) - ) -{ - Size sz = get<0>(GetParam()); - int mode = get<1>(GetParam()); - ChPair ch = getConversionInfo(mode); - - Mat src(sz, CV_8UC(ch.scn)); - Mat dst(sz.height + sz.height / 2, sz.width, CV_8UC(ch.dcn)); - - declare.time(100); - declare.in(src, WARMUP_RNG).out(dst); - - int runs = (sz.width <= 640) ? 
10 : 1; - TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); - - SANITY_CHECK(dst, 1); -} - -CV_ENUM(EdgeAwareBayerMode, COLOR_BayerBG2BGR_EA, COLOR_BayerGB2BGR_EA, COLOR_BayerRG2BGR_EA, COLOR_BayerGR2BGR_EA) - -typedef std::tr1::tuple EdgeAwareParams; -typedef perf::TestBaseWithParam EdgeAwareDemosaicingTest; - -PERF_TEST_P(EdgeAwareDemosaicingTest, demosaicingEA, - testing::Combine( - testing::Values(szVGA, sz720p, sz1080p, Size(130, 60)), - EdgeAwareBayerMode::all() - ) - ) -{ - Size sz = get<0>(GetParam()); - int mode = get<1>(GetParam()); - - Mat src(sz, CV_8UC1); - Mat dst(sz, CV_8UC3); - - declare.in(src, WARMUP_RNG).out(dst); - - TEST_CYCLE() cvtColor(src, dst, mode, 3); - - SANITY_CHECK(dst, 1); + SANITY_CHECK_NOTHING(); } diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index f0a8fd858..55b915b5b 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -102,6 +102,89 @@ static IppStatus sts = ippInit(); #endif +#if CV_SSE2 + +#define _MM_DEINTERLIV_EPI8(layer0_chunk0, layer0_chunk1, layer0_chunk2, \ + layer0_chunk3, layer0_chunk4, layer0_chunk5) \ + { \ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(layer0_chunk0, layer0_chunk3); \ + __m128i layer1_chunk1 = _mm_unpackhi_epi8(layer0_chunk0, layer0_chunk3); \ + __m128i layer1_chunk2 = _mm_unpacklo_epi8(layer0_chunk1, layer0_chunk4); \ + __m128i layer1_chunk3 = _mm_unpackhi_epi8(layer0_chunk1, layer0_chunk4); \ + __m128i layer1_chunk4 = _mm_unpacklo_epi8(layer0_chunk2, layer0_chunk5); \ + __m128i layer1_chunk5 = _mm_unpackhi_epi8(layer0_chunk2, layer0_chunk5); \ + \ + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3); \ + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3); \ + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4); \ + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4); \ + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5); \ + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5); \ + \ + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3); \ + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3); \ + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4); \ + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4); \ + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5); \ + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5); \ + \ + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3); \ + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3); \ + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4); \ + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4); \ + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5); \ + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5); \ + \ + layer0_chunk0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk3); \ + layer0_chunk1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3); \ + layer0_chunk2 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4); \ + layer0_chunk3 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4); \ + layer0_chunk4 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5); \ + layer0_chunk5 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5); \ + } + +#define _MM_INTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ + { \ + __m128i v_mask = _mm_set1_epi16(0x00ff); \ + \ + __m128i 
layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); \ + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); \ + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); \ + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); \ + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); \ + __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); \ + \ + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); \ + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); \ + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); \ + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); \ + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); \ + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); \ + \ + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); \ + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); \ + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); \ + __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); \ + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); \ + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); \ + \ + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); \ + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); \ + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); \ + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); \ + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); \ + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); \ + \ + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); \ + v_r1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); \ + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); \ + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); \ + v_b0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); \ + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); \ + } + +#endif + namespace cv { @@ -1699,6 +1782,148 @@ struct RGB2YCrCb_i int32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_delta, v_delta2; }; +#elif CV_SSE2 + +template <> +struct RGB2YCrCb_i +{ + typedef uchar channel_type; 
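+        // Fixed-point scheme implemented by process() below and mirrored by the
+        // scalar tail of operator():
+        //   Y  = (src[0]*C0 + src[1]*C1 + src[2]*C2 + (1 << (yuv_shift - 1))) >> yuv_shift
+        //   Cr = ((src[bidx^2] - Y)*C3 + delta + (1 << (yuv_shift - 1))) >> yuv_shift
+        //   Cb = ((src[bidx]   - Y)*C4 + delta + (1 << (yuv_shift - 1))) >> yuv_shift
+        // where delta = ColorChannel<uchar>::half() * (1 << yuv_shift) re-centers the
+        // chroma channels; v_delta is initialised with the rounding term already folded in.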
+ + RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs) + : srccn(_srccn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0])); + if (blueIdx==0) + std::swap(coeffs[0], coeffs[2]); + + v_c0 = _mm_set1_epi32(coeffs[0]); + v_c1 = _mm_set1_epi32(coeffs[1]); + v_c2 = _mm_set1_epi32(coeffs[2]); + v_c3 = _mm_set1_epi32(coeffs[3]); + v_c4 = _mm_set1_epi32(coeffs[4]); + v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_delta = _mm_set1_epi32(ColorChannel::half()*(1 << yuv_shift)); + v_delta = _mm_add_epi32(v_delta, v_delta2); + v_zero = _mm_setzero_si128(); + } + + // 16u x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + __m128i & v_y, __m128i & v_cr, __m128i & v_cb) const + { + __m128i v_r_p = _mm_unpacklo_epi16(v_r, v_zero); + __m128i v_g_p = _mm_unpacklo_epi16(v_g, v_zero); + __m128i v_b_p = _mm_unpacklo_epi16(v_b, v_zero); + + __m128i v_y0 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y0 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y0), yuv_shift); + + __m128i v_cr0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y0), v_c3); + __m128i v_cb0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? v_r_p : v_b_p, v_y0), v_c4); + v_cr0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr0), yuv_shift); + v_cb0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb0), yuv_shift); + + v_r_p = _mm_unpackhi_epi16(v_r, v_zero); + v_g_p = _mm_unpackhi_epi16(v_g, v_zero); + v_b_p = _mm_unpackhi_epi16(v_b, v_zero); + + __m128i v_y1 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y1 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y1), yuv_shift); + + __m128i v_cr1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y1), v_c3); + __m128i v_cb1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? 
v_r_p : v_b_p, v_y1), v_c4); + v_cr1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr1), yuv_shift); + v_cb1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb1), yuv_shift); + + v_y = _mm_packs_epi32(v_y0, v_y1); + v_cr = _mm_packs_epi32(v_cr0, v_cr1); + v_cb = _mm_packs_epi32(v_cb0, v_cb1); + } + + void operator()(const uchar * src, uchar * dst, int n) const + { + int scn = srccn, bidx = blueIdx, i = 0; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + int delta = ColorChannel::half()*(1 << yuv_shift); + n *= 3; + + if (scn == 3) + { + for ( ; i <= n - 96; i += 96, src += scn * 32) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 80)); + + _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + v_y0, v_cr0, v_cb0); + + __m128i v_y1 = v_zero, v_cr1 = v_zero, v_cb1 = v_zero; + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + v_y1, v_cr1, v_cb1); + + __m128i v_y_0 = _mm_packus_epi16(v_y0, v_y1); + __m128i v_cr_0 = _mm_packus_epi16(v_cr0, v_cr1); + __m128i v_cb_0 = _mm_packus_epi16(v_cb0, v_cb1); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + v_y0, v_cr0, v_cb0); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + v_y1, v_cr1, v_cb1); + + __m128i v_y_1 = _mm_packus_epi16(v_y0, v_y1); + __m128i v_cr_1 = _mm_packus_epi16(v_cr0, v_cr1); + __m128i v_cb_1 = _mm_packus_epi16(v_cb0, v_cb1); + + _MM_INTERLIV_EPI8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1) + + _mm_storeu_si128((__m128i *)(dst + i), v_y_0); + _mm_storeu_si128((__m128i *)(dst + i + 16), v_y_1); + _mm_storeu_si128((__m128i *)(dst + i + 32), v_cr_0); + _mm_storeu_si128((__m128i *)(dst + i + 48), v_cr_1); + _mm_storeu_si128((__m128i *)(dst + i + 64), v_cb_0); + _mm_storeu_si128((__m128i *)(dst + i + 80), v_cb_1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + int Y = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, yuv_shift); + int Cr = CV_DESCALE((src[bidx^2] - Y)*C3 + delta, yuv_shift); + int Cb = CV_DESCALE((src[bidx] - Y)*C4 + delta, yuv_shift); + dst[i] = saturate_cast(Y); + dst[i+1] = saturate_cast(Cr); + dst[i+2] = saturate_cast(Cb); + } + } + + int srccn, blueIdx, coeffs[5]; + __m128i v_c0, v_c1, v_c2; + __m128i v_c3, v_c4, v_delta, v_delta2; + __m128i v_zero; +}; + + #endif template struct YCrCb2RGB_f From edee922b59d3bc227905e0a2b68f86324a228650 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 19/98] cvtColor YCrCb 2 RGB --- modules/imgproc/src/color.cpp | 170 +++++++++++++++++++++++++++++++++- 1 file changed, 165 insertions(+), 5 deletions(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 55b915b5b..f80b80fe9 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -176,10 +176,10 @@ static IppStatus sts = ippInit(); __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), 
_mm_srli_epi16(layer2_chunk5, 8)); \ \ v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); \ - v_r1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); \ - v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); \ - v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); \ - v_b0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); \ + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); \ + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); \ + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); \ + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); \ v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); \ } @@ -1852,7 +1852,7 @@ struct RGB2YCrCb_i int delta = ColorChannel::half()*(1 << yuv_shift); n *= 3; - if (scn == 3) + if (scn == 3 && false) { for ( ; i <= n - 96; i += 96, src += scn * 32) { @@ -2321,6 +2321,166 @@ struct YCrCb2RGB_i uint16x4_t v_alpha2; }; +#elif CV_SSE2 + +template <> +struct YCrCb2RGB_i +{ + typedef uchar channel_type; + + YCrCb2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {22987, -11698, -5636, 29049}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0])); + + v_c0 = _mm_set1_epi16((short)coeffs[0]); + v_c1 = _mm_set1_epi16((short)coeffs[1]); + v_c2 = _mm_set1_epi16((short)coeffs[2]); + v_c3 = _mm_set1_epi16((short)coeffs[3]); + v_delta = _mm_set1_epi16(ColorChannel::half()); + v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_zero = _mm_setzero_si128(); + } + + // 16s x 8 + void process(__m128i v_y, __m128i v_cr, __m128i v_cb, + __m128i & v_r, __m128i & v_g, __m128i & v_b) const + { + v_cr = _mm_sub_epi16(v_cr, v_delta); + v_cb = _mm_sub_epi16(v_cb, v_delta); + + __m128i v_y_p = _mm_unpacklo_epi16(v_y, v_zero); + + __m128i v_mullo_3 = _mm_mullo_epi16(v_cb, v_c3); + __m128i v_mullo_2 = _mm_mullo_epi16(v_cb, v_c2); + __m128i v_mullo_1 = _mm_mullo_epi16(v_cr, v_c1); + __m128i v_mullo_0 = _mm_mullo_epi16(v_cr, v_c0); + + __m128i v_mulhi_3 = _mm_mulhi_epi16(v_cb, v_c3); + __m128i v_mulhi_2 = _mm_mulhi_epi16(v_cb, v_c2); + __m128i v_mulhi_1 = _mm_mulhi_epi16(v_cr, v_c1); + __m128i v_mulhi_0 = _mm_mulhi_epi16(v_cr, v_c0); + + __m128i v_b0 = _mm_srai_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_3, v_mulhi_3), v_delta2), yuv_shift); + __m128i v_g0 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_2, v_mulhi_2), + _mm_unpacklo_epi16(v_mullo_1, v_mulhi_1)), v_delta2), + yuv_shift); + __m128i v_r0 = _mm_srai_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_0, v_mulhi_0), v_delta2), yuv_shift); + + v_r0 = _mm_add_epi32(v_r0, v_y_p); + v_g0 = _mm_add_epi32(v_g0, v_y_p); + v_b0 = _mm_add_epi32(v_b0, v_y_p); + + v_y_p = _mm_unpackhi_epi16(v_y, v_zero); + + __m128i v_b1 = _mm_srai_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_3, v_mulhi_3), v_delta2), yuv_shift); + __m128i v_g1 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_2, v_mulhi_2), + _mm_unpackhi_epi16(v_mullo_1, v_mulhi_1)), v_delta2), + yuv_shift); + __m128i v_r1 = _mm_srai_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_0, v_mulhi_0), v_delta2), yuv_shift); 
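+        // The mullo/mulhi pairs above reconstruct full 32-bit products of the
+        // 16-bit inputs: SSE2 has no per-lane widening 16x16->32 multiply, so
+        // _mm_mullo_epi16/_mm_mulhi_epi16 produce the low and high halves of each
+        // product and _mm_unpacklo_epi16/_mm_unpackhi_epi16 interleave them back
+        // into 32-bit lanes, which are rounded with v_delta2 and shifted by yuv_shift.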
+ + v_r1 = _mm_add_epi32(v_r1, v_y_p); + v_g1 = _mm_add_epi32(v_g1, v_y_p); + v_b1 = _mm_add_epi32(v_b1, v_y_p); + + v_r = _mm_packs_epi32(v_r0, v_r1); + v_g = _mm_packs_epi32(v_g0, v_g1); + v_b = _mm_packs_epi32(v_b0, v_b1); + } + + void operator()(const uchar* src, uchar* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx, i = 0; + const uchar delta = ColorChannel::half(), alpha = ColorChannel::max(); + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; + n *= 3; + + if (dcn == 3) + { + for ( ; i <= n - 96; i += 96, dst += dcn * 32) + { + __m128i v_y0 = _mm_loadu_si128((__m128i const *)(src + i)); + __m128i v_y1 = _mm_loadu_si128((__m128i const *)(src + i + 16)); + __m128i v_cr0 = _mm_loadu_si128((__m128i const *)(src + i + 32)); + __m128i v_cr1 = _mm_loadu_si128((__m128i const *)(src + i + 48)); + __m128i v_cb0 = _mm_loadu_si128((__m128i const *)(src + i + 64)); + __m128i v_cb1 = _mm_loadu_si128((__m128i const *)(src + i + 80)); + + _MM_DEINTERLIV_EPI8(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + + __m128i v_r_0 = v_zero, v_g_0 = v_zero, v_b_0 = v_zero; + process(_mm_unpacklo_epi8(v_y0, v_zero), + _mm_unpacklo_epi8(v_cr0, v_zero), + _mm_unpacklo_epi8(v_cb0, v_zero), + v_r_0, v_g_0, v_b_0); + + __m128i v_r_1 = v_zero, v_g_1 = v_zero, v_b_1 = v_zero; + process(_mm_unpackhi_epi8(v_y0, v_zero), + _mm_unpackhi_epi8(v_cr0, v_zero), + _mm_unpackhi_epi8(v_cb0, v_zero), + v_r_1, v_g_1, v_b_1); + + __m128i v_r0 = _mm_packus_epi16(v_r_0, v_r_1); + __m128i v_g0 = _mm_packus_epi16(v_g_0, v_g_1); + __m128i v_b0 = _mm_packus_epi16(v_b_0, v_b_1); + + process(_mm_unpacklo_epi8(v_y1, v_zero), + _mm_unpacklo_epi8(v_cr1, v_zero), + _mm_unpacklo_epi8(v_cb1, v_zero), + v_r_0, v_g_0, v_b_0); + + process(_mm_unpackhi_epi8(v_y1, v_zero), + _mm_unpackhi_epi8(v_cr1, v_zero), + _mm_unpackhi_epi8(v_cb1, v_zero), + v_r_1, v_g_1, v_b_1); + + __m128i v_r1 = _mm_packus_epi16(v_r_0, v_r_1); + __m128i v_g1 = _mm_packus_epi16(v_g_0, v_g_1); + __m128i v_b1 = _mm_packus_epi16(v_b_0, v_b_1); + + if (bidx == 0) + { + std::swap(v_r0, v_b0); + std::swap(v_r1, v_b1); + } + + _MM_INTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + _mm_storeu_si128((__m128i *)(dst), v_r0); + _mm_storeu_si128((__m128i *)(dst + 16), v_r1); + _mm_storeu_si128((__m128i *)(dst + 32), v_g0); + _mm_storeu_si128((__m128i *)(dst + 48), v_g1); + _mm_storeu_si128((__m128i *)(dst + 64), v_b0); + _mm_storeu_si128((__m128i *)(dst + 80), v_b1); + } + } + + + for ( ; i < n; i += 3, dst += dcn) + { + uchar Y = src[i]; + uchar Cr = src[i+1]; + uchar Cb = src[i+2]; + + int b = Y + CV_DESCALE((Cb - delta)*C3, yuv_shift); + int g = Y + CV_DESCALE((Cb - delta)*C2 + (Cr - delta)*C1, yuv_shift); + int r = Y + CV_DESCALE((Cr - delta)*C0, yuv_shift); + + dst[bidx] = saturate_cast(b); + dst[1] = saturate_cast(g); + dst[bidx^2] = saturate_cast(r); + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + int coeffs[4]; + + __m128i v_c0, v_c1, v_c2, v_c3, v_delta2; + __m128i v_delta, v_alpha, v_zero; +}; + #endif ////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// From 9cacd3261d0caaa39844480f7404b99bba56747f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 20/98] cvtColor BGR5x5 2 Gray --- modules/imgproc/perf/perf_cvt_color.cpp | 2 +- modules/imgproc/src/color.cpp | 77 +++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 
4bcb698ae..9682cfc5e 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -204,7 +204,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, ) { Size sz = GetParam(); - int mode = COLOR_RGB2YCrCb; + int mode = COLOR_YCrCb2RGB; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index f80b80fe9..55240e228 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1125,6 +1125,13 @@ struct RGB5x52Gray v_delta = vdupq_n_u32(1 << (yuv_shift - 1)); v_f8 = vdupq_n_u16(0xf8); v_fc = vdupq_n_u16(0xfc); + #elif CV_SSE2 + v_b2y = _mm_set1_epi16(B2Y); + v_g2y = _mm_set1_epi16(G2Y); + v_r2y = _mm_set1_epi16(R2Y); + v_delta = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_f8 = _mm_set1_epi16(0xf8); + v_fc = _mm_set1_epi16(0xfc); #endif } @@ -1150,6 +1157,39 @@ struct RGB5x52Gray vst1_u8(dst + i, vmovn_u16(vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)))); } + #elif CV_SSE2 + __m128i v_zero = _mm_setzero_si128(); + + for ( ; i <= n - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); + __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), + v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 3), v_fc), + v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 8), v_f8); + + __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); + __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); + __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); + __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); + __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); + __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); + + __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), + _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); + + __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), + _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); + + v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); + v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + + __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); + _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + } #endif for ( ; i < n; i++) { @@ -1178,6 +1218,39 @@ struct RGB5x52Gray vst1_u8(dst + i, vmovn_u16(vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)))); } + #elif CV_SSE2 + __m128i v_zero = _mm_setzero_si128(); + + for ( ; i <= n - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); + __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), + v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 2), v_f8), + v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 7), v_f8); + + __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); + __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); + __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); + __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); + __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); + __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); + + __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), + _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); + + __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), + 
_mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); + + v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); + v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + + __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); + _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + } #endif for ( ; i < n; i++) { @@ -1194,6 +1267,10 @@ struct RGB5x52Gray uint16x4_t v_b2y, v_g2y, v_r2y; uint32x4_t v_delta; uint16x8_t v_f8, v_fc; + #elif CV_SSE2 + __m128i v_b2y, v_g2y, v_r2y; + __m128i v_delta; + __m128i v_f8, v_fc; #endif }; From fe371bf6244b928e7b2c49ec1d799f32e7e26b0c Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 21/98] cvtColor Gray 2 BGR5x5 --- modules/imgproc/src/color.cpp | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 55240e228..b5ac47328 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1048,6 +1048,10 @@ struct Gray2RGB5x5 #if CV_NEON v_n7 = vdup_n_u8(~7); v_n3 = vdup_n_u8(~3); + #elif CV_SSE2 + v_n7 = _mm_set1_epi16(~7); + v_n3 = _mm_set1_epi16(~3); + v_zero = _mm_setzero_si128(); #endif } @@ -1065,6 +1069,23 @@ struct Gray2RGB5x5 v_dst = vorrq_u16(v_dst, vshlq_n_u16(vmovl_u8(vand_u8(v_src, v_n7)), 8)); vst1q_u16((ushort *)dst + i, v_dst); } + #elif CV_SSE2 + for ( ; i <= n - 16; i += 16 ) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); + + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + __m128i v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), + _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), + _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); + + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), + _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), + _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + } #endif for ( ; i < n; i++ ) { @@ -1081,6 +1102,23 @@ struct Gray2RGB5x5 uint16x8_t v_dst = vorrq_u16(vorrq_u16(v_src, vshlq_n_u16(v_src, 5)), vshlq_n_u16(v_src, 10)); vst1q_u16((ushort *)dst + i, v_dst); } + #elif CV_SSE2 + for ( ; i <= n - 16; i += 8 ) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); + + __m128i v_src_p = _mm_srli_epi16(_mm_unpacklo_epi8(v_src, v_zero), 3); + __m128i v_dst = _mm_or_si128(v_src_p, + _mm_or_si128(_mm_slli_epi32(v_src_p, 5), + _mm_slli_epi16(v_src_p, 10))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); + + v_src_p = _mm_srli_epi16(_mm_unpackhi_epi8(v_src, v_zero), 3); + v_dst = _mm_or_si128(v_src_p, + _mm_or_si128(_mm_slli_epi16(v_src_p, 5), + _mm_slli_epi16(v_src_p, 10))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + } #endif for( ; i < n; i++ ) { @@ -1093,6 +1131,8 @@ struct Gray2RGB5x5 #if CV_NEON uint8x8_t v_n7, v_n3; + #elif CV_SSE2 + __m128i v_n7, v_n3, v_zero; #endif }; From 940f1e79143f8ad7f34b267bb013f73d6c79700d Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 22/98] interliving / deinterliving --- modules/imgproc/src/color.cpp | 90 +++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index b5ac47328..9bbb7b6ad 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -183,6 +183,96 @@ static IppStatus sts = ippInit(); v_b1 = 
_mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); \ } +#define _MM_DEINTERLIV_EPI16(layer0_chunk0, layer0_chunk1, layer0_chunk2, \ + layer0_chunk3, layer0_chunk4, layer0_chunk5) \ + { \ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(layer0_chunk0, layer0_chunk3); \ + __m128i layer1_chunk1 = _mm_unpackhi_epi16(layer0_chunk0, layer0_chunk3); \ + __m128i layer1_chunk2 = _mm_unpacklo_epi16(layer0_chunk1, layer0_chunk4); \ + __m128i layer1_chunk3 = _mm_unpackhi_epi16(layer0_chunk1, layer0_chunk4); \ + __m128i layer1_chunk4 = _mm_unpacklo_epi16(layer0_chunk2, layer0_chunk5); \ + __m128i layer1_chunk5 = _mm_unpackhi_epi16(layer0_chunk2, layer0_chunk5); \ + \ + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3); \ + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3); \ + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4); \ + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4); \ + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5); \ + __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5); \ + \ + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3); \ + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3); \ + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4); \ + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4); \ + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5); \ + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5); \ + \ + layer0_chunk0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3); \ + layer0_chunk1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3); \ + layer0_chunk2 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4); \ + layer0_chunk3 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4); \ + layer0_chunk4 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5); \ + layer0_chunk5 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5); \ + } + +#define _MM_INTERLIV_EPI16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ + { \ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); \ + \ + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); \ + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); \ + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); \ + __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); \ + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); \ + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); \ + \ + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); \ + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); \ + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); \ + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); \ + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); \ + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); \ + \ + __m128i layer1_chunk0 = 
_mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); \ + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); \ + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); \ + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); \ + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); \ + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); \ + \ + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); \ + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); \ + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); \ + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); \ + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); \ + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); \ + } + +#define _MM_DEINTERLIV_PS(layer0_chunk0, layer0_chunk1, layer0_chunk2, \ + layer0_chunk3, layer0_chunk4, layer0_chunk5) \ + { \ + __m128 layer1_chunk0 = _mm_unpacklo_ps(layer0_chunk0, layer0_chunk3); \ + __m128 layer1_chunk1 = _mm_unpackhi_ps(layer0_chunk0, layer0_chunk3); \ + __m128 layer1_chunk2 = _mm_unpacklo_ps(layer0_chunk1, layer0_chunk4); \ + __m128 layer1_chunk3 = _mm_unpackhi_ps(layer0_chunk1, layer0_chunk4); \ + __m128 layer1_chunk4 = _mm_unpacklo_ps(layer0_chunk2, layer0_chunk5); \ + __m128 layer1_chunk5 = _mm_unpackhi_ps(layer0_chunk2, layer0_chunk5); \ + \ + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3); \ + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3); \ + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4); \ + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4); \ + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5); \ + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5); \ + \ + layer0_chunk0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3); \ + layer0_chunk1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk3); \ + layer0_chunk2 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4); \ + layer0_chunk3 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4); \ + layer0_chunk4 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5); \ + layer0_chunk5 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); \ + } + #endif namespace cv From b99396ab12b902f6a9adc94935a9bd7f28a16ec7 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 23/98] cvtColor RGB 2 YCrCb --- modules/imgproc/src/color.cpp | 150 +++++++++++++++++++++++++++++++++- 1 file changed, 149 insertions(+), 1 deletion(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 9bbb7b6ad..2bf86d058 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -273,6 +273,32 @@ static IppStatus sts = ippInit(); layer0_chunk5 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); \ } +#define _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ + { \ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); \ \ + \ + __m128 layer2_chunk0 = 
_mm_shuffle_ps(v_r0, v_r1, mask_lo); \ + __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); \ + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); \ + __m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); \ + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); \ + __m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); \ + \ + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); \ + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); \ + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); \ + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); \ + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); \ + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); \ + \ + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); \ + v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); \ + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); \ + v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); \ + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); \ + v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); \ + } + #endif namespace cv @@ -2059,7 +2085,7 @@ struct RGB2YCrCb_i int delta = ColorChannel::half()*(1 << yuv_shift); n *= 3; - if (scn == 3 && false) + if (scn == 3) { for ( ; i <= n - 96; i += 96, src += scn * 32) { @@ -2130,6 +2156,128 @@ struct RGB2YCrCb_i __m128i v_zero; }; +#if CV_SSE4_1 + +template <> +struct RGB2YCrCb_i +{ + typedef ushort channel_type; + + RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs) + : srccn(_srccn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0])); + if (blueIdx==0) + std::swap(coeffs[0], coeffs[2]); + + v_c0 = _mm_set1_epi32(coeffs[0]); + v_c1 = _mm_set1_epi32(coeffs[1]); + v_c2 = _mm_set1_epi32(coeffs[2]); + v_c3 = _mm_set1_epi32(coeffs[3]); + v_c4 = _mm_set1_epi32(coeffs[4]); + v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_delta = _mm_set1_epi32(ColorChannel::half()*(1 << yuv_shift)); + v_delta = _mm_add_epi32(v_delta, v_delta2); + v_zero = _mm_setzero_si128(); + } + + // 16u x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + __m128i & v_y, __m128i & v_cr, __m128i & v_cb) const + { + __m128i v_r_p = _mm_unpacklo_epi16(v_r, v_zero); + __m128i v_g_p = _mm_unpacklo_epi16(v_g, v_zero); + __m128i v_b_p = _mm_unpacklo_epi16(v_b, v_zero); + + __m128i v_y0 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y0 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y0), yuv_shift); + + __m128i v_cr0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y0), v_c3); + __m128i v_cb0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? v_r_p : v_b_p, v_y0), v_c4); + v_cr0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr0), yuv_shift); + v_cb0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb0), yuv_shift); + + v_r_p = _mm_unpackhi_epi16(v_r, v_zero); + v_g_p = _mm_unpackhi_epi16(v_g, v_zero); + v_b_p = _mm_unpackhi_epi16(v_b, v_zero); + + __m128i v_y1 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y1 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y1), yuv_shift); + + __m128i v_cr1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? 
v_r_p : v_b_p, v_y1), v_c3); + __m128i v_cb1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? v_r_p : v_b_p, v_y1), v_c4); + v_cr1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr1), yuv_shift); + v_cb1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb1), yuv_shift); + + v_y = _mm_packus_epi32(v_y0, v_y1); + v_cr = _mm_packus_epi32(v_cr0, v_cr1); + v_cb = _mm_packus_epi32(v_cb0, v_cb1); + } + + void operator()(const ushort * src, ushort * dst, int n) const + { + int scn = srccn, bidx = blueIdx, i = 0; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + int delta = ColorChannel::half()*(1 << yuv_shift); + n *= 3; + + + if (scn == 3) + { + for ( ; i <= n - 48; i += 48, src += scn * 16) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 8)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 24)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); + + _MM_DEINTERLIV_EPI16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; + process(v_r0, v_g0, v_b0, + v_y0, v_cr0, v_cb0); + + __m128i v_y1 = v_zero, v_cr1 = v_zero, v_cb1 = v_zero; + process(v_r1, v_g1, v_b1, + v_y1, v_cr1, v_cb1); + + _MM_INTERLIV_EPI16(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + + _mm_storeu_si128((__m128i *)(dst + i), v_y0); + _mm_storeu_si128((__m128i *)(dst + i + 8), v_y1); + _mm_storeu_si128((__m128i *)(dst + i + 16), v_cr0); + _mm_storeu_si128((__m128i *)(dst + i + 24), v_cr1); + _mm_storeu_si128((__m128i *)(dst + i + 32), v_cb0); + _mm_storeu_si128((__m128i *)(dst + i + 40), v_cb1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + int Y = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, yuv_shift); + int Cr = CV_DESCALE((src[bidx^2] - Y)*C3 + delta, yuv_shift); + int Cb = CV_DESCALE((src[bidx] - Y)*C4 + delta, yuv_shift); + dst[i] = saturate_cast(Y); + dst[i+1] = saturate_cast(Cr); + dst[i+2] = saturate_cast(Cb); + } + } + + int srccn, blueIdx, coeffs[5]; + __m128i v_c0, v_c1, v_c2; + __m128i v_c3, v_c4, v_delta, v_delta2; + __m128i v_zero; +}; + +#endif // CV_SSE4_1 + #endif From 97fad1cb539d70db7b84fedd926ef0c4e4588b4f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 24/98] cvtColor RGB 2 YCrCb 32f --- modules/imgproc/src/color.cpp | 88 ++++++++++++++++++++++++++++++++++- 1 file changed, 87 insertions(+), 1 deletion(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 2bf86d058..d9597bfd7 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -275,7 +275,7 @@ static IppStatus sts = ippInit(); #define _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ { \ - const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); \ \ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); \ \ __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); \ __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); \ @@ -1765,6 +1765,92 @@ struct RGB2YCrCb_f float32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_delta; }; +#elif CV_SSE2 + +template <> +struct RGB2YCrCb_f +{ + typedef float channel_type; + + RGB2YCrCb_f(int _srccn, int _blueIdx, const float* _coeffs) : + srccn(_srccn), blueIdx(_blueIdx) + { + static const float coeffs0[] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f}; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 5*sizeof(coeffs[0])); + if (blueIdx==0) + std::swap(coeffs[0], coeffs[2]); + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_c4 = _mm_set1_ps(coeffs[4]); + v_delta = _mm_set1_ps(ColorChannel::half()); + } + + void process(__m128 v_r, __m128 v_g, __m128 v_b, + __m128 & v_y, __m128 & v_cr, __m128 & v_cb) const + { + v_y = _mm_mul_ps(v_r, v_c0); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_g, v_c1)); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_b, v_c2)); + + v_cr = _mm_add_ps(_mm_mul_ps(_mm_sub_ps(blueIdx == 0 ? v_b : v_r, v_y), v_c3), v_delta); + v_cb = _mm_add_ps(_mm_mul_ps(_mm_sub_ps(blueIdx == 2 ? v_b : v_r, v_y), v_c4), v_delta); + } + + void operator()(const float * src, float * dst, int n) const + { + int scn = srccn, bidx = blueIdx, i = 0; + const float delta = ColorChannel::half(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + n *= 3; + + if (scn == 3) + { + for ( ; i <= n - 24; i += 24, src += 24) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + + _MM_DEINTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + __m128 v_y0, v_cr0, v_cb0; + process(v_r0, v_g0, v_b0, + v_y0, v_cr0, v_cb0); + + __m128 v_y1, v_cr1, v_cb1; + process(v_r1, v_g1, v_b1, + v_y1, v_cr1, v_cb1); + + _MM_INTERLIV_PS(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + + _mm_storeu_ps(dst + i, v_y0); + _mm_storeu_ps(dst + i + 4, v_y1); + _mm_storeu_ps(dst + i + 8, v_cr0); + _mm_storeu_ps(dst + i + 12, v_cr1); + _mm_storeu_ps(dst + i + 16, v_cb0); + _mm_storeu_ps(dst + i + 20, v_cb1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + float Y = src[0]*C0 + src[1]*C1 + src[2]*C2; + float Cr = (src[bidx^2] - Y)*C3 + delta; + float Cb = (src[bidx] - Y)*C4 + delta; + dst[i] = Y; dst[i+1] = Cr; dst[i+2] = Cb; + } + } + int srccn, blueIdx; + float coeffs[5]; + __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_delta; +}; + #endif template struct RGB2YCrCb_i From 1c9e886a6a635af7fd7c4b4766aae39729ca1cc7 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 25/98] cvtColor YCrCb 2 RGB 32f --- modules/imgproc/src/color.cpp | 97 +++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index d9597bfd7..51329c616 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -2475,6 +2475,103 @@ struct YCrCb2RGB_f float32x4_t v_c0, v_c1, v_c2, v_c3, v_alpha, v_delta; }; +#elif CV_SSE2 + +template <> +struct YCrCb2RGB_f +{ + typedef float channel_type; + + YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f}; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 4*sizeof(coeffs[0])); + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_delta = _mm_set1_ps(ColorChannel::half()); + v_alpha = _mm_set1_ps(ColorChannel::max()); + } + + void process(__m128 v_y, __m128 v_cr, __m128 v_cb, + __m128 & v_r, __m128 & v_g, __m128 & v_b) const + { + v_cb = _mm_sub_ps(v_cb, v_delta); + v_cr = _mm_sub_ps(v_cr, v_delta); + + v_b = _mm_mul_ps(v_cb, v_c3); + v_g = _mm_add_ps(_mm_mul_ps(v_cb, v_c2), _mm_mul_ps(v_cr, v_c1)); + v_r = _mm_mul_ps(v_cr, v_c0); + + v_b = _mm_add_ps(v_b, v_y); + v_g = _mm_add_ps(v_g, v_y); + v_r = _mm_add_ps(v_r, v_y); + + if (blueIdx == 0) + std::swap(v_b, v_r); + } + + void operator()(const float* src, float* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx, i = 0; + const float delta = ColorChannel::half(), alpha = ColorChannel::max(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; + n *= 3; + + if (dcn == 3) + { + for ( ; i <= n - 24; i += 24, dst += 24) + { + __m128 v_y0 = _mm_loadu_ps(src + i); + __m128 v_y1 = _mm_loadu_ps(src + i + 4); + __m128 v_cr0 = _mm_loadu_ps(src + i + 8); + __m128 v_cr1 = _mm_loadu_ps(src + i + 12); + __m128 v_cb0 = _mm_loadu_ps(src + i + 16); + __m128 v_cb1 = _mm_loadu_ps(src + i + 20); + + _MM_DEINTERLIV_PS(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + + __m128 v_r0, v_g0, v_b0; + process(v_y0, v_cr0, v_cb0, + v_r0, v_g0, v_b0); + + __m128 v_r1, v_g1, v_b1; + process(v_y1, v_cr1, v_cb1, + v_r1, v_g1, v_b1); + + _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + _mm_storeu_ps(dst, v_r0); + _mm_storeu_ps(dst + 4, v_r1); + _mm_storeu_ps(dst + 8, v_g0); + _mm_storeu_ps(dst + 12, v_g1); + _mm_storeu_ps(dst + 16, v_b0); + _mm_storeu_ps(dst + 20, v_b1); + } + } + + for ( ; i < n; i += 3, dst += dcn) + { + float Y = src[i], Cr = src[i+1], Cb = src[i+2]; + + float b = Y + (Cb - delta)*C3; + float g = Y + (Cb - delta)*C2 + (Cr - delta)*C1; + float r = Y + (Cr - delta)*C0; + + dst[bidx] = b; dst[1] = g; dst[bidx^2] = r; + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + float coeffs[4]; + + __m128 v_c0, v_c1, v_c2, v_c3, v_alpha, v_delta; +}; + #endif template struct YCrCb2RGB_i From e1773749aee89f73e1e4eeda5d016689434c46b0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 26/98] cvtColor RGB 2 gray 16s --- modules/imgproc/src/color.cpp | 151 +++++++++++++++++++++++++++++++++- 1 file changed, 150 insertions(+), 1 deletion(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 51329c616..bf9ef807c 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1643,6 +1643,155 @@ struct RGB2Gray float32x4_t v_cb, v_cg, v_cr; }; +#elif CV_SSE2 + +template <> +struct RGB2Gray +{ + typedef ushort channel_type; + + RGB2Gray(int _srccn, int blueIdx, const int* _coeffs) : + srccn(_srccn) + { + static const int coeffs0[] = { R2Y, G2Y, B2Y }; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 3*sizeof(coeffs[0])); + if( blueIdx == 0 ) + std::swap(coeffs[0], coeffs[2]); + + v_cb = _mm_set1_epi16(coeffs[0]); + v_cg = _mm_set1_epi16(coeffs[1]); + v_cr = _mm_set1_epi16(coeffs[2]); + v_delta = _mm_set1_epi32(1 << (yuv_shift - 1)); + } + + // 16s x 8 + void process(__m128i v_b, __m128i v_g, __m128i v_r, + __m128i & v_gray) const + { + __m128i v_mullo_r = _mm_mullo_epi16(v_r, v_cr); + __m128i v_mullo_g = _mm_mullo_epi16(v_g, v_cg); + __m128i v_mullo_b = _mm_mullo_epi16(v_b, v_cb); + __m128i v_mulhi_r = _mm_mulhi_epu16(v_r, v_cr); + __m128i v_mulhi_g = _mm_mulhi_epu16(v_g, v_cg); + __m128i v_mulhi_b = _mm_mulhi_epu16(v_b, v_cb); + + __m128i v_gray0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_r, v_mulhi_r), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_gray0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), v_gray0); + v_gray0 = _mm_srli_epi32(_mm_add_epi32(v_gray0, v_delta), yuv_shift); + + __m128i v_gray1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_r, v_mulhi_r), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_gray1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), v_gray1); + v_gray1 = _mm_srli_epi32(_mm_add_epi32(v_gray1, v_delta), yuv_shift); + + v_gray = _mm_packus_epi32(v_gray0, v_gray1); + } + + void operator()(const ushort* src, ushort* dst, int n) const + { + int scn = srccn, cb = coeffs[0], cg = coeffs[1], cr = coeffs[2], i = 0; + + if (scn == 3) + { + for ( ; i <= n - 16; i += 16, src += scn * 16) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 8)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 24)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); + + _MM_DEINTERLIV_EPI16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + __m128i v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128i v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_si128((__m128i *)(dst + i), v_gray0); + _mm_storeu_si128((__m128i *)(dst + i + 8), v_gray1); + } + } + + for( ; i < n; i++, src += scn) + dst[i] = (ushort)CV_DESCALE((unsigned)(src[0]*cb + src[1]*cg + src[2]*cr), yuv_shift); + } + + int srccn, coeffs[3]; + __m128i v_cb, v_cg, v_cr; + __m128i v_delta; +}; + +template <> +struct RGB2Gray +{ + typedef float channel_type; + + RGB2Gray(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + static const float coeffs0[] = { 0.299f, 0.587f, 0.114f }; + memcpy( coeffs, _coeffs ? 
_coeffs : coeffs0, 3*sizeof(coeffs[0]) ); + if(blueIdx == 0) + std::swap(coeffs[0], coeffs[2]); + + v_cb = _mm_set1_ps(coeffs[0]); + v_cg = _mm_set1_ps(coeffs[1]); + v_cr = _mm_set1_ps(coeffs[2]); + } + + void process(__m128 v_r, __m128 v_g, __m128 v_b, + __m128 & v_gray) const + { + v_gray = _mm_mul_ps(v_r, v_cb); + v_gray = _mm_add_ps(v_gray, _mm_mul_ps(v_g, v_cg)); + v_gray = _mm_add_ps(v_gray, _mm_mul_ps(v_b, v_cb)); + } + + void operator()(const float * src, float * dst, int n) const + { + int scn = srccn, i = 0; + float cb = coeffs[0], cg = coeffs[1], cr = coeffs[2]; + + if (scn == 3) + { + for ( ; i <= n - 8; i += 8, src += scn * 8) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + + _MM_DEINTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + __m128 v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128 v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_ps(dst + i, v_gray0); + _mm_storeu_ps(dst + i + 4, v_gray1); + } + } + + for ( ; i < n; i++, src += scn) + dst[i] = src[0]*cb + src[1]*cg + src[2]*cr; + } + + int srccn; + float coeffs[3]; + __m128 v_cb, v_cg, v_cr; +}; + #else template<> struct RGB2Gray @@ -3019,7 +3168,7 @@ struct YCrCb2RGB_i __m128i v_delta, v_alpha, v_zero; }; -#endif +#endif // CV_SSE2 ////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// From 93f880084497b028f5587759c4863892173e0493 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 27/98] cvtColor RGB 2 XYZ f32 --- modules/imgproc/src/color.cpp | 100 ++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index bf9ef807c..8dd5e2cfe 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -3291,6 +3291,106 @@ struct RGB2XYZ_f float32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; }; +#elif CV_SSE2 + +template <> +struct RGB2XYZ_f +{ + typedef float channel_type; + + RGB2XYZ_f(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + memcpy(coeffs, _coeffs ? 
_coeffs : sRGB2XYZ_D65, 9*sizeof(coeffs[0])); + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[2]); + std::swap(coeffs[3], coeffs[5]); + std::swap(coeffs[6], coeffs[8]); + } + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_c4 = _mm_set1_ps(coeffs[4]); + v_c5 = _mm_set1_ps(coeffs[5]); + v_c6 = _mm_set1_ps(coeffs[6]); + v_c7 = _mm_set1_ps(coeffs[7]); + v_c8 = _mm_set1_ps(coeffs[8]); + } + + void process(__m128 v_r, __m128 v_g, __m128 v_b, + __m128 & v_x, __m128 & v_y, __m128 & v_z) const + { + v_x = _mm_mul_ps(v_r, v_c0); + v_x = _mm_add_ps(v_x, _mm_mul_ps(v_g, v_c1)); + v_x = _mm_add_ps(v_x, _mm_mul_ps(v_b, v_c2)); + + v_y = _mm_mul_ps(v_r, v_c3); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_g, v_c4)); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_b, v_c5)); + + v_z = _mm_mul_ps(v_r, v_c6); + v_z = _mm_add_ps(v_z, _mm_mul_ps(v_g, v_c7)); + v_z = _mm_add_ps(v_z, _mm_mul_ps(v_b, v_c8)); + } + + void operator()(const float* src, float* dst, int n) const + { + int scn = srccn, i = 0; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + + n *= 3; + + if (scn == 3) + { + for ( ; i <= n - 24; i += 24, src += 24) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + + _MM_DEINTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + __m128 v_x0, v_y0, v_z0; + process(v_r0, v_g0, v_b0, + v_x0, v_y0, v_z0); + + __m128 v_x1, v_y1, v_z1; + process(v_r1, v_g1, v_b1, + v_x1, v_y1, v_z1); + + _MM_INTERLIV_PS(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1) + + _mm_storeu_ps(dst + i, v_x0); + _mm_storeu_ps(dst + i + 4, v_x1); + _mm_storeu_ps(dst + i + 8, v_y0); + _mm_storeu_ps(dst + i + 12, v_y1); + _mm_storeu_ps(dst + i + 16, v_z0); + _mm_storeu_ps(dst + i + 20, v_z1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + float X = saturate_cast(src[0]*C0 + src[1]*C1 + src[2]*C2); + float Y = saturate_cast(src[0]*C3 + src[1]*C4 + src[2]*C5); + float Z = saturate_cast(src[0]*C6 + src[1]*C7 + src[2]*C8); + dst[i] = X; dst[i+1] = Y; dst[i+2] = Z; + } + } + + int srccn; + float coeffs[9]; + __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; +}; + + #endif template struct RGB2XYZ_i From c4c86a899094116cc23a94dbc70dcb9b46af2575 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 28/98] cvtColor XYZ 2 RGB f32 --- modules/imgproc/perf/perf_cvt_color.cpp | 6 +- modules/imgproc/src/color.cpp | 107 ++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 3 deletions(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 9682cfc5e..e01257eed 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -204,12 +204,12 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, ) { Size sz = GetParam(); - int mode = COLOR_YCrCb2RGB; + int mode = COLOR_XYZ2RGB; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; - Mat src(sz, CV_8UC(3)); - Mat dst(sz, CV_8UC(3)); + Mat src(sz, CV_32FC(3)); + Mat dst(sz, CV_32FC(3)); declare.time(100); declare.in(src, WARMUP_RNG).out(dst); diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 8dd5e2cfe..292a6df3a 100644 --- a/modules/imgproc/src/color.cpp +++ 
b/modules/imgproc/src/color.cpp @@ -3421,6 +3421,7 @@ template struct RGB2XYZ_i C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; n *= 3; + for(int i = 0; i < n; i += 3, src += scn) { int X = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, xyz_shift); @@ -3714,6 +3715,112 @@ template struct XYZ2RGB_f float coeffs[9]; }; +#if CV_SSE2 + +template <> +struct XYZ2RGB_f +{ + typedef float channel_type; + + XYZ2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + memcpy(coeffs, _coeffs ? _coeffs : XYZ2sRGB_D65, 9*sizeof(coeffs[0])); + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[6]); + std::swap(coeffs[1], coeffs[7]); + std::swap(coeffs[2], coeffs[8]); + } + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_c4 = _mm_set1_ps(coeffs[4]); + v_c5 = _mm_set1_ps(coeffs[5]); + v_c6 = _mm_set1_ps(coeffs[6]); + v_c7 = _mm_set1_ps(coeffs[7]); + v_c8 = _mm_set1_ps(coeffs[8]); + } + + void process(__m128 v_x, __m128 v_y, __m128 v_z, + __m128 & v_r, __m128 & v_g, __m128 & v_b) const + { + v_b = _mm_mul_ps(v_x, v_c0); + v_b = _mm_add_ps(v_b, _mm_mul_ps(v_y, v_c1)); + v_b = _mm_add_ps(v_b, _mm_mul_ps(v_z, v_c2)); + + v_g = _mm_mul_ps(v_x, v_c3); + v_g = _mm_add_ps(v_g, _mm_mul_ps(v_y, v_c4)); + v_g = _mm_add_ps(v_g, _mm_mul_ps(v_z, v_c5)); + + v_r = _mm_mul_ps(v_x, v_c6); + v_r = _mm_add_ps(v_r, _mm_mul_ps(v_y, v_c7)); + v_r = _mm_add_ps(v_r, _mm_mul_ps(v_z, v_c8)); + } + + void operator()(const float* src, float* dst, int n) const + { + int dcn = dstcn; + float alpha = ColorChannel::max(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + int i = 0; + + if (dcn == 3) + { + for ( ; i <= n - 24; i += 24, dst += 24) + { + __m128 v_x0 = _mm_loadu_ps(src + i); + __m128 v_x1 = _mm_loadu_ps(src + i + 4); + __m128 v_y0 = _mm_loadu_ps(src + i + 8); + __m128 v_y1 = _mm_loadu_ps(src + i + 12); + __m128 v_z0 = _mm_loadu_ps(src + i + 16); + __m128 v_z1 = _mm_loadu_ps(src + i + 20); + + _MM_DEINTERLIV_PS(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1) + + __m128 v_r0, v_g0, v_b0; + process(v_x0, v_y0, v_z0, + v_r0, v_g0, v_b0); + + __m128 v_r1, v_g1, v_b1; + process(v_x1, v_y1, v_z1, + v_r1, v_g1, v_b1); + + _MM_INTERLIV_PS(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1) + + _mm_storeu_ps(dst, v_b0); + _mm_storeu_ps(dst + 4, v_b1); + _mm_storeu_ps(dst + 8, v_g0); + _mm_storeu_ps(dst + 12, v_g1); + _mm_storeu_ps(dst + 16, v_r0); + _mm_storeu_ps(dst + 20, v_r1); + } + + } + + for( ; i < n; i += 3, dst += dcn) + { + float B = src[i]*C0 + src[i+1]*C1 + src[i+2]*C2; + float G = src[i]*C3 + src[i+1]*C4 + src[i+2]*C5; + float R = src[i]*C6 + src[i+1]*C7 + src[i+2]*C8; + dst[0] = B; dst[1] = G; dst[2] = R; + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + float coeffs[9]; + + __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; +}; + +#endif // CV_SSE2 + template struct XYZ2RGB_i { From 51e7fb76b60d40a4b39924f0547d54dcddd4c18a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 29/98] cvtColor HSV 2 RGB u8 --- modules/imgproc/perf/perf_cvt_color.cpp | 6 +- modules/imgproc/src/color.cpp | 93 ++++++++++++++++++++++++- 2 files changed, 95 insertions(+), 4 deletions(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index e01257eed..38540837e 100644 
--- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -204,12 +204,12 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, ) { Size sz = GetParam(); - int mode = COLOR_XYZ2RGB; + int mode = COLOR_RGB2HLS; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; - Mat src(sz, CV_32FC(3)); - Mat dst(sz, CV_32FC(3)); + Mat src(sz, CV_8UC(3)); + Mat dst(sz, CV_8UC(3)); declare.time(100); declare.in(src, WARMUP_RNG).out(dst); diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 292a6df3a..59c4cbf43 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -4335,14 +4335,48 @@ struct HSV2RGB_b v_scale_inv = vdupq_n_f32(1.f/255.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(255.0f); + v_zero = _mm_setzero_si128(); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + float * buf) const + { + __m128 v_r0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_r, v_zero)); + __m128 v_g0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_g, v_zero)); + __m128 v_b0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_b, v_zero)); + + __m128 v_r1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_r, v_zero)); + __m128 v_g1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_g, v_zero)); + __m128 v_b1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_b, v_zero)); + + v_g0 = _mm_mul_ps(v_g0, v_scale_inv); + v_b0 = _mm_mul_ps(v_b0, v_scale_inv); + + v_g1 = _mm_mul_ps(v_g1, v_scale_inv); + v_b1 = _mm_mul_ps(v_b1, v_scale_inv); + + _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + _mm_store_ps(buf, v_r0); + _mm_store_ps(buf + 4, v_r1); + _mm_store_ps(buf + 8, v_g0); + _mm_store_ps(buf + 12, v_g1); + _mm_store_ps(buf + 16, v_b0); + _mm_store_ps(buf + 20, v_b1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -4368,6 +4402,38 @@ struct HSV2RGB_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } #endif for( ; j < dn*3; j += 3 ) @@ -4408,6 +4474,28 @@ struct HSV2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3) + { + for ( ; j <= (dn - 16) * 3; j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); 
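+                    // The four aligned loads (the one above and the three below) pick up
+                    // 16 consecutive floats from the scratch buffer; multiplying by
+                    // v_scale (255.f) and running them through _mm_cvtps_epi32,
+                    // _mm_packs_epi32 and _mm_packus_epi16 emits one 16-byte store of
+                    // uchars, equivalent to dst[k] = saturate_cast<uchar>(buf[j + k] * 255.f)
+                    // for each of the 16 lanes.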
+ __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, dst += dcn ) @@ -4426,6 +4514,9 @@ struct HSV2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale_inv, v_scale; + __m128i v_zero; #endif }; From 51684c109e234a32a99b927ae59cb1f97a0f9c64 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:29 +0300 Subject: [PATCH 30/98] cvtColor RGB 2 HLS u8 --- modules/imgproc/src/color.cpp | 89 ++++++++++++++++++++++++++++++++++- 1 file changed, 87 insertions(+), 2 deletions(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 59c4cbf43..7c9dc101e 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -4477,7 +4477,7 @@ struct HSV2RGB_b #elif CV_SSE2 if (dcn == 3) { - for ( ; j <= (dn - 16) * 3; j += 16, dst += 16) + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) { __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); @@ -4588,13 +4588,41 @@ struct RGB2HLS_b v_scale_inv = vdupq_n_f32(1.f/255.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(255.f); + v_zero = _mm_setzero_si128(); #endif } + #if CV_SSE2 + void process(const float * buf, + __m128i & v_h, __m128i & v_l, __m128i & v_s) const + { + __m128 v_h0f = _mm_load_ps(buf); + __m128 v_h1f = _mm_load_ps(buf + 4); + __m128 v_l0f = _mm_load_ps(buf + 8); + __m128 v_l1f = _mm_load_ps(buf + 12); + __m128 v_s0f = _mm_load_ps(buf + 16); + __m128 v_s1f = _mm_load_ps(buf + 20); + + _MM_DEINTERLIV_PS(v_h0f, v_h1f, v_l0f, v_l1f, v_s0f, v_s1f) + + v_l0f = _mm_mul_ps(v_l0f, v_scale); + v_l1f = _mm_mul_ps(v_l1f, v_scale); + v_s0f = _mm_mul_ps(v_s0f, v_scale); + v_s1f = _mm_mul_ps(v_s1f, v_scale); + + v_h = _mm_packs_epi32(_mm_cvtps_epi32(v_h0f), _mm_cvtps_epi32(v_h1f)); + v_l = _mm_packs_epi32(_mm_cvtps_epi32(v_l0f), _mm_cvtps_epi32(v_l1f)); + v_s = _mm_packs_epi32(_mm_cvtps_epi32(v_s0f), _mm_cvtps_epi32(v_s1f)); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, scn = srccn; - float buf[3*BLOCK_SIZE]; + CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) { @@ -4632,6 +4660,26 @@ struct RGB2HLS_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (scn == 3) + { + for ( ; j <= (dn * 3 - 16); j += 16, src += 16) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)src); + + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + _mm_store_ps(buf + j, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 4, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + _mm_store_ps(buf + j + 8, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 12, 
_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + } + + int jr = j % 3; + if (jr) + src -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, src += scn ) { @@ -4656,6 +4704,40 @@ struct RGB2HLS_b vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[2], v_scale))))); vst3_u8(dst + j, v_dst); } + #elif CV_SSE2 + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_h_0, v_l_0, v_s_0; + process(buf + j, + v_h_0, v_l_0, v_s_0); + + __m128i v_h_1, v_l_1, v_s_1; + process(buf + j + 24, + v_h_1, v_l_1, v_s_1); + + __m128i v_h0 = _mm_packus_epi16(v_h_0, v_h_1); + __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_s0 = _mm_packus_epi16(v_s_0, v_s_1); + + process(buf + j + 48, + v_h_0, v_l_0, v_s_0); + + process(buf + j + 72, + v_h_1, v_l_1, v_s_1); + + __m128i v_h1 = _mm_packus_epi16(v_h_0, v_h_1); + __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); + + _MM_INTERLIV_EPI8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1) + + _mm_storeu_si128((__m128i *)(dst + j), v_h0); + _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); + _mm_storeu_si128((__m128i *)(dst + j + 32), v_l0); + _mm_storeu_si128((__m128i *)(dst + j + 48), v_l1); + _mm_storeu_si128((__m128i *)(dst + j + 64), v_s0); + _mm_storeu_si128((__m128i *)(dst + j + 80), v_s1); + } #endif for( ; j < dn*3; j += 3 ) { @@ -4671,6 +4753,9 @@ struct RGB2HLS_b #if CV_NEON float32x4_t v_scale, v_scale_inv; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv; + __m128i v_zero; #endif }; From 05e21015e92cc4efa02dc45ddde635a5e85ebce5 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 31/98] cvtColor HLS 2 RGB u8 --- modules/imgproc/perf/perf_cvt_color.cpp | 2 +- modules/imgproc/src/color.cpp | 94 ++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 2 deletions(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 38540837e..7704527cc 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -204,7 +204,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, ) { Size sz = GetParam(); - int mode = COLOR_RGB2HLS; + int mode = COLOR_HLS2RGB; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 7c9dc101e..6b072bbba 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -4835,14 +4835,48 @@ struct HLS2RGB_b v_scale_inv = vdupq_n_f32(1.f/255.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(255.f); + v_zero = _mm_setzero_si128(); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + float * buf) const + { + __m128 v_r0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_r, v_zero)); + __m128 v_g0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_g, v_zero)); + __m128 v_b0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_b, v_zero)); + + __m128 v_r1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_r, v_zero)); + __m128 v_g1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_g, v_zero)); + __m128 v_b1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_b, v_zero)); + + v_g0 = _mm_mul_ps(v_g0, v_scale_inv); + v_b0 = _mm_mul_ps(v_b0, v_scale_inv); + + v_g1 = _mm_mul_ps(v_g1, v_scale_inv); + v_b1 = _mm_mul_ps(v_b1, v_scale_inv); + + _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + _mm_store_ps(buf, v_r0); + _mm_store_ps(buf + 4, v_r1); + _mm_store_ps(buf + 8, 
v_g0); + _mm_store_ps(buf + 12, v_g1); + _mm_store_ps(buf + 16, v_b0); + _mm_store_ps(buf + 20, v_b1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -4868,6 +4902,38 @@ struct HLS2RGB_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } #endif for( ; j < dn*3; j += 3 ) { @@ -4907,7 +4973,30 @@ struct HLS2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif + for( ; j < dn*3; j += 3, dst += dcn ) { dst[0] = saturate_cast(buf[j]*255.f); @@ -4924,6 +5013,9 @@ struct HLS2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv; + __m128i v_zero; #endif }; From 584eed633e74a9b67d6733fed192b2773df54c23 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 32/98] cvtColor Lab 2 RGB u8 --- modules/imgproc/perf/perf_cvt_color.cpp | 2 +- modules/imgproc/src/color.cpp | 96 ++++++++++++++++++++++++- 2 files changed, 96 insertions(+), 2 deletions(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 7704527cc..868548c48 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -204,7 +204,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, ) { Size sz = GetParam(); - int mode = COLOR_HLS2RGB; + int mode = COLOR_Lab2RGB; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 6b072bbba..fdf7c11be 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -5331,14 
+5331,51 @@ struct Lab2RGB_b v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); v_128 = vdupq_n_f32(128.0f); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(100.f/255.f); + v_scale = _mm_set1_ps(255.f); + v_128 = _mm_set1_ps(128.0f); + v_zero = _mm_setzero_si128(); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + float * buf) const + { + __m128 v_r0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_r, v_zero)); + __m128 v_g0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_g, v_zero)); + __m128 v_b0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_b, v_zero)); + + __m128 v_r1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_r, v_zero)); + __m128 v_g1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_g, v_zero)); + __m128 v_b1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_b, v_zero)); + + v_r0 = _mm_mul_ps(v_r0, v_scale_inv); + v_r1 = _mm_mul_ps(v_r1, v_scale_inv); + + v_g0 = _mm_sub_ps(v_g0, v_128); + v_g1 = _mm_sub_ps(v_g1, v_128); + v_b0 = _mm_sub_ps(v_b0, v_128); + v_b1 = _mm_sub_ps(v_b1, v_128); + + _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + _mm_store_ps(buf, v_r0); + _mm_store_ps(buf + 4, v_r1); + _mm_store_ps(buf + 8, v_g0); + _mm_store_ps(buf + 12, v_g1); + _mm_store_ps(buf + 16, v_b0); + _mm_store_ps(buf + 20, v_b1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -5364,6 +5401,38 @@ struct Lab2RGB_b v_dst.val[2] = vsubq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_128); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } #endif for( ; j < dn*3; j += 3 ) @@ -5404,6 +5473,28 @@ struct Lab2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, dst += dcn ) @@ -5423,6 +5514,9 @@ struct 
Lab2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv, v_128; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv, v_128; + __m128i v_zero; #endif }; From e20613a776d4c74e718df36f4c5ac6c1c935fff2 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 33/98] cvtColor RGB 2 Luv u8 --- modules/imgproc/perf/perf_cvt_color.cpp | 2 +- modules/imgproc/src/color.cpp | 91 +++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 1 deletion(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 868548c48..4c49fa3a8 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -204,7 +204,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, ) { Size sz = GetParam(); - int mode = COLOR_Lab2RGB; + int mode = COLOR_RGB2Luv; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index fdf7c11be..32dfc85d5 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -5691,9 +5691,43 @@ struct RGB2Luv_b v_coeff3 = vdupq_n_f32(0.9732824427480916f); v_coeff4 = vdupq_n_f32(136.259541984732824f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_zero = _mm_setzero_si128(); + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(2.55f); + v_coeff1 = _mm_set1_ps(0.72033898305084743f); + v_coeff2 = _mm_set1_ps(96.525423728813564f); + v_coeff3 = _mm_set1_ps(0.9732824427480916f); + v_coeff4 = _mm_set1_ps(136.259541984732824f); #endif } + #if CV_SSE2 + void process(const float * buf, + __m128i & v_l, __m128i & v_u, __m128i & v_v) const + { + __m128 v_l0f = _mm_load_ps(buf); + __m128 v_l1f = _mm_load_ps(buf + 4); + __m128 v_u0f = _mm_load_ps(buf + 8); + __m128 v_u1f = _mm_load_ps(buf + 12); + __m128 v_v0f = _mm_load_ps(buf + 16); + __m128 v_v1f = _mm_load_ps(buf + 20); + + _MM_DEINTERLIV_PS(v_l0f, v_l1f, v_u0f, v_u1f, v_v0f, v_v1f) + + v_l0f = _mm_mul_ps(v_l0f, v_scale); + v_l1f = _mm_mul_ps(v_l1f, v_scale); + v_u0f = _mm_add_ps(_mm_mul_ps(v_u0f, v_coeff1), v_coeff2); + v_u1f = _mm_add_ps(_mm_mul_ps(v_u1f, v_coeff1), v_coeff2); + v_v0f = _mm_add_ps(_mm_mul_ps(v_v0f, v_coeff3), v_coeff4); + v_v1f = _mm_add_ps(_mm_mul_ps(v_v1f, v_coeff3), v_coeff4); + + v_l = _mm_packs_epi32(_mm_cvtps_epi32(v_l0f), _mm_cvtps_epi32(v_l1f)); + v_u = _mm_packs_epi32(_mm_cvtps_epi32(v_u0f), _mm_cvtps_epi32(v_u1f)); + v_v = _mm_packs_epi32(_mm_cvtps_epi32(v_v0f), _mm_cvtps_epi32(v_v1f)); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, scn = srccn; @@ -5735,6 +5769,26 @@ struct RGB2Luv_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (scn == 3) + { + for ( ; j <= (dn * 3 - 16); j += 16, src += 16) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)src); + + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + _mm_store_ps(buf + j, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 4, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + _mm_store_ps(buf + j + 8, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 12, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + } + + int jr = j % 3; + if (jr) + src -= jr, j -= jr; + } #endif for( 
; j < dn*3; j += 3, src += scn ) { @@ -5760,6 +5814,40 @@ struct RGB2Luv_b vst3_u8(dst + j, v_dst); } + #elif CV_SSE2 + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_l_0, v_u_0, v_v_0; + process(buf + j, + v_l_0, v_u_0, v_v_0); + + __m128i v_l_1, v_u_1, v_v_1; + process(buf + j + 24, + v_l_1, v_u_1, v_v_1); + + __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_u0 = _mm_packus_epi16(v_u_0, v_u_1); + __m128i v_v0 = _mm_packus_epi16(v_v_0, v_v_1); + + process(buf + j + 48, + v_l_0, v_u_0, v_v_0); + + process(buf + j + 72, + v_l_1, v_u_1, v_v_1); + + __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); + __m128i v_v1 = _mm_packus_epi16(v_v_0, v_v_1); + + _MM_INTERLIV_EPI8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1) + + _mm_storeu_si128((__m128i *)(dst + j), v_l0); + _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); + _mm_storeu_si128((__m128i *)(dst + j + 32), v_u0); + _mm_storeu_si128((__m128i *)(dst + j + 48), v_u1); + _mm_storeu_si128((__m128i *)(dst + j + 64), v_v0); + _mm_storeu_si128((__m128i *)(dst + j + 80), v_v1); + } #endif for( ; j < dn*3; j += 3 ) @@ -5777,6 +5865,9 @@ struct RGB2Luv_b #if CV_NEON float32x4_t v_scale, v_scale_inv, v_coeff1, v_coeff2, v_coeff3, v_coeff4; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv, v_coeff1, v_coeff2, v_coeff3, v_coeff4; + __m128i v_zero; #endif }; From a340ea872eae842cf1bea98fbbbc3bd5a15105b9 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 34/98] cvtColor Luv 2 RGB u8 --- modules/imgproc/perf/perf_cvt_color.cpp | 168 ++++++++++++++++++++++-- modules/imgproc/src/color.cpp | 109 ++++++++++++++- 2 files changed, 262 insertions(+), 15 deletions(-) diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 4c49fa3a8..02622ea80 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -56,10 +56,50 @@ enum }; CV_ENUM(CvtMode, + COLOR_BGR2BGR555, COLOR_BGR2BGR565, COLOR_BGR2BGRA, COLOR_BGR2GRAY, + COLOR_BGR2HLS, COLOR_BGR2HLS_FULL, COLOR_BGR2HSV, COLOR_BGR2HSV_FULL, + COLOR_BGR2Lab, COLOR_BGR2Luv, COLOR_BGR2RGB, COLOR_BGR2RGBA, COLOR_BGR2XYZ, + COLOR_BGR2YCrCb, COLOR_BGR2YUV, COLOR_BGR5552BGR, COLOR_BGR5552BGRA, + + COLOR_BGR5552GRAY, COLOR_BGR5552RGB, COLOR_BGR5552RGBA, COLOR_BGR5652BGR, + COLOR_BGR5652BGRA, COLOR_BGR5652GRAY, COLOR_BGR5652RGB, COLOR_BGR5652RGBA, + + COLOR_BGRA2BGR, COLOR_BGRA2BGR555, COLOR_BGRA2BGR565, COLOR_BGRA2GRAY, COLOR_BGRA2RGBA, + CX_BGRA2HLS, CX_BGRA2HLS_FULL, CX_BGRA2HSV, CX_BGRA2HSV_FULL, CX_BGRA2Lab, CX_BGRA2Luv, CX_BGRA2XYZ, CX_BGRA2YCrCb, CX_BGRA2YUV, + + COLOR_GRAY2BGR, COLOR_GRAY2BGR555, COLOR_GRAY2BGR565, COLOR_GRAY2BGRA, + + COLOR_HLS2BGR, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB, COLOR_HLS2RGB_FULL, + CX_HLS2BGRA, CX_HLS2BGRA_FULL, CX_HLS2RGBA, CX_HLS2RGBA_FULL, + + COLOR_HSV2BGR, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB, COLOR_HSV2RGB_FULL, + CX_HSV2BGRA, CX_HSV2BGRA_FULL, CX_HSV2RGBA, CX_HSV2RGBA_FULL, + + COLOR_Lab2BGR, COLOR_Lab2LBGR, COLOR_Lab2LRGB, COLOR_Lab2RGB, + CX_Lab2BGRA, CX_Lab2LBGRA, CX_Lab2LRGBA, CX_Lab2RGBA, + + COLOR_LBGR2Lab, COLOR_LBGR2Luv, COLOR_LRGB2Lab, COLOR_LRGB2Luv, + CX_LBGRA2Lab, CX_LBGRA2Luv, CX_LRGBA2Lab, CX_LRGBA2Luv, + + COLOR_Luv2BGR, COLOR_Luv2LBGR, COLOR_Luv2LRGB, COLOR_Luv2RGB, + CX_Luv2BGRA, CX_Luv2LBGRA, CX_Luv2LRGBA, CX_Luv2RGBA, + + COLOR_RGB2BGR555, COLOR_RGB2BGR565, COLOR_RGB2GRAY, + COLOR_RGB2HLS, COLOR_RGB2HLS_FULL, COLOR_RGB2HSV, COLOR_RGB2HSV_FULL, COLOR_RGB2Lab, COLOR_RGB2Luv, 
COLOR_RGB2XYZ, COLOR_RGB2YCrCb, COLOR_RGB2YUV, - CX_RGBA2YCrCb, CX_RGBA2YUV) + + COLOR_RGBA2BGR, COLOR_RGBA2BGR555, COLOR_RGBA2BGR565, COLOR_RGBA2GRAY, + CX_RGBA2HLS, CX_RGBA2HLS_FULL, CX_RGBA2HSV, CX_RGBA2HSV_FULL, + CX_RGBA2Lab, CX_RGBA2Luv, CX_RGBA2XYZ, + CX_RGBA2YCrCb, CX_RGBA2YUV, + + COLOR_XYZ2BGR, COLOR_XYZ2RGB, CX_XYZ2BGRA, CX_XYZ2RGBA, + + COLOR_YCrCb2BGR, COLOR_YCrCb2RGB, CX_YCrCb2BGRA, CX_YCrCb2RGBA, + COLOR_YUV2BGR, COLOR_YUV2RGB, CX_YUV2BGRA, CX_YUV2RGBA + ) CV_ENUM(CvtModeBayer, @@ -197,25 +237,135 @@ ChPair getConversionInfo(int cvtMode) return ChPair(0,0); } -typedef perf::TestBaseWithParam Size_CvtMode; +typedef std::tr1::tuple Size_CvtMode_t; +typedef perf::TestBaseWithParam Size_CvtMode; PERF_TEST_P(Size_CvtMode, cvtColor8u, - testing::Values(::perf::szODD, ::perf::szVGA, ::perf::sz1080p) + testing::Combine( + testing::Values(::perf::szODD, ::perf::szVGA, ::perf::sz1080p), + CvtMode::all() + ) ) { - Size sz = GetParam(); - int mode = COLOR_RGB2Luv; + Size sz = get<0>(GetParam()); + int _mode = get<1>(GetParam()), mode = _mode; ChPair ch = getConversionInfo(mode); mode %= COLOR_COLORCVT_MAX; - Mat src(sz, CV_8UC(3)); - Mat dst(sz, CV_8UC(3)); + Mat src(sz, CV_8UC(ch.scn)); + Mat dst(sz, CV_8UC(ch.dcn)); declare.time(100); declare.in(src, WARMUP_RNG).out(dst); int runs = sz.width <= 320 ? 100 : 5; - TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, 3); + TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); - SANITY_CHECK_NOTHING(); +#if defined(__APPLE__) && defined(HAVE_IPP) + SANITY_CHECK(dst, _mode == CX_BGRA2HLS_FULL ? 2 : 1); +#else + SANITY_CHECK(dst, 1); +#endif +} + +typedef std::tr1::tuple Size_CvtMode_Bayer_t; +typedef perf::TestBaseWithParam Size_CvtMode_Bayer; + +PERF_TEST_P(Size_CvtMode_Bayer, cvtColorBayer8u, + testing::Combine( + testing::Values(::perf::szODD, ::perf::szVGA), + CvtModeBayer::all() + ) + ) +{ + Size sz = get<0>(GetParam()); + int mode = get<1>(GetParam()); + ChPair ch = getConversionInfo(mode); + mode %= COLOR_COLORCVT_MAX; + + Mat src(sz, CV_8UC(ch.scn)); + Mat dst(sz, CV_8UC(ch.dcn)); + + declare.time(100); + declare.in(src, WARMUP_RNG).out(dst); + + TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn); + + SANITY_CHECK(dst, 1); +} + +typedef std::tr1::tuple Size_CvtMode2_t; +typedef perf::TestBaseWithParam Size_CvtMode2; + +PERF_TEST_P(Size_CvtMode2, cvtColorYUV420, + testing::Combine( + testing::Values(szVGA, sz1080p, Size(130, 60)), + CvtMode2::all() + ) + ) +{ + Size sz = get<0>(GetParam()); + int mode = get<1>(GetParam()); + ChPair ch = getConversionInfo(mode); + + Mat src(sz.height + sz.height / 2, sz.width, CV_8UC(ch.scn)); + Mat dst(sz, CV_8UC(ch.dcn)); + + declare.in(src, WARMUP_RNG).out(dst); + + int runs = (sz.width <= 640) ? 8 : 1; + TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); + + SANITY_CHECK(dst, 1); +} + +typedef std::tr1::tuple Size_CvtMode3_t; +typedef perf::TestBaseWithParam Size_CvtMode3; + +PERF_TEST_P(Size_CvtMode3, cvtColorRGB2YUV420p, + testing::Combine( + testing::Values(szVGA, sz720p, sz1080p, Size(130, 60)), + CvtMode3::all() + ) + ) +{ + Size sz = get<0>(GetParam()); + int mode = get<1>(GetParam()); + ChPair ch = getConversionInfo(mode); + + Mat src(sz, CV_8UC(ch.scn)); + Mat dst(sz.height + sz.height / 2, sz.width, CV_8UC(ch.dcn)); + + declare.time(100); + declare.in(src, WARMUP_RNG).out(dst); + + int runs = (sz.width <= 640) ? 
10 : 1; + TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); + + SANITY_CHECK(dst, 1); +} + +CV_ENUM(EdgeAwareBayerMode, COLOR_BayerBG2BGR_EA, COLOR_BayerGB2BGR_EA, COLOR_BayerRG2BGR_EA, COLOR_BayerGR2BGR_EA) + +typedef std::tr1::tuple EdgeAwareParams; +typedef perf::TestBaseWithParam EdgeAwareDemosaicingTest; + +PERF_TEST_P(EdgeAwareDemosaicingTest, demosaicingEA, + testing::Combine( + testing::Values(szVGA, sz720p, sz1080p, Size(130, 60)), + EdgeAwareBayerMode::all() + ) + ) +{ + Size sz = get<0>(GetParam()); + int mode = get<1>(GetParam()); + + Mat src(sz, CV_8UC1); + Mat dst(sz, CV_8UC3); + + declare.in(src, WARMUP_RNG).out(dst); + + TEST_CYCLE() cvtColor(src, dst, mode, 3); + + SANITY_CHECK(dst, 1); } diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 32dfc85d5..2794da36e 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -4376,7 +4376,7 @@ struct HSV2RGB_b { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -4622,7 +4622,7 @@ struct RGB2HLS_b void operator()(const uchar* src, uchar* dst, int n) const { int i, j, scn = srccn; - CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) { @@ -4876,7 +4876,7 @@ struct HLS2RGB_b { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -5375,7 +5375,7 @@ struct Lab2RGB_b { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - CV_DECL_ALIGNED(16) float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -5731,7 +5731,7 @@ struct RGB2Luv_b void operator()(const uchar* src, uchar* dst, int n) const { int i, j, scn = srccn; - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) { @@ -5888,14 +5888,54 @@ struct Luv2RGB_b v_140 = vdupq_n_f32(140.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(100.f/255.f); + v_coeff1 = _mm_set1_ps(1.388235294117647f); + v_coeff2 = _mm_set1_ps(1.027450980392157f); + v_134 = _mm_set1_ps(134.f); + v_140 = _mm_set1_ps(140.f); + v_scale = _mm_set1_ps(255.f); + v_zero = _mm_setzero_si128(); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_l, __m128i v_u, __m128i v_v, + float * buf) const + { + __m128 v_l0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_l, v_zero)); + __m128 v_u0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_u, v_zero)); + __m128 v_v0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_v, v_zero)); + + __m128 v_l1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_l, v_zero)); + __m128 v_u1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_u, v_zero)); + __m128 v_v1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_v, v_zero)); + + v_l0 = _mm_mul_ps(v_l0, v_scale_inv); + v_l1 = _mm_mul_ps(v_l1, v_scale_inv); + + v_u0 = _mm_sub_ps(_mm_mul_ps(v_u0, v_coeff1), v_134); + v_u1 = _mm_sub_ps(_mm_mul_ps(v_u1, v_coeff1), v_134); + v_v0 = _mm_sub_ps(_mm_mul_ps(v_v0, v_coeff2), v_140); + v_v1 = _mm_sub_ps(_mm_mul_ps(v_v1, v_coeff2), v_140); + + _MM_INTERLIV_PS(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1) + + _mm_store_ps(buf, v_l0); + _mm_store_ps(buf + 4, v_l1); + 
_mm_store_ps(buf + 8, v_u0); + _mm_store_ps(buf + 12, v_u1); + _mm_store_ps(buf + 16, v_v0); + _mm_store_ps(buf + 20, v_v1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -5921,6 +5961,38 @@ struct Luv2RGB_b v_dst.val[2] = vsubq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_coeff2), v_140); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } #endif for( ; j < dn*3; j += 3 ) { @@ -5960,6 +6032,28 @@ struct Luv2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, dst += dcn ) @@ -5979,6 +6073,9 @@ struct Luv2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv, v_coeff1, v_coeff2, v_134, v_140; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv, v_coeff1, v_coeff2, v_134, v_140; + __m128i v_zero; #endif }; From 3a426660ea1836d9414847bb6d56c83a9cc90168 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 35/98] sse_utils.hpp --- modules/core/include/opencv2/core/base.hpp | 2 + .../core/include/opencv2/core/sse_utils.hpp | 497 ++++++++++++++++++ modules/imgproc/src/color.cpp | 309 +++-------- 3 files changed, 581 insertions(+), 227 deletions(-) create mode 100644 modules/core/include/opencv2/core/sse_utils.hpp diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp index e43fbbc95..c60eedddc 100644 --- a/modules/core/include/opencv2/core/base.hpp +++ b/modules/core/include/opencv2/core/base.hpp @@ -813,4 +813,6 @@ inline float32x2_t cv_vsqrt_f32(float32x2_t val) } // cv +#include "sse_utils.hpp" + #endif //__OPENCV_CORE_BASE_HPP__ diff --git a/modules/core/include/opencv2/core/sse_utils.hpp 
b/modules/core/include/opencv2/core/sse_utils.hpp
new file mode 100644
index 000000000..13673f57b
--- /dev/null
+++ b/modules/core/include/opencv2/core/sse_utils.hpp
@@ -0,0 +1,497 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+// +//M*/ + +#ifndef __OPENCV_CORE_SSE_UTILS_HPP__ +#define __OPENCV_CORE_SSE_UTILS_HPP__ + +#ifndef __cplusplus +# error base.hpp header must be compiled as C++ +#endif + +#if CV_SSE2 + +inline void _mm_deinterliv_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g1); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g1); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b0); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b0); + __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_b1); + __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_b1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5); + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5); + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4); + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5); + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk3); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4); + v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5); + v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5); +} + +inline void _mm_deinterliv_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b1); + __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_a0); + __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_a0); + __m128i layer1_chunk6 = _mm_unpacklo_epi8(v_g1, v_a1); + __m128i layer1_chunk7 = _mm_unpackhi_epi8(v_g1, v_a1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk6 = _mm_unpacklo_epi8(layer1_chunk3, layer1_chunk7); + __m128i layer2_chunk7 = 
_mm_unpackhi_epi8(layer1_chunk3, layer1_chunk7); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk6 = _mm_unpacklo_epi8(layer2_chunk3, layer2_chunk7); + __m128i layer3_chunk7 = _mm_unpackhi_epi8(layer2_chunk3, layer2_chunk7); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk4); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk4); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk5); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk5); + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk6); + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk6); + __m128i layer4_chunk6 = _mm_unpacklo_epi8(layer3_chunk3, layer3_chunk7); + __m128i layer4_chunk7 = _mm_unpackhi_epi8(layer3_chunk3, layer3_chunk7); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk4); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk4); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk5); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk5); + v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk6); + v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk6); + v_a0 = _mm_unpacklo_epi8(layer4_chunk3, layer4_chunk7); + v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7); +} + +inline void _mm_interlive_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i 
layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); +} + +inline void _mm_interlive_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer4_chunk6 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask)); + __m128i layer4_chunk7 = _mm_packus_epi16(_mm_srli_epi16(v_a0, 8), _mm_srli_epi16(v_a1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); + __m128i layer3_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_and_si128(layer4_chunk6, v_mask), _mm_and_si128(layer4_chunk7, v_mask)); + __m128i layer3_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk6, 8), _mm_srli_epi16(layer4_chunk7, 
8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); + __m128i layer2_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk6, 8), _mm_srli_epi16(layer3_chunk7, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); + __m128i layer1_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk6, 8), _mm_srli_epi16(layer2_chunk7, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_a0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); + v_g1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask)); + v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8)); +} + +inline void _mm_deinterliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b0); + __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_b1); + __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_b1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5); + __m128i layer2_chunk5 = 
_mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5); + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4); + v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5); + v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5); +} + +inline void _mm_deinterliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b1); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b1); + __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_a0); + __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_a0); + __m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1); + __m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk6 = _mm_unpacklo_epi16(layer1_chunk3, layer1_chunk7); + __m128i layer2_chunk7 = _mm_unpackhi_epi16(layer1_chunk3, layer1_chunk7); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk6 = _mm_unpacklo_epi16(layer2_chunk3, layer2_chunk7); + __m128i layer3_chunk7 = _mm_unpackhi_epi16(layer2_chunk3, layer2_chunk7); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk4); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk4); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk5); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk5); + v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk6); + v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk6); + v_a0 = _mm_unpacklo_epi16(layer3_chunk3, layer3_chunk7); + v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7); +} + +inline void _mm_interliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), 
_mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); +} + +inline void _mm_interliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer3_chunk6 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask)); + __m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), 
_mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); + __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); + __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); + v_g1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask)); + v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16)); +} + +inline void _mm_deinterliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0); + __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1); + __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4); + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5); + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3); + v_r1 = 
_mm_unpackhi_ps(layer2_chunk0, layer2_chunk3); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4); + v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5); + v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); +} + +inline void _mm_deinterliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b1); + __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_a0); + __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_a0); + __m128 layer1_chunk6 = _mm_unpacklo_ps(v_g1, v_a1); + __m128 layer1_chunk7 = _mm_unpackhi_ps(v_g1, v_a1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk4); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk4); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk5); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk5); + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk6); + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk6); + __m128 layer2_chunk6 = _mm_unpacklo_ps(layer1_chunk3, layer1_chunk7); + __m128 layer2_chunk7 = _mm_unpackhi_ps(layer1_chunk3, layer1_chunk7); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk4); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk4); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk5); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk5); + v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk6); + v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk6); + v_a0 = _mm_unpacklo_ps(layer2_chunk3, layer2_chunk7); + v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7); +} + +inline void _mm_interliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +{ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); + __m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); + v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); +} + +inline void _mm_interliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +{ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), 
mask_hi = _MM_SHUFFLE(3, 1, 3, 1); + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk4 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk5 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); + __m128 layer2_chunk6 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_a0, v_a1, mask_lo); + __m128 layer2_chunk7 = _mm_shuffle_ps(v_a0, v_a1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); + __m128 layer1_chunk6 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_lo); + __m128 layer1_chunk7 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_b0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_b1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); + v_a0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); + v_g1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_lo); + v_a1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_hi); +} + +#endif + +#endif //__OPENCV_CORE_SSE_UTILS_HPP__ diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 2794da36e..4d74e86db 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -102,205 +102,6 @@ static IppStatus sts = ippInit(); #endif -#if CV_SSE2 - -#define _MM_DEINTERLIV_EPI8(layer0_chunk0, layer0_chunk1, layer0_chunk2, \ - layer0_chunk3, layer0_chunk4, layer0_chunk5) \ - { \ - __m128i layer1_chunk0 = _mm_unpacklo_epi8(layer0_chunk0, layer0_chunk3); \ - __m128i layer1_chunk1 = _mm_unpackhi_epi8(layer0_chunk0, layer0_chunk3); \ - __m128i layer1_chunk2 = _mm_unpacklo_epi8(layer0_chunk1, layer0_chunk4); \ - __m128i layer1_chunk3 = _mm_unpackhi_epi8(layer0_chunk1, layer0_chunk4); \ - __m128i layer1_chunk4 = _mm_unpacklo_epi8(layer0_chunk2, layer0_chunk5); \ - __m128i layer1_chunk5 = _mm_unpackhi_epi8(layer0_chunk2, layer0_chunk5); \ - \ - __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3); \ - __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3); \ - __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4); \ - __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4); \ - __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5); \ - __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5); \ - \ - __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3); \ - __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3); \ - __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4); \ - __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4); \ - __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5); \ - __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5); \ - \ - __m128i 
layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3); \ - __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3); \ - __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4); \ - __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4); \ - __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5); \ - __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5); \ - \ - layer0_chunk0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk3); \ - layer0_chunk1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3); \ - layer0_chunk2 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4); \ - layer0_chunk3 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4); \ - layer0_chunk4 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5); \ - layer0_chunk5 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5); \ - } - -#define _MM_INTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ - { \ - __m128i v_mask = _mm_set1_epi16(0x00ff); \ - \ - __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); \ - __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); \ - __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); \ - __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); \ - __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); \ - __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); \ - \ - __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); \ - __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); \ - __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); \ - __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); \ - __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); \ - __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); \ - \ - __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); \ - __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); \ - __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); \ - __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); \ - __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); \ - __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); \ - \ - __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); \ - __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); \ - __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); \ - __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); \ - __m128i layer1_chunk2 = 
_mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); \ - __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); \ - \ - v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); \ - v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); \ - v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); \ - v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); \ - v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); \ - v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); \ - } - -#define _MM_DEINTERLIV_EPI16(layer0_chunk0, layer0_chunk1, layer0_chunk2, \ - layer0_chunk3, layer0_chunk4, layer0_chunk5) \ - { \ - __m128i layer1_chunk0 = _mm_unpacklo_epi16(layer0_chunk0, layer0_chunk3); \ - __m128i layer1_chunk1 = _mm_unpackhi_epi16(layer0_chunk0, layer0_chunk3); \ - __m128i layer1_chunk2 = _mm_unpacklo_epi16(layer0_chunk1, layer0_chunk4); \ - __m128i layer1_chunk3 = _mm_unpackhi_epi16(layer0_chunk1, layer0_chunk4); \ - __m128i layer1_chunk4 = _mm_unpacklo_epi16(layer0_chunk2, layer0_chunk5); \ - __m128i layer1_chunk5 = _mm_unpackhi_epi16(layer0_chunk2, layer0_chunk5); \ - \ - __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3); \ - __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3); \ - __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4); \ - __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4); \ - __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5); \ - __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5); \ - \ - __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3); \ - __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3); \ - __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4); \ - __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4); \ - __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5); \ - __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5); \ - \ - layer0_chunk0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3); \ - layer0_chunk1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3); \ - layer0_chunk2 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4); \ - layer0_chunk3 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4); \ - layer0_chunk4 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5); \ - layer0_chunk5 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5); \ - } - -#define _MM_INTERLIV_EPI16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ - { \ - __m128i v_mask = _mm_set1_epi32(0x0000ffff); \ - \ - __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); \ - __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); \ - __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); \ - __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); \ - __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); \ - __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); \ - \ - __m128i 
layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); \ - __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); \ - __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); \ - __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); \ - __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); \ - __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); \ - \ - __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); \ - __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); \ - __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); \ - __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); \ - __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); \ - __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); \ - \ - v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); \ - v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); \ - v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); \ - v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); \ - v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); \ - v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); \ - } - -#define _MM_DEINTERLIV_PS(layer0_chunk0, layer0_chunk1, layer0_chunk2, \ - layer0_chunk3, layer0_chunk4, layer0_chunk5) \ - { \ - __m128 layer1_chunk0 = _mm_unpacklo_ps(layer0_chunk0, layer0_chunk3); \ - __m128 layer1_chunk1 = _mm_unpackhi_ps(layer0_chunk0, layer0_chunk3); \ - __m128 layer1_chunk2 = _mm_unpacklo_ps(layer0_chunk1, layer0_chunk4); \ - __m128 layer1_chunk3 = _mm_unpackhi_ps(layer0_chunk1, layer0_chunk4); \ - __m128 layer1_chunk4 = _mm_unpacklo_ps(layer0_chunk2, layer0_chunk5); \ - __m128 layer1_chunk5 = _mm_unpackhi_ps(layer0_chunk2, layer0_chunk5); \ - \ - __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3); \ - __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3); \ - __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4); \ - __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4); \ - __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5); \ - __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5); \ - \ - layer0_chunk0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3); \ - layer0_chunk1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk3); \ - layer0_chunk2 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4); \ - layer0_chunk3 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4); \ - layer0_chunk4 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5); \ - layer0_chunk5 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); \ - } - -#define _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) \ - { \ - const 
int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); \ - \ - __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); \ - __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); \ - __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); \ - __m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); \ - __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); \ - __m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); \ - \ - __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); \ - __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); \ - __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); \ - __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); \ - __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); \ - __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); \ - \ - v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); \ - v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); \ - v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); \ - v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); \ - v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); \ - v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); \ - } - -#endif - namespace cv { @@ -1703,7 +1504,34 @@ struct RGB2Gray __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); - _MM_DEINTERLIV_EPI16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128i v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128i v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_si128((__m128i *)(dst + i), v_gray0); + _mm_storeu_si128((__m128i *)(dst + i + 8), v_gray1); + } + } + else if (scn == 4) + { + for ( ; i <= n - 16; i += 16, src += scn * 16) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 8)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 24)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); + __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 48)); + __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 56)); + + _mm_deinterliv_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); __m128i v_gray0; process(v_r0, v_g0, v_b0, @@ -1768,7 +1596,34 @@ struct RGB2Gray __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _MM_DEINTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128 v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128 v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_ps(dst + i, v_gray0); + _mm_storeu_ps(dst + i + 4, v_gray1); + } + } + else if (scn == 4) + { + for ( ; i <= n - 8; i += 8, src += scn * 8) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + __m128 v_a0 = _mm_loadu_ps(src + 24); + __m128 v_a1 = _mm_loadu_ps(src + 28); + + _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); __m128 v_gray0; process(v_r0, v_g0, v_b0, @@ -1966,7 +1821,7 
@@ struct RGB2YCrCb_f __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _MM_DEINTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_y0, v_cr0, v_cb0; process(v_r0, v_g0, v_b0, @@ -1976,7 +1831,7 @@ struct RGB2YCrCb_f process(v_r1, v_g1, v_b1, v_y1, v_cr1, v_cb1); - _MM_INTERLIV_PS(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + _mm_interliv_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); _mm_storeu_ps(dst + i, v_y0); _mm_storeu_ps(dst + i + 4, v_y1); @@ -2331,7 +2186,7 @@ struct RGB2YCrCb_i __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 80)); - _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; process(_mm_unpacklo_epi8(v_r0, v_zero), @@ -2363,7 +2218,7 @@ struct RGB2YCrCb_i __m128i v_cr_1 = _mm_packus_epi16(v_cr0, v_cr1); __m128i v_cb_1 = _mm_packus_epi16(v_cb0, v_cb1); - _MM_INTERLIV_EPI8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1) + _mm_interlive_epi8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1); _mm_storeu_si128((__m128i *)(dst + i), v_y_0); _mm_storeu_si128((__m128i *)(dst + i + 16), v_y_1); @@ -2473,7 +2328,7 @@ struct RGB2YCrCb_i __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); - _MM_DEINTERLIV_EPI16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; process(v_r0, v_g0, v_b0, @@ -2483,7 +2338,7 @@ struct RGB2YCrCb_i process(v_r1, v_g1, v_b1, v_y1, v_cr1, v_cb1); - _MM_INTERLIV_EPI16(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + _mm_interliv_epi16(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); _mm_storeu_si128((__m128i *)(dst + i), v_y0); _mm_storeu_si128((__m128i *)(dst + i + 8), v_y1); @@ -2681,7 +2536,7 @@ struct YCrCb2RGB_f __m128 v_cb0 = _mm_loadu_ps(src + i + 16); __m128 v_cb1 = _mm_loadu_ps(src + i + 20); - _MM_DEINTERLIV_PS(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + _mm_deinterliv_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); __m128 v_r0, v_g0, v_b0; process(v_y0, v_cr0, v_cb0, @@ -2691,7 +2546,7 @@ struct YCrCb2RGB_f process(v_y1, v_cr1, v_cb1, v_r1, v_g1, v_b1); - _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_storeu_ps(dst, v_r0); _mm_storeu_ps(dst + 4, v_r1); @@ -3094,7 +2949,7 @@ struct YCrCb2RGB_i __m128i v_cb0 = _mm_loadu_si128((__m128i const *)(src + i + 64)); __m128i v_cb1 = _mm_loadu_si128((__m128i const *)(src + i + 80)); - _MM_DEINTERLIV_EPI8(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1) + _mm_deinterliv_epi8(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); __m128i v_r_0 = v_zero, v_g_0 = v_zero, v_b_0 = v_zero; process(_mm_unpacklo_epi8(v_y0, v_zero), @@ -3132,7 +2987,7 @@ struct YCrCb2RGB_i std::swap(v_r1, v_b1); } - _MM_INTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_interlive_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_storeu_si128((__m128i *)(dst), v_r0); _mm_storeu_si128((__m128i *)(dst + 16), v_r1); @@ -3355,7 +3210,7 @@ struct RGB2XYZ_f __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _MM_DEINTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_x0, v_y0, v_z0; process(v_r0, v_g0, v_b0, @@ -3365,7 +3220,7 @@ struct RGB2XYZ_f process(v_r1, v_g1, v_b1, v_x1, v_y1, v_z1); - 
_MM_INTERLIV_PS(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1) + _mm_interliv_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); _mm_storeu_ps(dst + i, v_x0); _mm_storeu_ps(dst + i + 4, v_x1); @@ -3781,7 +3636,7 @@ struct XYZ2RGB_f __m128 v_z0 = _mm_loadu_ps(src + i + 16); __m128 v_z1 = _mm_loadu_ps(src + i + 20); - _MM_DEINTERLIV_PS(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1) + _mm_deinterliv_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); __m128 v_r0, v_g0, v_b0; process(v_x0, v_y0, v_z0, @@ -3791,7 +3646,7 @@ struct XYZ2RGB_f process(v_x1, v_y1, v_z1, v_r1, v_g1, v_b1); - _MM_INTERLIV_PS(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1) + _mm_interliv_ps(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1); _mm_storeu_ps(dst, v_b0); _mm_storeu_ps(dst + 4, v_b1); @@ -4361,7 +4216,7 @@ struct HSV2RGB_b v_g1 = _mm_mul_ps(v_g1, v_scale_inv); v_b1 = _mm_mul_ps(v_b1, v_scale_inv); - _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); _mm_store_ps(buf + 4, v_r1); @@ -4412,7 +4267,7 @@ struct HSV2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), @@ -4606,7 +4461,7 @@ struct RGB2HLS_b __m128 v_s0f = _mm_load_ps(buf + 16); __m128 v_s1f = _mm_load_ps(buf + 20); - _MM_DEINTERLIV_PS(v_h0f, v_h1f, v_l0f, v_l1f, v_s0f, v_s1f) + _mm_deinterliv_ps(v_h0f, v_h1f, v_l0f, v_l1f, v_s0f, v_s1f); v_l0f = _mm_mul_ps(v_l0f, v_scale); v_l1f = _mm_mul_ps(v_l1f, v_scale); @@ -4729,7 +4584,7 @@ struct RGB2HLS_b __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); - _MM_INTERLIV_EPI8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1) + _mm_interlive_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); _mm_storeu_si128((__m128i *)(dst + j), v_h0); _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); @@ -4861,7 +4716,7 @@ struct HLS2RGB_b v_g1 = _mm_mul_ps(v_g1, v_scale_inv); v_b1 = _mm_mul_ps(v_b1, v_scale_inv); - _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); _mm_store_ps(buf + 4, v_r1); @@ -4912,7 +4767,7 @@ struct HLS2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), @@ -5360,7 +5215,7 @@ struct Lab2RGB_b v_b0 = _mm_sub_ps(v_b0, v_128); v_b1 = _mm_sub_ps(v_b1, v_128); - _MM_INTERLIV_PS(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); _mm_store_ps(buf + 4, v_r1); @@ -5411,7 +5266,7 @@ struct Lab2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), @@ -5713,7 +5568,7 @@ struct RGB2Luv_b __m128 v_v0f = _mm_load_ps(buf + 16); __m128 v_v1f = _mm_load_ps(buf + 20); - _MM_DEINTERLIV_PS(v_l0f, v_l1f, v_u0f, v_u1f, v_v0f, v_v1f) + _mm_deinterliv_ps(v_l0f, v_l1f, v_u0f, v_u1f, v_v0f, v_v1f); v_l0f = _mm_mul_ps(v_l0f, v_scale); v_l1f = 
_mm_mul_ps(v_l1f, v_scale); @@ -5839,7 +5694,7 @@ struct RGB2Luv_b __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); __m128i v_v1 = _mm_packus_epi16(v_v_0, v_v_1); - _MM_INTERLIV_EPI8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1) + _mm_interlive_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); _mm_storeu_si128((__m128i *)(dst + j), v_l0); _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); @@ -5920,7 +5775,7 @@ struct Luv2RGB_b v_v0 = _mm_sub_ps(_mm_mul_ps(v_v0, v_coeff2), v_140); v_v1 = _mm_sub_ps(_mm_mul_ps(v_v1, v_coeff2), v_140); - _MM_INTERLIV_PS(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1) + _mm_interliv_ps(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); _mm_store_ps(buf, v_l0); _mm_store_ps(buf + 4, v_l1); @@ -5971,7 +5826,7 @@ struct Luv2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _MM_DEINTERLIV_EPI8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1) + _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), From bc394e7516194be88816cbc8cf7603d394f84433 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 36/98] detection of other CPU features --- CMakeLists.txt | 2 + cmake/OpenCVCompilerOptions.cmake | 11 ++- modules/core/include/opencv2/core/cvdef.h | 91 ++++++++++++++++--- modules/core/include/opencv2/core/utility.hpp | 34 +++++-- modules/core/src/system.cpp | 10 ++ modules/ts/src/ts_func.cpp | 36 ++++++++ 6 files changed, 157 insertions(+), 27 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b5648efd..d9bb04081 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -214,12 +214,14 @@ OCV_OPTION(ENABLE_COVERAGE "Enable coverage collection with GCov" OCV_OPTION(ENABLE_OMIT_FRAME_POINTER "Enable -fomit-frame-pointer for GCC" ON IF CMAKE_COMPILER_IS_GNUCXX AND NOT (APPLE AND CMAKE_COMPILER_IS_CLANGCXX) ) OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" ON IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES powerpc.*) ) OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE "Enable SSE instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE2 "Enable SSE2 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" ON IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 831026fb5..3d1155c87 100644 --- 
a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -122,16 +122,19 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_POWERPC) add_extra_compiler_option("-mcpu=G3 -mtune=G5") endif() + if(ENABLE_POPCNT) + add_extra_compiler_option(-mpopcnt) + endif() if(ENABLE_SSE) add_extra_compiler_option(-msse) endif() if(ENABLE_SSE2) add_extra_compiler_option(-msse2) endif() - if (ENABLE_NEON) + if(ENABLE_NEON) add_extra_compiler_option("-mfpu=neon") endif() - if (ENABLE_VFPV3 AND NOT ENABLE_NEON) + if(ENABLE_VFPV3 AND NOT ENABLE_NEON) add_extra_compiler_option("-mfpu=vfpv3") endif() @@ -162,6 +165,10 @@ if(CMAKE_COMPILER_IS_GNUCXX) add_extra_compiler_option(-msse4.2) endif() endif() + + if(ENABLE_FMA3) + add_extra_compiler_option(-mfma) + endif() endif(NOT MINGW) if(X86 OR X86_64) diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index c52cb021c..45146a39c 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -104,18 +104,32 @@ #endif /* CPU features and intrinsics support */ -#define CV_CPU_NONE 0 -#define CV_CPU_MMX 1 -#define CV_CPU_SSE 2 -#define CV_CPU_SSE2 3 -#define CV_CPU_SSE3 4 -#define CV_CPU_SSSE3 5 -#define CV_CPU_SSE4_1 6 -#define CV_CPU_SSE4_2 7 -#define CV_CPU_POPCNT 8 -#define CV_CPU_AVX 10 -#define CV_CPU_AVX2 11 -#define CV_CPU_NEON 12 +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 + +#define CV_CPU_AVX 10 +#define CV_CPU_AVX2 11 +#define CV_CPU_FMA3 12 + +#define CV_CPU_AVX_512F 13 +#define CV_CPU_AVX_512BW 14 +#define CV_CPU_AVX_512CD 15 +#define CV_CPU_AVX_512DQ 16 +#define CV_CPU_AVX_512ER 17 +#define CV_CPU_AVX_512IFMA512 18 +#define CV_CPU_AVX_512PF 19 +#define CV_CPU_AVX_512VBMI 20 +#define CV_CPU_AVX_512VL 21 + +#define CV_CPU_NEON 100 + // when adding to this list remember to update the enum in core/utility.cpp #define CV_HARDWARE_MAX_FEATURE 255 @@ -124,6 +138,7 @@ #if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) # include +# define CV_MMX # define CV_SSE 1 # define CV_SSE2 1 # if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) @@ -142,6 +157,14 @@ # include # define CV_SSE4_2 1 # endif +# if defined __FMA__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_FMA3 1 +# endif +# if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_POPCNT 1 +# endif # if defined __AVX__ || defined __AVX2__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) // MS Visual Studio 2010 (2012?) 
has no macro pre-defined to identify the use of /arch:AVX // See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 @@ -151,11 +174,12 @@ # define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK) # else # define __xgetbv() 0 -# ifdef __AVX2__ -# define CV_AVX2 1 -# endif # endif # endif +# if defined __AVX2__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +# include +# define CV_AVX2 1 +# endif #endif #if (defined WIN32 || defined _WIN32) && defined(_M_ARM) @@ -170,6 +194,9 @@ #endif // __CUDACC__ +#ifndef CV_MMX +# define CV_MMX 0 +#endif #ifndef CV_SSE # define CV_SSE 0 #endif @@ -194,6 +221,40 @@ #ifndef CV_AVX2 # define CV_AVX2 0 #endif +#ifndef CV_POPCNT +#define CV_POPCNT 0 +#endif +#ifndef CV_FMA3 +# define CV_FMA3 0 +#endif +#ifndef CV_AVX_512F +# define CV_AVX_512F 0 +#endif +#ifndef CV_AVX_512BW +# define CV_AVX_512BW 0 +#endif +#ifndef CV_AVX_512CD +# define CV_AVX_512CD 0 +#endif +#ifndef CV_AVX_512DQ +# define CV_AVX_512DQ 0 +#endif +#ifndef CV_AVX_512ER +# define CV_AVX_512ER 0 +#endif +#ifndef CV_AVX_512IFMA512 +# define CV_AVX_512IFMA512 0 +#endif +#ifndef CV_AVX_512PF +# define CV_AVX_512PF 0 +#endif +#ifndef CV_AVX_512VBMI +# define CV_AVX_512VBMI 0 +#endif +#ifndef CV_AVX_512VL +# define CV_AVX_512VL 0 +#endif + #ifndef CV_NEON # define CV_NEON 0 #endif diff --git a/modules/core/include/opencv2/core/utility.hpp b/modules/core/include/opencv2/core/utility.hpp index 88989ef5c..fb8ccd88d 100644 --- a/modules/core/include/opencv2/core/utility.hpp +++ b/modules/core/include/opencv2/core/utility.hpp @@ -281,16 +281,30 @@ CV_EXPORTS_W int64 getCPUTickCount(); remember to keep this list identical to the one in cvdef.h */ enum CpuFeatures { - CPU_MMX = 1, - CPU_SSE = 2, - CPU_SSE2 = 3, - CPU_SSE3 = 4, - CPU_SSSE3 = 5, - CPU_SSE4_1 = 6, - CPU_SSE4_2 = 7, - CPU_POPCNT = 8, - CPU_AVX = 10, - CPU_NEON = 11 + CPU_MMX = 1, + CPU_SSE = 2, + CPU_SSE2 = 3, + CPU_SSE3 = 4, + CPU_SSSE3 = 5, + CPU_SSE4_1 = 6, + CPU_SSE4_2 = 7, + CPU_POPCNT = 8, + + CPU_AVX = 10, + CPU_AVX2 = 11, + CPU_FMA3 = 12, + + CPU_AVX_512F = 13, + CPU_AVX_512BW = 14, + CPU_AVX_512CD = 15, + CPU_AVX_512DQ = 16, + CPU_AVX_512ER = 17, + CPU_AVX_512IFMA512 = 18, + CPU_AVX_512PF = 19, + CPU_AVX_512VBMI = 20, + CPU_AVX_512VL = 21, + + CPU_NEON = 100 }; /** @brief Returns true if the specified feature is supported by the host hardware. 
diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 11bbab3a2..a7a6e98d4 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -263,6 +263,7 @@ struct HWFeatures f.have[CV_CPU_SSE2] = (cpuid_data[3] & (1<<26)) != 0; f.have[CV_CPU_SSE3] = (cpuid_data[2] & (1<<0)) != 0; f.have[CV_CPU_SSSE3] = (cpuid_data[2] & (1<<9)) != 0; + f.have[CV_CPU_FMA3] = (cpuid_data[2] & (1<<12)) != 0; f.have[CV_CPU_SSE4_1] = (cpuid_data[2] & (1<<19)) != 0; f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0; f.have[CV_CPU_POPCNT] = (cpuid_data[2] & (1<<23)) != 0; @@ -301,6 +302,15 @@ struct HWFeatures #endif f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0; + f.have[CV_CPU_AVX_512F] = (cpuid_data[1] & (1<<16)) != 0; + f.have[CV_CPU_AVX_512DQ] = (cpuid_data[1] & (1<<17)) != 0; + f.have[CV_CPU_AVX_512IFMA512] = (cpuid_data[1] & (1<<21)) != 0; + f.have[CV_CPU_AVX_512PF] = (cpuid_data[1] & (1<<26)) != 0; + f.have[CV_CPU_AVX_512ER] = (cpuid_data[1] & (1<<27)) != 0; + f.have[CV_CPU_AVX_512CD] = (cpuid_data[1] & (1<<28)) != 0; + f.have[CV_CPU_AVX_512BW] = (cpuid_data[1] & (1<<30)) != 0; + f.have[CV_CPU_AVX_512VL] = (cpuid_data[1] & (1<<31)) != 0; + f.have[CV_CPU_AVX_512VBMI] = (cpuid_data[2] & (1<<1)) != 0; } return f; diff --git a/modules/ts/src/ts_func.cpp b/modules/ts/src/ts_func.cpp index 53b62e74d..84a9233dd 100644 --- a/modules/ts/src/ts_func.cpp +++ b/modules/ts/src/ts_func.cpp @@ -2998,6 +2998,12 @@ void printVersionInfo(bool useStdOut) std::string cpu_features; +#if CV_MMX + if (checkHardwareSupport(CV_CPU_MMX)) cpu_features += " mmx"; +#endif +#if CV_POPCNT + if (checkHardwareSupport(CV_CPU_POPCNT)) cpu_features += " popcnt"; +#endif #if CV_SSE if (checkHardwareSupport(CV_CPU_SSE)) cpu_features += " sse"; #endif @@ -3022,6 +3028,36 @@ void printVersionInfo(bool useStdOut) #if CV_AVX2 if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2"; #endif +#if CV_FMA3 + if (checkHardwareSupport(CV_CPU_FMA3)) cpu_features += " fma3"; +#endif +#if CV_AVX_512F + if (checkHardwareSupport(CV_CPU_AVX_512F)) cpu_features += " avx-512f"; +#endif +#if CV_AVX_512BW + if (checkHardwareSupport(CV_CPU_AVX_512BW)) cpu_features += " avx-512bw"; +#endif +#if CV_AVX_512CD + if (checkHardwareSupport(CV_CPU_AVX_512CD)) cpu_features += " avx-512cd"; +#endif +#if CV_AVX_512DQ + if (checkHardwareSupport(CV_CPU_AVX_512DQ)) cpu_features += " avx-512dq"; +#endif +#if CV_AVX_512ER + if (checkHardwareSupport(CV_CPU_AVX_512ER)) cpu_features += " avx-512er"; +#endif +#if CV_AVX_512IFMA512 + if (checkHardwareSupport(CV_CPU_AVX_512IFMA512)) cpu_features += " avx-512ifma512"; +#endif +#if CV_AVX_512PF + if (checkHardwareSupport(CV_CPU_AVX_512PF)) cpu_features += " avx-512pf"; +#endif +#if CV_AVX_512VBMI + if (checkHardwareSupport(CV_CPU_AVX_512VBMI)) cpu_features += " avx-512vbmi"; +#endif +#if CV_AVX_512VL + if (checkHardwareSupport(CV_CPU_AVX_512VL)) cpu_features += " avx-512vl"; +#endif #if CV_NEON cpu_features += " neon"; // NEON is currently not checked at runtime #endif From 31827d8dfeacc4390fdae79cfbb1d39cc5b7e8a9 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 37/98] fixed typo --- modules/core/include/opencv2/core/cvdef.h | 2 +- .../core/include/opencv2/core/sse_utils.hpp | 48 +++++++-------- modules/imgproc/src/color.cpp | 60 +++++++++---------- 3 files changed, 55 insertions(+), 55 deletions(-) diff --git a/modules/core/include/opencv2/core/cvdef.h index 45146a39c..5fa45a592 100644 --- 
a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -138,7 +138,7 @@ #if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) # include -# define CV_MMX +# define CV_MMX 1 # define CV_SSE 1 # define CV_SSE2 1 # if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) diff --git a/modules/core/include/opencv2/core/sse_utils.hpp b/modules/core/include/opencv2/core/sse_utils.hpp index 13673f57b..9db8f0ade 100644 --- a/modules/core/include/opencv2/core/sse_utils.hpp +++ b/modules/core/include/opencv2/core/sse_utils.hpp @@ -48,8 +48,8 @@ #if CV_SSE2 -inline void _mm_deinterliv_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, - __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g1); __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g1); @@ -87,8 +87,8 @@ inline void _mm_deinterliv_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5); } -inline void _mm_deinterliv_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, - __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) { __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0); __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0); @@ -136,8 +136,8 @@ inline void _mm_deinterliv_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7); } -inline void _mm_interlive_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, - __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +inline void _mm_interleavee_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { __m128i v_mask = _mm_set1_epi16(0x00ff); @@ -177,8 +177,8 @@ inline void _mm_interlive_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); } -inline void _mm_interlive_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, - __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +inline void _mm_interleavee_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) { __m128i v_mask = _mm_set1_epi16(0x00ff); @@ -228,8 +228,8 @@ inline void _mm_interlive_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, _ v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8)); } -inline void _mm_deinterliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, - __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1); __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1); @@ -260,8 +260,8 @@ inline void _mm_deinterliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5); } -inline void _mm_deinterliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, - __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +inline void 
_mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) { __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0); __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0); @@ -300,8 +300,8 @@ inline void _mm_deinterliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7); } -inline void _mm_interliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, - __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { __m128i v_mask = _mm_set1_epi32(0x0000ffff); @@ -334,8 +334,8 @@ inline void _mm_interliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); } -inline void _mm_interliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, - __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) { __m128i v_mask = _mm_set1_epi32(0x0000ffff); @@ -376,8 +376,8 @@ inline void _mm_interliv_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, _ v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16)); } -inline void _mm_deinterliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, - __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) { __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1); __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1); @@ -401,8 +401,8 @@ inline void _mm_deinterliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); } -inline void _mm_deinterliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, - __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) { __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0); __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0); @@ -432,8 +432,8 @@ inline void _mm_deinterliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m12 v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7); } -inline void _mm_interliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, - __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) { const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); @@ -459,8 +459,8 @@ inline void _mm_interliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); } -inline void _mm_interliv_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, - __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) { const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 4d74e86db..6049f9993 
100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1504,7 +1504,7 @@ struct RGB2Gray __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); - _mm_deinterliv_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_gray0; process(v_r0, v_g0, v_b0, @@ -1531,7 +1531,7 @@ struct RGB2Gray __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 48)); __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 56)); - _mm_deinterliv_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); __m128i v_gray0; process(v_r0, v_g0, v_b0, @@ -1596,7 +1596,7 @@ struct RGB2Gray __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_gray0; process(v_r0, v_g0, v_b0, @@ -1623,7 +1623,7 @@ struct RGB2Gray __m128 v_a0 = _mm_loadu_ps(src + 24); __m128 v_a1 = _mm_loadu_ps(src + 28); - _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); __m128 v_gray0; process(v_r0, v_g0, v_b0, @@ -1821,7 +1821,7 @@ struct RGB2YCrCb_f __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_y0, v_cr0, v_cb0; process(v_r0, v_g0, v_b0, @@ -1831,7 +1831,7 @@ struct RGB2YCrCb_f process(v_r1, v_g1, v_b1, v_y1, v_cr1, v_cb1); - _mm_interliv_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + _mm_interleave_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); _mm_storeu_ps(dst + i, v_y0); _mm_storeu_ps(dst + i + 4, v_y1); @@ -2186,7 +2186,7 @@ struct RGB2YCrCb_i __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 80)); - _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; process(_mm_unpacklo_epi8(v_r0, v_zero), @@ -2218,7 +2218,7 @@ struct RGB2YCrCb_i __m128i v_cr_1 = _mm_packus_epi16(v_cr0, v_cr1); __m128i v_cb_1 = _mm_packus_epi16(v_cb0, v_cb1); - _mm_interlive_epi8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1); + _mm_interleavee_epi8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1); _mm_storeu_si128((__m128i *)(dst + i), v_y_0); _mm_storeu_si128((__m128i *)(dst + i + 16), v_y_1); @@ -2328,7 +2328,7 @@ struct RGB2YCrCb_i __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); - _mm_deinterliv_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; process(v_r0, v_g0, v_b0, @@ -2338,7 +2338,7 @@ struct RGB2YCrCb_i process(v_r1, v_g1, v_b1, v_y1, v_cr1, v_cb1); - _mm_interliv_epi16(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + _mm_interleave_epi16(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); _mm_storeu_si128((__m128i *)(dst + i), v_y0); _mm_storeu_si128((__m128i *)(dst + i + 8), v_y1); @@ -2536,7 +2536,7 @@ struct YCrCb2RGB_f __m128 v_cb0 = _mm_loadu_ps(src + i + 16); __m128 v_cb1 = _mm_loadu_ps(src + i + 20); - _mm_deinterliv_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + _mm_deinterleave_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, 
v_cb1); __m128 v_r0, v_g0, v_b0; process(v_y0, v_cr0, v_cb0, @@ -2546,7 +2546,7 @@ struct YCrCb2RGB_f process(v_y1, v_cr1, v_cb1, v_r1, v_g1, v_b1); - _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_storeu_ps(dst, v_r0); _mm_storeu_ps(dst + 4, v_r1); @@ -2949,7 +2949,7 @@ struct YCrCb2RGB_i __m128i v_cb0 = _mm_loadu_si128((__m128i const *)(src + i + 64)); __m128i v_cb1 = _mm_loadu_si128((__m128i const *)(src + i + 80)); - _mm_deinterliv_epi8(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + _mm_deinterleave_epi8(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); __m128i v_r_0 = v_zero, v_g_0 = v_zero, v_b_0 = v_zero; process(_mm_unpacklo_epi8(v_y0, v_zero), @@ -2987,7 +2987,7 @@ struct YCrCb2RGB_i std::swap(v_r1, v_b1); } - _mm_interlive_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_interleavee_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_storeu_si128((__m128i *)(dst), v_r0); _mm_storeu_si128((__m128i *)(dst + 16), v_r1); @@ -3210,7 +3210,7 @@ struct RGB2XYZ_f __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _mm_deinterliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_x0, v_y0, v_z0; process(v_r0, v_g0, v_b0, @@ -3220,7 +3220,7 @@ struct RGB2XYZ_f process(v_r1, v_g1, v_b1, v_x1, v_y1, v_z1); - _mm_interliv_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); + _mm_interleave_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); _mm_storeu_ps(dst + i, v_x0); _mm_storeu_ps(dst + i + 4, v_x1); @@ -3636,7 +3636,7 @@ struct XYZ2RGB_f __m128 v_z0 = _mm_loadu_ps(src + i + 16); __m128 v_z1 = _mm_loadu_ps(src + i + 20); - _mm_deinterliv_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); + _mm_deinterleave_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); __m128 v_r0, v_g0, v_b0; process(v_x0, v_y0, v_z0, @@ -3646,7 +3646,7 @@ struct XYZ2RGB_f process(v_x1, v_y1, v_z1, v_r1, v_g1, v_b1); - _mm_interliv_ps(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1); + _mm_interleave_ps(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1); _mm_storeu_ps(dst, v_b0); _mm_storeu_ps(dst + 4, v_b1); @@ -4216,7 +4216,7 @@ struct HSV2RGB_b v_g1 = _mm_mul_ps(v_g1, v_scale_inv); v_b1 = _mm_mul_ps(v_b1, v_scale_inv); - _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); _mm_store_ps(buf + 4, v_r1); @@ -4267,7 +4267,7 @@ struct HSV2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), @@ -4461,7 +4461,7 @@ struct RGB2HLS_b __m128 v_s0f = _mm_load_ps(buf + 16); __m128 v_s1f = _mm_load_ps(buf + 20); - _mm_deinterliv_ps(v_h0f, v_h1f, v_l0f, v_l1f, v_s0f, v_s1f); + _mm_deinterleave_ps(v_h0f, v_h1f, v_l0f, v_l1f, v_s0f, v_s1f); v_l0f = _mm_mul_ps(v_l0f, v_scale); v_l1f = _mm_mul_ps(v_l1f, v_scale); @@ -4584,7 +4584,7 @@ struct RGB2HLS_b __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); - _mm_interlive_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); + _mm_interleavee_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); _mm_storeu_si128((__m128i *)(dst + j), v_h0); _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); @@ -4716,7 +4716,7 @@ struct HLS2RGB_b v_g1 = _mm_mul_ps(v_g1, v_scale_inv); v_b1 = _mm_mul_ps(v_b1, v_scale_inv); - _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + 
_mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); _mm_store_ps(buf + 4, v_r1); @@ -4767,7 +4767,7 @@ struct HLS2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), @@ -5215,7 +5215,7 @@ struct Lab2RGB_b v_b0 = _mm_sub_ps(v_b0, v_128); v_b1 = _mm_sub_ps(v_b1, v_128); - _mm_interliv_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); _mm_store_ps(buf + 4, v_r1); @@ -5266,7 +5266,7 @@ struct Lab2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), @@ -5568,7 +5568,7 @@ struct RGB2Luv_b __m128 v_v0f = _mm_load_ps(buf + 16); __m128 v_v1f = _mm_load_ps(buf + 20); - _mm_deinterliv_ps(v_l0f, v_l1f, v_u0f, v_u1f, v_v0f, v_v1f); + _mm_deinterleave_ps(v_l0f, v_l1f, v_u0f, v_u1f, v_v0f, v_v1f); v_l0f = _mm_mul_ps(v_l0f, v_scale); v_l1f = _mm_mul_ps(v_l1f, v_scale); @@ -5694,7 +5694,7 @@ struct RGB2Luv_b __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); __m128i v_v1 = _mm_packus_epi16(v_v_0, v_v_1); - _mm_interlive_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); + _mm_interleavee_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); _mm_storeu_si128((__m128i *)(dst + j), v_l0); _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); @@ -5775,7 +5775,7 @@ struct Luv2RGB_b v_v0 = _mm_sub_ps(_mm_mul_ps(v_v0, v_coeff2), v_140); v_v1 = _mm_sub_ps(_mm_mul_ps(v_v1, v_coeff2), v_140); - _mm_interliv_ps(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); + _mm_interleave_ps(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); _mm_store_ps(buf, v_l0); _mm_store_ps(buf + 4, v_l1); @@ -5826,7 +5826,7 @@ struct Luv2RGB_b __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterliv_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); process(_mm_unpacklo_epi8(v_r0, v_zero), _mm_unpacklo_epi8(v_g0, v_zero), From fc0869735d742ecd6a83bc8e3989734b6271fda7 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 38/98] used popcnt --- CMakeLists.txt | 2 +- cmake/OpenCVCompilerOptions.cmake | 8 +- modules/core/include/opencv2/core/base.hpp | 1 + modules/core/include/opencv2/core/cvdef.h | 16 +- .../core/include/opencv2/core/sse_utils.hpp | 77 +++++- modules/core/include/opencv2/core/utility.hpp | 1 + modules/core/src/arithm.cpp | 1 + modules/core/src/convert.cpp | 233 ++++++++++++++++-- modules/core/src/copy.cpp | 1 + modules/core/src/mathfuncs.cpp | 1 + modules/core/src/matmul.cpp | 1 + modules/core/src/stat.cpp | 10 +- modules/core/src/system.cpp | 1 + modules/core/src/umatrix.cpp | 3 +- modules/imgproc/src/accum.cpp | 1 + modules/imgproc/src/canny.cpp | 1 + modules/imgproc/src/clahe.cpp | 1 + modules/imgproc/src/color.cpp | 1 + modules/imgproc/src/corner.cpp | 1 + modules/imgproc/src/demosaicing.cpp | 1 + modules/imgproc/src/imgwarp.cpp | 1 + modules/imgproc/src/pyramids.cpp | 1 + modules/imgproc/src/smooth.cpp | 1 + modules/imgproc/src/sumpixels.cpp | 1 + 
modules/ts/src/ts_func.cpp | 6 +- 25 files changed, 327 insertions(+), 45 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d9bb04081..da0b42cb1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -221,9 +221,9 @@ OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" ON IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF ) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 3d1155c87..2f5f13d7b 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -145,6 +145,10 @@ if(CMAKE_COMPILER_IS_GNUCXX) endif() if(ENABLE_AVX2) add_extra_compiler_option(-mavx2) + + if(ENABLE_FMA3) + add_extra_compiler_option(-mfma) + endif() endif() # GCC depresses SSEx instructions when -mavx is used. Instead, it generates new AVX instructions or AVX equivalence for all SSEx instructions when needed. @@ -165,10 +169,6 @@ if(CMAKE_COMPILER_IS_GNUCXX) add_extra_compiler_option(-msse4.2) endif() endif() - - if(ENABLE_FMA3) - add_extra_compiler_option(-mfma) - endif() endif(NOT MINGW) if(X86 OR X86_64) diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp index c60eedddc..f2acaa3fb 100644 --- a/modules/core/include/opencv2/core/base.hpp +++ b/modules/core/include/opencv2/core/base.hpp @@ -13,6 +13,7 @@ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 5fa45a592..ded58a18c 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -13,6 +13,7 @@ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -157,15 +158,11 @@ # include # define CV_SSE4_2 1 # endif -# if defined __FMA__ || (defined _MSC_VER && _MSC_VER >= 1500) -# include -# define CV_FMA3 1 -# endif # if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500) # include # define CV_POPCNT 1 # endif -# if defined __AVX__ || defined __AVX2__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) // MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX // See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 # include @@ -179,6 +176,9 @@ # if defined __AVX2__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) # include # define CV_AVX2 1 +# if defined __FMA__ +# define CV_FMA3 1 +# endif # endif #endif @@ -194,6 +194,9 @@ #endif // __CUDACC__ +#ifndef CV_POPCNT +#define CV_POPCNT 0 +#endif #ifndef CV_MMX # define CV_MMX 0 #endif @@ -221,9 +224,6 @@ #ifndef CV_AVX2 # define CV_AVX2 0 #endif -#ifndef CV_POPCNT -#define CV_POPCNT 0 -#endif #ifndef CV_FMA3 # define CV_FMA3 0 #endif diff --git a/modules/core/include/opencv2/core/sse_utils.hpp b/modules/core/include/opencv2/core/sse_utils.hpp index 9db8f0ade..0667ae921 100644 --- a/modules/core/include/opencv2/core/sse_utils.hpp +++ b/modules/core/include/opencv2/core/sse_utils.hpp @@ -10,7 +10,7 @@ // License Agreement // For Open Source Computer Vision Library // -// Copyright (C) 2015, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -48,6 +48,34 @@ #if CV_SSE2 +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g0); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g0); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_g1); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_g1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk3); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk3); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk3); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk3); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk2); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk2); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk3); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk3); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk2); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk2); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk3); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk3); +} + inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { @@ -228,6 +256,29 @@ inline void _mm_interleavee_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8)); } +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_g1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk3); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk3); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk3); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk3); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk2); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk2); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk3); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk3); +} + inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { @@ -300,6 +351,8 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7); } +#if CV_SSE4_1 + inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { 
@@ -376,6 +429,26 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16)); } +#endif // CV_SSE4_1 + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g0); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk3); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk2); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk2); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk3); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk3); +} + inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) { @@ -492,6 +565,6 @@ inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m12 v_a1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_hi); } -#endif +#endif // CV_SSE2 #endif //__OPENCV_CORE_SSE_UTILS_HPP__ diff --git a/modules/core/include/opencv2/core/utility.hpp b/modules/core/include/opencv2/core/utility.hpp index fb8ccd88d..f89560a80 100644 --- a/modules/core/include/opencv2/core/utility.hpp +++ b/modules/core/include/opencv2/core/utility.hpp @@ -13,6 +13,7 @@ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index a9bf3d7e7..07678f0c6 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index ef8edee6a..5c792f379 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
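/* Illustrative sketch (not part of the patches): how the two-channel
 * _mm_deinterleave_ps helper added to sse_utils.hpp above is meant to be
 * used. Four __m128 registers holding 16 consecutive interleaved floats
 * (x0 y0 x1 y1 ... x7 y7) come back as two planar x registers followed by
 * two planar y registers. The function and buffer names below are made up
 * for the example; only _mm_deinterleave_ps comes from the patch (declared
 * in opencv2/core/sse_utils.hpp).
 */
#include <xmmintrin.h>  // __m128, _mm_loadu_ps, _mm_storeu_ps

static void split_xy_pairs_demo(const float* xy, float* x, float* y)
{
    __m128 v0 = _mm_loadu_ps(xy);        // x0 y0 x1 y1
    __m128 v1 = _mm_loadu_ps(xy + 4);    // x2 y2 x3 y3
    __m128 v2 = _mm_loadu_ps(xy + 8);    // x4 y4 x5 y5
    __m128 v3 = _mm_loadu_ps(xy + 12);   // x6 y6 x7 y7

    _mm_deinterleave_ps(v0, v1, v2, v3); // now v0,v1 = x0..x7 and v2,v3 = y0..y7

    _mm_storeu_ps(x,     v0);
    _mm_storeu_ps(x + 4, v1);
    _mm_storeu_ps(y,     v2);
    _mm_storeu_ps(y + 4, v3);
}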
// // Redistribution and use in source and binary forms, with or without modification, @@ -62,8 +63,11 @@ template struct VSplit4; #define SPLIT2_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \ template<> \ - struct name{ \ - void operator()(const data_type* src, data_type* dst0, data_type* dst1){ \ + struct name \ + { \ + void operator()(const data_type* src, data_type* dst0, \ + data_type* dst1) const \ + { \ reg_type r = load_func(src); \ store_func(dst0, r.val[0]); \ store_func(dst1, r.val[1]); \ @@ -72,9 +76,11 @@ template struct VSplit4; #define SPLIT3_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \ template<> \ - struct name{ \ + struct name \ + { \ void operator()(const data_type* src, data_type* dst0, data_type* dst1, \ - data_type* dst2){ \ + data_type* dst2) const \ + { \ reg_type r = load_func(src); \ store_func(dst0, r.val[0]); \ store_func(dst1, r.val[1]); \ @@ -84,9 +90,11 @@ template struct VSplit4; #define SPLIT4_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \ template<> \ - struct name{ \ + struct name \ + { \ void operator()(const data_type* src, data_type* dst0, data_type* dst1, \ - data_type* dst2, data_type* dst3){ \ + data_type* dst2, data_type* dst3) const \ + { \ reg_type r = load_func(src); \ store_func(dst0, r.val[0]); \ store_func(dst1, r.val[1]); \ @@ -96,28 +104,174 @@ template struct VSplit4; } SPLIT2_KERNEL_TEMPLATE(VSplit2, uchar , uint8x16x2_t, vld2q_u8 , vst1q_u8 ); -SPLIT2_KERNEL_TEMPLATE(VSplit2, schar , int8x16x2_t, vld2q_s8 , vst1q_s8 ); SPLIT2_KERNEL_TEMPLATE(VSplit2, ushort, uint16x8x2_t, vld2q_u16, vst1q_u16); -SPLIT2_KERNEL_TEMPLATE(VSplit2, short , int16x8x2_t, vld2q_s16, vst1q_s16); SPLIT2_KERNEL_TEMPLATE(VSplit2, int , int32x4x2_t, vld2q_s32, vst1q_s32); -SPLIT2_KERNEL_TEMPLATE(VSplit2, float , float32x4x2_t, vld2q_f32, vst1q_f32); SPLIT2_KERNEL_TEMPLATE(VSplit2, int64 , int64x1x2_t, vld2_s64 , vst1_s64 ); SPLIT3_KERNEL_TEMPLATE(VSplit3, uchar , uint8x16x3_t, vld3q_u8 , vst1q_u8 ); -SPLIT3_KERNEL_TEMPLATE(VSplit3, schar , int8x16x3_t, vld3q_s8 , vst1q_s8 ); SPLIT3_KERNEL_TEMPLATE(VSplit3, ushort, uint16x8x3_t, vld3q_u16, vst1q_u16); -SPLIT3_KERNEL_TEMPLATE(VSplit3, short , int16x8x3_t, vld3q_s16, vst1q_s16); SPLIT3_KERNEL_TEMPLATE(VSplit3, int , int32x4x3_t, vld3q_s32, vst1q_s32); -SPLIT3_KERNEL_TEMPLATE(VSplit3, float , float32x4x3_t, vld3q_f32, vst1q_f32); SPLIT3_KERNEL_TEMPLATE(VSplit3, int64 , int64x1x3_t, vld3_s64 , vst1_s64 ); SPLIT4_KERNEL_TEMPLATE(VSplit4, uchar , uint8x16x4_t, vld4q_u8 , vst1q_u8 ); -SPLIT4_KERNEL_TEMPLATE(VSplit4, schar , int8x16x4_t, vld4q_s8 , vst1q_s8 ); SPLIT4_KERNEL_TEMPLATE(VSplit4, ushort, uint16x8x4_t, vld4q_u16, vst1q_u16); -SPLIT4_KERNEL_TEMPLATE(VSplit4, short , int16x8x4_t, vld4q_s16, vst1q_s16); SPLIT4_KERNEL_TEMPLATE(VSplit4, int , int32x4x4_t, vld4q_s32, vst1q_s32); -SPLIT4_KERNEL_TEMPLATE(VSplit4, float , float32x4x4_t, vld4q_f32, vst1q_f32); SPLIT4_KERNEL_TEMPLATE(VSplit4, int64 , int64x1x4_t, vld4_s64 , vst1_s64 ); + +#elif CV_SSE2 + +template +struct VSplit2 +{ + VSplit2() : support(false) { } + void operator()(const T *, T *, T *) const { } + + bool support; +}; + +template +struct VSplit3 +{ + VSplit3() : support(false) { } + void operator()(const T *, T *, T *, T *) const { } + + bool support; +}; + +template +struct VSplit4 +{ + VSplit4() : support(false) { } + void operator()(const T *, T *, T *, T *, T *) const { } + + bool support; +}; + +#define SPLIT2_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_deinterleave, flavor) \ 
+template <> \ +struct VSplit2 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VSplit2() \ + { \ + support = true; \ + } \ + \ + void operator()(const data_type * src, \ + data_type * dst0, data_type * dst1) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((cast_type const *)(src)); \ + reg_type v_src1 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 2)); \ + reg_type v_src3 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 3)); \ + \ + _mm_deinterleave(v_src0, v_src1, v_src2, v_src3); \ + \ + _mm_storeu_##flavor((cast_type *)(dst0), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst0 + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst1), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst1 + ELEMS_IN_VEC), v_src3); \ + } \ + \ + bool support; \ +} + +#define SPLIT3_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_deinterleave, flavor) \ +template <> \ +struct VSplit3 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VSplit3() \ + { \ + support = true; \ + } \ + \ + void operator()(const data_type * src, \ + data_type * dst0, data_type * dst1, data_type * dst2) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((cast_type const *)(src)); \ + reg_type v_src1 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 2)); \ + reg_type v_src3 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 3)); \ + reg_type v_src4 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 4)); \ + reg_type v_src5 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 5)); \ + \ + _mm_deinterleave(v_src0, v_src1, v_src2, \ + v_src3, v_src4, v_src5); \ + \ + _mm_storeu_##flavor((cast_type *)(dst0), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst0 + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst1), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst1 + ELEMS_IN_VEC), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst2), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst2 + ELEMS_IN_VEC), v_src5); \ + } \ + \ + bool support; \ +} + +#define SPLIT4_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_deinterleave, flavor) \ +template <> \ +struct VSplit4 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VSplit4() \ + { \ + support = true; \ + } \ + \ + void operator()(const data_type * src, data_type * dst0, data_type * dst1, \ + data_type * dst2, data_type * dst3) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((cast_type const *)(src)); \ + reg_type v_src1 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 2)); \ + reg_type v_src3 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 3)); \ + reg_type v_src4 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 4)); \ + reg_type v_src5 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 5)); \ + reg_type v_src6 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 6)); \ + reg_type v_src7 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 7)); \ + \ + _mm_deinterleave(v_src0, v_src1, v_src2, v_src3, \ + v_src4, v_src5, v_src6, v_src7); \ + \ + _mm_storeu_##flavor((cast_type *)(dst0), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst0 + ELEMS_IN_VEC), v_src1); \ + 
_mm_storeu_##flavor((cast_type *)(dst1), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst1 + ELEMS_IN_VEC), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst2), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst2 + ELEMS_IN_VEC), v_src5); \ + _mm_storeu_##flavor((cast_type *)(dst3), v_src6); \ + _mm_storeu_##flavor((cast_type *)(dst3 + ELEMS_IN_VEC), v_src7); \ + } \ + \ + bool support; \ +} + +SPLIT2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +SPLIT2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +SPLIT2_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + +SPLIT3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +SPLIT3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +SPLIT3_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + +SPLIT4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +SPLIT4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +SPLIT4_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + #endif template static void @@ -154,6 +308,19 @@ split_( const T* src, T** dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vsplit(src + j, dst0 + i, dst1 + i); } +#elif CV_SSE2 + if (cn == 2) + { + int inc_i = 32/sizeof(T); + int inc_j = 2 * inc_i; + + VSplit2 vsplit; + if (vsplit.support) + { + for( ; i <= len - inc_i; i += inc_i, j += inc_j) + vsplit(src + j, dst0 + i, dst1 + i); + } + } #endif for( ; i < len; i++, j += cn ) { @@ -176,6 +343,20 @@ split_( const T* src, T** dst, int len, int cn ) for( ; i <= len - inc_i; i += inc_i, j += inc_j) vsplit(src + j, dst0 + i, dst1 + i, dst2 + i); } +#elif CV_SSE2 + if (cn == 3) + { + int inc_i = 32/sizeof(T); + int inc_j = 3 * inc_i; + + VSplit3 vsplit; + + if (vsplit.support) + { + for( ; i <= len - inc_i; i += inc_i, j += inc_j) + vsplit(src + j, dst0 + i, dst1 + i, dst2 + i); + } + } #endif for( ; i < len; i++, j += cn ) { @@ -199,6 +380,19 @@ split_( const T* src, T** dst, int len, int cn ) for( ; i <= len - inc_i; i += inc_i, j += inc_j) vsplit(src + j, dst0 + i, dst1 + i, dst2 + i, dst3 + i); } +#elif CV_SSE2 + if (cn == 4) + { + int inc_i = 32/sizeof(T); + int inc_j = 4 * inc_i; + + VSplit4 vsplit; + if (vsplit.support) + { + for( ; i <= len - inc_i; i += inc_i, j += inc_j) + vsplit(src + j, dst0 + i, dst1 + i, dst2 + i, dst3 + i); + } + } #endif for( ; i < len; i++, j += cn ) { @@ -265,27 +459,18 @@ template struct VMerge4; } MERGE2_KERNEL_TEMPLATE(VMerge2, uchar , uint8x16x2_t, vld1q_u8 , vst2q_u8 ); -MERGE2_KERNEL_TEMPLATE(VMerge2, schar , int8x16x2_t, vld1q_s8 , vst2q_s8 ); MERGE2_KERNEL_TEMPLATE(VMerge2, ushort, uint16x8x2_t, vld1q_u16, vst2q_u16); -MERGE2_KERNEL_TEMPLATE(VMerge2, short , int16x8x2_t, vld1q_s16, vst2q_s16); MERGE2_KERNEL_TEMPLATE(VMerge2, int , int32x4x2_t, vld1q_s32, vst2q_s32); -MERGE2_KERNEL_TEMPLATE(VMerge2, float , float32x4x2_t, vld1q_f32, vst2q_f32); MERGE2_KERNEL_TEMPLATE(VMerge2, int64 , int64x1x2_t, vld1_s64 , vst2_s64 ); MERGE3_KERNEL_TEMPLATE(VMerge3, uchar , uint8x16x3_t, vld1q_u8 , vst3q_u8 ); -MERGE3_KERNEL_TEMPLATE(VMerge3, schar , int8x16x3_t, vld1q_s8 , vst3q_s8 ); MERGE3_KERNEL_TEMPLATE(VMerge3, ushort, uint16x8x3_t, vld1q_u16, vst3q_u16); -MERGE3_KERNEL_TEMPLATE(VMerge3, short , int16x8x3_t, vld1q_s16, vst3q_s16); MERGE3_KERNEL_TEMPLATE(VMerge3, int , int32x4x3_t, vld1q_s32, vst3q_s32); -MERGE3_KERNEL_TEMPLATE(VMerge3, float , float32x4x3_t, vld1q_f32, vst3q_f32); 
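/* Hedged usage sketch (not part of the patch): with the VSplit2/3/4 kernels
 * and the CV_SSE2 branches added to split_() above, an ordinary cv::split()
 * call on an 8-bit, 16-bit or 32-bit multi-channel Mat goes through the SSE2
 * specializations when the build enables SSE2; other element types fall
 * through to the scalar loop unchanged. The image size and pixel values are
 * illustrative only.
 */
#include <vector>
#include <opencv2/core.hpp>

static void split_demo()
{
    cv::Mat bgr(480, 640, CV_8UC3, cv::Scalar(10, 20, 30));
    std::vector<cv::Mat> planes;
    cv::split(bgr, planes);   // planes[0..2]: single-channel CV_8U Mats,
                              // filled via VSplit3<uchar> on SSE2 builds
}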
MERGE3_KERNEL_TEMPLATE(VMerge3, int64 , int64x1x3_t, vld1_s64 , vst3_s64 ); MERGE4_KERNEL_TEMPLATE(VMerge4, uchar , uint8x16x4_t, vld1q_u8 , vst4q_u8 ); -MERGE4_KERNEL_TEMPLATE(VMerge4, schar , int8x16x4_t, vld1q_s8 , vst4q_s8 ); MERGE4_KERNEL_TEMPLATE(VMerge4, ushort, uint16x8x4_t, vld1q_u16, vst4q_u16); -MERGE4_KERNEL_TEMPLATE(VMerge4, short , int16x8x4_t, vld1q_s16, vst4q_s16); MERGE4_KERNEL_TEMPLATE(VMerge4, int , int32x4x4_t, vld1q_s32, vst4q_s32); -MERGE4_KERNEL_TEMPLATE(VMerge4, float , float32x4x4_t, vld1q_f32, vst4q_f32); MERGE4_KERNEL_TEMPLATE(VMerge4, int64 , int64x1x4_t, vld1_s64 , vst4_s64 ); #endif diff --git a/modules/core/src/copy.cpp b/modules/core/src/copy.cpp index 301ea80a1..fe8ffd771 100644 --- a/modules/core/src/copy.cpp +++ b/modules/core/src/copy.cpp @@ -11,6 +11,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index d3d09c338..7b27dc350 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index b2f36b329..6c8bad244 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index 530e3205b..4eb17d6a1 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -404,13 +405,20 @@ static const uchar * initPopcountTable() { // we compute inverse popcount table, // since we pass (img[x] == 0) mask as index in the table. 
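/* Illustrative note on the POPCNT change just below (not part of the patch):
 * _mm_popcnt_u32(j) returns the number of set bits in j, so for a byte value
 * 8 - _mm_popcnt_u32(j) is its number of zero bits -- exactly what the scalar
 * fallback accumulates one mask at a time. A small self-contained check of
 * that identity (the function name is made up for the example):
 */
#include <nmmintrin.h>  // _mm_popcnt_u32 (POPCNT)
#include <cassert>

static void check_inverse_popcount_identity()
{
    for (unsigned int j = 0; j < 256u; j++)
    {
        int val = 0;
        for (int mask = 1; mask < 256; mask += mask)
            val += (j & mask) == 0;                    // count zero bits, scalar
        assert(val == 8 - (int)_mm_popcnt_u32(j));     // POPCNT shortcut
    }
}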
- for( int j = 0; j < 256; j++ ) + unsigned int j = 0u; +#if CV_POPCNT + if (checkHardwareSupport(CV_CPU_POPCNT)) + for( ; j < 256u; j++ ) + tab[j] = (uchar)(8 - _mm_popcnt_u32(j)); +#else + for( ; j < 256u; j++ ) { int val = 0; for( int mask = 1; mask < 256; mask += mask ) val += (j & mask) == 0; tab[j] = (uchar)val; } +#endif initialized = true; } diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index a7a6e98d4..6744b3311 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp index 443111f48..ffc20777b 100644 --- a/modules/core/src/umatrix.cpp +++ b/modules/core/src/umatrix.cpp @@ -10,8 +10,7 @@ // License Agreement // For Open Source Computer Vision Library // -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/accum.cpp b/modules/imgproc/src/accum.cpp index d2e8b39aa..23dc4576b 100644 --- a/modules/imgproc/src/accum.cpp +++ b/modules/imgproc/src/accum.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. / // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/canny.cpp b/modules/imgproc/src/canny.cpp index 1311d5abb..233218b3e 100644 --- a/modules/imgproc/src/canny.cpp +++ b/modules/imgproc/src/canny.cpp @@ -11,6 +11,7 @@ // For Open Source Computer Vision Library // // Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/clahe.cpp b/modules/imgproc/src/clahe.cpp index 18a91d954..06fc73153 100644 --- a/modules/imgproc/src/clahe.cpp +++ b/modules/imgproc/src/clahe.cpp @@ -11,6 +11,7 @@ // For Open Source Computer Vision Library // // Copyright (C) 2013, NVIDIA Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 6049f9993..574ff1627 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp index 85f2063b2..0d04f6f7e 100644 --- a/modules/imgproc/src/corner.cpp +++ b/modules/imgproc/src/corner.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/demosaicing.cpp b/modules/imgproc/src/demosaicing.cpp index 0b7afb8ea..cec450dc7 100644 --- a/modules/imgproc/src/demosaicing.cpp +++ b/modules/imgproc/src/demosaicing.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 1fa4557ca..d8b538591 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index 5425de11c..f48937218 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index b7c300403..59acdd71b 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/sumpixels.cpp b/modules/imgproc/src/sumpixels.cpp index cdef88f6c..16c7c7ef2 100755 --- a/modules/imgproc/src/sumpixels.cpp +++ b/modules/imgproc/src/sumpixels.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/ts/src/ts_func.cpp b/modules/ts/src/ts_func.cpp index 84a9233dd..b8de763f1 100644 --- a/modules/ts/src/ts_func.cpp +++ b/modules/ts/src/ts_func.cpp @@ -2998,12 +2998,12 @@ void printVersionInfo(bool useStdOut) std::string cpu_features; -#if CV_MMX - if (checkHardwareSupport(CV_CPU_MMX)) cpu_features += " mmx"; -#endif #if CV_POPCNT if (checkHardwareSupport(CV_CPU_POPCNT)) cpu_features += " popcnt"; #endif +#if CV_MMX + if (checkHardwareSupport(CV_CPU_MMX)) cpu_features += " mmx"; +#endif #if CV_SSE if (checkHardwareSupport(CV_CPU_SSE)) cpu_features += " sse"; #endif From d87457a025adb71c6e922018417844cc731434c9 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 39/98] split/merge --- .../core/include/opencv2/core/sse_utils.hpp | 83 +++++++- modules/core/src/convert.cpp | 189 ++++++++++++++++++ modules/imgproc/src/color.cpp | 8 +- 3 files changed, 272 insertions(+), 8 deletions(-) diff --git a/modules/core/include/opencv2/core/sse_utils.hpp b/modules/core/include/opencv2/core/sse_utils.hpp index 0667ae921..7af6d84f2 100644 --- a/modules/core/include/opencv2/core/sse_utils.hpp +++ b/modules/core/include/opencv2/core/sse_utils.hpp @@ -164,8 +164,38 @@ inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0 v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7); } -inline void _mm_interleavee_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, - __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + + v_r0 = 
_mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { __m128i v_mask = _mm_set1_epi16(0x00ff); @@ -205,8 +235,8 @@ inline void _mm_interleavee_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); } -inline void _mm_interleavee_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, - __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) { __m128i v_mask = _mm_set1_epi16(0x00ff); @@ -353,6 +383,31 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g #if CV_SSE4_1 +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); +} + inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) { @@ -505,6 +560,26 @@ inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7); } +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) +{ + const int mask_lo = 
_MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_g0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_g1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); +} + inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) { diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 5c792f379..5cb2a15fd 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -472,6 +472,162 @@ MERGE4_KERNEL_TEMPLATE(VMerge4, uchar , uint8x16x4_t, vld1q_u8 , vst4q_u8 ); MERGE4_KERNEL_TEMPLATE(VMerge4, ushort, uint16x8x4_t, vld1q_u16, vst4q_u16); MERGE4_KERNEL_TEMPLATE(VMerge4, int , int32x4x4_t, vld1q_s32, vst4q_s32); MERGE4_KERNEL_TEMPLATE(VMerge4, int64 , int64x1x4_t, vld1_s64 , vst4_s64 ); + +#elif CV_SSE2 + +template +struct VMerge2 +{ + VMerge2() : support(false) { } + void operator()(const T *, const T *, T *) const { } + + bool support; +}; + +template +struct VMerge3 +{ + VMerge3() : support(false) { } + void operator()(const T *, const T *, const T *, T *) const { } + + bool support; +}; + +template +struct VMerge4 +{ + VMerge4() : support(false) { } + void operator()(const T *, const T *, const T *, const T *, T *) const { } + + bool support; +}; + +#define MERGE2_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor) \ +template <> \ +struct VMerge2 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VMerge2() \ + { \ + support = true; \ + } \ + \ + void operator()(const data_type * src0, const data_type * src1, \ + data_type * dst) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((const cast_type *)(src0)); \ + reg_type v_src1 = _mm_loadu_##flavor((const cast_type *)(src0 + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((const cast_type *)(src1)); \ + reg_type v_src3 = _mm_loadu_##flavor((const cast_type *)(src1 + ELEMS_IN_VEC)); \ + \ + _mm_interleave(v_src0, v_src1, v_src2, v_src3); \ + \ + _mm_storeu_##flavor((cast_type *)(dst), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 2), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 3), v_src3); \ + } \ + \ + bool support; \ +} + +#define MERGE3_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor) \ +template <> \ +struct VMerge3 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VMerge3() \ + { \ + support = true; \ + } \ + \ + void operator()(const data_type * src0, const data_type * src1, const data_type * src2,\ + data_type * dst) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((const cast_type *)(src0)); \ + reg_type v_src1 = _mm_loadu_##flavor((const cast_type *)(src0 + ELEMS_IN_VEC)); \ + reg_type v_src2 = 
_mm_loadu_##flavor((const cast_type *)(src1)); \ + reg_type v_src3 = _mm_loadu_##flavor((const cast_type *)(src1 + ELEMS_IN_VEC)); \ + reg_type v_src4 = _mm_loadu_##flavor((const cast_type *)(src2)); \ + reg_type v_src5 = _mm_loadu_##flavor((const cast_type *)(src2 + ELEMS_IN_VEC)); \ + \ + _mm_interleave(v_src0, v_src1, v_src2, \ + v_src3, v_src4, v_src5); \ + \ + _mm_storeu_##flavor((cast_type *)(dst), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 2), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 3), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 4), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 5), v_src5); \ + } \ + \ + bool support; \ +} + +#define MERGE4_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor) \ +template <> \ +struct VMerge4 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VMerge4() \ + { \ + support = true; \ + } \ + \ + void operator()(const data_type * src0, const data_type * src1, \ + const data_type * src2, const data_type * src3, \ + data_type * dst) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((const cast_type *)(src0)); \ + reg_type v_src1 = _mm_loadu_##flavor((const cast_type *)(src0 + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((const cast_type *)(src1)); \ + reg_type v_src3 = _mm_loadu_##flavor((const cast_type *)(src1 + ELEMS_IN_VEC)); \ + reg_type v_src4 = _mm_loadu_##flavor((const cast_type *)(src2)); \ + reg_type v_src5 = _mm_loadu_##flavor((const cast_type *)(src2 + ELEMS_IN_VEC)); \ + reg_type v_src6 = _mm_loadu_##flavor((const cast_type *)(src3)); \ + reg_type v_src7 = _mm_loadu_##flavor((const cast_type *)(src3 + ELEMS_IN_VEC)); \ + \ + _mm_interleave(v_src0, v_src1, v_src2, v_src3, \ + v_src4, v_src5, v_src6, v_src7); \ + \ + _mm_storeu_##flavor((cast_type *)(dst), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 2), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 3), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 4), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 5), v_src5); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 6), v_src6); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 7), v_src7); \ + } \ + \ + bool support; \ +} + +MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + +MERGE3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); +MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); +MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); + +MERGE4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); +MERGE4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); +MERGE4_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); + #endif template static void @@ -499,6 +655,17 @@ merge_( const T** src, T* dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vmerge(src0 + i, src1 + i, dst + j); } +#elif CV_SSE2 + if(cn == 2) + { + int inc_i = 32/sizeof(T); + int inc_j = 2 * inc_i; + + VMerge2 vmerge; + if (vmerge.support) + for( ; i < len - inc_i; i += inc_i, j += 
inc_j) + vmerge(src0 + i, src1 + i, dst + j); + } #endif for( ; i < len; i++, j += cn ) { @@ -520,6 +687,17 @@ merge_( const T** src, T* dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vmerge(src0 + i, src1 + i, src2 + i, dst + j); } +#elif CV_SSE2 + if(cn == 3) + { + int inc_i = 32/sizeof(T); + int inc_j = 3 * inc_i; + + VMerge3 vmerge; + if (vmerge.support) + for( ; i < len - inc_i; i += inc_i, j += inc_j) + vmerge(src0 + i, src1 + i, src2 + i, dst + j); + } #endif for( ; i < len; i++, j += cn ) { @@ -542,6 +720,17 @@ merge_( const T** src, T* dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vmerge(src0 + i, src1 + i, src2 + i, src3 + i, dst + j); } +#elif CV_SSE2 + if(cn == 4) + { + int inc_i = 32/sizeof(T); + int inc_j = 4 * inc_i; + + VMerge4 vmerge; + if (vmerge.support) + for( ; i < len - inc_i; i += inc_i, j += inc_j) + vmerge(src0 + i, src1 + i, src2 + i, src3 + i, dst + j); + } #endif for( ; i < len; i++, j += cn ) { diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 574ff1627..19cf1357b 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -2219,7 +2219,7 @@ struct RGB2YCrCb_i __m128i v_cr_1 = _mm_packus_epi16(v_cr0, v_cr1); __m128i v_cb_1 = _mm_packus_epi16(v_cb0, v_cb1); - _mm_interleavee_epi8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1); + _mm_interleave_epi8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1); _mm_storeu_si128((__m128i *)(dst + i), v_y_0); _mm_storeu_si128((__m128i *)(dst + i + 16), v_y_1); @@ -2988,7 +2988,7 @@ struct YCrCb2RGB_i std::swap(v_r1, v_b1); } - _mm_interleavee_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_interleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_storeu_si128((__m128i *)(dst), v_r0); _mm_storeu_si128((__m128i *)(dst + 16), v_r1); @@ -4585,7 +4585,7 @@ struct RGB2HLS_b __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); - _mm_interleavee_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); + _mm_interleave_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); _mm_storeu_si128((__m128i *)(dst + j), v_h0); _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); @@ -5695,7 +5695,7 @@ struct RGB2Luv_b __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); __m128i v_v1 = _mm_packus_epi16(v_v_0, v_v_1); - _mm_interleavee_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); + _mm_interleave_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); _mm_storeu_si128((__m128i *)(dst + j), v_l0); _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); From b2f851af0619d51d2f77e6c31372d0140156bae4 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 40/98] cornerMinEigenVal --- modules/imgproc/src/corner.cpp | 31 ++++++++++++++++++++++++++++++- modules/imgproc/src/smooth.cpp | 2 -- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp index 0d04f6f7e..358cd5802 100644 --- a/modules/imgproc/src/corner.cpp +++ b/modules/imgproc/src/corner.cpp @@ -12,7 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Copyright (C) 2014, Itseez Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -271,6 +271,8 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size, #ifdef HAVE_TEGRA_OPTIMIZATION if (tegra::cornerEigenValsVecs(src, eigenv, block_size, aperture_size, op_type, k, borderType)) return; +#elif CV_SSE2 + bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2); #endif int depth = src.depth(); @@ -319,6 +321,33 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size, vst3q_f32(cov_data + j * 3, v_dst); } + #elif CV_SSE2 + if (haveSSE2) + { + for( ; j <= size.width - 8; j += 8 ) + { + __m128 v_dx_0 = _mm_loadu_ps(dxdata + j); + __m128 v_dx_1 = _mm_loadu_ps(dxdata + j + 4); + __m128 v_dy_0 = _mm_loadu_ps(dydata + j); + __m128 v_dy_1 = _mm_loadu_ps(dydata + j + 4); + + __m128 v_dx2_0 = _mm_mul_ps(v_dx_0, v_dx_0); + __m128 v_dxy_0 = _mm_mul_ps(v_dx_0, v_dy_0); + __m128 v_dy2_0 = _mm_mul_ps(v_dy_0, v_dy_0); + __m128 v_dx2_1 = _mm_mul_ps(v_dx_1, v_dx_1); + __m128 v_dxy_1 = _mm_mul_ps(v_dx_1, v_dy_1); + __m128 v_dy2_1 = _mm_mul_ps(v_dy_1, v_dy_1); + + _mm_interleave_ps(v_dx2_0, v_dx2_1, v_dxy_0, v_dxy_1, v_dy2_0, v_dy2_1); + + _mm_storeu_ps(cov_data + j * 3, v_dx2_0); + _mm_storeu_ps(cov_data + j * 3 + 4, v_dx2_1); + _mm_storeu_ps(cov_data + j * 3 + 8, v_dxy_0); + _mm_storeu_ps(cov_data + j * 3 + 12, v_dxy_1); + _mm_storeu_ps(cov_data + j * 3 + 16, v_dy2_0); + _mm_storeu_ps(cov_data + j * 3 + 20, v_dy2_1); + } + } #endif for( ; j < size.width; j++ ) diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 59acdd71b..2dc2fbdf7 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -736,8 +736,6 @@ struct ColumnSum : bool haveScale = scale != 1; double _scale = scale; - printf("bgfbffbbfg\n"); - #if CV_SSE2 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2); #endif From 612b8ce2cbdd1f878d771b40550549623a53bc01 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 41/98] fixed compilation issues --- cmake/OpenCVCompilerOptions.cmake | 6 ++++++ modules/core/src/convert.cpp | 17 ++++++++++------- modules/imgproc/src/color.cpp | 9 ++------- modules/imgproc/src/imgwarp.cpp | 2 +- modules/imgproc/src/pyramids.cpp | 13 ++++++++++--- 5 files changed, 29 insertions(+), 18 deletions(-) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 2f5f13d7b..ecd9a8b41 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -227,6 +227,12 @@ if(MSVC) if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600) set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX") endif() + if(ENABLE_AVX2 AND NOT MSVC_VERSION LESS 1600) + set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX2") + endif() + if(ENABLE_FMA3 AND NOT MSVC_VERSION LESS 1600) + set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:FMA") + endif() if(ENABLE_SSE4_1 AND CV_ICC AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:") set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:SSE4.1") diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 5cb2a15fd..7f450e581 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -617,15 +617,18 @@ struct VMerge4 } MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); -MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); -MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); - MERGE3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, 
_mm_interleave_epi8, si128); -MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); -MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); - MERGE4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); + +MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); + +#if CV_SSE4_1 +MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); MERGE4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); +#endif + +MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); +MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); MERGE4_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); #endif @@ -4328,7 +4331,7 @@ cvtScale_( const short* src, size_t sstep, { __m256 scale256 = _mm256_set1_ps(scale); __m256 shift256 = _mm256_set1_ps(shift); - int shuffle = 0xD8; + const int shuffle = 0xD8; for ( ; x <= size.width - 16; x += 16) { diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 19cf1357b..c1130db40 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1445,7 +1445,7 @@ struct RGB2Gray float32x4_t v_cb, v_cg, v_cr; }; -#elif CV_SSE2 +#elif CV_SSE4_1 template <> struct RGB2Gray @@ -2106,7 +2106,7 @@ struct RGB2YCrCb_i int32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_delta, v_delta2; }; -#elif CV_SSE2 +#elif CV_SSE4_1 template <> struct RGB2YCrCb_i @@ -2247,8 +2247,6 @@ struct RGB2YCrCb_i __m128i v_zero; }; -#if CV_SSE4_1 - template <> struct RGB2YCrCb_i { @@ -2369,9 +2367,6 @@ struct RGB2YCrCb_i #endif // CV_SSE4_1 - -#endif - template struct YCrCb2RGB_f { typedef _Tp channel_type; diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index d8b538591..0de708981 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -2306,7 +2306,7 @@ struct ResizeAreaFastVec_SIMD_32f if (cn == 1) { - int shuffle_lo = _MM_SHUFFLE(2, 0, 2, 0), shuffle_hi = _MM_SHUFFLE(3, 1, 3, 1); + const int shuffle_lo = _MM_SHUFFLE(2, 0, 2, 0), shuffle_hi = _MM_SHUFFLE(3, 1, 3, 1); for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) { __m128 v_row00 = _mm_loadu_ps(S0), v_row01 = _mm_loadu_ps(S0 + 4), diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index f48937218..93b9bfa16 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -236,7 +236,11 @@ struct PyrDownVec_32s16u bool haveSSE; }; -#endif +#else + +typedef PyrDownNoVec PyrDownVec_32s16u; + +#endif // CV_SSE4_1 struct PyrDownVec_32s16s { @@ -288,7 +292,6 @@ struct PyrDownVec_32s16s bool haveSSE; }; - struct PyrUpVec_32s8u { int operator()(int** src, uchar** dst, int, int width) const @@ -471,7 +474,11 @@ struct PyrUpVec_32s16u } }; -#endif +#else + +typedef PyrUpNoVec PyrUpVec_32s16u; + +#endif // CV_SSE4_1 struct PyrUpVec_32f { From 70933ea999d3e29c2a73aa0135bae766170e6de5 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 42/98] convert from f64 --- modules/core/src/convert.cpp | 154 ++++++++++++++++++++++++++++++++++- 1 file changed, 153 insertions(+), 1 deletion(-) diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 7f450e581..a48e90e45 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -4394,7 +4394,159 @@ struct Cvt_SIMD } }; -#if CV_NEON +#if CV_SSE2 + +// from double + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, uchar 
* dst, int width) const + { + int x = 0; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_dst)); + } + + return x; + } +}; + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, schar * dst, int width) const + { + int x = 0; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_dst)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, ushort * dst, int width) const + { + int x = 0; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, short * dst, int width) const + { + int x = 0; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, int * dst, int width) const + { + int x = 0; + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + v_src0 = _mm_movelh_ps(v_src0, v_src1); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_src0)); + } + + return x; + } +}; + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, float * dst, int width) const + { + int x = 0; + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + + _mm_storeu_ps(dst + x, _mm_movelh_ps(v_src0, v_src1)); + } + + return x; + } +}; + + +#elif CV_NEON // from uchar From 25e99c453f7bfd0ea673c91627b012c21abaad2c Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 43/98] avx2 in arithm --- 
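(Annotation placed after the "---" separator, where git-am ignores it: the Cvt_SIMD<double, T> specializations in the preceding patch all share one SSE2 idiom — convert pairs of doubles to floats with _mm_cvtpd_ps, merge the low halves with _mm_movelh_ps, then round with _mm_cvtps_epi32 and narrow with the packs/packus family. A minimal standalone sketch of the double -> short case, not part of the patch, assuming SSE2 and at least 8 readable/writable elements:

    #include <emmintrin.h>   // SSE2 intrinsics

    // Convert 8 doubles to 8 saturated shorts, mirroring the pattern above:
    // cvtpd_ps (2 doubles -> 2 floats) -> movelh_ps (merge low halves)
    // -> cvtps_epi32 (round to int32) -> packs_epi32 (saturate to int16).
    static void cvt_f64_to_s16(const double* src, short* dst)
    {
        __m128 v0 = _mm_cvtpd_ps(_mm_loadu_pd(src));
        __m128 v1 = _mm_cvtpd_ps(_mm_loadu_pd(src + 2));
        __m128 v2 = _mm_cvtpd_ps(_mm_loadu_pd(src + 4));
        __m128 v3 = _mm_cvtpd_ps(_mm_loadu_pd(src + 6));

        v0 = _mm_movelh_ps(v0, v1);   // 4 floats from src[0..3]
        v1 = _mm_movelh_ps(v2, v3);   // 4 floats from src[4..7]

        __m128i d = _mm_packs_epi32(_mm_cvtps_epi32(v0),
                                    _mm_cvtps_epi32(v1));
        _mm_storeu_si128((__m128i*)dst, d);
    }

The uchar/schar/ushort variants in the patch differ only in the final narrowing step: packus_epi16, packs_epi16, and the SSE4.1 packus_epi32, respectively.)
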
cmake/OpenCVCompilerOptions.cmake | 11 +- modules/core/include/opencv2/core/cvdef.h | 4 +- modules/core/src/arithm.cpp | 234 ++++++++++++++++++++-- modules/imgproc/src/color.cpp | 5 +- 4 files changed, 225 insertions(+), 29 deletions(-) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index ecd9a8b41..bbe617dd6 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -224,14 +224,11 @@ if(MSVC) set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} /Zi") endif() - if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600) - set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX") - endif() if(ENABLE_AVX2 AND NOT MSVC_VERSION LESS 1600) set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX2") endif() - if(ENABLE_FMA3 AND NOT MSVC_VERSION LESS 1600) - set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:FMA") + if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600 AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:") + set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX") endif() if(ENABLE_SSE4_1 AND CV_ICC AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:") @@ -252,7 +249,7 @@ if(MSVC) endif() endif() - if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX) + if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX OR ENABLE_AVX2) set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Oi") endif() @@ -312,7 +309,7 @@ if(MSVC) string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") if(NOT ENABLE_NOISY_WARNINGS AND MSVC_VERSION EQUAL 1400) - ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4510 /wd4610 /wd4312 /wd4201 /wd4244 /wd4328 /wd4267) + ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4510 /wd4610 /wd4312 /wd4201 /wd4244 /wd4328 /wd4267 /wd4324) endif() # allow extern "C" functions throw exceptions diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index ded58a18c..610c3fbad 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -159,7 +159,9 @@ # define CV_SSE4_2 1 # endif # if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500) -# include +# ifndef _MSC_VER +# include +# endif # define CV_POPCNT 1 # endif # if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 07678f0c6..8f490a9c9 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -64,6 +64,10 @@ FUNCTOR_TEMPLATE(VLoadStore128); #if CV_SSE2 FUNCTOR_TEMPLATE(VLoadStore64); FUNCTOR_TEMPLATE(VLoadStore128Aligned); +#if CV_AVX +FUNCTOR_TEMPLATE(VLoadStore256); +FUNCTOR_TEMPLATE(VLoadStore256Aligned); +#endif #endif #endif @@ -76,17 +80,28 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si #endif Op op; - for( ; sz.height--; src1 += step1/sizeof(src1[0]), - src2 += step2/sizeof(src2[0]), - dst += step/sizeof(dst[0]) ) + for( ; sz.height--; src1 = (const T *)((const uchar *)src1 + step1), + src2 = (const T *)((const uchar *)src2 + step2), + dst = (T *)((uchar *)dst + step) ) { int x = 0; #if CV_NEON || CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + for( ; x <= sz.width - 32/(int)sizeof(T); x += 32/sizeof(T) ) + { + typename VLoadStore256::reg_type r0 = VLoadStore256::load(src1 + x); + r0 = vop(r0, VLoadStore256::load(src2 + x)); + VLoadStore256::store(dst + x, r0); + } + } +#else #if CV_SSE2 if( USE_SSE2 ) { -#endif +#endif // CV_SSE2 for( ; x <= sz.width - 32/(int)sizeof(T); x += 32/sizeof(T) ) { 
typename VLoadStore128::reg_type r0 = VLoadStore128::load(src1 + x ); @@ -98,9 +113,13 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si } #if CV_SSE2 } -#endif -#endif -#if CV_SSE2 +#endif // CV_SSE2 +#endif // CV_AVX2 +#endif // CV_NEON || CV_SSE2 + +#if CV_AVX2 + // nothing +#elif CV_SSE2 if( USE_SSE2 ) { for( ; x <= sz.width - 8/(int)sizeof(T); x += 8/sizeof(T) ) @@ -111,6 +130,7 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si } } #endif + #if CV_ENABLE_UNROLLED for( ; x <= sz.width - 4; x += 4 ) { @@ -137,13 +157,26 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, #endif Op op; - for( ; sz.height--; src1 += step1/sizeof(src1[0]), - src2 += step2/sizeof(src2[0]), - dst += step/sizeof(dst[0]) ) + for( ; sz.height--; src1 = (const T *)((const uchar *)src1 + step1), + src2 = (const T *)((const uchar *)src2 + step2), + dst = (T *)((uchar *)dst + step) ) { int x = 0; -#if CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + if( (((size_t)src1|(size_t)src2|(size_t)dst)&31) == 0 ) + { + for( ; x <= sz.width - 8; x += 8 ) + { + typename VLoadStore256Aligned::reg_type r0 = VLoadStore256Aligned::load(src1 + x); + r0 = op32(r0, VLoadStore256Aligned::load(src2 + x)); + VLoadStore256Aligned::store(dst + x, r0); + } + } + } +#elif CV_SSE2 if( USE_SSE2 ) { if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) @@ -159,12 +192,24 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, } } } -#endif +#endif // CV_AVX2 + #if CV_NEON || CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + for( ; x <= sz.width - 8; x += 8 ) + { + typename VLoadStore256::reg_type r0 = VLoadStore256::load(src1 + x); + r0 = op32(r0, VLoadStore256::load(src2 + x)); + VLoadStore256::store(dst + x, r0); + } + } +#else #if CV_SSE2 if( USE_SSE2 ) { -#endif +#endif // CV_SSE2 for( ; x <= sz.width - 8; x += 8 ) { typename VLoadStore128::reg_type r0 = VLoadStore128::load(src1 + x ); @@ -176,8 +221,10 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, } #if CV_SSE2 } -#endif -#endif +#endif // CV_SSE2 +#endif // CV_AVX2 +#endif // CV_NEON || CV_SSE2 + #if CV_ENABLE_UNROLLED for( ; x <= sz.width - 4; x += 4 ) { @@ -205,13 +252,26 @@ void vBinOp64(const T* src1, size_t step1, const T* src2, size_t step2, #endif Op op; - for( ; sz.height--; src1 += step1/sizeof(src1[0]), - src2 += step2/sizeof(src2[0]), - dst += step/sizeof(dst[0]) ) + for( ; sz.height--; src1 = (const T *)((const uchar *)src1 + step1), + src2 = (const T *)((const uchar *)src2 + step2), + dst = (T *)((uchar *)dst + step) ) { int x = 0; -#if CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + if( (((size_t)src1|(size_t)src2|(size_t)dst)&31) == 0 ) + { + for( ; x <= sz.width - 4; x += 4 ) + { + typename VLoadStore256Aligned::reg_type r0 = VLoadStore256Aligned::load(src1 + x); + r0 = op64(r0, VLoadStore256Aligned::load(src2 + x)); + VLoadStore256Aligned::store(dst + x, r0); + } + } + } +#elif CV_SSE2 if( USE_SSE2 ) { if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) @@ -244,7 +304,141 @@ void vBinOp64(const T* src1, size_t step1, const T* src2, size_t step2, } } -#if CV_SSE2 +#if CV_AVX2 + +#define FUNCTOR_LOADSTORE_CAST(name, template_arg, register_type, load_body, store_body) \ + template <> \ + struct name{ \ + typedef register_type reg_type; \ + static reg_type load(const template_arg * p) { return load_body ((const reg_type *)p); } \ + static void store(template_arg * p, reg_type v) { store_body ((reg_type *)p, v); } \ + } + +#define 
FUNCTOR_LOADSTORE(name, template_arg, register_type, load_body, store_body) \ + template <> \ + struct name{ \ + typedef register_type reg_type; \ + static reg_type load(const template_arg * p) { return load_body (p); } \ + static void store(template_arg * p, reg_type v) { store_body (p, v); } \ + } + +#define FUNCTOR_CLOSURE_2arg(name, template_arg, body) \ + template<> \ + struct name \ + { \ + VLoadStore256::reg_type operator()( \ + const VLoadStore256::reg_type & a, \ + const VLoadStore256::reg_type & b) const \ + { \ + body; \ + } \ + } + +#define FUNCTOR_CLOSURE_1arg(name, template_arg, body) \ + template<> \ + struct name \ + { \ + VLoadStore256::reg_type operator()( \ + const VLoadStore256::reg_type & a, \ + const VLoadStore256::reg_type & ) const \ + { \ + body; \ + } \ + } + +FUNCTOR_LOADSTORE_CAST(VLoadStore256, uchar, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, schar, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, ushort, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, short, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, int, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE( VLoadStore256, float, __m256 , _mm256_loadu_ps , _mm256_storeu_ps ); +FUNCTOR_LOADSTORE( VLoadStore256, double, __m256d, _mm256_loadu_pd , _mm256_storeu_pd ); + +FUNCTOR_LOADSTORE_CAST(VLoadStore256Aligned, int, __m256i, _mm256_load_si256, _mm256_store_si256); +FUNCTOR_LOADSTORE( VLoadStore256Aligned, float, __m256 , _mm256_load_ps , _mm256_store_ps ); +FUNCTOR_LOADSTORE( VLoadStore256Aligned, double, __m256d, _mm256_load_pd , _mm256_store_pd ); + +FUNCTOR_TEMPLATE(VAdd); +FUNCTOR_CLOSURE_2arg(VAdd, uchar, return _mm256_adds_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, schar, return _mm256_adds_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, ushort, return _mm256_adds_epu16(a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, short, return _mm256_adds_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, int, return _mm256_add_epi32 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, float, return _mm256_add_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, double, return _mm256_add_pd (a, b)); + +FUNCTOR_TEMPLATE(VSub); +FUNCTOR_CLOSURE_2arg(VSub, uchar, return _mm256_subs_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, schar, return _mm256_subs_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, ushort, return _mm256_subs_epu16(a, b)); +FUNCTOR_CLOSURE_2arg(VSub, short, return _mm256_subs_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VSub, int, return _mm256_sub_epi32 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, float, return _mm256_sub_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, double, return _mm256_sub_pd (a, b)); + +FUNCTOR_TEMPLATE(VMin); +FUNCTOR_CLOSURE_2arg(VMin, uchar, return _mm256_min_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, schar, return _mm256_min_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, ushort, return _mm256_min_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, short, return _mm256_min_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, int, return _mm256_min_epi32(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, float, return _mm256_min_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, double, return _mm256_min_pd (a, b)); + +FUNCTOR_TEMPLATE(VMax); +FUNCTOR_CLOSURE_2arg(VMax, uchar, return _mm256_max_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, schar, return _mm256_max_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, ushort, return _mm256_max_epu16(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, short, return _mm256_max_epi16(a, b)); 
+FUNCTOR_CLOSURE_2arg(VMax, int, return _mm256_max_epi32(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, float, return _mm256_max_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, double, return _mm256_max_pd (a, b)); + + +static unsigned int CV_DECL_ALIGNED(32) v32f_absmask[] = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, + 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff }; +static unsigned int CV_DECL_ALIGNED(32) v64f_absmask[] = { 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff, + 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff }; + +FUNCTOR_TEMPLATE(VAbsDiff); +FUNCTOR_CLOSURE_2arg(VAbsDiff, uchar, + return _mm256_add_epi8(_mm256_subs_epu8(a, b), _mm256_subs_epu8(b, a)); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, schar, + __m256i d = _mm256_subs_epi8(a, b); + __m256i m = _mm256_cmpgt_epi8(b, a); + return _mm256_subs_epi8(_mm256_xor_si256(d, m), m); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, ushort, + return _mm256_add_epi16(_mm256_subs_epu16(a, b), _mm256_subs_epu16(b, a)); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, short, + __m256i M = _mm256_max_epi16(a, b); + __m256i m = _mm256_min_epi16(a, b); + return _mm256_subs_epi16(M, m); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, int, + __m256i d = _mm256_sub_epi32(a, b); + __m256i m = _mm256_cmpgt_epi32(b, a); + return _mm256_sub_epi32(_mm256_xor_si256(d, m), m); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, float, + return _mm256_and_ps(_mm256_sub_ps(a, b), *(const __m256*)v32f_absmask); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, double, + return _mm256_and_pd(_mm256_sub_pd(a, b), *(const __m256d*)v64f_absmask); + ); + +FUNCTOR_TEMPLATE(VAnd); +FUNCTOR_CLOSURE_2arg(VAnd, uchar, return _mm256_and_si256(a, b)); +FUNCTOR_TEMPLATE(VOr); +FUNCTOR_CLOSURE_2arg(VOr , uchar, return _mm256_or_si256 (a, b)); +FUNCTOR_TEMPLATE(VXor); +FUNCTOR_CLOSURE_2arg(VXor, uchar, return _mm256_xor_si256(a, b)); +FUNCTOR_TEMPLATE(VNot); +FUNCTOR_CLOSURE_1arg(VNot, uchar, return _mm256_xor_si256(_mm256_set1_epi32(-1), a)); + +#elif CV_SSE2 #define FUNCTOR_LOADSTORE_CAST(name, template_arg, register_type, load_body, store_body)\ template <> \ diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index c1130db40..7197627b2 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -2879,6 +2879,8 @@ struct YCrCb2RGB_i v_delta = _mm_set1_epi16(ColorChannel::half()); v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); v_zero = _mm_setzero_si128(); + + useSSE = coeffs[0] <= std::numeric_limits::max(); } // 16s x 8 @@ -2934,7 +2936,7 @@ struct YCrCb2RGB_i int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; n *= 3; - if (dcn == 3) + if (dcn == 3 && useSSE) { for ( ; i <= n - 96; i += 96, dst += dcn * 32) { @@ -3014,6 +3016,7 @@ struct YCrCb2RGB_i } int dstcn, blueIdx; int coeffs[4]; + bool useSSE; __m128i v_c0, v_c1, v_c2, v_c3, v_delta2; __m128i v_delta, v_alpha, v_zero; From f2cd65cf1e888ff7b48e67578fba59c2d533bc85 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 44/98] fixes --- cmake/OpenCVCompilerOptions.cmake | 7 +- modules/core/include/opencv2/core/cvdef.h | 8 +- .../core/include/opencv2/core/sse_utils.hpp | 82 +++++++++---------- modules/core/src/arithm.cpp | 2 +- modules/core/src/convert.cpp | 7 +- modules/imgproc/src/color.cpp | 6 +- modules/photo/test/test_cloning.cpp | 28 +++++-- 7 files changed, 78 insertions(+), 62 deletions(-) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index bbe617dd6..66e16e786 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ 
b/cmake/OpenCVCompilerOptions.cmake @@ -224,7 +224,7 @@ if(MSVC) set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} /Zi") endif() - if(ENABLE_AVX2 AND NOT MSVC_VERSION LESS 1600) + if(ENABLE_AVX2 AND NOT MSVC_VERSION LESS 1800) set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX2") endif() if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600 AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:") @@ -309,7 +309,7 @@ if(MSVC) string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") if(NOT ENABLE_NOISY_WARNINGS AND MSVC_VERSION EQUAL 1400) - ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4510 /wd4610 /wd4312 /wd4201 /wd4244 /wd4328 /wd4267 /wd4324) + ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4510 /wd4610 /wd4312 /wd4201 /wd4244 /wd4328 /wd4267) endif() # allow extern "C" functions throw exceptions @@ -321,6 +321,7 @@ if(MSVC) endforeach() if(NOT ENABLE_NOISY_WARNINGS) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251") #class 'std::XXX' needs to have dll-interface to be used by clients of YYY + ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4251) # class 'std::XXX' needs to have dll-interface to be used by clients of YYY + ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4324) # 'struct_name' : structure was padded due to __declspec(align()) endif() endif() diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 610c3fbad..a9d59c769 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -159,12 +159,14 @@ # define CV_SSE4_2 1 # endif # if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500) -# ifndef _MSC_VER +# ifdef _MSC_VER +# include +# else # include # endif # define CV_POPCNT 1 # endif -# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +# if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600) // MS Visual Studio 2010 (2012?) 
has no macro pre-defined to identify the use of /arch:AVX // See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 # include @@ -175,7 +177,7 @@ # define __xgetbv() 0 # endif # endif -# if defined __AVX2__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +# if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800) # include # define CV_AVX2 1 # if defined __FMA__ diff --git a/modules/core/include/opencv2/core/sse_utils.hpp b/modules/core/include/opencv2/core/sse_utils.hpp index 7af6d84f2..e0283eb3f 100644 --- a/modules/core/include/opencv2/core/sse_utils.hpp +++ b/modules/core/include/opencv2/core/sse_utils.hpp @@ -43,7 +43,7 @@ #define __OPENCV_CORE_SSE_UTILS_HPP__ #ifndef __cplusplus -# error base.hpp header must be compiled as C++ +# error sse_utils.hpp header must be compiled as C++ #endif #if CV_SSE2 @@ -117,7 +117,7 @@ inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0 inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) -{ +{ __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0); __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0); __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1); @@ -165,9 +165,9 @@ inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0 } inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) -{ +{ __m128i v_mask = _mm_set1_epi16(0x00ff); - + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); __m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); @@ -177,28 +177,28 @@ inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); - + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); __m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); - + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); __m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); - + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), 
_mm_and_si128(layer1_chunk3, v_mask)); v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); } -inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) -{ +{ __m128i v_mask = _mm_set1_epi16(0x00ff); - + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); @@ -237,7 +237,7 @@ inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) -{ +{ __m128i v_mask = _mm_set1_epi16(0x00ff); __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); @@ -286,8 +286,8 @@ inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8)); } -inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) -{ +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0); __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0); __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1); @@ -310,8 +310,8 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g } inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, - __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) -{ + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1); __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1); __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0); @@ -342,7 +342,7 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g } inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, - __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) { __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0); __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0); @@ -352,7 +352,7 @@ inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_a0); __m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1); __m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1); - + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4); __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4); __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5); @@ -393,14 +393,14 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); - __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk2 = 
_mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); - __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); - __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); - __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); @@ -421,18 +421,18 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); - __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); - __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); - __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); - __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); - __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); - __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 
16), _mm_srli_epi32(layer2_chunk5, 16)); v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); @@ -457,26 +457,26 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16)); __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); - __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); - __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); - __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); __m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); - __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16)); + __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16)); __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); - __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); - __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); - __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); __m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); - __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16)); + __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16)); v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); - v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_r1 = 
_mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); @@ -487,12 +487,12 @@ inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, #endif // CV_SSE4_1 inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) -{ +{ __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0); __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g0); __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1); __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1); - + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2); __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2); __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3); @@ -506,14 +506,14 @@ inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) -{ +{ __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1); __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1); __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0); __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0); __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1); __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1); - + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3); __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3); __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4); @@ -531,7 +531,7 @@ inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) -{ +{ __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0); __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0); __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1); diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 8f490a9c9..4c14732e7 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -3476,7 +3476,7 @@ struct Cmp_SIMD haveSSE = checkHardwareSupport(CV_CPU_SSE2); - v_mask = _mm_set1_epi8(0xff); + v_mask = _mm_set1_epi8(-1); } int operator () (const schar * src1, const schar * src2, uchar * dst, int width) const diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index a48e90e45..626a666a9 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -616,18 +616,17 @@ struct VMerge4 bool support; \ } -MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); MERGE3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); MERGE4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); -MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); - #if CV_SSE4_1 +MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); MERGE4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); #endif -MERGE2_KERNEL_TEMPLATE( int, __m128, float, 
_mm_deinterleave_ps, ps); +MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); MERGE4_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 7197627b2..675d6b908 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1460,9 +1460,9 @@ struct RGB2Gray if( blueIdx == 0 ) std::swap(coeffs[0], coeffs[2]); - v_cb = _mm_set1_epi16(coeffs[0]); - v_cg = _mm_set1_epi16(coeffs[1]); - v_cr = _mm_set1_epi16(coeffs[2]); + v_cb = _mm_set1_epi16((short)coeffs[0]); + v_cg = _mm_set1_epi16((short)coeffs[1]); + v_cr = _mm_set1_epi16((short)coeffs[2]); v_delta = _mm_set1_epi32(1 << (yuv_shift - 1)); } diff --git a/modules/photo/test/test_cloning.cpp b/modules/photo/test/test_cloning.cpp index 56d166205..1f86612a4 100644 --- a/modules/photo/test/test_cloning.cpp +++ b/modules/photo/test/test_cloning.cpp @@ -64,6 +64,7 @@ TEST(Photo_SeamlessClone_normal, regression) string original_path1 = folder + "source1.png"; string original_path2 = folder + "destination1.png"; string original_path3 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -79,8 +80,8 @@ TEST(Photo_SeamlessClone_normal, regression) p.y = destination.size().height/2; seamlessClone(source, destination, mask, p, result, 1); - - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; SAVE(result); @@ -94,6 +95,7 @@ TEST(Photo_SeamlessClone_mixed, regression) string original_path1 = folder + "source1.png"; string original_path2 = folder + "destination1.png"; string original_path3 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -111,7 +113,9 @@ TEST(Photo_SeamlessClone_mixed, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -123,6 +127,7 @@ TEST(Photo_SeamlessClone_featureExchange, regression) string original_path1 = folder + "source1.png"; string original_path2 = folder + "destination1.png"; string original_path3 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -140,7 +145,9 @@ TEST(Photo_SeamlessClone_featureExchange, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -151,6 +158,7 @@ TEST(Photo_SeamlessClone_colorChange, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "cloning/color_change/"; string original_path1 = folder + "source1.png"; string original_path2 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = 
imread(original_path2, IMREAD_COLOR); @@ -163,7 +171,9 @@ TEST(Photo_SeamlessClone_colorChange, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -174,6 +184,7 @@ TEST(Photo_SeamlessClone_illuminationChange, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "cloning/Illumination_Change/"; string original_path1 = folder + "source1.png"; string original_path2 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -186,7 +197,7 @@ TEST(Photo_SeamlessClone_illuminationChange, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -197,6 +208,7 @@ TEST(Photo_SeamlessClone_textureFlattening, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "cloning/Texture_Flattening/"; string original_path1 = folder + "source1.png"; string original_path2 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -209,7 +221,9 @@ TEST(Photo_SeamlessClone_textureFlattening, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); From 7b060d91224ef65d680a74eb57bccdd0ea93a35d Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 45/98] cvtColor 4 cn --- modules/imgproc/src/color.cpp | 130 ++++++++++++++++++++++++++-------- 1 file changed, 102 insertions(+), 28 deletions(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 675d6b908..4efbcc5f8 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1811,9 +1811,9 @@ struct RGB2YCrCb_f float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; n *= 3; - if (scn == 3) + if (scn == 3 || scn == 4) { - for ( ; i <= n - 24; i += 24, src += 24) + for ( ; i <= n - 24; i += 24, src += 8 * scn) { __m128 v_r0 = _mm_loadu_ps(src); __m128 v_r1 = _mm_loadu_ps(src + 4); @@ -1822,7 +1822,15 @@ struct RGB2YCrCb_f __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + if (scn == 4) + { + __m128 v_a0 = _mm_loadu_ps(src + 24); + __m128 v_a1 = _mm_loadu_ps(src + 28); + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_y0, v_cr0, v_cb0; process(v_r0, v_g0, v_b0, @@ -2141,7 +2149,7 @@ struct RGB2YCrCb_i __m128i v_b_p = _mm_unpacklo_epi16(v_b, v_zero); __m128i v_y0 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), - _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), _mm_mullo_epi32(v_b_p, v_c2))); v_y0 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y0), yuv_shift); @@ -2176,7 +2184,7 @@ struct RGB2YCrCb_i 
int delta = ColorChannel::half()*(1 << yuv_shift); n *= 3; - if (scn == 3) + if (scn == 3 || scn == 4) { for ( ; i <= n - 96; i += 96, src += scn * 32) { @@ -2187,7 +2195,15 @@ struct RGB2YCrCb_i __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 64)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 80)); - _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + if (scn == 4) + { + __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 96)); + __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 112)); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; process(_mm_unpacklo_epi8(v_r0, v_zero), @@ -2280,7 +2296,7 @@ struct RGB2YCrCb_i __m128i v_b_p = _mm_unpacklo_epi16(v_b, v_zero); __m128i v_y0 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), - _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), _mm_mullo_epi32(v_b_p, v_c2))); v_y0 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y0), yuv_shift); @@ -2315,8 +2331,7 @@ struct RGB2YCrCb_i int delta = ColorChannel::half()*(1 << yuv_shift); n *= 3; - - if (scn == 3) + if (scn == 3 || scn == 4) { for ( ; i <= n - 48; i += 48, src += scn * 16) { @@ -2327,7 +2342,16 @@ struct RGB2YCrCb_i __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); - _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + if (scn == 4) + { + __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 48)); + __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 56)); + + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; process(v_r0, v_g0, v_b0, @@ -2521,9 +2545,9 @@ struct YCrCb2RGB_f float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; n *= 3; - if (dcn == 3) + if (dcn == 3 || dcn == 4) { - for ( ; i <= n - 24; i += 24, dst += 24) + for ( ; i <= n - 24; i += 24, dst += 8 * dcn) { __m128 v_y0 = _mm_loadu_ps(src + i); __m128 v_y1 = _mm_loadu_ps(src + i + 4); @@ -2542,7 +2566,13 @@ struct YCrCb2RGB_f process(v_y1, v_cr1, v_cb1, v_r1, v_g1, v_b1); - _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + __m128 v_a0 = v_alpha, v_a1 = v_alpha; + + if (dcn == 3) + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + else + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); _mm_storeu_ps(dst, v_r0); _mm_storeu_ps(dst + 4, v_r1); @@ -2550,6 +2580,12 @@ struct YCrCb2RGB_f _mm_storeu_ps(dst + 12, v_g1); _mm_storeu_ps(dst + 16, v_b0); _mm_storeu_ps(dst + 20, v_b1); + + if (dcn == 4) + { + _mm_storeu_ps(dst + 24, v_a0); + _mm_storeu_ps(dst + 28, v_a1); + } } } @@ -2568,7 +2604,7 @@ struct YCrCb2RGB_f } int dstcn, blueIdx; float coeffs[4]; - + __m128 v_c0, v_c1, v_c2, v_c3, v_alpha, v_delta; }; @@ -2880,11 +2916,14 @@ struct YCrCb2RGB_i v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); v_zero = _mm_setzero_si128(); + uchar alpha = ColorChannel::max(); + v_alpha = _mm_set1_epi8(*(char *)&alpha); + useSSE = coeffs[0] <= std::numeric_limits::max(); } // 16s x 8 - void process(__m128i v_y, __m128i v_cr, __m128i v_cb, + void process(__m128i v_y, __m128i v_cr, __m128i v_cb, __m128i & v_r, __m128i & v_g, __m128i & v_b) const { v_cr = _mm_sub_epi16(v_cr, v_delta); @@ -2903,11 +2942,11 @@ struct YCrCb2RGB_i __m128i v_mulhi_0 = 
_mm_mulhi_epi16(v_cr, v_c0); __m128i v_b0 = _mm_srai_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_3, v_mulhi_3), v_delta2), yuv_shift); - __m128i v_g0 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_2, v_mulhi_2), + __m128i v_g0 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_2, v_mulhi_2), _mm_unpacklo_epi16(v_mullo_1, v_mulhi_1)), v_delta2), yuv_shift); __m128i v_r0 = _mm_srai_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_0, v_mulhi_0), v_delta2), yuv_shift); - + v_r0 = _mm_add_epi32(v_r0, v_y_p); v_g0 = _mm_add_epi32(v_g0, v_y_p); v_b0 = _mm_add_epi32(v_b0, v_y_p); @@ -2915,7 +2954,7 @@ struct YCrCb2RGB_i v_y_p = _mm_unpackhi_epi16(v_y, v_zero); __m128i v_b1 = _mm_srai_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_3, v_mulhi_3), v_delta2), yuv_shift); - __m128i v_g1 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_2, v_mulhi_2), + __m128i v_g1 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_2, v_mulhi_2), _mm_unpackhi_epi16(v_mullo_1, v_mulhi_1)), v_delta2), yuv_shift); __m128i v_r1 = _mm_srai_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_0, v_mulhi_0), v_delta2), yuv_shift); @@ -2936,7 +2975,7 @@ struct YCrCb2RGB_i int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; n *= 3; - if (dcn == 3 && useSSE) + if ((dcn == 3 || dcn == 4) && useSSE) { for ( ; i <= n - 96; i += 96, dst += dcn * 32) { @@ -2985,7 +3024,13 @@ struct YCrCb2RGB_i std::swap(v_r1, v_b1); } - _mm_interleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + __m128i v_a0 = v_alpha, v_a1 = v_alpha; + + if (dcn == 3) + _mm_interleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + else + _mm_interleave_epi8(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); _mm_storeu_si128((__m128i *)(dst), v_r0); _mm_storeu_si128((__m128i *)(dst + 16), v_r1); @@ -2993,10 +3038,15 @@ struct YCrCb2RGB_i _mm_storeu_si128((__m128i *)(dst + 48), v_g1); _mm_storeu_si128((__m128i *)(dst + 64), v_b0); _mm_storeu_si128((__m128i *)(dst + 80), v_b1); + + if (dcn == 4) + { + _mm_storeu_si128((__m128i *)(dst + 96), v_a0); + _mm_storeu_si128((__m128i *)(dst + 112), v_a1); + } } } - for ( ; i < n; i += 3, dst += dcn) { uchar Y = src[i]; @@ -3198,9 +3248,9 @@ struct RGB2XYZ_f n *= 3; - if (scn == 3) + if (scn == 3 || scn == 4) { - for ( ; i <= n - 24; i += 24, src += 24) + for ( ; i <= n - 24; i += 24, src += 8 * scn) { __m128 v_r0 = _mm_loadu_ps(src); __m128 v_r1 = _mm_loadu_ps(src + 4); @@ -3209,7 +3259,16 @@ struct RGB2XYZ_f __m128 v_b0 = _mm_loadu_ps(src + 16); __m128 v_b1 = _mm_loadu_ps(src + 20); - _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + if (scn == 4) + { + __m128 v_a0 = _mm_loadu_ps(src + 24); + __m128 v_a1 = _mm_loadu_ps(src + 28); + + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); __m128 v_x0, v_y0, v_z0; process(v_r0, v_g0, v_b0, @@ -3596,6 +3655,8 @@ struct XYZ2RGB_f v_c6 = _mm_set1_ps(coeffs[6]); v_c7 = _mm_set1_ps(coeffs[7]); v_c8 = _mm_set1_ps(coeffs[8]); + + v_alpha = _mm_set1_ps(ColorChannel::max()); } void process(__m128 v_x, __m128 v_y, __m128 v_z, @@ -3624,9 +3685,9 @@ struct XYZ2RGB_f n *= 3; int i = 0; - if (dcn == 3) + if (dcn == 3 || dcn == 4) { - for ( ; i <= n - 24; i += 24, dst += 24) + for ( ; i <= n - 24; i += 24, dst += 8 * dcn) { __m128 v_x0 = _mm_loadu_ps(src + i); __m128 v_x1 = _mm_loadu_ps(src + i + 4); @@ -3645,7 +3706,13 @@ struct XYZ2RGB_f process(v_x1, v_y1, v_z1, v_r1, v_g1, v_b1); - 
_mm_interleave_ps(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1); + __m128 v_a0 = v_alpha, v_a1 = v_alpha; + + if (dcn == 4) + _mm_interleave_ps(v_b0, v_b1, v_g0, v_g1, + v_r0, v_r1, v_a0, v_a1); + else + _mm_interleave_ps(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1); _mm_storeu_ps(dst, v_b0); _mm_storeu_ps(dst + 4, v_b1); @@ -3653,6 +3720,12 @@ struct XYZ2RGB_f _mm_storeu_ps(dst + 12, v_g1); _mm_storeu_ps(dst + 16, v_r0); _mm_storeu_ps(dst + 20, v_r1); + + if (dcn == 4) + { + _mm_storeu_ps(dst + 24, v_a0); + _mm_storeu_ps(dst + 28, v_a1); + } } } @@ -3671,6 +3744,7 @@ struct XYZ2RGB_f float coeffs[9]; __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; + __m128 v_alpha; }; #endif // CV_SSE2 @@ -5213,7 +5287,7 @@ struct Lab2RGB_b v_g1 = _mm_sub_ps(v_g1, v_128); v_b0 = _mm_sub_ps(v_b0, v_128); v_b1 = _mm_sub_ps(v_b1, v_128); - + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); _mm_store_ps(buf, v_r0); From 44d89638fd6cb074e87a4b3fd1bcff3f8ecc6c89 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 46/98] divide --- modules/core/perf/perf_arithm.cpp | 14 ++ modules/core/src/arithm.cpp | 231 +++++++++++++++++++++++++++++- 2 files changed, 244 insertions(+), 1 deletion(-) diff --git a/modules/core/perf/perf_arithm.cpp b/modules/core/perf/perf_arithm.cpp index 3598c8639..c6e4c40db 100644 --- a/modules/core/perf/perf_arithm.cpp +++ b/modules/core/perf/perf_arithm.cpp @@ -242,3 +242,17 @@ PERF_TEST_P(Size_MatType, multiplyScale, TYPICAL_MATS_CORE_ARITHM) SANITY_CHECK(c, 1e-8); } + +PERF_TEST_P(Size_MatType, divide, TYPICAL_MATS_CORE_ARITHM) +{ + Size sz = get<0>(GetParam()); + int type = get<1>(GetParam()); + cv::Mat a(sz, type), b(sz, type), c(sz, type); + double scale = 0.5; + + declare.in(a, b, WARMUP_RNG).out(c); + + TEST_CYCLE() divide(a, b, c, scale); + + SANITY_CHECK_NOTHING(); +} diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 4c14732e7..5875d61cc 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -2610,6 +2610,233 @@ mul_( const T* src1, size_t step1, const T* src2, size_t step2, } } +template +struct Div_SIMD +{ + int operator() (const T *, const T *, T *, int, double) const + { + return 0; + } +}; + +#if CV_SSE2 + +#if CV_SSE4_1 + +template <> +struct Div_SIMD +{ + int operator() (const uchar * src1, const uchar * src2, uchar * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(src1 + x)), v_zero); + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_unpacklo_epi8(_v_src2, v_zero); + + __m128i v_src1i = _mm_unpacklo_epi16(v_src1, v_zero); + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_unpackhi_epi16(v_src1, v_zero); + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = 
_mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Div_SIMD +{ + int operator() (const schar * src1, const schar * src2, schar * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((const __m128i *)(src1 + x))), 8); + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _v_src2), 8); + + __m128i v_src1i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src1), 16); + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src1), 16); + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct Div_SIMD +{ + int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src1i = _mm_unpacklo_epi16(v_src1, v_zero); + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = 
_mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_unpackhi_epi16(v_src1, v_zero); + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Div_SIMD +{ + int operator() (const short * src1, const short * src2, short * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src1i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src1), 16); + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src1), 16); + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +template <> +struct Div_SIMD +{ + int operator() (const int * src1, const int * src2, int * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128d v_src1d = _mm_cvtepi32_pd(v_src1); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + + __m128i v_dst = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + 
__m128i v_mask = _mm_cmpeq_epi32(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, v_dst)); + } + + return x; + } +}; + +#endif + template static void div_( const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size size, double scale ) @@ -2618,9 +2845,11 @@ div_( const T* src1, size_t step1, const T* src2, size_t step2, step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); + Div_SIMD vop; + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) { - int i = 0; + int i = vop(src1, src2, dst, size.width, scale); #if CV_ENABLE_UNROLLED for( ; i <= size.width - 4; i += 4 ) { From ef29b15c9a360fcbd4c3fe48d3ce2574a8ff37f1 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:30 +0300 Subject: [PATCH 47/98] reciprocal --- modules/core/perf/perf_arithm.cpp | 14 +++ modules/core/src/arithm.cpp | 200 +++++++++++++++++++++++++++++- 2 files changed, 213 insertions(+), 1 deletion(-) diff --git a/modules/core/perf/perf_arithm.cpp b/modules/core/perf/perf_arithm.cpp index c6e4c40db..c6c2a1b29 100644 --- a/modules/core/perf/perf_arithm.cpp +++ b/modules/core/perf/perf_arithm.cpp @@ -256,3 +256,17 @@ PERF_TEST_P(Size_MatType, divide, TYPICAL_MATS_CORE_ARITHM) SANITY_CHECK_NOTHING(); } + +PERF_TEST_P(Size_MatType, reciprocal, TYPICAL_MATS_CORE_ARITHM) +{ + Size sz = get<0>(GetParam()); + int type = get<1>(GetParam()); + cv::Mat b(sz, type), c(sz, type); + double scale = 0.5; + + declare.in(b, WARMUP_RNG).out(c); + + TEST_CYCLE() divide(scale, b, c); + + SANITY_CHECK_NOTHING(); +} diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 5875d61cc..49a9cceae 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -2886,6 +2886,202 @@ div_( const T* src1, size_t step1, const T* src2, size_t step2, } } +template +struct Recip_SIMD +{ + int operator() (const T *, T *, int, double) const + { + return 0; + } +}; + +#if CV_SSE2 + +#if CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + int operator() (const uchar * src2, uchar * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_unpacklo_epi8(_v_src2, v_zero); + + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + int operator() (const schar * src2, schar * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = 
_mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _v_src2), 8); + + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + int operator() (const ushort * src2, ushort * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + int operator() (const short * src2, short * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + 
__m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +template <> +struct Recip_SIMD +{ + int operator() (const int * src2, int * dst, int width, double scale) const + { + int x = 0; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128d v_src2d = _mm_cvtepi32_pd(v_src2); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + + __m128i v_dst = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + __m128i v_mask = _mm_cmpeq_epi32(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, v_dst)); + } + + return x; + } +}; + +#endif + template static void recip_( const T*, size_t, const T* src2, size_t step2, T* dst, size_t step, Size size, double scale ) @@ -2893,9 +3089,11 @@ recip_( const T*, size_t, const T* src2, size_t step2, step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); + Recip_SIMD vop; + for( ; size.height--; src2 += step2, dst += step ) { - int i = 0; + int i = vop(src2, dst, size.width, scale); #if CV_ENABLE_UNROLLED for( ; i <= size.width - 4; i += 4 ) { From d92f67ee2ca97a65da92d0461508b1a1a2d3be24 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 48/98] warpPerspective --- modules/imgproc/src/imgwarp.cpp | 250 +++++++++++++++++++++++++++++++- 1 file changed, 248 insertions(+), 2 deletions(-) diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 0de708981..dc254fd7d 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -5503,6 +5503,19 @@ public: int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width); bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height); + #if CV_SSE2 + __m128d v_M0 = _mm_set1_pd(M[0]); + __m128d v_M3 = _mm_set1_pd(M[3]); + __m128d v_M6 = _mm_set1_pd(M[6]); + __m128d v_intmax = _mm_set1_pd((double)INT_MAX); + __m128d v_intmin = _mm_set1_pd((double)INT_MIN); + __m128d v_2 = _mm_set1_pd(2), + v_zero = _mm_setzero_pd(), + v_1 = _mm_set1_pd(1), + v_its = _mm_set1_pd(INTER_TAB_SIZE); + __m128i v_itsi1 = _mm_set1_epi32(INTER_TAB_SIZE - 1); + #endif + for( y = range.start; y < range.end; y += bh0 ) { for( x = 0; x < width; x += bw0 ) @@ -5521,7 +5534,117 @@ public: double W0 = M[6]*x + M[7]*(y + y1) + M[8]; if( interpolation == INTER_NEAREST ) - for( x1 = 0; x1 < bw; x1++ ) + { + x1 = 0; + + #if CV_SSE2 + __m128d v_X0d = _mm_set1_pd(X0); + __m128d v_Y0d = _mm_set1_pd(Y0); + __m128d v_W0 = _mm_set1_pd(W0); + __m128d v_x1 = _mm_set_pd(1, 0); + + for( ; x1 <= bw - 16; x1 += 16 ) + { + // 0-3 + __m128i v_X0, v_Y0; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, 
v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 4-8 + __m128i v_X1, v_Y1; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 8-11 + __m128i v_X2, v_Y2; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 12-15 + __m128i v_X3, v_Y3; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, 
_mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // convert to 16s + v_X0 = _mm_packs_epi32(v_X0, v_X1); + v_X1 = _mm_packs_epi32(v_X2, v_X3); + v_Y0 = _mm_packs_epi32(v_Y0, v_Y1); + v_Y1 = _mm_packs_epi32(v_Y2, v_Y3); + + _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); + } + #endif + + for( ; x1 < bw; x1++ ) { double W = W0 + M[6]*x1; W = W ? 1./W : 0; @@ -5533,10 +5656,133 @@ public: xy[x1*2] = saturate_cast(X); xy[x1*2+1] = saturate_cast(Y); } + } else { short* alpha = A + y1*bw; - for( x1 = 0; x1 < bw; x1++ ) + x1 = 0; + + #if CV_SSE2 + __m128d v_X0d = _mm_set1_pd(X0); + __m128d v_Y0d = _mm_set1_pd(Y0); + __m128d v_W0 = _mm_set1_pd(W0); + __m128d v_x1 = _mm_set_pd(1, 0); + + for( ; x1 <= bw - 16; x1 += 16 ) + { + // 0-3 + __m128i v_X0, v_Y0; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 4-8 + __m128i v_X1, v_Y1; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 8-11 + __m128i v_X2, v_Y2; + { + __m128d v_W = 
_mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 12-15 + __m128i v_X3, v_Y3; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // store alpha + __m128i v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y0, v_itsi1), INTER_BITS), + _mm_and_si128(v_X0, v_itsi1)); + __m128i v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y1, v_itsi1), INTER_BITS), + _mm_and_si128(v_X1, v_itsi1)); + _mm_storeu_si128((__m128i *)(alpha + x1), _mm_packs_epi32(v_alpha0, v_alpha1)); + + v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y2, v_itsi1), INTER_BITS), + _mm_and_si128(v_X2, v_itsi1)); + v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y3, v_itsi1), INTER_BITS), + _mm_and_si128(v_X3, v_itsi1)); + _mm_storeu_si128((__m128i *)(alpha + x1 + 8), _mm_packs_epi32(v_alpha0, v_alpha1)); + + // convert to 16s + v_X0 = _mm_packs_epi32(_mm_srai_epi32(v_X0, INTER_BITS), _mm_srai_epi32(v_X1, INTER_BITS)); + v_X1 = _mm_packs_epi32(_mm_srai_epi32(v_X2, INTER_BITS), _mm_srai_epi32(v_X3, INTER_BITS)); + v_Y0 = _mm_packs_epi32(_mm_srai_epi32(v_Y0, INTER_BITS), _mm_srai_epi32(v_Y1, INTER_BITS)); + v_Y1 = _mm_packs_epi32(_mm_srai_epi32(v_Y2, INTER_BITS), _mm_srai_epi32(v_Y3, INTER_BITS)); + + _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); + } + #endif + + for( ; x1 < bw; x1++ 
) { double W = W0 + M[6]*x1; W = W ? INTER_TAB_SIZE/W : 0; From 0fd8f5052273c422b2acc7b37bbade01f8bb0d91 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 49/98] warpAffine INTER_NEAREST --- modules/imgproc/src/imgwarp.cpp | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index dc254fd7d..a2f607039 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -5096,6 +5096,28 @@ public: vst2q_s16(xy + (x1 << 1), v_dst); } + #elif CV_SSE2 + __m128i v_X0 = _mm_set1_epi32(X0); + __m128i v_Y0 = _mm_set1_epi32(Y0); + for ( ; x1 <= bw - 16; x1 += 16) + { + __m128i v_x0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 4))), AB_BITS)); + __m128i v_x1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 8))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 12))), AB_BITS)); + + __m128i v_y0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 4))), AB_BITS)); + __m128i v_y1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 8))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 12))), AB_BITS)); + + _mm_interleave_epi16(v_x0, v_x1, v_y0, v_y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_x0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_x1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_y1); + } #endif for( ; x1 < bw; x1++ ) { From 3b23e57925a951c892219e59e38458235a138cdc Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 50/98] convertMaps --- modules/imgproc/src/imgwarp.cpp | 154 +++++++++++++++++++++++++++++++- 1 file changed, 150 insertions(+), 4 deletions(-) diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index a2f607039..4880819b9 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -4847,6 +4847,26 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst2q_s16(dst1 + (x << 1), v_dst); } + #elif CV_SSE4_1 + for( ; x <= size.width - 16; x += 16 ) + { + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4))); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 8)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 12))); + + __m128i v_dst2 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 4))); + __m128i v_dst3 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 8)), + _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 12))); + + _mm_interleave_epi16(v_dst0, v_dst1, v_dst2, v_dst3); + + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst0); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst1); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst2); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst3); + } #endif for( ; x < size.width; x++ ) { @@ -4881,6 +4901,49 @@ void cv::convertMaps( 
InputArray _map1, InputArray _map2, vandq_s32(v_ix1, v_mask))); vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } + #elif CV_SSE4_1 + __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); + __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); + + for( ; x <= size.width - 16; x += 16 ) + { + __m128i v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x), v_its)); + __m128i v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 4), v_its)); + __m128i v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x), v_its)); + __m128i v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 4), v_its)); + + __m128i v_dst10 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), + _mm_srai_epi32(v_ix1, INTER_BITS)); + __m128i v_dst12 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), + _mm_srai_epi32(v_iy1, INTER_BITS)); + __m128i v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), + _mm_and_si128(v_ix0, v_its1)); + __m128i v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), + _mm_and_si128(v_ix1, v_its1)); + _mm_storeu_si128((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst20, v_dst21)); + + v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 8), v_its)); + v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 12), v_its)); + v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 8), v_its)); + v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 12), v_its)); + + __m128i v_dst11 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), + _mm_srai_epi32(v_ix1, INTER_BITS)); + __m128i v_dst13 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), + _mm_srai_epi32(v_iy1, INTER_BITS)); + v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), + _mm_and_si128(v_ix0, v_its1)); + v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), + _mm_and_si128(v_ix1, v_its1)); + _mm_storeu_si128((__m128i *)(dst2 + x + 8), _mm_packus_epi32(v_dst20, v_dst21)); + + _mm_interleave_epi16(v_dst10, v_dst11, v_dst12, v_dst13); + + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst10); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst11); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst12); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst13); + } #endif for( ; x < size.width; x++ ) { @@ -4900,6 +4963,12 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, for( ; x <= (size.width << 1) - 8; x += 8 ) vst1q_s16(dst1 + x, vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x))), vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x + 4))))); + #elif CV_SSE2 + for( ; x <= (size.width << 1) - 8; x += 8 ) + { + _mm_storeu_si128((__m128i *)(dst1 + x), _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4)))); + } #endif for( ; x < size.width; x++ ) { @@ -4935,6 +5004,27 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vandq_s32(v_ix1, v_mask))); vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } + #elif CV_SSE2 + __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); + __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128i v_y_mask = _mm_set1_epi32((INTER_TAB_SIZE-1) << 16); + + for( ; x <= size.width - 4; x += 4 ) + { + __m128i v_src0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2), v_its)); + __m128i v_src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2 + 4), v_its)); + + __m128i v_dst1 = _mm_packs_epi32(_mm_srai_epi32(v_src0, INTER_BITS), + _mm_srai_epi32(v_src1, INTER_BITS)); + 
_mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst1); + + // x0 y0 x1 y1 . . . + v_src0 = _mm_packs_epi32(_mm_and_si128(v_src0, v_its1), + _mm_and_si128(v_src1, v_its1)); + __m128i v_dst2 = _mm_or_si128(_mm_srli_epi32(_mm_and_si128(v_src0, v_y_mask), 16 - INTER_BITS), // y0 0 y1 0 . . . + _mm_and_si128(v_src0, v_its1)); // 0 x0 0 x1 . . . + _mm_storel_epi64((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst2, v_dst2)); + } #endif for( ; x < size.width; x++ ) { @@ -4980,6 +5070,44 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst1q_f32(dst1f + x + 4, v_dst1); vst1q_f32(dst2f + x + 4, v_dst2); } + #elif CV_SSE2 + __m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1); + __m128i v_zero = _mm_setzero_si128(), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128 v_scale = _mm_set1_ps(scale); + + for( ; x <= size.width - 16; x += 16) + { + __m128i v_src10 = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); + __m128i v_src11 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 8)); + __m128i v_src20 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 16)); + __m128i v_src21 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 24)); + + _mm_deinterleave_epi16(v_src10, v_src11, v_src20, v_src21); + + __m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero; + __m128i v_fxy_p = _mm_unpacklo_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src10), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src20), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src10), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src20), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + + v_fxy = src2 ? 
_mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x + 8)), v_mask2) : v_zero; + v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x + 8, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src11), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x + 8, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src21), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x + 12, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src11), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x + 12, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src21), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + } #endif for( ; x < size.width; x++ ) { @@ -5021,6 +5149,24 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, v_scale, vcvtq_f32_s32(vshrq_n_s32(v_fxy2, INTER_BITS))); vst2q_f32(dst1f + (x << 1) + 8, v_dst); } + #elif CV_SSE2 + __m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1); + __m128i v_zero = _mm_set1_epi32(0), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128 v_scale = _mm_set1_ps(scale); + + for ( ; x <= size.width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); + __m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero; + __m128i v_fxy1 = _mm_and_si128(v_fxy, v_mask); + __m128i v_fxy2 = _mm_srli_epi16(v_fxy, INTER_BITS); + + __m128 v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_fxy1, v_fxy2)), v_scale); + _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)), v_add)); + + v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_fxy1, v_fxy2)), v_scale); + _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)), v_add)); + } #endif for( ; x < size.width; x++ ) { @@ -5096,7 +5242,7 @@ public: vst2q_s16(xy + (x1 << 1), v_dst); } - #elif CV_SSE2 + #elif CV_SSE4_1 __m128i v_X0 = _mm_set1_epi32(X0); __m128i v_Y0 = _mm_set1_epi32(Y0); for ( ; x1 <= bw - 16; x1 += 16) @@ -5525,7 +5671,7 @@ public: int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width); bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height); - #if CV_SSE2 + #if CV_SSE4_1 __m128d v_M0 = _mm_set1_pd(M[0]); __m128d v_M3 = _mm_set1_pd(M[3]); __m128d v_M6 = _mm_set1_pd(M[6]); @@ -5559,7 +5705,7 @@ public: { x1 = 0; - #if CV_SSE2 + #if CV_SSE4_1 __m128d v_X0d = _mm_set1_pd(X0); __m128d v_Y0d = _mm_set1_pd(Y0); __m128d v_W0 = _mm_set1_pd(W0); @@ -5684,7 +5830,7 @@ public: short* alpha = A + y1*bw; x1 = 0; - #if CV_SSE2 + #if CV_SSE4_1 __m128d v_X0d = _mm_set1_pd(X0); __m128d v_Y0d = _mm_set1_pd(Y0); __m128d v_W0 = _mm_set1_pd(W0); From 1d3c86041105d8f5971eb5ceb27eba0979dd0af1 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 51/98] SinCos_32f --- modules/core/src/mathfuncs.cpp | 68 +++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index 7b27dc350..d7f9dc537 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -824,14 +824,78 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval, /*static const double cos_a2 = 1;*/ double k1; - int 
i; + int i = 0; if( !angle_in_degrees ) k1 = N/(2*CV_PI); else k1 = N/360.; - for( i = 0; i < len; i++ ) +#if CV_AVX2 + __m128d v_i = _mm_set_pd(1, 0); + __m128d v_k1 = _mm_set1_pd(k1); + __m128d v_1 = _mm_set1_pd(1); + __m128i v_N1 = _mm_set1_epi32(N - 1); + __m128i v_N4 = _mm_set1_epi32(N >> 2); + __m128d v_sin_a0 = _mm_set1_pd(sin_a0); + __m128d v_sin_a2 = _mm_set1_pd(sin_a2); + __m128d v_cos_a0 = _mm_set1_pd(cos_a0); + + if (USE_AVX2) + { + for ( ; i <= len - 4; i += 4) + { + __m128 v_angle = _mm_loadu_ps(angle + i); + + // 0-1 + __m128d v_t = _mm_mul_pd(_mm_cvtps_pd(v_angle), v_k1); + __m128i v_it = _mm_cvtpd_epi32(v_t); + v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it)); + + __m128i v_sin_idx = _mm_and_si128(v_it, v_N1); + __m128i v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1); + + __m128d v_t2 = _mm_mul_pd(v_t, v_t); + __m128d v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t); + __m128d v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1); + + __m128d v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 1); + __m128d v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 1); + + __m128d v_sin_val_0 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b), + _mm_mul_pd(v_cos_a, v_sin_b)); + __m128d v_cos_val_0 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b), + _mm_mul_pd(v_sin_a, v_sin_b)); + + // 2-3 + v_t = _mm_mul_pd(_mm_cvtps_pd(_mm_castsi128_ps(_mm_slli_si128(_mm_castps_si128(v_angle), 8))), v_k1); + v_it = _mm_cvtpd_epi32(v_t); + v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it)); + + v_sin_idx = _mm_and_si128(v_it, v_N1); + v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1); + + v_t2 = _mm_mul_pd(v_t, v_t); + v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t); + v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1); + + v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 1); + v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 1); + + __m128d v_sin_val_1 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b), + _mm_mul_pd(v_cos_a, v_sin_b)); + __m128d v_cos_val_1 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b), + _mm_mul_pd(v_sin_a, v_sin_b)); + + _mm_storeu_ps(sinval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_sin_val_0), + _mm_cvtpd_ps(v_sin_val_1))); + _mm_storeu_ps(cosval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_cos_val_0), + _mm_cvtpd_ps(v_cos_val_1))); + } + } +#endif + + for( ; i < len; i++ ) { double t = angle[i]*k1; int it = cvRound(t); From 6bce6ee34a7bf23d19d2df37bc44a47072a8f6e0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 52/98] checks --- modules/core/src/arithm.cpp | 89 ++++- modules/core/src/convert.cpp | 62 ++- modules/core/src/mathfuncs.cpp | 102 ++--- modules/core/src/stat.cpp | 6 +- modules/imgproc/src/color.cpp | 551 +++++++++++++++----------- modules/imgproc/src/imgwarp.cpp | 653 ++++++++++++++++--------------- modules/imgproc/src/pyramids.cpp | 10 +- 7 files changed, 841 insertions(+), 632 deletions(-) diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 49a9cceae..fb69cd201 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -64,7 +64,7 @@ FUNCTOR_TEMPLATE(VLoadStore128); #if CV_SSE2 FUNCTOR_TEMPLATE(VLoadStore64); FUNCTOR_TEMPLATE(VLoadStore128Aligned); -#if CV_AVX +#if CV_AVX2 FUNCTOR_TEMPLATE(VLoadStore256); FUNCTOR_TEMPLATE(VLoadStore256Aligned); #endif @@ -2626,10 +2626,16 @@ struct Div_SIMD template <> struct Div_SIMD { + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + int operator() (const uchar * src1, const 
uchar * src2, uchar * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2672,10 +2678,16 @@ struct Div_SIMD template <> struct Div_SIMD { + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + int operator() (const schar * src1, const schar * src2, schar * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2718,10 +2730,16 @@ struct Div_SIMD template <> struct Div_SIMD { + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2763,10 +2781,16 @@ struct Div_SIMD template <> struct Div_SIMD { + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + int operator() (const short * src1, const short * src2, short * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2806,10 +2830,16 @@ struct Div_SIMD template <> struct Div_SIMD { + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + int operator() (const int * src1, const int * src2, int * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2902,10 +2932,16 @@ struct Recip_SIMD template <> struct Recip_SIMD { + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + int operator() (const uchar * src2, uchar * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2941,10 +2977,16 @@ struct Recip_SIMD template <> struct Recip_SIMD { + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + int operator() (const schar * src2, schar * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -2980,10 +3022,16 @@ struct Recip_SIMD template <> struct Recip_SIMD { + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + int operator() (const ushort * src2, ushort * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -3018,10 +3066,16 @@ struct Recip_SIMD template <> struct Recip_SIMD { + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + int operator() (const short * src2, short * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -3054,10 +3108,16 @@ struct Recip_SIMD template <> struct Recip_SIMD { + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + int operator() (const int * src2, int * dst, int width, double scale) const { int x = 0; + if (!haveSIMD) + return x; + __m128d v_scale = _mm_set1_pd(scale); __m128i v_zero = _mm_setzero_si128(); @@ -4126,7 +4186,8 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, 
size_t ste { int x =0; #if CV_SSE2 - if( USE_SSE2 ){ + if( USE_SSE2 ) + { __m128i m128 = code == CMP_GT ? _mm_setzero_si128() : _mm_set1_epi8 (-1); __m128i c128 = _mm_set1_epi8 (-128); for( ; x <= size.width - 16; x += 16 ) @@ -4142,7 +4203,7 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_GT ? vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -4164,7 +4225,8 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste { int x = 0; #if CV_SSE2 - if( USE_SSE2 ){ + if( USE_SSE2 ) + { __m128i m128 = code == CMP_EQ ? _mm_setzero_si128() : _mm_set1_epi8 (-1); for( ; x <= size.width - 16; x += 16 ) { @@ -4174,7 +4236,7 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste _mm_storeu_si128((__m128i*)(dst + x), r00); } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_EQ ? vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -4254,7 +4316,8 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st { int x =0; #if CV_SSE2 - if( USE_SSE2){// + if( USE_SSE2) + { __m128i m128 = code == CMP_GT ? _mm_setzero_si128() : _mm_set1_epi16 (-1); for( ; x <= size.width - 16; x += 16 ) { @@ -4278,7 +4341,7 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st x += 8; } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_GT ? vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -4293,8 +4356,7 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st vst1q_u8(dst+x, veorq_u8(vcombine_u8(t1, t2), mask)); } - - #endif + #endif for( ; x < size.width; x++ ){ dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m); @@ -4308,7 +4370,8 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st { int x = 0; #if CV_SSE2 - if( USE_SSE2 ){ + if( USE_SSE2 ) + { __m128i m128 = code == CMP_EQ ? _mm_setzero_si128() : _mm_set1_epi16 (-1); for( ; x <= size.width - 16; x += 16 ) { @@ -4332,7 +4395,7 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st x += 8; } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_EQ ? 
vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -4347,8 +4410,8 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st vst1q_u8(dst+x, veorq_u8(vcombine_u8(t1, t2), mask)); } - #endif - for( ; x < size.width; x++ ) + #endif + for( ; x < size.width; x++ ) dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m); } } diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 626a666a9..090acf550 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -158,7 +158,7 @@ struct VSplit2 \ VSplit2() \ { \ - support = true; \ + support = checkHardwareSupport(CV_CPU_SSE2); \ } \ \ void operator()(const data_type * src, \ @@ -191,7 +191,7 @@ struct VSplit3 \ VSplit3() \ { \ - support = true; \ + support = checkHardwareSupport(CV_CPU_SSE2); \ } \ \ void operator()(const data_type * src, \ @@ -229,7 +229,7 @@ struct VSplit4 \ VSplit4() \ { \ - support = true; \ + support = checkHardwareSupport(CV_CPU_SSE2); \ } \ \ void operator()(const data_type * src, data_type * dst0, data_type * dst1, \ @@ -502,7 +502,7 @@ struct VMerge4 bool support; }; -#define MERGE2_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor) \ +#define MERGE2_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor, se) \ template <> \ struct VMerge2 \ { \ @@ -513,7 +513,7 @@ struct VMerge2 \ VMerge2() \ { \ - support = true; \ + support = checkHardwareSupport(se); \ } \ \ void operator()(const data_type * src0, const data_type * src1, \ @@ -535,7 +535,7 @@ struct VMerge2 bool support; \ } -#define MERGE3_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor) \ +#define MERGE3_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor, se) \ template <> \ struct VMerge3 \ { \ @@ -546,7 +546,7 @@ struct VMerge3 \ VMerge3() \ { \ - support = true; \ + support = checkHardwareSupport(se); \ } \ \ void operator()(const data_type * src0, const data_type * src1, const data_type * src2,\ @@ -573,7 +573,7 @@ struct VMerge3 bool support; \ } -#define MERGE4_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor) \ +#define MERGE4_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor, se) \ template <> \ struct VMerge4 \ { \ @@ -584,7 +584,7 @@ struct VMerge4 \ VMerge4() \ { \ - support = true; \ + support = checkHardwareSupport(se); \ } \ \ void operator()(const data_type * src0, const data_type * src1, \ @@ -616,19 +616,19 @@ struct VMerge4 bool support; \ } -MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); -MERGE3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); -MERGE4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128); +MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128, CV_CPU_SSE2); +MERGE3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128, CV_CPU_SSE2); +MERGE4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128, CV_CPU_SSE2); #if CV_SSE4_1 -MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); -MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); -MERGE4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128); +MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128, CV_CPU_SSE4_1); +MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128, CV_CPU_SSE4_1); +MERGE4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, 
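/* Reduced sketch of the change to the MERGE*_KERNEL_TEMPLATE macros above:
 * instead of hard-coding support = true, every instantiation now passes the
 * CPU feature it actually requires (SSE2 for the epi8/ps kernels, SSE4.1 for
 * the epi16 ones). This is not the real OpenCV macro, only an illustration of
 * the extra 'se' parameter. */
#include <opencv2/core/utility.hpp>

#define MERGE_SUPPORT_SKETCH(flavor, cpu_flag)                    \
    struct VMergeSupport_##flavor                                 \
    {                                                             \
        bool support;                                             \
        VMergeSupport_##flavor()                                  \
        { support = cv::checkHardwareSupport(cpu_flag); }         \
    }

MERGE_SUPPORT_SKETCH(epi8,  CV_CPU_SSE2);    // byte interleave needs SSE2
MERGE_SUPPORT_SKETCH(epi16, CV_CPU_SSE4_1);  // 16-bit interleave needs SSE4.1
MERGE_SUPPORT_SKETCH(ps,    CV_CPU_SSE2);    // float interleave needs SSE2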
_mm_interleave_epi16, si128, CV_CPU_SSE4_1); #endif -MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); -MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); -MERGE4_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps); +MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps, CV_CPU_SSE2); +MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps, CV_CPU_SSE2); +MERGE4_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps, CV_CPU_SSE2); #endif @@ -4404,6 +4404,9 @@ struct Cvt_SIMD { int x = 0; + if (!USE_SSE2) + return x; + for ( ; x <= width - 8; x += 8) { __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); @@ -4430,6 +4433,9 @@ struct Cvt_SIMD { int x = 0; + if (!USE_SSE2) + return x; + for ( ; x <= width - 8; x += 8) { __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); @@ -4454,10 +4460,16 @@ struct Cvt_SIMD template <> struct Cvt_SIMD { + bool haveSIMD; + Cvt_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + int operator() (const double * src, ushort * dst, int width) const { int x = 0; + if (!haveSIMD) + return x; + for ( ; x <= width - 8; x += 8) { __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); @@ -4486,6 +4498,9 @@ struct Cvt_SIMD { int x = 0; + if (!USE_SSE2) + return x; + for ( ; x <= width - 8; x += 8) { __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); @@ -4512,6 +4527,9 @@ struct Cvt_SIMD { int x = 0; + if (!USE_SSE2) + return x; + for ( ; x <= width - 4; x += 4) { __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); @@ -4532,6 +4550,9 @@ struct Cvt_SIMD { int x = 0; + if (!USE_SSE2) + return x; + for ( ; x <= width - 4; x += 4) { __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); @@ -5114,8 +5135,9 @@ cvt_( const float* src, size_t sstep, { int x = 0; #if CV_SSE2 - if(USE_SSE2){ - for( ; x <= size.width - 8; x += 8 ) + if(USE_SSE2) + { + for( ; x <= size.width - 8; x += 8 ) { __m128 src128 = _mm_loadu_ps (src + x); __m128i src_int128 = _mm_cvtps_epi32 (src128); diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index d7f9dc537..13ada1d1d 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -597,15 +597,18 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre k = 0; #if CV_SSE2 - for ( ; k <= len - 4; k += 4) + if (USE_SSE2) { - __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), - _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); - __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), - _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); + for ( ; k <= len - 4; k += 4) + { + __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), + _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); + __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), + _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); - _mm_storeu_ps(buf[0] + k, v_dst0); - _mm_storeu_ps(buf[1] + k, v_dst1); + _mm_storeu_ps(buf[0] + k, v_dst0); + _mm_storeu_ps(buf[1] + k, v_dst1); + } } #endif @@ -619,11 +622,14 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre k = 0; #if CV_SSE2 - for ( ; k <= len - 4; k += 4) + if (USE_SSE2) { - __m128 v_src = _mm_loadu_ps(buf[0] + k); - _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); - _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + for ( ; k <= len - 4; k += 4) + { + __m128 v_src = _mm_loadu_ps(buf[0] + k); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); + _mm_storeu_pd(angle + k + 2, 
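/* Sketch of the double -> float conversion kernels being guarded above: two
 * _mm_loadu_pd loads yield four doubles, _mm_cvtpd_ps narrows each pair into
 * the low half of a float vector, and _mm_movelh_ps glues the halves together.
 * The early "if (!USE_SSE2) return x;" added by the patch simply leaves the
 * whole row to the scalar loop on pre-SSE2 hardware. Illustrative only. */
#include <emmintrin.h>

static int cvt64f32f_sketch(const double* src, float* dst, int width, bool haveSSE2)
{
    int x = 0;
    if (!haveSSE2)
        return x;                                        // scalar fallback does it all
    for (; x <= width - 4; x += 4)
    {
        __m128 lo = _mm_cvtpd_ps(_mm_loadu_pd(src + x));     // d0 d1 -> f0 f1 _ _
        __m128 hi = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); // d2 d3 -> f2 f3 _ _
        _mm_storeu_ps(dst + x, _mm_movelh_ps(lo, hi));       // f0 f1 f2 f3
    }
    return x;                                            // caller converts the tail
}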
_mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + } } #endif @@ -728,15 +734,18 @@ void cartToPolar( InputArray src1, InputArray src2, k = 0; #if CV_SSE2 - for ( ; k <= len - 4; k += 4) + if (USE_SSE2) { - __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), - _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); - __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), - _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); + for ( ; k <= len - 4; k += 4) + { + __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), + _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); + __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), + _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); - _mm_storeu_ps(buf[0] + k, v_dst0); - _mm_storeu_ps(buf[1] + k, v_dst1); + _mm_storeu_ps(buf[0] + k, v_dst0); + _mm_storeu_ps(buf[1] + k, v_dst1); + } } #endif @@ -750,11 +759,14 @@ void cartToPolar( InputArray src1, InputArray src2, k = 0; #if CV_SSE2 - for ( ; k <= len - 4; k += 4) + if (USE_SSE2) { - __m128 v_src = _mm_loadu_ps(buf[0] + k); - _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); - _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + for ( ; k <= len - 4; k += 4) + { + __m128 v_src = _mm_loadu_ps(buf[0] + k); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); + _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + } } #endif @@ -832,17 +844,16 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval, k1 = N/360.; #if CV_AVX2 - __m128d v_i = _mm_set_pd(1, 0); - __m128d v_k1 = _mm_set1_pd(k1); - __m128d v_1 = _mm_set1_pd(1); - __m128i v_N1 = _mm_set1_epi32(N - 1); - __m128i v_N4 = _mm_set1_epi32(N >> 2); - __m128d v_sin_a0 = _mm_set1_pd(sin_a0); - __m128d v_sin_a2 = _mm_set1_pd(sin_a2); - __m128d v_cos_a0 = _mm_set1_pd(cos_a0); - if (USE_AVX2) { + __m128d v_k1 = _mm_set1_pd(k1); + __m128d v_1 = _mm_set1_pd(1); + __m128i v_N1 = _mm_set1_epi32(N - 1); + __m128i v_N4 = _mm_set1_epi32(N >> 2); + __m128d v_sin_a0 = _mm_set1_pd(sin_a0); + __m128d v_sin_a2 = _mm_set1_pd(sin_a2); + __m128d v_cos_a0 = _mm_set1_pd(cos_a0); + for ( ; i <= len - 4; i += 4) { __m128 v_angle = _mm_loadu_ps(angle + i); @@ -859,8 +870,8 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval, __m128d v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t); __m128d v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1); - __m128d v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 1); - __m128d v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 1); + __m128d v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8); + __m128d v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8); __m128d v_sin_val_0 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b), _mm_mul_pd(v_cos_a, v_sin_b)); @@ -868,7 +879,7 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval, _mm_mul_pd(v_sin_a, v_sin_b)); // 2-3 - v_t = _mm_mul_pd(_mm_cvtps_pd(_mm_castsi128_ps(_mm_slli_si128(_mm_castps_si128(v_angle), 8))), v_k1); + v_t = _mm_mul_pd(_mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_angle), 8))), v_k1); v_it = _mm_cvtpd_epi32(v_t); v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it)); @@ -879,8 +890,8 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval, v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t); v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1); - v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 1); - v_cos_a = 
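/* Sketch of the SinCos_32f gather fix above: sin_table holds doubles, so the
 * _mm_i32gather_pd scale must be sizeof(double) == 8; with the old scale of 1
 * the table indices were treated as byte offsets and the wrong entries were
 * loaded. Requires AVX2 (compile with e.g. -mavx2). Illustrative only. */
#include <immintrin.h>

static inline __m128d gather2_sketch(const double* table, int i0, int i1)
{
    __m128i idx = _mm_setr_epi32(i0, i1, 0, 0);   // two 32-bit indices in the low lanes
    return _mm_i32gather_pd(table, idx, 8);       // 8 = stride of one double entry
}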
_mm_i32gather_pd(sin_table, v_cos_idx, 1); + v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8); + v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8); __m128d v_sin_val_1 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b), _mm_mul_pd(v_cos_a, v_sin_b)); @@ -1032,11 +1043,14 @@ void polarToCart( InputArray src1, InputArray src2, vst1q_f32(y + k, vmulq_f32(vld1q_f32(y + k), v_m)); } #elif CV_SSE2 - for( ; k <= len - 4; k += 4 ) + if (USE_SSE2) { - __m128 v_m = _mm_loadu_ps(mag + k); - _mm_storeu_ps(x + k, _mm_mul_ps(_mm_loadu_ps(x + k), v_m)); - _mm_storeu_ps(y + k, _mm_mul_ps(_mm_loadu_ps(y + k), v_m)); + for( ; k <= len - 4; k += 4 ) + { + __m128 v_m = _mm_loadu_ps(mag + k); + _mm_storeu_ps(x + k, _mm_mul_ps(_mm_loadu_ps(x + k), v_m)); + _mm_storeu_ps(y + k, _mm_mul_ps(_mm_loadu_ps(y + k), v_m)); + } } #endif @@ -1063,10 +1077,10 @@ void polarToCart( InputArray src1, InputArray src2, x[k] = buf[0][k]*m; y[k] = buf[1][k]*m; } else - for( k = 0; k < len; k++ ) - { - x[k] = buf[0][k]; y[k] = buf[1][k]; - } + { + std::memcpy(x, buf[0], sizeof(float) * len); + std::memcpy(y, buf[1], sizeof(float) * len); + } } if( ptrs[0] ) diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index 4eb17d6a1..1fcb9b54d 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -397,6 +397,8 @@ static int countNonZero_(const T* src, int len ) return nz; } +#if CV_SSE2 + static const uchar * initPopcountTable() { static uchar tab[256]; @@ -425,6 +427,8 @@ static const uchar * initPopcountTable() return tab; } +#endif + static int countNonZero8u( const uchar* src, int len ) { int i=0, nz = 0; @@ -645,7 +649,7 @@ static int countNonZero32f( const float* src, int len ) } static int countNonZero64f( const double* src, int len ) -{ +{ int i = 0, nz = 0; #if CV_SSE2 if (USE_SSE2) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 4efbcc5f8..5ae1170b4 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -967,6 +967,7 @@ struct Gray2RGB5x5 v_n7 = vdup_n_u8(~7); v_n3 = vdup_n_u8(~3); #elif CV_SSE2 + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); v_n7 = _mm_set1_epi16(~7); v_n3 = _mm_set1_epi16(~3); v_zero = _mm_setzero_si128(); @@ -988,21 +989,24 @@ struct Gray2RGB5x5 vst1q_u16((ushort *)dst + i, v_dst); } #elif CV_SSE2 - for ( ; i <= n - 16; i += 16 ) + if (haveSIMD) { - __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); + for ( ; i <= n - 16; i += 16 ) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); - __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); - __m128i v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), - _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), - _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); - _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + __m128i v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), + _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), + _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); - v_src_p = _mm_unpackhi_epi8(v_src, v_zero); - v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), - _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), - _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); - _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), + _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), + 
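/* Sketch of the polarToCart() simplification above: when no magnitude array is
 * supplied, the per-element copy loop is replaced by two std::memcpy calls,
 * which the compiler/libc can vectorize far more effectively. Illustrative
 * only; bufx/bufy stand in for buf[0]/buf[1]. */
#include <cstring>

static void copy_xy_sketch(float* x, float* y,
                           const float* bufx, const float* bufy, int len)
{
    std::memcpy(x, bufx, sizeof(float) * (size_t)len);   // x[k] = buf[0][k]
    std::memcpy(y, bufy, sizeof(float) * (size_t)len);   // y[k] = buf[1][k]
}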
_mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + } } #endif for ( ; i < n; i++ ) @@ -1021,21 +1025,24 @@ struct Gray2RGB5x5 vst1q_u16((ushort *)dst + i, v_dst); } #elif CV_SSE2 - for ( ; i <= n - 16; i += 8 ) + if (haveSIMD) { - __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); + for ( ; i <= n - 16; i += 8 ) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); - __m128i v_src_p = _mm_srli_epi16(_mm_unpacklo_epi8(v_src, v_zero), 3); - __m128i v_dst = _mm_or_si128(v_src_p, - _mm_or_si128(_mm_slli_epi32(v_src_p, 5), - _mm_slli_epi16(v_src_p, 10))); - _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); + __m128i v_src_p = _mm_srli_epi16(_mm_unpacklo_epi8(v_src, v_zero), 3); + __m128i v_dst = _mm_or_si128(v_src_p, + _mm_or_si128(_mm_slli_epi32(v_src_p, 5), + _mm_slli_epi16(v_src_p, 10))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); - v_src_p = _mm_srli_epi16(_mm_unpackhi_epi8(v_src, v_zero), 3); - v_dst = _mm_or_si128(v_src_p, - _mm_or_si128(_mm_slli_epi16(v_src_p, 5), - _mm_slli_epi16(v_src_p, 10))); - _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + v_src_p = _mm_srli_epi16(_mm_unpackhi_epi8(v_src, v_zero), 3); + v_dst = _mm_or_si128(v_src_p, + _mm_or_si128(_mm_slli_epi16(v_src_p, 5), + _mm_slli_epi16(v_src_p, 10))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + } } #endif for( ; i < n; i++ ) @@ -1051,6 +1058,7 @@ struct Gray2RGB5x5 uint8x8_t v_n7, v_n3; #elif CV_SSE2 __m128i v_n7, v_n3, v_zero; + bool haveSIMD; #endif }; @@ -1084,6 +1092,7 @@ struct RGB5x52Gray v_f8 = vdupq_n_u16(0xf8); v_fc = vdupq_n_u16(0xfc); #elif CV_SSE2 + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); v_b2y = _mm_set1_epi16(B2Y); v_g2y = _mm_set1_epi16(G2Y); v_r2y = _mm_set1_epi16(R2Y); @@ -1116,37 +1125,40 @@ struct RGB5x52Gray vst1_u8(dst + i, vmovn_u16(vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)))); } #elif CV_SSE2 - __m128i v_zero = _mm_setzero_si128(); - - for ( ; i <= n - 8; i += 8) + if (haveSIMD) { - __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); - __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), - v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 3), v_fc), - v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 8), v_f8); + __m128i v_zero = _mm_setzero_si128(); - __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); - __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); - __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); - __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); - __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); - __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); + for ( ; i <= n - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); + __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), + v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 3), v_fc), + v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 8), v_f8); - __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), - _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); - v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), - _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); + __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); + __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); + __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); + __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); + __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); + __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); - __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), 
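/* Scalar sketch of the Gray -> RGB565 packing that the SSE2 loop above
 * performs (the patch only adds the haveSIMD run-time guard around it): a gray
 * value g is replicated into the 5-6-5 fields, i.e. blue = g>>3, green = g>>2,
 * red = g>>3. Illustrative equivalent of the shift/mask sequence. */
static inline unsigned short gray2rgb565_sketch(unsigned char g)
{
    return (unsigned short)((g >> 3)                // 5-bit blue  (bits 0..4)
                          | ((g & ~3) << 3)         // 6-bit green (bits 5..10)
                          | ((g & ~7) << 8));       // 5-bit red   (bits 11..15)
}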
- _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); - v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), - _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); + __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), + _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); - v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); - v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), + _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); - __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); - _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); + v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + + __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); + _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + } } #endif for ( ; i < n; i++) @@ -1177,37 +1189,40 @@ struct RGB5x52Gray vst1_u8(dst + i, vmovn_u16(vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)))); } #elif CV_SSE2 - __m128i v_zero = _mm_setzero_si128(); - - for ( ; i <= n - 8; i += 8) + if (haveSIMD) { - __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); - __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), - v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 2), v_f8), - v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 7), v_f8); + __m128i v_zero = _mm_setzero_si128(); - __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); - __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); - __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); - __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); - __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); - __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); + for ( ; i <= n - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); + __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), + v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 2), v_f8), + v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 7), v_f8); - __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), - _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); - v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), - _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); + __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); + __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); + __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); + __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); + __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); + __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); - __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), - _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); - v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), - _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); + __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), + _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); - v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); - v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), + _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); - __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); - _mm_storel_epi64((__m128i *)(dst 
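/* Scalar sketch of the RGB565 -> gray kernel guarded above: the 5/6/5 fields
 * are expanded back to 8-bit ranges and combined with fixed-point BT.601
 * weights scaled by 2^yuv_shift. The constants below are the usual OpenCV
 * values (B2Y/G2Y/R2Y, yuv_shift = 14) restated purely for illustration. */
static inline unsigned char rgb565_to_gray_sketch(unsigned short v)
{
    const int yuv_shift = 14;
    const int R2Y = 4899, G2Y = 9617, B2Y = 1868;       // ~0.299/0.587/0.114 * 2^14
    int b = (v << 3) & 0xf8;                            // bits 0..4   -> 8-bit blue
    int g = (v >> 3) & 0xfc;                            // bits 5..10  -> 8-bit green
    int r = (v >> 8) & 0xf8;                            // bits 11..15 -> 8-bit red
    int gray = (b * B2Y + g * G2Y + r * R2Y + (1 << (yuv_shift - 1))) >> yuv_shift;
    return (unsigned char)gray;
}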
+ i), _mm_packus_epi16(v_dst, v_zero)); + v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); + v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + + __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); + _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + } } #endif for ( ; i < n; i++) @@ -1226,6 +1241,7 @@ struct RGB5x52Gray uint32x4_t v_delta; uint16x8_t v_f8, v_fc; #elif CV_SSE2 + bool haveSIMD; __m128i v_b2y, v_g2y, v_r2y; __m128i v_delta; __m128i v_f8, v_fc; @@ -1445,7 +1461,9 @@ struct RGB2Gray float32x4_t v_cb, v_cg, v_cr; }; -#elif CV_SSE4_1 +#elif CV_SSE2 + +#if CV_SSE4_1 template <> struct RGB2Gray @@ -1464,6 +1482,8 @@ struct RGB2Gray v_cg = _mm_set1_epi16((short)coeffs[1]); v_cr = _mm_set1_epi16((short)coeffs[2]); v_delta = _mm_set1_epi32(1 << (yuv_shift - 1)); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } // 16s x 8 @@ -1494,7 +1514,7 @@ struct RGB2Gray { int scn = srccn, cb = coeffs[0], cg = coeffs[1], cr = coeffs[2], i = 0; - if (scn == 3) + if (scn == 3 && haveSIMD) { for ( ; i <= n - 16; i += 16, src += scn * 16) { @@ -1519,7 +1539,7 @@ struct RGB2Gray _mm_storeu_si128((__m128i *)(dst + i + 8), v_gray1); } } - else if (scn == 4) + else if (scn == 4 && haveSIMD) { for ( ; i <= n - 16; i += 16, src += scn * 16) { @@ -1554,8 +1574,11 @@ struct RGB2Gray int srccn, coeffs[3]; __m128i v_cb, v_cg, v_cr; __m128i v_delta; + bool haveSIMD; }; +#endif // CV_SSE4_1 + template <> struct RGB2Gray { @@ -1571,6 +1594,8 @@ struct RGB2Gray v_cb = _mm_set1_ps(coeffs[0]); v_cg = _mm_set1_ps(coeffs[1]); v_cr = _mm_set1_ps(coeffs[2]); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } void process(__m128 v_r, __m128 v_g, __m128 v_b, @@ -1586,7 +1611,7 @@ struct RGB2Gray int scn = srccn, i = 0; float cb = coeffs[0], cg = coeffs[1], cr = coeffs[2]; - if (scn == 3) + if (scn == 3 && haveSIMD) { for ( ; i <= n - 8; i += 8, src += scn * 8) { @@ -1611,7 +1636,7 @@ struct RGB2Gray _mm_storeu_ps(dst + i + 4, v_gray1); } } - else if (scn == 4) + else if (scn == 4 && haveSIMD) { for ( ; i <= n - 8; i += 8, src += scn * 8) { @@ -1646,6 +1671,7 @@ struct RGB2Gray int srccn; float coeffs[3]; __m128 v_cb, v_cg, v_cr; + bool haveSIMD; }; #else @@ -1791,6 +1817,8 @@ struct RGB2YCrCb_f v_c3 = _mm_set1_ps(coeffs[3]); v_c4 = _mm_set1_ps(coeffs[4]); v_delta = _mm_set1_ps(ColorChannel::half()); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } void process(__m128 v_r, __m128 v_g, __m128 v_b, @@ -1811,7 +1839,7 @@ struct RGB2YCrCb_f float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; n *= 3; - if (scn == 3 || scn == 4) + if (haveSIMD) { for ( ; i <= n - 24; i += 24, src += 8 * scn) { @@ -1862,6 +1890,7 @@ struct RGB2YCrCb_f int srccn, blueIdx; float coeffs[5]; __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_delta; + bool haveSIMD; }; #endif @@ -2138,6 +2167,8 @@ struct RGB2YCrCb_i v_delta = _mm_set1_epi32(ColorChannel::half()*(1 << yuv_shift)); v_delta = _mm_add_epi32(v_delta, v_delta2); v_zero = _mm_setzero_si128(); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } // 16u x 8 @@ -2184,7 +2215,7 @@ struct RGB2YCrCb_i int delta = ColorChannel::half()*(1 << yuv_shift); n *= 3; - if (scn == 3 || scn == 4) + if (haveSIMD) { for ( ; i <= n - 96; i += 96, src += scn * 32) { @@ -2261,6 +2292,7 @@ struct RGB2YCrCb_i __m128i v_c0, v_c1, v_c2; __m128i v_c3, v_c4, v_delta, v_delta2; __m128i v_zero; + bool haveSIMD; }; template <> @@ -2285,6 +2317,8 @@ struct RGB2YCrCb_i v_delta = _mm_set1_epi32(ColorChannel::half()*(1 << yuv_shift)); v_delta = _mm_add_epi32(v_delta, v_delta2); 
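/* The pattern behind the RGB2Gray restructuring above (and this whole patch):
 * CV_SSE2 / CV_SSE4_1 are compile-time switches (may the compiler emit those
 * instructions at all?), while checkHardwareSupport() is the run-time test
 * (does this particular CPU have them?). Splitting the old "#elif CV_SSE4_1"
 * into an outer CV_SSE2 branch with an inner CV_SSE4_1 block lets the float
 * kernel keep working on SSE2-only machines. Sketch, illustrative only. */
#include <opencv2/core/utility.hpp>

static bool canUseSse41_sketch()
{
#if CV_SSE4_1                                        // compiled in at all?
    return cv::checkHardwareSupport(CV_CPU_SSE4_1);  // and present on this CPU?
#else
    return false;
#endif
}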
v_zero = _mm_setzero_si128(); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } // 16u x 8 @@ -2331,7 +2365,7 @@ struct RGB2YCrCb_i int delta = ColorChannel::half()*(1 << yuv_shift); n *= 3; - if (scn == 3 || scn == 4) + if (haveSIMD) { for ( ; i <= n - 48; i += 48, src += scn * 16) { @@ -2387,6 +2421,7 @@ struct RGB2YCrCb_i __m128i v_c0, v_c1, v_c2; __m128i v_c3, v_c4, v_delta, v_delta2; __m128i v_zero; + bool haveSIMD; }; #endif // CV_SSE4_1 @@ -2518,6 +2553,8 @@ struct YCrCb2RGB_f v_c3 = _mm_set1_ps(coeffs[3]); v_delta = _mm_set1_ps(ColorChannel::half()); v_alpha = _mm_set1_ps(ColorChannel::max()); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } void process(__m128 v_y, __m128 v_cr, __m128 v_cb, @@ -2545,7 +2582,7 @@ struct YCrCb2RGB_f float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; n *= 3; - if (dcn == 3 || dcn == 4) + if (haveSIMD) { for ( ; i <= n - 24; i += 24, dst += 8 * dcn) { @@ -2606,6 +2643,7 @@ struct YCrCb2RGB_f float coeffs[4]; __m128 v_c0, v_c1, v_c2, v_c3, v_alpha, v_delta; + bool haveSIMD; }; #endif @@ -2920,6 +2958,7 @@ struct YCrCb2RGB_i v_alpha = _mm_set1_epi8(*(char *)&alpha); useSSE = coeffs[0] <= std::numeric_limits::max(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } // 16s x 8 @@ -2975,7 +3014,7 @@ struct YCrCb2RGB_i int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; n *= 3; - if ((dcn == 3 || dcn == 4) && useSSE) + if (haveSIMD && useSSE) { for ( ; i <= n - 96; i += 96, dst += dcn * 32) { @@ -3066,7 +3105,7 @@ struct YCrCb2RGB_i } int dstcn, blueIdx; int coeffs[4]; - bool useSSE; + bool useSSE, haveSIMD; __m128i v_c0, v_c1, v_c2, v_c3, v_delta2; __m128i v_delta, v_alpha, v_zero; @@ -3221,6 +3260,8 @@ struct RGB2XYZ_f v_c6 = _mm_set1_ps(coeffs[6]); v_c7 = _mm_set1_ps(coeffs[7]); v_c8 = _mm_set1_ps(coeffs[8]); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } void process(__m128 v_r, __m128 v_g, __m128 v_b, @@ -3248,7 +3289,7 @@ struct RGB2XYZ_f n *= 3; - if (scn == 3 || scn == 4) + if (haveSIMD) { for ( ; i <= n - 24; i += 24, src += 8 * scn) { @@ -3301,6 +3342,7 @@ struct RGB2XYZ_f int srccn; float coeffs[9]; __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; + bool haveSIMD; }; @@ -3657,6 +3699,8 @@ struct XYZ2RGB_f v_c8 = _mm_set1_ps(coeffs[8]); v_alpha = _mm_set1_ps(ColorChannel::max()); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } void process(__m128 v_x, __m128 v_y, __m128 v_z, @@ -3685,7 +3729,7 @@ struct XYZ2RGB_f n *= 3; int i = 0; - if (dcn == 3 || dcn == 4) + if (haveSIMD) { for ( ; i <= n - 24; i += 24, dst += 8 * dcn) { @@ -3745,6 +3789,7 @@ struct XYZ2RGB_f __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; __m128 v_alpha; + bool haveSIMD; }; #endif // CV_SSE2 @@ -4267,6 +4312,7 @@ struct HSV2RGB_b v_scale_inv = _mm_set1_ps(1.f/255.f); v_scale = _mm_set1_ps(255.0f); v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } @@ -4331,36 +4377,39 @@ struct HSV2RGB_b vst3q_f32(buf + j + 12, v_dst); } #elif CV_SSE2 - for ( ; j <= (dn - 32) * 3; j += 96) + if (haveSIMD) { - __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); - __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); - __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); - __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); - __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); - __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i 
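/* Scalar sketch of the per-pixel math behind the RGB2YCrCb_i kernels above
 * (the patch only swaps the "scn == 3 || scn == 4" test, which is always true
 * here, for a real run-time haveSIMD check). The fixed-point coefficients are
 * the usual OpenCV BT.601 values scaled by 2^14; they are restated here only
 * for illustration. */
static inline unsigned char sat_u8_sketch(int v)
{ return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v); }

static inline void rgb2ycrcb_i_sketch(int r, int g, int b,
                                      unsigned char& Y, unsigned char& Cr,
                                      unsigned char& Cb)
{
    const int yuv_shift = 14, half = 128;
    const int C0 = 4899, C1 = 9617, C2 = 1868;    // R,G,B -> Y weights
    const int C3 = 11682, C4 = 9241;              // (R-Y)->Cr, (B-Y)->Cb weights
    const int delta2 = 1 << (yuv_shift - 1);
    const int delta  = half * (1 << yuv_shift) + delta2;
    int y = (r * C0 + g * C1 + b * C2 + delta2) >> yuv_shift;
    Y  = sat_u8_sketch(y);
    Cr = sat_u8_sketch(((r - y) * C3 + delta) >> yuv_shift);
    Cb = sat_u8_sketch(((b - y) * C4 + delta) >> yuv_shift);
}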
const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); - process(_mm_unpacklo_epi8(v_r0, v_zero), - _mm_unpacklo_epi8(v_g0, v_zero), - _mm_unpacklo_epi8(v_b0, v_zero), - buf + j); + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); - process(_mm_unpackhi_epi8(v_r0, v_zero), - _mm_unpackhi_epi8(v_g0, v_zero), - _mm_unpackhi_epi8(v_b0, v_zero), - buf + j + 24); + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); - process(_mm_unpacklo_epi8(v_r1, v_zero), - _mm_unpacklo_epi8(v_g1, v_zero), - _mm_unpacklo_epi8(v_b1, v_zero), - buf + j + 48); + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); - process(_mm_unpackhi_epi8(v_r1, v_zero), - _mm_unpackhi_epi8(v_g1, v_zero), - _mm_unpackhi_epi8(v_b1, v_zero), - buf + j + 72); + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } } #endif @@ -4403,7 +4452,7 @@ struct HSV2RGB_b } } #elif CV_SSE2 - if (dcn == 3) + if (dcn == 3 && haveSIMD) { for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) { @@ -4445,6 +4494,7 @@ struct HSV2RGB_b #elif CV_SSE2 __m128 v_scale_inv, v_scale; __m128i v_zero; + bool haveSIMD; #endif }; @@ -4520,6 +4570,7 @@ struct RGB2HLS_b v_scale_inv = _mm_set1_ps(1.f/255.f); v_scale = _mm_set1_ps(255.f); v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } @@ -4589,7 +4640,7 @@ struct RGB2HLS_b vst3q_f32(buf + j + 12, v_dst); } #elif CV_SSE2 - if (scn == 3) + if (scn == 3 && haveSIMD) { for ( ; j <= (dn * 3 - 16); j += 16, src += 16) { @@ -4633,38 +4684,41 @@ struct RGB2HLS_b vst3_u8(dst + j, v_dst); } #elif CV_SSE2 - for ( ; j <= (dn - 32) * 3; j += 96) + if (haveSIMD) { - __m128i v_h_0, v_l_0, v_s_0; - process(buf + j, - v_h_0, v_l_0, v_s_0); + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_h_0, v_l_0, v_s_0; + process(buf + j, + v_h_0, v_l_0, v_s_0); - __m128i v_h_1, v_l_1, v_s_1; - process(buf + j + 24, - v_h_1, v_l_1, v_s_1); + __m128i v_h_1, v_l_1, v_s_1; + process(buf + j + 24, + v_h_1, v_l_1, v_s_1); - __m128i v_h0 = _mm_packus_epi16(v_h_0, v_h_1); - __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); - __m128i v_s0 = _mm_packus_epi16(v_s_0, v_s_1); + __m128i v_h0 = _mm_packus_epi16(v_h_0, v_h_1); + __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_s0 = _mm_packus_epi16(v_s_0, v_s_1); - process(buf + j + 48, - v_h_0, v_l_0, v_s_0); + process(buf + j + 48, + v_h_0, v_l_0, v_s_0); - process(buf + j + 72, - v_h_1, v_l_1, v_s_1); + process(buf + j + 72, + v_h_1, v_l_1, v_s_1); - __m128i v_h1 = _mm_packus_epi16(v_h_0, v_h_1); - __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); - __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); + __m128i v_h1 = _mm_packus_epi16(v_h_0, v_h_1); + __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); - _mm_interleave_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); + _mm_interleave_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, 
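/* Sketch of the uchar -> float widening used by process() in the HSV/HLS/Lab
 * kernels above: bytes are zero-extended to 16 bit with _mm_unpacklo_epi8,
 * then to 32 bit, converted to float, and the channels that need it are scaled
 * by 1/255 (v_scale_inv). Illustrative only. */
#include <emmintrin.h>

static inline void u8x8_to_f32_sketch(__m128i v_u8lo /* 8 bytes in the low half */,
                                      float* dst /* receives 8 floats */)
{
    __m128i v_zero = _mm_setzero_si128();
    __m128i v_u16  = _mm_unpacklo_epi8(v_u8lo, v_zero);               // 8 x u16
    __m128  scale  = _mm_set1_ps(1.f / 255.f);
    __m128  lo = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_u16, v_zero));  // 4 x f32
    __m128  hi = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_u16, v_zero));  // 4 x f32
    _mm_storeu_ps(dst,     _mm_mul_ps(lo, scale));                    // values in [0,1]
    _mm_storeu_ps(dst + 4, _mm_mul_ps(hi, scale));
}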
v_s1); - _mm_storeu_si128((__m128i *)(dst + j), v_h0); - _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); - _mm_storeu_si128((__m128i *)(dst + j + 32), v_l0); - _mm_storeu_si128((__m128i *)(dst + j + 48), v_l1); - _mm_storeu_si128((__m128i *)(dst + j + 64), v_s0); - _mm_storeu_si128((__m128i *)(dst + j + 80), v_s1); + _mm_storeu_si128((__m128i *)(dst + j), v_h0); + _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); + _mm_storeu_si128((__m128i *)(dst + j + 32), v_l0); + _mm_storeu_si128((__m128i *)(dst + j + 48), v_l1); + _mm_storeu_si128((__m128i *)(dst + j + 64), v_s0); + _mm_storeu_si128((__m128i *)(dst + j + 80), v_s1); + } } #endif for( ; j < dn*3; j += 3 ) @@ -4684,6 +4738,7 @@ struct RGB2HLS_b #elif CV_SSE2 __m128 v_scale, v_scale_inv; __m128i v_zero; + bool haveSIMD; #endif }; @@ -4767,6 +4822,7 @@ struct HLS2RGB_b v_scale_inv = _mm_set1_ps(1.f/255.f); v_scale = _mm_set1_ps(255.f); v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } @@ -4831,36 +4887,39 @@ struct HLS2RGB_b vst3q_f32(buf + j + 12, v_dst); } #elif CV_SSE2 - for ( ; j <= (dn - 32) * 3; j += 96) + if (haveSIMD) { - __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); - __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); - __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); - __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); - __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); - __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); - process(_mm_unpacklo_epi8(v_r0, v_zero), - _mm_unpacklo_epi8(v_g0, v_zero), - _mm_unpacklo_epi8(v_b0, v_zero), - buf + j); + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); - process(_mm_unpackhi_epi8(v_r0, v_zero), - _mm_unpackhi_epi8(v_g0, v_zero), - _mm_unpackhi_epi8(v_b0, v_zero), - buf + j + 24); + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); - process(_mm_unpacklo_epi8(v_r1, v_zero), - _mm_unpacklo_epi8(v_g1, v_zero), - _mm_unpacklo_epi8(v_b1, v_zero), - buf + j + 48); + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); - process(_mm_unpackhi_epi8(v_r1, v_zero), - _mm_unpackhi_epi8(v_g1, v_zero), - _mm_unpackhi_epi8(v_b1, v_zero), - buf + j + 72); + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } } #endif for( ; j < dn*3; j += 3 ) @@ -4902,7 +4961,7 @@ struct HLS2RGB_b } } #elif CV_SSE2 - if (dcn == 3) + if (dcn == 3 && haveSIMD) { for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) { @@ -4944,6 +5003,7 @@ struct HLS2RGB_b #elif CV_SSE2 __m128 v_scale, v_scale_inv; __m128i v_zero; + bool haveSIMD; #endif }; @@ -5264,6 +5324,7 @@ struct Lab2RGB_b v_scale = _mm_set1_ps(255.f); v_128 = _mm_set1_ps(128.0f); v_zero = 
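/* Sketch of the float -> uchar narrowing used when the HLS/Luv results in buf
 * are written back above: convert to int32 with rounding, pack to int16 with
 * signed saturation, then to uint8 with unsigned saturation, before the
 * channels are re-interleaved. Illustrative only. */
#include <emmintrin.h>

static inline void f32x8_to_u8_sketch(const float* src /* 8 floats */,
                                      unsigned char* dst /* 8 bytes */)
{
    __m128i lo  = _mm_cvtps_epi32(_mm_loadu_ps(src));          // round to nearest int
    __m128i hi  = _mm_cvtps_epi32(_mm_loadu_ps(src + 4));
    __m128i p16 = _mm_packs_epi32(lo, hi);                     // 8 x i16, saturated
    __m128i p8  = _mm_packus_epi16(p16, _mm_setzero_si128());  // 8 x u8 + zero padding
    _mm_storel_epi64((__m128i*)dst, p8);                       // store the low 8 bytes
}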
_mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } @@ -5330,36 +5391,39 @@ struct Lab2RGB_b vst3q_f32(buf + j + 12, v_dst); } #elif CV_SSE2 - for ( ; j <= (dn - 32) * 3; j += 96) + if (haveSIMD) { - __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); - __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); - __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); - __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); - __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); - __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); - process(_mm_unpacklo_epi8(v_r0, v_zero), - _mm_unpacklo_epi8(v_g0, v_zero), - _mm_unpacklo_epi8(v_b0, v_zero), - buf + j); + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); - process(_mm_unpackhi_epi8(v_r0, v_zero), - _mm_unpackhi_epi8(v_g0, v_zero), - _mm_unpackhi_epi8(v_b0, v_zero), - buf + j + 24); + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); - process(_mm_unpacklo_epi8(v_r1, v_zero), - _mm_unpacklo_epi8(v_g1, v_zero), - _mm_unpacklo_epi8(v_b1, v_zero), - buf + j + 48); + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); - process(_mm_unpackhi_epi8(v_r1, v_zero), - _mm_unpackhi_epi8(v_g1, v_zero), - _mm_unpackhi_epi8(v_b1, v_zero), - buf + j + 72); + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } } #endif @@ -5402,7 +5466,7 @@ struct Lab2RGB_b } } #elif CV_SSE2 - if (dcn == 3) + if (dcn == 3 && haveSIMD) { for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) { @@ -5445,6 +5509,7 @@ struct Lab2RGB_b #elif CV_SSE2 __m128 v_scale, v_scale_inv, v_128; __m128i v_zero; + bool haveSIMD; #endif }; @@ -5627,6 +5692,7 @@ struct RGB2Luv_b v_coeff2 = _mm_set1_ps(96.525423728813564f); v_coeff3 = _mm_set1_ps(0.9732824427480916f); v_coeff4 = _mm_set1_ps(136.259541984732824f); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } @@ -5698,7 +5764,7 @@ struct RGB2Luv_b vst3q_f32(buf + j + 12, v_dst); } #elif CV_SSE2 - if (scn == 3) + if (scn == 3 && haveSIMD) { for ( ; j <= (dn * 3 - 16); j += 16, src += 16) { @@ -5743,38 +5809,41 @@ struct RGB2Luv_b vst3_u8(dst + j, v_dst); } #elif CV_SSE2 - for ( ; j <= (dn - 32) * 3; j += 96) + if (haveSIMD) { - __m128i v_l_0, v_u_0, v_v_0; - process(buf + j, - v_l_0, v_u_0, v_v_0); + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_l_0, v_u_0, v_v_0; + process(buf + j, + v_l_0, v_u_0, v_v_0); - __m128i v_l_1, v_u_1, v_v_1; - process(buf + j + 24, - v_l_1, v_u_1, v_v_1); + __m128i v_l_1, v_u_1, v_v_1; + process(buf + j + 24, + v_l_1, v_u_1, v_v_1); - __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); - __m128i v_u0 = _mm_packus_epi16(v_u_0, v_u_1); - __m128i v_v0 = _mm_packus_epi16(v_v_0, 
v_v_1); + __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_u0 = _mm_packus_epi16(v_u_0, v_u_1); + __m128i v_v0 = _mm_packus_epi16(v_v_0, v_v_1); - process(buf + j + 48, - v_l_0, v_u_0, v_v_0); + process(buf + j + 48, + v_l_0, v_u_0, v_v_0); - process(buf + j + 72, - v_l_1, v_u_1, v_v_1); + process(buf + j + 72, + v_l_1, v_u_1, v_v_1); - __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); - __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); - __m128i v_v1 = _mm_packus_epi16(v_v_0, v_v_1); + __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); + __m128i v_v1 = _mm_packus_epi16(v_v_0, v_v_1); - _mm_interleave_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); + _mm_interleave_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); - _mm_storeu_si128((__m128i *)(dst + j), v_l0); - _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); - _mm_storeu_si128((__m128i *)(dst + j + 32), v_u0); - _mm_storeu_si128((__m128i *)(dst + j + 48), v_u1); - _mm_storeu_si128((__m128i *)(dst + j + 64), v_v0); - _mm_storeu_si128((__m128i *)(dst + j + 80), v_v1); + _mm_storeu_si128((__m128i *)(dst + j), v_l0); + _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); + _mm_storeu_si128((__m128i *)(dst + j + 32), v_u0); + _mm_storeu_si128((__m128i *)(dst + j + 48), v_u1); + _mm_storeu_si128((__m128i *)(dst + j + 64), v_v0); + _mm_storeu_si128((__m128i *)(dst + j + 80), v_v1); + } } #endif @@ -5796,6 +5865,7 @@ struct RGB2Luv_b #elif CV_SSE2 __m128 v_scale, v_scale_inv, v_coeff1, v_coeff2, v_coeff3, v_coeff4; __m128i v_zero; + bool haveSIMD; #endif }; @@ -5824,6 +5894,7 @@ struct Luv2RGB_b v_140 = _mm_set1_ps(140.f); v_scale = _mm_set1_ps(255.f); v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } @@ -5847,7 +5918,7 @@ struct Luv2RGB_b v_u1 = _mm_sub_ps(_mm_mul_ps(v_u1, v_coeff1), v_134); v_v0 = _mm_sub_ps(_mm_mul_ps(v_v0, v_coeff2), v_140); v_v1 = _mm_sub_ps(_mm_mul_ps(v_v1, v_coeff2), v_140); - + _mm_interleave_ps(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); _mm_store_ps(buf, v_l0); @@ -5890,36 +5961,39 @@ struct Luv2RGB_b vst3q_f32(buf + j + 12, v_dst); } #elif CV_SSE2 - for ( ; j <= (dn - 32) * 3; j += 96) + if (haveSIMD) { - __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); - __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); - __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); - __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); - __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); - __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); - _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); - process(_mm_unpacklo_epi8(v_r0, v_zero), - _mm_unpacklo_epi8(v_g0, v_zero), - _mm_unpacklo_epi8(v_b0, v_zero), - buf + j); + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); - process(_mm_unpackhi_epi8(v_r0, v_zero), - _mm_unpackhi_epi8(v_g0, v_zero), - _mm_unpackhi_epi8(v_b0, v_zero), - buf + j + 24); + process(_mm_unpackhi_epi8(v_r0, 
v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); - process(_mm_unpacklo_epi8(v_r1, v_zero), - _mm_unpacklo_epi8(v_g1, v_zero), - _mm_unpacklo_epi8(v_b1, v_zero), - buf + j + 48); + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); - process(_mm_unpackhi_epi8(v_r1, v_zero), - _mm_unpackhi_epi8(v_g1, v_zero), - _mm_unpackhi_epi8(v_b1, v_zero), - buf + j + 72); + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } } #endif for( ; j < dn*3; j += 3 ) @@ -5961,7 +6035,7 @@ struct Luv2RGB_b } } #elif CV_SSE2 - if (dcn == 3) + if (dcn == 3 && haveSIMD) { for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) { @@ -6004,6 +6078,7 @@ struct Luv2RGB_b #elif CV_SSE2 __m128 v_scale, v_scale_inv, v_coeff1, v_coeff2, v_134, v_140; __m128i v_zero; + bool haveSIMD; #endif }; diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 4880819b9..304210f84 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -1963,9 +1963,9 @@ private: struct ResizeAreaFastVec_SIMD_32f { ResizeAreaFastVec_SIMD_32f(int _scale_x, int _scale_y, int _cn, int _step) : - scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step) + cn(_cn), step(_step) { - fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4); + fast_mode = _scale_x == 2 && _scale_y == 2 && (cn == 1 || cn == 4); } int operator() (const float * S, float * D, int w) const @@ -2005,7 +2005,6 @@ struct ResizeAreaFastVec_SIMD_32f } private: - int scale_x, scale_y; int cn; bool fast_mode; int step; @@ -2289,9 +2288,10 @@ private: struct ResizeAreaFastVec_SIMD_32f { ResizeAreaFastVec_SIMD_32f(int _scale_x, int _scale_y, int _cn, int _step) : - scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step) + cn(_cn), step(_step) { - fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4); + fast_mode = _scale_x == 2 && _scale_y == 2 && (cn == 1 || cn == 4); + fast_mode = fast_mode && checkHardwareSupport(CV_CPU_SSE2); } int operator() (const float * S, float * D, int w) const @@ -2335,7 +2335,6 @@ struct ResizeAreaFastVec_SIMD_32f } private: - int scale_x, scale_y; int cn; bool fast_mode; int step; @@ -4817,6 +4816,13 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, size.height = 1; } +#if CV_SSE2 + bool useSSE2 = checkHardwareSupport(CV_CPU_SSE2); +#endif +#if CV_SSE4_1 + bool useSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1); +#endif + const float scale = 1.f/INTER_TAB_SIZE; int x, y; for( y = 0; y < size.height; y++ ) @@ -4848,24 +4854,27 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst2q_s16(dst1 + (x << 1), v_dst); } #elif CV_SSE4_1 - for( ; x <= size.width - 16; x += 16 ) + if (useSSE4_1) { - __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)), - _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4))); - __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 8)), - _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 12))); + for( ; x <= size.width - 16; x += 16 ) + { + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4))); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 8)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 12))); - __m128i v_dst2 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x)), - _mm_cvtps_epi32(_mm_loadu_ps(src2f + 
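/* Scalar sketch of what ResizeAreaFastVec_SIMD_32f accelerates in the imgwarp
 * hunks above (2x area downscale): each output float is the mean of a 2x2
 * block taken from two consecutive source rows. The patch narrows fast_mode to
 * cn == 1 or 4 and adds the run-time SSE2 check. Illustrative only; S0/S1 are
 * the two source rows for one output row, single channel. */
static void resizeArea2x2_row_sketch(const float* S0, const float* S1,
                                     float* D, int dwidth)
{
    const float scale = 0.25f;
    for (int dx = 0; dx < dwidth; dx++)
    {
        int sx = dx * 2;
        D[dx] = (S0[sx] + S0[sx + 1] + S1[sx] + S1[sx + 1]) * scale;  // 2x2 mean
    }
}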
x + 4))); - __m128i v_dst3 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 8)), - _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 12))); + __m128i v_dst2 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 4))); + __m128i v_dst3 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 8)), + _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 12))); - _mm_interleave_epi16(v_dst0, v_dst1, v_dst2, v_dst3); + _mm_interleave_epi16(v_dst0, v_dst1, v_dst2, v_dst3); - _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst0); - _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst1); - _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst2); - _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst3); + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst0); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst1); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst2); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst3); + } } #endif for( ; x < size.width; x++ ) @@ -4902,47 +4911,50 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } #elif CV_SSE4_1 - __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); - __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); - - for( ; x <= size.width - 16; x += 16 ) + if (useSSE4_1) { - __m128i v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x), v_its)); - __m128i v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 4), v_its)); - __m128i v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x), v_its)); - __m128i v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 4), v_its)); + __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); + __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); - __m128i v_dst10 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), - _mm_srai_epi32(v_ix1, INTER_BITS)); - __m128i v_dst12 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), - _mm_srai_epi32(v_iy1, INTER_BITS)); - __m128i v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), - _mm_and_si128(v_ix0, v_its1)); - __m128i v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), - _mm_and_si128(v_ix1, v_its1)); - _mm_storeu_si128((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst20, v_dst21)); + for( ; x <= size.width - 16; x += 16 ) + { + __m128i v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x), v_its)); + __m128i v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 4), v_its)); + __m128i v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x), v_its)); + __m128i v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 4), v_its)); - v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 8), v_its)); - v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 12), v_its)); - v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 8), v_its)); - v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 12), v_its)); + __m128i v_dst10 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), + _mm_srai_epi32(v_ix1, INTER_BITS)); + __m128i v_dst12 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), + _mm_srai_epi32(v_iy1, INTER_BITS)); + __m128i v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), + _mm_and_si128(v_ix0, v_its1)); + __m128i v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), + _mm_and_si128(v_ix1, v_its1)); + _mm_storeu_si128((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst20, v_dst21)); - __m128i v_dst11 = 
_mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), - _mm_srai_epi32(v_ix1, INTER_BITS)); - __m128i v_dst13 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), - _mm_srai_epi32(v_iy1, INTER_BITS)); - v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), - _mm_and_si128(v_ix0, v_its1)); - v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), - _mm_and_si128(v_ix1, v_its1)); - _mm_storeu_si128((__m128i *)(dst2 + x + 8), _mm_packus_epi32(v_dst20, v_dst21)); + v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 8), v_its)); + v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 12), v_its)); + v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 8), v_its)); + v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 12), v_its)); - _mm_interleave_epi16(v_dst10, v_dst11, v_dst12, v_dst13); + __m128i v_dst11 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), + _mm_srai_epi32(v_ix1, INTER_BITS)); + __m128i v_dst13 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), + _mm_srai_epi32(v_iy1, INTER_BITS)); + v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), + _mm_and_si128(v_ix0, v_its1)); + v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), + _mm_and_si128(v_ix1, v_its1)); + _mm_storeu_si128((__m128i *)(dst2 + x + 8), _mm_packus_epi32(v_dst20, v_dst21)); - _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst10); - _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst11); - _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst12); - _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst13); + _mm_interleave_epi16(v_dst10, v_dst11, v_dst12, v_dst13); + + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst10); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst11); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst12); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst13); + } } #endif for( ; x < size.width; x++ ) @@ -5005,25 +5017,28 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } #elif CV_SSE2 - __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); - __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); - __m128i v_y_mask = _mm_set1_epi32((INTER_TAB_SIZE-1) << 16); - - for( ; x <= size.width - 4; x += 4 ) + if (useSSE2) { - __m128i v_src0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2), v_its)); - __m128i v_src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2 + 4), v_its)); + __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); + __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128i v_y_mask = _mm_set1_epi32((INTER_TAB_SIZE-1) << 16); - __m128i v_dst1 = _mm_packs_epi32(_mm_srai_epi32(v_src0, INTER_BITS), - _mm_srai_epi32(v_src1, INTER_BITS)); - _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst1); + for( ; x <= size.width - 4; x += 4 ) + { + __m128i v_src0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2), v_its)); + __m128i v_src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2 + 4), v_its)); - // x0 y0 x1 y1 . . . - v_src0 = _mm_packs_epi32(_mm_and_si128(v_src0, v_its1), - _mm_and_si128(v_src1, v_its1)); - __m128i v_dst2 = _mm_or_si128(_mm_srli_epi32(_mm_and_si128(v_src0, v_y_mask), 16 - INTER_BITS), // y0 0 y1 0 . . . - _mm_and_si128(v_src0, v_its1)); // 0 x0 0 x1 . . . 
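/* Scalar sketch of the map conversion that the SSE4.1 block above vectorizes
 * (the _mm_packus_epi32 it relies on is exactly why the useSSE4_1 guard is
 * needed): float coordinates are scaled by INTER_TAB_SIZE, the integer parts
 * go into the CV_16SC2 map, and the two 5-bit fractional parts are packed into
 * the CV_16UC1 interpolation-table index. INTER_BITS = 5 matches OpenCV;
 * illustrative only, saturation omitted. */
#include <cmath>

static void convertMapElem_sketch(float xf, float yf,
                                  short* dst1 /* x,y pair */, unsigned short* dst2)
{
    const int INTER_BITS = 5, INTER_TAB_SIZE = 1 << INTER_BITS;
    int ix = (int)std::lrintf(xf * INTER_TAB_SIZE);
    int iy = (int)std::lrintf(yf * INTER_TAB_SIZE);
    dst1[0] = (short)(ix >> INTER_BITS);                     // integer x
    dst1[1] = (short)(iy >> INTER_BITS);                     // integer y
    *dst2 = (unsigned short)((iy & (INTER_TAB_SIZE - 1)) * INTER_TAB_SIZE
                           + (ix & (INTER_TAB_SIZE - 1)));   // packed fractions
}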
- _mm_storel_epi64((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst2, v_dst2)); + __m128i v_dst1 = _mm_packs_epi32(_mm_srai_epi32(v_src0, INTER_BITS), + _mm_srai_epi32(v_src1, INTER_BITS)); + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst1); + + // x0 y0 x1 y1 . . . + v_src0 = _mm_packs_epi32(_mm_and_si128(v_src0, v_its1), + _mm_and_si128(v_src1, v_its1)); + __m128i v_dst2 = _mm_or_si128(_mm_srli_epi32(_mm_and_si128(v_src0, v_y_mask), 16 - INTER_BITS), // y0 0 y1 0 . . . + _mm_and_si128(v_src0, v_its1)); // 0 x0 0 x1 . . . + _mm_storel_epi64((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst2, v_dst2)); + } } #endif for( ; x < size.width; x++ ) @@ -5150,22 +5165,25 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst2q_f32(dst1f + (x << 1) + 8, v_dst); } #elif CV_SSE2 - __m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1); - __m128i v_zero = _mm_set1_epi32(0), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1); - __m128 v_scale = _mm_set1_ps(scale); - - for ( ; x <= size.width - 8; x += 8) + if (useSSE2) { - __m128i v_src = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); - __m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero; - __m128i v_fxy1 = _mm_and_si128(v_fxy, v_mask); - __m128i v_fxy2 = _mm_srli_epi16(v_fxy, INTER_BITS); + __m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1); + __m128i v_zero = _mm_set1_epi32(0), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128 v_scale = _mm_set1_ps(scale); - __m128 v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_fxy1, v_fxy2)), v_scale); - _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)), v_add)); + for ( ; x <= size.width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); + __m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero; + __m128i v_fxy1 = _mm_and_si128(v_fxy, v_mask); + __m128i v_fxy2 = _mm_srli_epi16(v_fxy, INTER_BITS); - v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_fxy1, v_fxy2)), v_scale); - _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)), v_add)); + __m128 v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_fxy1, v_fxy2)), v_scale); + _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)), v_add)); + + v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_fxy1, v_fxy2)), v_scale); + _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)), v_add)); + } } #endif for( ; x < size.width; x++ ) @@ -5204,7 +5222,10 @@ public: const int AB_SCALE = 1 << AB_BITS; int round_delta = interpolation == INTER_NEAREST ? 
AB_SCALE/2 : AB_SCALE/INTER_TAB_SIZE/2, x, y, x1, y1; #if CV_SSE2 - bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); + bool useSSE2 = checkHardwareSupport(CV_CPU_SSE2); + #endif + #if CV_SSE4_1 + bool useSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1); #endif int bh0 = std::min(BLOCK_SZ/2, dst.rows); @@ -5243,26 +5264,29 @@ public: vst2q_s16(xy + (x1 << 1), v_dst); } #elif CV_SSE4_1 - __m128i v_X0 = _mm_set1_epi32(X0); - __m128i v_Y0 = _mm_set1_epi32(Y0); - for ( ; x1 <= bw - 16; x1 += 16) + if (useSSE4_1) { - __m128i v_x0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1))), AB_BITS), - _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 4))), AB_BITS)); - __m128i v_x1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 8))), AB_BITS), - _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 12))), AB_BITS)); + __m128i v_X0 = _mm_set1_epi32(X0); + __m128i v_Y0 = _mm_set1_epi32(Y0); + for ( ; x1 <= bw - 16; x1 += 16) + { + __m128i v_x0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 4))), AB_BITS)); + __m128i v_x1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 8))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 12))), AB_BITS)); - __m128i v_y0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1))), AB_BITS), - _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 4))), AB_BITS)); - __m128i v_y1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 8))), AB_BITS), - _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 12))), AB_BITS)); + __m128i v_y0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 4))), AB_BITS)); + __m128i v_y1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 8))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 12))), AB_BITS)); - _mm_interleave_epi16(v_x0, v_x1, v_y0, v_y1); + _mm_interleave_epi16(v_x0, v_x1, v_y0, v_y1); - _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_x0); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_x1); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_y0); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_y1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_x0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_x1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_y1); + } } #endif for( ; x1 < bw; x1++ ) @@ -5278,7 +5302,7 @@ public: short* alpha = A + y1*bw; x1 = 0; #if CV_SSE2 - if( useSIMD ) + if( useSSE2 ) { __m128i fxy_mask = _mm_set1_epi32(INTER_TAB_SIZE - 1); __m128i XX = _mm_set1_epi32(X0), YY = _mm_set1_epi32(Y0); @@ -5672,6 +5696,7 @@ public: bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height); #if CV_SSE4_1 + bool haveSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1); __m128d v_M0 = _mm_set1_pd(M[0]); __m128d v_M3 = 
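/* Scalar sketch of the fixed-point affine mapping that the nearest-neighbour
 * SSE4.1 block above computes 16 pixels at a time: adelta/bdelta hold the
 * per-column increments premultiplied by AB_SCALE = 1 << AB_BITS, so each
 * destination pixel needs only an add and a shift. AB_BITS = 10 follows
 * OpenCV; illustrative only, saturation omitted. */
static void affineRow_sketch(int X0, int Y0, const int* adelta, const int* bdelta,
                             short* xy /* interleaved x,y pairs */, int bw)
{
    const int AB_BITS = 10;
    for (int x1 = 0; x1 < bw; x1++)
    {
        int X = (X0 + adelta[x1]) >> AB_BITS;   // source x for this destination pixel
        int Y = (Y0 + bdelta[x1]) >> AB_BITS;   // source y for this destination pixel
        xy[x1 * 2]     = (short)X;              // OpenCV uses saturate_cast<short> here
        xy[x1 * 2 + 1] = (short)Y;
    }
}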
_mm_set1_pd(M[3]); __m128d v_M6 = _mm_set1_pd(M[6]); @@ -5706,109 +5731,112 @@ public: x1 = 0; #if CV_SSE4_1 - __m128d v_X0d = _mm_set1_pd(X0); - __m128d v_Y0d = _mm_set1_pd(Y0); - __m128d v_W0 = _mm_set1_pd(W0); - __m128d v_x1 = _mm_set_pd(1, 0); - - for( ; x1 <= bw - 16; x1 += 16 ) + if (haveSSE4_1) { - // 0-3 - __m128i v_X0, v_Y0; + __m128d v_X0d = _mm_set1_pd(X0); + __m128d v_Y0d = _mm_set1_pd(Y0); + __m128d v_W0 = _mm_set1_pd(W0); + __m128d v_x1 = _mm_set_pd(1, 0); + + for( ; x1 <= bw - 16; x1 += 16 ) { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); + // 0-3 + __m128i v_X0, v_Y0; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); - v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 4-8 + __m128i v_X1, v_Y1; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X1 = 
_mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 8-11 + __m128i v_X2, v_Y2; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 12-15 + __m128i v_X3, v_Y3; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // convert to 16s + v_X0 = _mm_packs_epi32(v_X0, v_X1); + v_X1 = _mm_packs_epi32(v_X2, v_X3); + v_Y0 = _mm_packs_epi32(v_Y0, v_Y1); + v_Y1 = _mm_packs_epi32(v_Y2, v_Y3); + + _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); } - - // 4-8 - __m128i v_X1, v_Y1; - { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, 
_mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); - } - - // 8-11 - __m128i v_X2, v_Y2; - { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); - } - - // 12-15 - __m128i v_X3, v_Y3; - { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); - } - - // convert to 16s - v_X0 = _mm_packs_epi32(v_X0, v_X1); - v_X1 = _mm_packs_epi32(v_X2, v_X3); - v_Y0 = _mm_packs_epi32(v_Y0, v_Y1); - v_Y1 = _mm_packs_epi32(v_Y2, v_Y3); - - _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); - - _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); } #endif @@ -5831,122 +5859,125 @@ public: x1 = 0; #if CV_SSE4_1 - __m128d v_X0d = _mm_set1_pd(X0); - __m128d v_Y0d = _mm_set1_pd(Y0); - __m128d v_W0 = _mm_set1_pd(W0); - __m128d v_x1 = _mm_set_pd(1, 0); - - for( ; x1 <= bw - 16; x1 += 16 ) + if (haveSSE4_1) { - // 0-3 - __m128i v_X0, v_Y0; + __m128d v_X0d = _mm_set1_pd(X0); + __m128d v_Y0d = _mm_set1_pd(Y0); + __m128d v_W0 = _mm_set1_pd(W0); + __m128d v_x1 = 
_mm_set_pd(1, 0); + + for( ; x1 <= bw - 16; x1 += 16 ) { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); + // 0-3 + __m128i v_X0, v_Y0; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); - v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 4-8 + __m128i v_X1, v_Y1; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 8-11 + __m128i v_X2, v_Y2; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, 
_mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 12-15 + __m128i v_X3, v_Y3; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // store alpha + __m128i v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y0, v_itsi1), INTER_BITS), + _mm_and_si128(v_X0, v_itsi1)); + __m128i v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y1, v_itsi1), INTER_BITS), + _mm_and_si128(v_X1, v_itsi1)); + _mm_storeu_si128((__m128i *)(alpha + x1), _mm_packs_epi32(v_alpha0, v_alpha1)); + + v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y2, v_itsi1), INTER_BITS), + _mm_and_si128(v_X2, v_itsi1)); + v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y3, v_itsi1), INTER_BITS), + _mm_and_si128(v_X3, v_itsi1)); + _mm_storeu_si128((__m128i *)(alpha + x1 + 8), _mm_packs_epi32(v_alpha0, v_alpha1)); + + // convert to 16s + v_X0 = _mm_packs_epi32(_mm_srai_epi32(v_X0, INTER_BITS), _mm_srai_epi32(v_X1, INTER_BITS)); + v_X1 = _mm_packs_epi32(_mm_srai_epi32(v_X2, INTER_BITS), _mm_srai_epi32(v_X3, INTER_BITS)); + v_Y0 = _mm_packs_epi32(_mm_srai_epi32(v_Y0, INTER_BITS), _mm_srai_epi32(v_Y1, INTER_BITS)); + v_Y1 = _mm_packs_epi32(_mm_srai_epi32(v_Y2, INTER_BITS), _mm_srai_epi32(v_Y3, INTER_BITS)); + + _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); } - - // 4-8 - __m128i v_X1, v_Y1; - { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX0 = 
_mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); - } - - // 8-11 - __m128i v_X2, v_Y2; - { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); - } - - // 12-15 - __m128i v_X3, v_Y3; - { - __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); - v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); - __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); - __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); - v_x1 = _mm_add_pd(v_x1, v_2); - - v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); - v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), - _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); - } - - // store alpha - __m128i v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y0, v_itsi1), INTER_BITS), - _mm_and_si128(v_X0, v_itsi1)); - __m128i v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y1, v_itsi1), INTER_BITS), - _mm_and_si128(v_X1, v_itsi1)); - _mm_storeu_si128((__m128i *)(alpha + x1), _mm_packs_epi32(v_alpha0, v_alpha1)); - - v_alpha0 = 
_mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y2, v_itsi1), INTER_BITS), - _mm_and_si128(v_X2, v_itsi1)); - v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y3, v_itsi1), INTER_BITS), - _mm_and_si128(v_X3, v_itsi1)); - _mm_storeu_si128((__m128i *)(alpha + x1 + 8), _mm_packs_epi32(v_alpha0, v_alpha1)); - - // convert to 16s - v_X0 = _mm_packs_epi32(_mm_srai_epi32(v_X0, INTER_BITS), _mm_srai_epi32(v_X1, INTER_BITS)); - v_X1 = _mm_packs_epi32(_mm_srai_epi32(v_X2, INTER_BITS), _mm_srai_epi32(v_X3, INTER_BITS)); - v_Y0 = _mm_packs_epi32(_mm_srai_epi32(v_Y0, INTER_BITS), _mm_srai_epi32(v_Y1, INTER_BITS)); - v_Y1 = _mm_packs_epi32(_mm_srai_epi32(v_Y2, INTER_BITS), _mm_srai_epi32(v_Y3, INTER_BITS)); - - _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); - - _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); - _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); } #endif diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index 93b9bfa16..4271b942a 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -386,10 +386,10 @@ struct PyrUpVec_32s16s __m128i v_dst01 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); __m128i v_dst11 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); - _mm_storeu_si128((__m128i *)(dst0 + x), + _mm_storeu_si128((__m128i *)(dst0 + x), _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst00, v_delta), 6), _mm_srai_epi32(_mm_add_epi32(v_dst01, v_delta), 6))); - _mm_storeu_si128((__m128i *)(dst1 + x), + _mm_storeu_si128((__m128i *)(dst1 + x), _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst10, v_delta), 6), _mm_srai_epi32(_mm_add_epi32(v_dst11, v_delta), 6))); } @@ -446,10 +446,10 @@ struct PyrUpVec_32s16u __m128i v_dst01 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); __m128i v_dst11 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); - _mm_storeu_si128((__m128i *)(dst0 + x), + _mm_storeu_si128((__m128i *)(dst0 + x), _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst00, v_delta), 6), _mm_srli_epi32(_mm_add_epi32(v_dst01, v_delta), 6))); - _mm_storeu_si128((__m128i *)(dst1 + x), + _mm_storeu_si128((__m128i *)(dst1 + x), _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst10, v_delta), 6), _mm_srli_epi32(_mm_add_epi32(v_dst11, v_delta), 6))); } @@ -491,7 +491,7 @@ struct PyrUpVec_32f const float *row0 = src[0], *row1 = src[1], *row2 = src[2]; float *dst0 = dst[0], *dst1 = dst[1]; - __m128 v_6 = _mm_set1_ps(6.0f), v_scale = _mm_set1_ps(1.f/64.0f), + __m128 v_6 = _mm_set1_ps(6.0f), v_scale = _mm_set1_ps(1.f/64.0f), v_scale4 = _mm_mul_ps(v_scale, _mm_set1_ps(4.0f)); for( ; x <= width - 8; x += 8 ) From 33176db5dcea9479580219196b29c533d6a2d563 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 53/98] compareHist --- modules/imgproc/src/histogram.cpp | 106 ++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 5 deletions(-) diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index 9acdc1141..ec8de4d81 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -2284,15 +2284,20 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() ); +#if CV_SSE2 + bool haveSIMD = checkHardwareSupport(CV_CPU_SSE2); +#endif + for( size_t i = 0; i < it.nplanes; i++, ++it ) { const 
float* h1 = it.planes[0].ptr(); const float* h2 = it.planes[1].ptr(); len = it.planes[0].rows*it.planes[0].cols*H1.channels(); + j = 0; if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT)) { - for( j = 0; j < len; j++ ) + for( ; j < len; j++ ) { double a = h1[j] - h2[j]; double b = (method == CV_COMP_CHISQR) ? h1[j] : h1[j] + h2[j]; @@ -2302,7 +2307,51 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) } else if( method == CV_COMP_CORREL ) { - for( j = 0; j < len; j++ ) + #if CV_SSE2 + if (haveSIMD) + { + __m128d v_s1 = _mm_setzero_pd(), v_s2 = v_s1; + __m128d v_s11 = v_s1, v_s22 = v_s1, v_s12 = v_s1; + + for ( ; j <= len - 4; j += 4) + { + __m128 v_a = _mm_loadu_ps(h1 + j); + __m128 v_b = _mm_loadu_ps(h2 + j); + + // 0-1 + __m128d v_ad = _mm_cvtps_pd(v_a); + __m128d v_bd = _mm_cvtps_pd(v_b); + v_s12 = _mm_add_pd(v_s12, _mm_mul_pd(v_ad, v_bd)); + v_s11 = _mm_add_pd(v_s11, _mm_mul_pd(v_ad, v_ad)); + v_s22 = _mm_add_pd(v_s22, _mm_mul_pd(v_bd, v_bd)); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + + // 2-3 + v_ad = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_a), 8))); + v_bd = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_b), 8))); + v_s12 = _mm_add_pd(v_s12, _mm_mul_pd(v_ad, v_bd)); + v_s11 = _mm_add_pd(v_s11, _mm_mul_pd(v_ad, v_ad)); + v_s22 = _mm_add_pd(v_s22, _mm_mul_pd(v_bd, v_bd)); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + } + + double CV_DECL_ALIGNED(16) ar[10]; + _mm_store_pd(ar, v_s12); + _mm_store_pd(ar + 2, v_s11); + _mm_store_pd(ar + 4, v_s22); + _mm_store_pd(ar + 6, v_s1); + _mm_store_pd(ar + 8, v_s2); + + s12 += ar[0] + ar[1]; + s11 += ar[2] + ar[3]; + s22 += ar[4] + ar[5]; + s1 += ar[6] + ar[7]; + s2 += ar[8] + ar[9]; + } + #endif + for( ; j < len; j++ ) { double a = h1[j]; double b = h2[j]; @@ -2316,7 +2365,6 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) } else if( method == CV_COMP_INTERSECT ) { - j = 0; #if CV_NEON float32x4_t v_result = vdupq_n_f32(0.0f); for( ; j <= len - 4; j += 4 ) @@ -2324,13 +2372,61 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) float CV_DECL_ALIGNED(16) ar[4]; vst1q_f32(ar, v_result); result += ar[0] + ar[1] + ar[2] + ar[3]; + #elif CV_SSE2 + if (haveSIMD) + { + __m128d v_result = _mm_setzero_pd(); + for ( ; j <= len - 4; j += 4) + { + __m128 v_src = _mm_min_ps(_mm_loadu_ps(h1 + j), + _mm_loadu_ps(h2 + j)); + v_result = _mm_add_pd(v_result, _mm_cvtps_pd(v_src)); + v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); + v_result = _mm_add_pd(v_result, _mm_cvtps_pd(v_src)); + } + + double CV_DECL_ALIGNED(16) ar[2]; + _mm_store_pd(ar, v_result); + result += ar[0] + ar[1]; + } #endif for( ; j < len; j++ ) result += std::min(h1[j], h2[j]); } else if( method == CV_COMP_BHATTACHARYYA ) { - for( j = 0; j < len; j++ ) + #if CV_SSE2 + if (haveSIMD) + { + __m128d v_s1 = _mm_setzero_pd(), v_s2 = v_s1, v_result = v_s1; + for ( ; j <= len - 4; j += 4) + { + __m128 v_a = _mm_loadu_ps(h1 + j); + __m128 v_b = _mm_loadu_ps(h2 + j); + + __m128d v_ad = _mm_cvtps_pd(v_a); + __m128d v_bd = _mm_cvtps_pd(v_b); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + v_result = _mm_add_pd(v_result, _mm_sqrt_pd(_mm_mul_pd(v_ad, v_bd))); + + v_ad = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_a), 8))); + v_bd = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_b), 8))); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + v_result = 
_mm_add_pd(v_result, _mm_sqrt_pd(_mm_mul_pd(v_ad, v_bd))); + } + + double CV_DECL_ALIGNED(16) ar[6]; + _mm_store_pd(ar, v_s1); + _mm_store_pd(ar + 2, v_s2); + _mm_store_pd(ar + 4, v_result); + s1 += ar[0] + ar[1]; + s2 += ar[2] + ar[3]; + result += ar[4] + ar[5]; + } + #endif + for( ; j < len; j++ ) { double a = h1[j]; double b = h2[j]; @@ -2341,7 +2437,7 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) } else if( method == CV_COMP_KL_DIV ) { - for( j = 0; j < len; j++ ) + for( ; j < len; j++ ) { double p = h1[j]; double q = h2[j]; From 8c94568cc3f58abdf4991f00994559defcdb8050 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 54/98] cv::sum --- modules/core/src/stat.cpp | 110 +++++++++++++++++++++++++++++++++++++- 1 file changed, 108 insertions(+), 2 deletions(-) diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index 1fcb9b54d..b26308051 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -73,7 +73,114 @@ struct Sum_SIMD } }; -#if CV_NEON +#if CV_SSE2 + +template <> +struct Sum_SIMD +{ + int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2) + return 0; + + int x = 0; + __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero; + + for ( ; x <= len - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x)); + __m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8); + + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + + v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + } + + for ( ; x <= len - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8); + + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + } + + int CV_DECL_ALIGNED(16) ar[4]; + _mm_store_si128((__m128i*)ar, v_sum); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + dst[j] += ar[j + i]; + + return x / cn; + } +}; + +template <> +struct Sum_SIMD +{ + int operator () (const int * src0, const uchar * mask, double * dst, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2) + return 0; + + int x = 0; + __m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero; + + for ( ; x <= len - 4; x += 4) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src0 + x)); + v_sum0 = _mm_add_pd(v_sum0, _mm_cvtepi32_pd(v_src)); + v_sum1 = _mm_add_pd(v_sum1, _mm_cvtepi32_pd(_mm_srli_si128(v_src, 8))); + } + + double CV_DECL_ALIGNED(16) ar[4]; + _mm_store_pd(ar, v_sum0); + _mm_store_pd(ar + 2, v_sum1); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + dst[j] += ar[j + i]; + + return x / cn; + } +}; + +template <> +struct Sum_SIMD +{ + int operator () (const float * src0, const uchar * mask, double * dst, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2) + return 0; + + int x = 0; + __m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero; + + for ( ; x <= len - 4; x += 4) + { + __m128 
v_src = _mm_loadu_ps(src0 + x); + v_sum0 = _mm_add_pd(v_sum0, _mm_cvtps_pd(v_src)); + v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); + v_sum1 = _mm_add_pd(v_sum1, _mm_cvtps_pd(v_src)); + } + + double CV_DECL_ALIGNED(16) ar[4]; + _mm_store_pd(ar, v_sum0); + _mm_store_pd(ar + 2, v_sum1); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + dst[j] += ar[j + i]; + + return x / cn; + } +}; + + +#elif CV_NEON template <> struct Sum_SIMD @@ -1023,7 +1130,6 @@ cv::Scalar cv::sum( InputArray _src ) } } #endif - SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); From 052fe626b39f03799276d1462e0d39ffb514d010 Mon Sep 17 00:00:00 2001 From: Nicolas Riebesel Date: Sun, 18 Jan 2015 20:27:09 +0100 Subject: [PATCH 55/98] Corrected the WITH_GSTREAMER and WITH_GSTREAMER_0_10 semantic --- cmake/OpenCVFindLibsVideo.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 6d6ca62e8..be394857c 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -32,7 +32,7 @@ if(WITH_GSTREAMER AND NOT WITH_GSTREAMER_0_10) endif(WITH_GSTREAMER AND NOT WITH_GSTREAMER_0_10) # if gstreamer 1.x was not found, or we specified we wanted 0.10, try to find it -if(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER) +if(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10) CHECK_MODULE(gstreamer-base-0.10 HAVE_GSTREAMER_BASE) CHECK_MODULE(gstreamer-video-0.10 HAVE_GSTREAMER_VIDEO) CHECK_MODULE(gstreamer-app-0.10 HAVE_GSTREAMER_APP) @@ -47,7 +47,7 @@ if(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER) set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-0.10_VERSION}) set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-0.10_VERSION}) endif() -endif(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER) +endif(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10) # --- unicap --- ocv_clear_vars(HAVE_UNICAP) From dae188d14f0abc85a9fdba418168ea1f0d3187fa Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Tue, 30 Dec 2014 17:35:25 +0300 Subject: [PATCH 56/98] move obsolete algorithms from cudabgsegm to cudalegacy: * GMG * FGD --- modules/cudabgsegm/CMakeLists.txt | 2 +- .../cudabgsegm/include/opencv2/cudabgsegm.hpp | 109 -------- modules/cudabgsegm/perf/perf_bgsegm.cpp | 196 -------------- modules/cudabgsegm/src/precomp.hpp | 12 - modules/cudalegacy/CMakeLists.txt | 2 +- .../cudalegacy/include/opencv2/cudalegacy.hpp | 115 ++++++++ modules/cudalegacy/perf/perf_bgsegm.cpp | 256 ++++++++++++++++++ modules/cudalegacy/perf/perf_main.cpp | 47 ++++ modules/cudalegacy/perf/perf_precomp.hpp | 66 +++++ .../src/cuda/fgd.cu | 0 .../src/cuda/fgd.hpp | 0 .../src/cuda/gmg.cu | 0 .../{cudabgsegm => cudalegacy}/src/fgd.cpp | 0 .../{cudabgsegm => cudalegacy}/src/gmg.cpp | 0 modules/cudalegacy/src/precomp.hpp | 12 + samples/gpu/bgfg_segm.cpp | 1 + 16 files changed, 499 insertions(+), 319 deletions(-) create mode 100644 modules/cudalegacy/perf/perf_bgsegm.cpp create mode 100644 modules/cudalegacy/perf/perf_main.cpp create mode 100644 modules/cudalegacy/perf/perf_precomp.hpp rename modules/{cudabgsegm => cudalegacy}/src/cuda/fgd.cu (100%) rename modules/{cudabgsegm => cudalegacy}/src/cuda/fgd.hpp (100%) rename modules/{cudabgsegm => cudalegacy}/src/cuda/gmg.cu (100%) rename modules/{cudabgsegm => cudalegacy}/src/fgd.cpp (100%) rename modules/{cudabgsegm => cudalegacy}/src/gmg.cpp (100%) diff --git a/modules/cudabgsegm/CMakeLists.txt b/modules/cudabgsegm/CMakeLists.txt index 
4c3d3f1db..c60fdd076 100644 --- a/modules/cudabgsegm/CMakeLists.txt +++ b/modules/cudabgsegm/CMakeLists.txt @@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Background Segmentation") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow) -ocv_define_module(cudabgsegm opencv_video OPTIONAL opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc) +ocv_define_module(cudabgsegm opencv_video) diff --git a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp index 4b5e305d6..32ea7c17e 100644 --- a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp +++ b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp @@ -147,115 +147,6 @@ CV_EXPORTS Ptr createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16, bool detectShadows = true); -//////////////////////////////////////////////////// -// GMG - -/** @brief Background/Foreground Segmentation Algorithm. - -The class discriminates between foreground and background pixels by building and maintaining a model -of the background. Any pixel which does not fit this model is then deemed to be foreground. The -class implements algorithm described in @cite Gold2012 . - */ -class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor -{ -public: - using cv::BackgroundSubtractor::apply; - virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; - - virtual int getMaxFeatures() const = 0; - virtual void setMaxFeatures(int maxFeatures) = 0; - - virtual double getDefaultLearningRate() const = 0; - virtual void setDefaultLearningRate(double lr) = 0; - - virtual int getNumFrames() const = 0; - virtual void setNumFrames(int nframes) = 0; - - virtual int getQuantizationLevels() const = 0; - virtual void setQuantizationLevels(int nlevels) = 0; - - virtual double getBackgroundPrior() const = 0; - virtual void setBackgroundPrior(double bgprior) = 0; - - virtual int getSmoothingRadius() const = 0; - virtual void setSmoothingRadius(int radius) = 0; - - virtual double getDecisionThreshold() const = 0; - virtual void setDecisionThreshold(double thresh) = 0; - - virtual bool getUpdateBackgroundModel() const = 0; - virtual void setUpdateBackgroundModel(bool update) = 0; - - virtual double getMinVal() const = 0; - virtual void setMinVal(double val) = 0; - - virtual double getMaxVal() const = 0; - virtual void setMaxVal(double val) = 0; -}; - -/** @brief Creates GMG Background Subtractor - -@param initializationFrames Number of frames of video to use to initialize histograms. -@param decisionThreshold Value above which pixel is determined to be FG. - */ -CV_EXPORTS Ptr - createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8); - -//////////////////////////////////////////////////// -// FGD - -/** @brief The class discriminates between foreground and background pixels by building and maintaining a model -of the background. - -Any pixel which does not fit this model is then deemed to be foreground. The class implements -algorithm described in @cite FGD2003 . -@sa BackgroundSubtractor - */ -class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor -{ -public: - /** @brief Returns the output foreground regions calculated by findContours. - - @param foreground_regions Output array (CPU memory). 
- */ - virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0; -}; - -struct CV_EXPORTS FGDParams -{ - int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. - int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel. - int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. - //!< Used to allow the first N1c vectors to adapt over time to changing background. - - int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. - int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel. - int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. - //!< Used to allow the first N1cc vectors to adapt over time to changing background. - - bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE. - int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations. - //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. - - float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1. - float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005. - float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. - - float delta; //!< Affects color and color co-occurrence quantization, typically set to 2. - float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9). - float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold. - - //! default Params - FGDParams(); -}; - -/** @brief Creates FGD Background Subtractor - -@param params Algorithm's parameters. See @cite FGD2003 for explanation. - */ -CV_EXPORTS Ptr - createBackgroundSubtractorFGD(const FGDParams& params = FGDParams()); - //! 
@} }} // namespace cv { namespace cuda { diff --git a/modules/cudabgsegm/perf/perf_bgsegm.cpp b/modules/cudabgsegm/perf/perf_bgsegm.cpp index c2491f433..48bda4a33 100644 --- a/modules/cudabgsegm/perf/perf_bgsegm.cpp +++ b/modules/cudabgsegm/perf/perf_bgsegm.cpp @@ -42,10 +42,6 @@ #include "perf_precomp.hpp" -#ifdef HAVE_OPENCV_CUDAIMGPROC -# include "opencv2/cudaimgproc.hpp" -#endif - using namespace std; using namespace testing; using namespace perf; @@ -63,83 +59,6 @@ using namespace perf; # define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 #endif -////////////////////////////////////////////////////// -// FGDStatModel - -#if BUILD_WITH_VIDEO_INPUT_SUPPORT - -DEF_PARAM_TEST_1(Video, string); - -PERF_TEST_P(Video, FGDStatModel, - Values(string("gpu/video/768x576.avi"))) -{ - const int numIters = 10; - - declare.time(60); - - const string inputFile = perf::TestBase::getDataPath(GetParam()); - - cv::VideoCapture cap(inputFile); - ASSERT_TRUE(cap.isOpened()); - - cv::Mat frame; - cap >> frame; - ASSERT_FALSE(frame.empty()); - - if (PERF_RUN_CUDA()) - { - cv::cuda::GpuMat d_frame(frame), foreground; - - cv::Ptr d_fgd = cv::cuda::createBackgroundSubtractorFGD(); - d_fgd->apply(d_frame, foreground); - - int i = 0; - - // collect performance data - for (; i < numIters; ++i) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - - d_frame.upload(frame); - - startTimer(); - if(!next()) - break; - - d_fgd->apply(d_frame, foreground); - - stopTimer(); - } - - // process last frame in sequence to get data for sanity test - for (; i < numIters; ++i) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - - d_frame.upload(frame); - - d_fgd->apply(d_frame, foreground); - } - - CUDA_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE); - -#ifdef HAVE_OPENCV_CUDAIMGPROC - cv::cuda::GpuMat background3, background; - d_fgd->getBackgroundImage(background3); - cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA); - CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE); -#endif - } - else - { - FAIL_NO_CPU(); - } -} - -#endif - ////////////////////////////////////////////////////// // MOG @@ -484,118 +403,3 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage, } #endif - -////////////////////////////////////////////////////// -// GMG - -#if BUILD_WITH_VIDEO_INPUT_SUPPORT - -DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int); - -PERF_TEST_P(Video_Cn_MaxFeatures, GMG, - Combine(Values(string("gpu/video/768x576.avi")), - CUDA_CHANNELS_1_3_4, - Values(20, 40, 60))) -{ - const int numIters = 150; - - const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0)); - const int cn = GET_PARAM(1); - const int maxFeatures = GET_PARAM(2); - - cv::VideoCapture cap(inputFile); - ASSERT_TRUE(cap.isOpened()); - - cv::Mat frame; - cap >> frame; - ASSERT_FALSE(frame.empty()); - - if (cn != 3) - { - cv::Mat temp; - if (cn == 1) - cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); - else - cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); - cv::swap(temp, frame); - } - - if (PERF_RUN_CUDA()) - { - cv::cuda::GpuMat d_frame(frame); - cv::cuda::GpuMat foreground; - - cv::Ptr d_gmg = cv::cuda::createBackgroundSubtractorGMG(); - d_gmg->setMaxFeatures(maxFeatures); - - d_gmg->apply(d_frame, foreground); - - int i = 0; - - // collect performance data - for (; i < numIters; ++i) - { - cap >> frame; - if (frame.empty()) - { - cap.release(); - cap.open(inputFile); - cap >> frame; - } - - if (cn != 3) - { - cv::Mat temp; - if (cn == 1) - cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); - else - cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); - 
cv::swap(temp, frame); - } - - d_frame.upload(frame); - - startTimer(); - if(!next()) - break; - - d_gmg->apply(d_frame, foreground); - - stopTimer(); - } - - // process last frame in sequence to get data for sanity test - for (; i < numIters; ++i) - { - cap >> frame; - if (frame.empty()) - { - cap.release(); - cap.open(inputFile); - cap >> frame; - } - - if (cn != 3) - { - cv::Mat temp; - if (cn == 1) - cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); - else - cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); - cv::swap(temp, frame); - } - - d_frame.upload(frame); - - d_gmg->apply(d_frame, foreground); - } - - CUDA_SANITY_CHECK(foreground); - } - else - { - FAIL_NO_CPU(); - } -} - -#endif diff --git a/modules/cudabgsegm/src/precomp.hpp b/modules/cudabgsegm/src/precomp.hpp index 4b1ae8d96..e8d627e67 100644 --- a/modules/cudabgsegm/src/precomp.hpp +++ b/modules/cudabgsegm/src/precomp.hpp @@ -51,16 +51,4 @@ #include "opencv2/opencv_modules.hpp" -#ifdef HAVE_OPENCV_CUDAARITHM -# include "opencv2/cudaarithm.hpp" -#endif - -#ifdef HAVE_OPENCV_CUDAFILTERS -# include "opencv2/cudafilters.hpp" -#endif - -#ifdef HAVE_OPENCV_CUDAIMGPROC -# include "opencv2/cudaimgproc.hpp" -#endif - #endif /* __OPENCV_PRECOMP_H__ */ diff --git a/modules/cudalegacy/CMakeLists.txt b/modules/cudalegacy/CMakeLists.txt index 8947cd6fd..0ef6e359b 100644 --- a/modules/cudalegacy/CMakeLists.txt +++ b/modules/cudalegacy/CMakeLists.txt @@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Computer Vision (legacy)") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4130 /wd4324 /wd4512 /wd4310 -Wundef -Wmissing-declarations -Wuninitialized -Wshadow) -ocv_define_module(cudalegacy opencv_core OPTIONAL opencv_objdetect) +ocv_define_module(cudalegacy opencv_core opencv_video OPTIONAL opencv_objdetect opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc) diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index 5e5773385..328836c53 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -49,6 +49,7 @@ #include "opencv2/cudalegacy/NCVPyramid.hpp" #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp" #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp" +#include "opencv2/video/background_segm.hpp" /** @addtogroup cuda @@ -59,6 +60,9 @@ namespace cv { namespace cuda { +//! @addtogroup cudalegacy +//! @{ + class CV_EXPORTS ImagePyramid : public Algorithm { public: @@ -67,6 +71,117 @@ public: CV_EXPORTS Ptr createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null()); +//////////////////////////////////////////////////// +// GMG + +/** @brief Background/Foreground Segmentation Algorithm. + +The class discriminates between foreground and background pixels by building and maintaining a model +of the background. Any pixel which does not fit this model is then deemed to be foreground. The +class implements algorithm described in @cite Gold2012 . 
+ */ +class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor +{ +public: + using cv::BackgroundSubtractor::apply; + virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; + + virtual int getMaxFeatures() const = 0; + virtual void setMaxFeatures(int maxFeatures) = 0; + + virtual double getDefaultLearningRate() const = 0; + virtual void setDefaultLearningRate(double lr) = 0; + + virtual int getNumFrames() const = 0; + virtual void setNumFrames(int nframes) = 0; + + virtual int getQuantizationLevels() const = 0; + virtual void setQuantizationLevels(int nlevels) = 0; + + virtual double getBackgroundPrior() const = 0; + virtual void setBackgroundPrior(double bgprior) = 0; + + virtual int getSmoothingRadius() const = 0; + virtual void setSmoothingRadius(int radius) = 0; + + virtual double getDecisionThreshold() const = 0; + virtual void setDecisionThreshold(double thresh) = 0; + + virtual bool getUpdateBackgroundModel() const = 0; + virtual void setUpdateBackgroundModel(bool update) = 0; + + virtual double getMinVal() const = 0; + virtual void setMinVal(double val) = 0; + + virtual double getMaxVal() const = 0; + virtual void setMaxVal(double val) = 0; +}; + +/** @brief Creates GMG Background Subtractor + +@param initializationFrames Number of frames of video to use to initialize histograms. +@param decisionThreshold Value above which pixel is determined to be FG. + */ +CV_EXPORTS Ptr + createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8); + +//////////////////////////////////////////////////// +// FGD + +/** @brief The class discriminates between foreground and background pixels by building and maintaining a model +of the background. + +Any pixel which does not fit this model is then deemed to be foreground. The class implements +algorithm described in @cite FGD2003 . +@sa BackgroundSubtractor + */ +class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor +{ +public: + /** @brief Returns the output foreground regions calculated by findContours. + + @param foreground_regions Output array (CPU memory). + */ + virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0; +}; + +struct CV_EXPORTS FGDParams +{ + int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. + int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel. + int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. + //!< Used to allow the first N1c vectors to adapt over time to changing background. + + int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. + int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel. + int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. + //!< Used to allow the first N1cc vectors to adapt over time to changing background. + + bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE. + int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations. + //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. + + float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1. 
+ float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005. + float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. + + float delta; //!< Affects color and color co-occurrence quantization, typically set to 2. + float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9). + float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold. + + //! default Params + FGDParams(); +}; + +/** @brief Creates FGD Background Subtractor + +@param params Algorithm's parameters. See @cite FGD2003 for explanation. + */ +CV_EXPORTS Ptr + createBackgroundSubtractorFGD(const FGDParams& params = FGDParams()); + +//! @} + }} #endif /* __OPENCV_CUDALEGACY_HPP__ */ diff --git a/modules/cudalegacy/perf/perf_bgsegm.cpp b/modules/cudalegacy/perf/perf_bgsegm.cpp new file mode 100644 index 000000000..a37c4fde1 --- /dev/null +++ b/modules/cudalegacy/perf/perf_bgsegm.cpp @@ -0,0 +1,256 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "perf_precomp.hpp" + +#ifdef HAVE_OPENCV_CUDAIMGPROC +# include "opencv2/cudaimgproc.hpp" +#endif + +using namespace std; +using namespace testing; +using namespace perf; + +#if defined(HAVE_XINE) || \ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_AVFOUNDATION) || \ + defined(HAVE_FFMPEG) || \ + defined(WIN32) /* assume that we have ffmpeg */ + +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1 +#else +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 +#endif + +////////////////////////////////////////////////////// +// FGDStatModel + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +DEF_PARAM_TEST_1(Video, string); + +PERF_TEST_P(Video, FGDStatModel, + Values(string("gpu/video/768x576.avi"))) +{ + const int numIters = 10; + + declare.time(60); + + const string inputFile = perf::TestBase::getDataPath(GetParam()); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + cap >> frame; + ASSERT_FALSE(frame.empty()); + + if (PERF_RUN_CUDA()) + { + cv::cuda::GpuMat d_frame(frame), foreground; + + cv::Ptr d_fgd = cv::cuda::createBackgroundSubtractorFGD(); + d_fgd->apply(d_frame, foreground); + + int i = 0; + + // collect performance data + for (; i < numIters; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + d_frame.upload(frame); + + startTimer(); + if(!next()) + break; + + d_fgd->apply(d_frame, foreground); + + stopTimer(); + } + + // process last frame in sequence to get data for sanity test + for (; i < numIters; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + d_frame.upload(frame); + + d_fgd->apply(d_frame, foreground); + } + +#ifdef HAVE_OPENCV_CUDAIMGPROC + cv::cuda::GpuMat background3, background; + d_fgd->getBackgroundImage(background3); + cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA); + CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE); +#endif + } + else + { + FAIL_NO_CPU(); + } + + SANITY_CHECK_NOTHING(); +} + +#endif + +////////////////////////////////////////////////////// +// GMG + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int); + +PERF_TEST_P(Video_Cn_MaxFeatures, GMG, + Combine(Values(string("gpu/video/768x576.avi")), + CUDA_CHANNELS_1_3_4, + Values(20, 40, 60))) +{ + const int numIters = 150; + + const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0)); + const int cn = GET_PARAM(1); + const int maxFeatures = GET_PARAM(2); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + cap >> frame; + ASSERT_FALSE(frame.empty()); + + if (cn != 3) + { + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + } + + if (PERF_RUN_CUDA()) + { + cv::cuda::GpuMat d_frame(frame); + cv::cuda::GpuMat foreground; + + cv::Ptr d_gmg = cv::cuda::createBackgroundSubtractorGMG(); + d_gmg->setMaxFeatures(maxFeatures); + + d_gmg->apply(d_frame, foreground); + + int i = 0; + + // collect performance data + for (; i < numIters; ++i) + { + cap >> frame; + if (frame.empty()) + { + cap.release(); + cap.open(inputFile); + cap >> frame; + } + + if (cn != 3) + { + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + } + + d_frame.upload(frame); + + startTimer(); + if(!next()) + break; + + d_gmg->apply(d_frame, foreground); + + stopTimer(); + } + + // process last frame in sequence 
to get data for sanity test + for (; i < numIters; ++i) + { + cap >> frame; + if (frame.empty()) + { + cap.release(); + cap.open(inputFile); + cap >> frame; + } + + if (cn != 3) + { + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + } + + d_frame.upload(frame); + + d_gmg->apply(d_frame, foreground); + } + } + else + { + FAIL_NO_CPU(); + } + + SANITY_CHECK_NOTHING(); +} + +#endif diff --git a/modules/cudalegacy/perf/perf_main.cpp b/modules/cudalegacy/perf/perf_main.cpp new file mode 100644 index 000000000..083070746 --- /dev/null +++ b/modules/cudalegacy/perf/perf_main.cpp @@ -0,0 +1,47 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "perf_precomp.hpp" + +using namespace perf; + +CV_PERF_TEST_CUDA_MAIN(cudalegacy) diff --git a/modules/cudalegacy/perf/perf_precomp.hpp b/modules/cudalegacy/perf/perf_precomp.hpp new file mode 100644 index 000000000..847765018 --- /dev/null +++ b/modules/cudalegacy/perf/perf_precomp.hpp @@ -0,0 +1,66 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wmissing-declarations" +# if defined __clang__ || defined __APPLE__ +# pragma GCC diagnostic ignored "-Wmissing-prototypes" +# pragma GCC diagnostic ignored "-Wextra" +# endif +#endif + +#ifndef __OPENCV_PERF_PRECOMP_HPP__ +#define __OPENCV_PERF_PRECOMP_HPP__ + +#include "opencv2/ts.hpp" +#include "opencv2/ts/cuda_perf.hpp" + +#include "opencv2/cudalegacy.hpp" +#include "opencv2/video.hpp" + +#include "opencv2/opencv_modules.hpp" + +#ifdef GTEST_CREATE_SHARED_LIBRARY +#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined +#endif + +#endif diff --git a/modules/cudabgsegm/src/cuda/fgd.cu b/modules/cudalegacy/src/cuda/fgd.cu similarity index 100% rename from modules/cudabgsegm/src/cuda/fgd.cu rename to modules/cudalegacy/src/cuda/fgd.cu diff --git a/modules/cudabgsegm/src/cuda/fgd.hpp b/modules/cudalegacy/src/cuda/fgd.hpp similarity index 100% rename from modules/cudabgsegm/src/cuda/fgd.hpp rename to modules/cudalegacy/src/cuda/fgd.hpp diff --git a/modules/cudabgsegm/src/cuda/gmg.cu b/modules/cudalegacy/src/cuda/gmg.cu similarity index 100% rename from modules/cudabgsegm/src/cuda/gmg.cu rename to modules/cudalegacy/src/cuda/gmg.cu diff --git a/modules/cudabgsegm/src/fgd.cpp b/modules/cudalegacy/src/fgd.cpp similarity index 100% rename from modules/cudabgsegm/src/fgd.cpp rename to modules/cudalegacy/src/fgd.cpp diff --git a/modules/cudabgsegm/src/gmg.cpp b/modules/cudalegacy/src/gmg.cpp similarity index 100% rename from modules/cudabgsegm/src/gmg.cpp rename to modules/cudalegacy/src/gmg.cpp diff --git a/modules/cudalegacy/src/precomp.hpp b/modules/cudalegacy/src/precomp.hpp index b432057ef..a15e9f03c 100644 --- a/modules/cudalegacy/src/precomp.hpp +++ 
b/modules/cudalegacy/src/precomp.hpp @@ -56,6 +56,18 @@ # include "opencv2/objdetect.hpp" #endif +#ifdef HAVE_OPENCV_CUDAARITHM +# include "opencv2/cudaarithm.hpp" +#endif + +#ifdef HAVE_OPENCV_CUDAFILTERS +# include "opencv2/cudafilters.hpp" +#endif + +#ifdef HAVE_OPENCV_CUDAIMGPROC +# include "opencv2/cudaimgproc.hpp" +#endif + #include "opencv2/core/private.cuda.hpp" #include "opencv2/cudalegacy/private.hpp" diff --git a/samples/gpu/bgfg_segm.cpp b/samples/gpu/bgfg_segm.cpp index 89bb8d6a2..00bb59e24 100644 --- a/samples/gpu/bgfg_segm.cpp +++ b/samples/gpu/bgfg_segm.cpp @@ -4,6 +4,7 @@ #include "opencv2/core.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/cudabgsegm.hpp" +#include "opencv2/cudalegacy.hpp" #include "opencv2/video.hpp" #include "opencv2/highgui.hpp" From e97d18ba812f6f0410850a22eaa04abecf3c9310 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicu=20=C8=98tiurc=C4=83?= Date: Mon, 19 Jan 2015 15:26:51 -0500 Subject: [PATCH 57/98] implement _OutputArray::assign() for _OutputArrays of type MATX --- modules/core/src/matrix.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 38ff7ed53..04d6fb40f 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -2644,6 +2644,10 @@ void _OutputArray::assign(const UMat& u) const { u.copyTo(*(Mat*)obj); // TODO check u.getMat() } + else if (k == MATX) + { + u.copyTo(getMat()); // TODO check u.getMat() + } else { CV_Error(Error::StsNotImplemented, ""); @@ -2662,6 +2666,10 @@ void _OutputArray::assign(const Mat& m) const { *(Mat*)obj = m; } + else if (k == MATX) + { + getMat() = m; + } else { CV_Error(Error::StsNotImplemented, ""); From f5b21e81415c417fd3a7db97a2a02c5bd79f12c9 Mon Sep 17 00:00:00 2001 From: Yan Wang Date: Mon, 19 Jan 2015 15:50:16 +0800 Subject: [PATCH 58/98] Avoid deadlock because some work items couldn't reach the barrier in the loop and were left unsynchronized. Signed-off-by: Yan Wang --- modules/objdetect/src/cascadedetect.cpp | 2 +- modules/objdetect/src/opencl/cascadedetect.cl | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/objdetect/src/cascadedetect.cpp b/modules/objdetect/src/cascadedetect.cpp index 6ddc0c878..8da3ffbf3 100644 --- a/modules/objdetect/src/cascadedetect.cpp +++ b/modules/objdetect/src/cascadedetect.cpp @@ -587,7 +587,7 @@ bool HaarEvaluator::read(const FileNode& node, Size _origWinSize) localSize = lbufSize = Size(0, 0); if (ocl::haveOpenCL()) { - if (ocl::Device::getDefault().isAMD()) + if (ocl::Device::getDefault().isAMD() || ocl::Device::getDefault().isIntel()) { localSize = Size(8, 8); lbufSize = Size(origWinSize.width + localSize.width, diff --git a/modules/objdetect/src/opencl/cascadedetect.cl b/modules/objdetect/src/opencl/cascadedetect.cl index 13cb1aa38..ce676426f 100644 --- a/modules/objdetect/src/opencl/cascadedetect.cl +++ b/modules/objdetect/src/opencl/cascadedetect.cl @@ -233,11 +233,12 @@ void runHaarClassifier( for( stageIdx = SPLIT_STAGE; stageIdx < N_STAGES; stageIdx++ ) { + barrier(CLK_LOCAL_MEM_FENCE); int nrects = lcount[0]; - barrier(CLK_LOCAL_MEM_FENCE); if( nrects == 0 ) break; + barrier(CLK_LOCAL_MEM_FENCE); if( lidx == 0 ) lcount[0] = 0; From 1ded2de2dd02e218ccf0d32fabe2cfea24dda021 Mon Sep 17 00:00:00 2001 From: karelknoest Date: Thu, 11 Dec 2014 10:30:24 +0100 Subject: [PATCH 59/98] Let LineSegmentDetector output line segment coordinates in float precision, as supported by the LSD algorithm.
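For context, a minimal caller-side sketch (illustrative only, not part of the patch; the image path is hypothetical) of how the detector is typically consumed once endpoints are reported with sub-pixel precision:

    #include "opencv2/imgproc.hpp"
    #include "opencv2/imgcodecs.hpp"
    #include <vector>

    int main()
    {
        // Hypothetical input; any grayscale image works.
        cv::Mat image = cv::imread("building.jpg", cv::IMREAD_GRAYSCALE);
        if (image.empty())
            return -1;

        cv::Ptr<cv::LineSegmentDetector> ls =
            cv::createLineSegmentDetector(cv::LSD_REFINE_STD);

        // With this change the detector can fill a Vec4f container, so the
        // sub-pixel endpoint coordinates computed by LSD are preserved.
        std::vector<cv::Vec4f> lines;
        ls->detect(image, lines);

        ls->drawSegments(image, lines);
        return 0;
    }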
--- modules/imgproc/include/opencv2/imgproc.hpp | 4 +-- modules/imgproc/src/lsd.cpp | 30 ++++++++++----------- modules/imgproc/test/test_lsd.cpp | 2 +- samples/cpp/lsd_lines.cpp | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index fad801b94..50cb2c94e 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -985,8 +985,8 @@ public: @param _image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use: `lsd_ptr-\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);` - @param _lines A vector of Vec4i elements specifying the beginning and ending point of a line. Where - Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly + @param _lines A vector of Vec4f elements specifying the beginning and ending point of a line. Where + Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient. @param width Vector of widths of the regions, where the lines are found. E.g. Width of line. @param prec Vector of precisions with which the lines are found. diff --git a/modules/imgproc/src/lsd.cpp b/modules/imgproc/src/lsd.cpp index 65e874e00..347d8765c 100644 --- a/modules/imgproc/src/lsd.cpp +++ b/modules/imgproc/src/lsd.cpp @@ -191,8 +191,8 @@ public: * If only a roi needs to be selected, use * lsd_ptr->detect(image(roi), ..., lines); * lines += Scalar(roi.x, roi.y, roi.x, roi.y); - * @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line. - * Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. + * @param _lines Return: A vector of Vec4f elements specifying the beginning and ending point of a line. + * Where Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. * Returned lines are strictly oriented depending on the gradient. * @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param prec Return: Vector of precisions with which the lines are found. @@ -286,8 +286,8 @@ private: /** * Detect lines in the whole input image. * - * @param lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line. - * Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. + * @param lines Return: A vector of Vec4f elements specifying the beginning and ending point of a line. + * Where Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. * Returned lines are strictly oriented depending on the gradient. * @param widths Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param precisions Return: Vector of precisions with which the lines are found. 
@@ -297,7 +297,7 @@ private: * * 0 corresponds to 1 mean false alarm * * 1 corresponds to 0.1 mean false alarms */ - void flsd(std::vector& lines, + void flsd(std::vector& lines, std::vector& widths, std::vector& precisions, std::vector& nfas); @@ -418,7 +418,7 @@ void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, // Convert image to double img.convertTo(image, CV_64FC1); - std::vector lines; + std::vector lines; std::vector w, p, n; w_needed = _width.needed(); p_needed = _prec.needed(); @@ -435,7 +435,7 @@ void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, if(n_needed) Mat(n).copyTo(_nfa); } -void LineSegmentDetectorImpl::flsd(std::vector& lines, +void LineSegmentDetectorImpl::flsd(std::vector& lines, std::vector& widths, std::vector& precisions, std::vector& nfas) { @@ -518,7 +518,7 @@ void LineSegmentDetectorImpl::flsd(std::vector& lines, } //Store the relevant data - lines.push_back(Vec4i(int(rec.x1), int(rec.y1), int(rec.x2), int(rec.y2))); + lines.push_back(Vec4f(float(rec.x1), float(rec.y1), float(rec.x2), float(rec.y2))); if(w_needed) widths.push_back(rec.width); if(p_needed) precisions.push_back(rec.p); if(n_needed && doRefine >= LSD_REFINE_ADV) nfas.push_back(log_nfa); @@ -1181,9 +1181,9 @@ void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, InputArray l // Draw segments for(int i = 0; i < N; ++i) { - const Vec4i& v = _lines.at(i); - Point b(v[0], v[1]); - Point e(v[2], v[3]); + const Vec4f& v = _lines.at(i); + Point2f b(v[0], v[1]); + Point2f e(v[2], v[3]); line(_image.getMatRef(), b, e, Scalar(0, 0, 255), 1); } } @@ -1208,14 +1208,14 @@ int LineSegmentDetectorImpl::compareSegments(const Size& size, InputArray lines1 // Draw segments for(int i = 0; i < N1; ++i) { - Point b(_lines1.at(i)[0], _lines1.at(i)[1]); - Point e(_lines1.at(i)[2], _lines1.at(i)[3]); + Point2f b(_lines1.at(i)[0], _lines1.at(i)[1]); + Point2f e(_lines1.at(i)[2], _lines1.at(i)[3]); line(I1, b, e, Scalar::all(255), 1); } for(int i = 0; i < N2; ++i) { - Point b(_lines2.at(i)[0], _lines2.at(i)[1]); - Point e(_lines2.at(i)[2], _lines2.at(i)[3]); + Point2f b(_lines2.at(i)[0], _lines2.at(i)[1]); + Point2f e(_lines2.at(i)[2], _lines2.at(i)[3]); line(I2, b, e, Scalar::all(255), 1); } diff --git a/modules/imgproc/test/test_lsd.cpp b/modules/imgproc/test/test_lsd.cpp index 50a353503..2daa4bef1 100644 --- a/modules/imgproc/test/test_lsd.cpp +++ b/modules/imgproc/test/test_lsd.cpp @@ -16,7 +16,7 @@ public: protected: Mat test_image; - vector lines; + vector lines; RNG rng; int passedtests; diff --git a/samples/cpp/lsd_lines.cpp b/samples/cpp/lsd_lines.cpp index 69497b387..34f6b906b 100644 --- a/samples/cpp/lsd_lines.cpp +++ b/samples/cpp/lsd_lines.cpp @@ -37,7 +37,7 @@ int main(int argc, char** argv) #endif double start = double(getTickCount()); - vector lines_std; + vector lines_std; // Detect the lines ls->detect(image, lines_std); From b5bd2fd880ea80659e879ec4939a5147217d7e2f Mon Sep 17 00:00:00 2001 From: karelknoest Date: Mon, 29 Dec 2014 14:53:17 +0100 Subject: [PATCH 60/98] Improved documentation and method comment to clarify that LineSegmentDetector::detect method accepts both Vec4i and Vec4f as return vector. 
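For reference, a short sketch (illustrative only, not part of the patch) of the two output container types the clarified documentation refers to; when an integer container is supplied, the float endpoints are converted on copy:

    #include "opencv2/imgproc.hpp"
    #include <vector>

    // Runs the detector twice on the same grayscale image to show that either
    // element type can be passed as the output vector of detect().
    static void detectWithBothTypes(const cv::Mat& gray)
    {
        cv::Ptr<cv::LineSegmentDetector> ls = cv::createLineSegmentDetector();

        std::vector<cv::Vec4i> linesInt;    // legacy integer endpoints
        std::vector<cv::Vec4f> linesFloat;  // sub-pixel endpoints

        ls->detect(gray, linesInt);
        ls->detect(gray, linesFloat);
    }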
--- modules/imgproc/include/opencv2/imgproc.hpp | 4 ++-- modules/imgproc/src/lsd.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 50cb2c94e..a434500b8 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -985,8 +985,8 @@ public: @param _image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use: `lsd_ptr-\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);` - @param _lines A vector of Vec4f elements specifying the beginning and ending point of a line. Where - Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly + @param _lines A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. Where + Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient. @param width Vector of widths of the regions, where the lines are found. E.g. Width of line. @param prec Vector of precisions with which the lines are found. diff --git a/modules/imgproc/src/lsd.cpp b/modules/imgproc/src/lsd.cpp index 347d8765c..cda073a61 100644 --- a/modules/imgproc/src/lsd.cpp +++ b/modules/imgproc/src/lsd.cpp @@ -191,8 +191,8 @@ public: * If only a roi needs to be selected, use * lsd_ptr->detect(image(roi), ..., lines); * lines += Scalar(roi.x, roi.y, roi.x, roi.y); - * @param _lines Return: A vector of Vec4f elements specifying the beginning and ending point of a line. - * Where Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. + * @param _lines Return: A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. + * Where Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. * Returned lines are strictly oriented depending on the gradient. * @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param prec Return: Vector of precisions with which the lines are found. 
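The next patch in the series ("sqsum") vectorizes the per-channel sum / sum-of-squares accumulation with SSE2. For reference, a scalar sketch of the quantity being accumulated (illustrative only, not part of the series; names and types are chosen for clarity rather than taken from the OpenCV sources):

    #include <vector>
    #include <cstdint>

    // Per-channel sum and sum of squares over an interleaved 8-bit buffer
    // holding len pixels with cn channels each.
    static void sumSqrScalar(const uint8_t* src, int len, int cn,
                             std::vector<long long>& sum,
                             std::vector<long long>& sqsum)
    {
        sum.assign(cn, 0);
        sqsum.assign(cn, 0);
        for (int i = 0; i < len; ++i)
            for (int c = 0; c < cn; ++c)
            {
                const long long v = src[i * cn + c];
                sum[c]   += v;
                sqsum[c] += v * v;
            }
    }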
From e9a6c5db219e250792c90cdf16414b33f633964f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 12 Jan 2015 10:59:31 +0300 Subject: [PATCH 61/98] sqsum --- CMakeLists.txt | 10 +- cmake/OpenCVCompilerOptions.cmake | 7 +- modules/core/include/opencv2/core/cvdef.h | 4 +- modules/core/src/stat.cpp | 144 +++++++++++++++++++++- modules/imgproc/src/color.cpp | 4 +- modules/imgproc/src/imgwarp.cpp | 4 +- modules/imgproc/src/smooth.cpp | 2 +- 7 files changed, 154 insertions(+), 21 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index da0b42cb1..2f4fd3323 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -214,13 +214,13 @@ OCV_OPTION(ENABLE_COVERAGE "Enable coverage collection with GCov" OCV_OPTION(ENABLE_OMIT_FRAME_POINTER "Enable -fomit-frame-pointer for GCC" ON IF CMAKE_COMPILER_IS_GNUCXX AND NOT (APPLE AND CMAKE_COMPILER_IS_CLANGCXX) ) OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" ON IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES powerpc.*) ) OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE "Enable SSE instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE2 "Enable SSE2 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 66e16e786..13559b5c8 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -122,9 +122,6 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_POWERPC) add_extra_compiler_option("-mcpu=G3 -mtune=G5") endif() - if(ENABLE_POPCNT) - add_extra_compiler_option(-mpopcnt) - endif() if(ENABLE_SSE) add_extra_compiler_option(-msse) endif() @@ -168,6 +165,10 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_SSE42) add_extra_compiler_option(-msse4.2) endif() + + if(ENABLE_POPCNT) + add_extra_compiler_option(-mpopcnt) + endif() endif() endif(NOT MINGW) diff 
--git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index a9d59c769..3fdaa6954 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -166,7 +166,7 @@ # endif # define CV_POPCNT 1 # endif -# if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600) +# if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600 && 0) // MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX // See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 # include @@ -177,7 +177,7 @@ # define __xgetbv() 0 # endif # endif -# if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800) +# if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800 && 0) # include # define CV_AVX2 1 # if defined __FMA__ diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index b26308051..87c423dc3 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -800,6 +800,137 @@ static CountNonZeroFunc getCountNonZeroTab(int depth) return countNonZeroTab[depth]; } +template +struct SumSqr_SIMD +{ + int operator () (const T *, const uchar *, ST *, SQT *, int, int) const + { + return 0; + } +}; + +#if CV_SSE2 + +template <> +struct SumSqr_SIMD +{ + int operator () (const uchar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2) || !USE_SSE2) + return 0; + + int x = 0; + __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero; + + for ( ; x <= len - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x)); + __m128i v_half = _mm_unpacklo_epi8(v_src, v_zero); + + __m128i v_mullo = _mm_mullo_epi16(v_half, v_half); + __m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero)); + v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + + v_half = _mm_unpackhi_epi8(v_src, v_zero); + v_mullo = _mm_mullo_epi16(v_half, v_half); + v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero)); + v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + for ( ; x <= len - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src0 + x)), v_zero); + + __m128i v_mullo = _mm_mullo_epi16(v_src, v_src); + __m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src); + v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_src, v_zero)); + v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_src, v_zero)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + int CV_DECL_ALIGNED(16) ar[8]; + _mm_store_si128((__m128i*)ar, v_sum); + _mm_store_si128((__m128i*)(ar + 4), v_sqsum); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + { + sum[j] += ar[j + i]; + sqsum[j] += ar[4 + j + i]; + } + + return x / cn; + } +}; + +template <> +struct SumSqr_SIMD +{ + int operator () (const schar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) 
const + { + if (mask || (cn != 1 && cn != 2) || !USE_SSE2) + return 0; + + int x = 0; + __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero; + + for ( ; x <= len - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x)); + __m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8); + + __m128i v_mullo = _mm_mullo_epi16(v_half, v_half); + __m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + + v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8); + v_mullo = _mm_mullo_epi16(v_half, v_half); + v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + for ( ; x <= len - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8); + + __m128i v_mullo = _mm_mullo_epi16(v_src, v_src); + __m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + int CV_DECL_ALIGNED(16) ar[8]; + _mm_store_si128((__m128i*)ar, v_sum); + _mm_store_si128((__m128i*)(ar + 4), v_sqsum); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + { + sum[j] += ar[j + i]; + sqsum[j] += ar[4 + j + i]; + } + + return x / cn; + } +}; + +#endif + template static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn ) { @@ -807,14 +938,15 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le if( !mask ) { - int i; - int k = cn % 4; + SumSqr_SIMD vop; + int i = vop(src0, mask, sum, sqsum, len, cn), k = cn % 4; + src += i * cn; if( k == 1 ) { ST s0 = sum[0]; SQT sq0 = sqsum[0]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v = src[0]; s0 += v; sq0 += (SQT)v*v; @@ -826,7 +958,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le { ST s0 = sum[0], s1 = sum[1]; SQT sq0 = sqsum[0], sq1 = sqsum[1]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v0 = src[0], v1 = src[1]; s0 += v0; sq0 += (SQT)v0*v0; @@ -839,7 +971,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le { ST s0 = sum[0], s1 = sum[1], s2 = sum[2]; SQT sq0 = sqsum[0], sq1 = sqsum[1], sq2 = sqsum[2]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v0 = src[0], v1 = src[1], v2 = src[2]; s0 += v0; sq0 += (SQT)v0*v0; @@ -855,7 +987,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le src = src0 + k; ST s0 = sum[k], s1 = sum[k+1], s2 = sum[k+2], s3 = sum[k+3]; SQT sq0 = sqsum[k], sq1 = sqsum[k+1], sq2 = sqsum[k+2], sq3 = sqsum[k+3]; - for( i = 0; i < len; 
i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v0, v1; v0 = src[0], v1 = src[1]; diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 5ae1170b4..b900cf184 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1598,10 +1598,10 @@ struct RGB2Gray haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } - void process(__m128 v_r, __m128 v_g, __m128 v_b, + void process(__m128 v_b, __m128 v_g, __m128 v_r, __m128 & v_gray) const { - v_gray = _mm_mul_ps(v_r, v_cb); + v_gray = _mm_mul_ps(v_r, v_cr); v_gray = _mm_add_ps(v_gray, _mm_mul_ps(v_g, v_cg)); v_gray = _mm_add_ps(v_gray, _mm_mul_ps(v_b, v_cb)); } diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 304210f84..fe126fbbd 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -5016,8 +5016,8 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vandq_s32(v_ix1, v_mask))); vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } - #elif CV_SSE2 - if (useSSE2) + #elif CV_SSE4_1 + if (useSSE4_1) { __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 2dc2fbdf7..ec274259e 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -842,7 +842,7 @@ struct ColumnSum : { int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)); - vst1q_s32(D + i, v_s01); + vst1q_s32(D + i, v_s0); vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i))); } #endif From 7a3ca99894eabd0ea5f6d42797627ad115ee4a3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicu=20=C8=98tiurc=C4=83?= Date: Tue, 20 Jan 2015 23:44:03 -0500 Subject: [PATCH 62/98] fix _OutputArray::assign() for _OutputArrays of type MATX when data types differ --- modules/core/src/matrix.cpp | 2 +- modules/core/test/test_misc.cpp | 104 ++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 1 deletion(-) diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 04d6fb40f..3bd9f2c62 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -2668,7 +2668,7 @@ void _OutputArray::assign(const Mat& m) const } else if (k == MATX) { - getMat() = m; + m.copyTo(getMat()); } else { diff --git a/modules/core/test/test_misc.cpp b/modules/core/test/test_misc.cpp index d37f0ee4f..0438065b8 100644 --- a/modules/core/test/test_misc.cpp +++ b/modules/core/test/test_misc.cpp @@ -26,3 +26,107 @@ TEST(Core_SaturateCast, NegativeNotClipped) ASSERT_EQ(0xffffffff, val); } + +template +static double maxAbsDiff(const T &t, const U &u) +{ + Mat_ d; + absdiff(t, u, d); + double ret; + minMaxLoc(d, NULL, &ret); + return ret; +} + +TEST(Core_OutputArrayAssign, _Matxd_Matd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(expected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), 0.0); +} + +TEST(Core_OutputArrayAssign, _Matxd_Matf) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(expected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} + +TEST(Core_OutputArrayAssign, _Matxf_Matd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + Matx23f actualx; + + { + OutputArray oa(actualx); + oa.assign(expected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), 
FLT_EPSILON); +} + +TEST(Core_OutputArrayAssign, _Matxd_UMatd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + UMat uexpected = expected.getUMat(ACCESS_READ); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(uexpected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), 0.0); +} + +TEST(Core_OutputArrayAssign, _Matxd_UMatf) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + UMat uexpected = expected.getUMat(ACCESS_READ); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(uexpected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} + +TEST(Core_OutputArrayAssign, _Matxf_UMatd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + UMat uexpected = expected.getUMat(ACCESS_READ); + Matx23f actualx; + + { + OutputArray oa(actualx); + oa.assign(uexpected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} + From 8722faa1671725144323d7f22b10aabb6cd82f84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicu=20=C8=98tiurc=C4=83?= Date: Tue, 20 Jan 2015 23:58:05 -0500 Subject: [PATCH 63/98] fix whitespace --- modules/core/test/test_misc.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/core/test/test_misc.cpp b/modules/core/test/test_misc.cpp index 0438065b8..cd4ec7c5a 100644 --- a/modules/core/test/test_misc.cpp +++ b/modules/core/test/test_misc.cpp @@ -41,7 +41,7 @@ TEST(Core_OutputArrayAssign, _Matxd_Matd) { Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); Matx23d actualx; - + { OutputArray oa(actualx); oa.assign(expected); @@ -129,4 +129,3 @@ TEST(Core_OutputArrayAssign, _Matxf_UMatd) EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); } - From 1a15596f21211dd24c9ea2a49539b1742552bd1f Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Wed, 21 Jan 2015 12:53:50 +0300 Subject: [PATCH 64/98] Fixed build warning in GDAL support module --- modules/imgcodecs/src/grfmt_gdal.cpp | 29 ---------------------------- 1 file changed, 29 deletions(-) diff --git a/modules/imgcodecs/src/grfmt_gdal.cpp b/modules/imgcodecs/src/grfmt_gdal.cpp index 031163095..55dd7192f 100644 --- a/modules/imgcodecs/src/grfmt_gdal.cpp +++ b/modules/imgcodecs/src/grfmt_gdal.cpp @@ -140,35 +140,6 @@ int gdal2opencv( const GDALDataType& gdalType, const int& channels ){ return -1; } - -std::string GetOpenCVTypeName( const int& type ){ - - switch(type){ - case CV_8UC1: - return "CV_8UC1"; - case CV_8UC3: - return "CV_8UC3"; - case CV_8UC4: - return "CV_8UC4"; - case CV_16UC1: - return "CV_16UC1"; - case CV_16UC3: - return "CV_16UC3"; - case CV_16UC4: - return "CV_16UC4"; - case CV_16SC1: - return "CV_16SC1"; - case CV_16SC3: - return "CV_16SC3"; - case CV_16SC4: - return "CV_16SC4"; - default: - return "Unknown"; - } - return "Unknown"; -} - - /** * GDAL Decoder Constructor */ From 036b13ee05c793a765d621aa785b078fcf08fe03 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Wed, 21 Jan 2015 14:01:21 +0300 Subject: [PATCH 65/98] Fixed GDAL loader check --- modules/imgcodecs/src/loadsave.cpp | 10 +++++----- modules/imgcodecs/test/test_grfmt.cpp | 5 +++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/modules/imgcodecs/src/loadsave.cpp b/modules/imgcodecs/src/loadsave.cpp index c06b42ade..8526a4a3f 100644 --- a/modules/imgcodecs/src/loadsave.cpp +++ b/modules/imgcodecs/src/loadsave.cpp @@ -247,7 +247,7 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) ImageDecoder decoder; #ifdef HAVE_GDAL - if( (flags & 
IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL ){ + if(flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL ){ decoder = GdalDecoder().newDecoder(); }else{ #endif @@ -275,7 +275,7 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) // grab the decoded type int type = decoder->type(); - if( flags != -1 ) + if( flags != IMREAD_UNCHANGED ) { if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 ) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); @@ -336,7 +336,7 @@ imreadmulti_(const String& filename, int flags, std::vector& mats) ImageDecoder decoder; #ifdef HAVE_GDAL - if ((flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL){ + if (flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL){ decoder = GdalDecoder().newDecoder(); } else{ @@ -362,7 +362,7 @@ imreadmulti_(const String& filename, int flags, std::vector& mats) { // grab the decoded type int type = decoder->type(); - if (flags != -1) + if (flags != IMREAD_UNCHANGED) { if ((flags & CV_LOAD_IMAGE_ANYDEPTH) == 0) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); @@ -508,7 +508,7 @@ imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 ) size.height = decoder->height(); int type = decoder->type(); - if( flags != -1 ) + if( flags != IMREAD_UNCHANGED ) { if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 ) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); diff --git a/modules/imgcodecs/test/test_grfmt.cpp b/modules/imgcodecs/test/test_grfmt.cpp index d1610ae7f..423d030a0 100644 --- a/modules/imgcodecs/test/test_grfmt.cpp +++ b/modules/imgcodecs/test/test_grfmt.cpp @@ -104,7 +104,8 @@ TEST(Imgcodecs_imread, regression) ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_COLOR)); ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_ANYDEPTH)); ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_ANYCOLOR)); - ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_LOAD_GDAL)); + if (i != 2) // GDAL does not support hdr + ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_LOAD_GDAL)); } } @@ -684,7 +685,7 @@ public: compare(IMREAD_COLOR); compare(IMREAD_ANYDEPTH); compare(IMREAD_ANYCOLOR); - compare(IMREAD_LOAD_GDAL); + // compare(IMREAD_LOAD_GDAL); // GDAL does not support multi-page TIFFs } }; From 71c391cd8dbac3b2203af757ddb1fbdeed41d87c Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 11:27:31 +0300 Subject: [PATCH 66/98] remove unused memory transfer from TVL1 CUDA implementation it caused runtime failures --- modules/cudaoptflow/src/tvl1flow.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/cudaoptflow/src/tvl1flow.cpp b/modules/cudaoptflow/src/tvl1flow.cpp index d0efffa57..b8dfea56f 100644 --- a/modules/cudaoptflow/src/tvl1flow.cpp +++ b/modules/cudaoptflow/src/tvl1flow.cpp @@ -248,7 +248,6 @@ void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat& I0, const G { // some tweaks to make sum operation less frequently bool calcError = (epsilon > 0) && (n & 0x1) && (prevError < scaledEpsilon); - cv::Mat m1(u3); estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, diff, l_t, static_cast(theta), gamma, calcError); if (calcError) { From c4b2058d233347b83b53b3afd293b9eed80ed806 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 11:27:51 +0300 Subject: [PATCH 67/98] simplify TVL1 accuracy test to reduce run time --- modules/cudaoptflow/test/test_optflow.cpp | 35 +++++++++++------------ 1 file changed, 16 insertions(+), 19 deletions(-) diff --git 
a/modules/cudaoptflow/test/test_optflow.cpp b/modules/cudaoptflow/test/test_optflow.cpp index 2b976563b..dce9cc59b 100644 --- a/modules/cudaoptflow/test/test_optflow.cpp +++ b/modules/cudaoptflow/test/test_optflow.cpp @@ -325,15 +325,20 @@ INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FarnebackOpticalFlow, testing::Combine( ////////////////////////////////////////////////////// // OpticalFlowDual_TVL1 -PARAM_TEST_CASE(OpticalFlowDual_TVL1, cv::cuda::DeviceInfo, UseRoi) +namespace +{ + IMPLEMENT_PARAM_CLASS(Gamma, double) +} + +PARAM_TEST_CASE(OpticalFlowDual_TVL1, cv::cuda::DeviceInfo, Gamma) { cv::cuda::DeviceInfo devInfo; - bool useRoi; + double gamma; virtual void SetUp() { devInfo = GET_PARAM(0); - useRoi = GET_PARAM(1); + gamma = GET_PARAM(1); cv::cuda::setDevice(devInfo.deviceID()); } @@ -348,30 +353,22 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy) ASSERT_FALSE(frame1.empty()); cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg; - cv::cuda::GpuMat d_flowx = createMat(frame0.size(), CV_32FC1, useRoi); - cv::cuda::GpuMat d_flowy = createMat(frame0.size(), CV_32FC1, useRoi); - d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy); + d_alg.iterations = 10; + d_alg.gamma = gamma; + + cv::cuda::GpuMat d_flowx, d_flowy; + d_alg(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy); cv::Ptr alg = cv::createOptFlow_DualTVL1(); alg->set("medianFiltering", 1); alg->set("innerIterations", 1); alg->set("outerIterations", d_alg.iterations); + alg->set("gamma", gamma); + cv::Mat flow; alg->calc(frame0, frame1, flow); cv::Mat gold[2]; cv::split(flow, gold); - cv::Mat mx(d_flowx); - cv::Mat my(d_flowx); - - EXPECT_MAT_SIMILAR(gold[0], d_flowx, 4e-3); - EXPECT_MAT_SIMILAR(gold[1], d_flowy, 4e-3); - d_alg.gamma = 1; - alg->set("gamma", 1); - d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy); - alg->calc(frame0, frame1, flow); - cv::split(flow, gold); - mx = cv::Mat(d_flowx); - my = cv::Mat(d_flowx); EXPECT_MAT_SIMILAR(gold[0], d_flowx, 4e-3); EXPECT_MAT_SIMILAR(gold[1], d_flowy, 4e-3); @@ -379,7 +376,7 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy) INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowDual_TVL1, testing::Combine( ALL_DEVICES, - WHOLE_SUBMAT)); + testing::Values(Gamma(0.0), Gamma(1.0)))); ////////////////////////////////////////////////////// // FastOpticalFlowBM From 19c6bbe7d91efc74039636b6ce00a26810f40ef9 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:35:23 +0300 Subject: [PATCH 68/98] move obsolete algorithms from cudaoptflow to cudalegacy --- .../cudalegacy/include/opencv2/cudalegacy.hpp | 51 +++++- .../{cudaoptflow => cudalegacy}/src/bm.cpp | 0 .../src/bm_fast.cpp | 0 .../src/cuda/bm.cu | 0 .../src/cuda/bm_fast.cu | 0 .../src/cuda/needle_map.cu | 0 .../src/interpolate_frames.cpp | 0 .../src/needle_map.cpp | 0 .../include/opencv2/cudaoptflow.hpp | 41 ----- modules/cudaoptflow/perf/perf_optflow.cpp | 150 ------------------ modules/cudaoptflow/test/test_optflow.cpp | 118 -------------- 11 files changed, 49 insertions(+), 311 deletions(-) rename modules/{cudaoptflow => cudalegacy}/src/bm.cpp (100%) rename modules/{cudaoptflow => cudalegacy}/src/bm_fast.cpp (100%) rename modules/{cudaoptflow => cudalegacy}/src/cuda/bm.cu (100%) rename modules/{cudaoptflow => cudalegacy}/src/cuda/bm_fast.cu (100%) rename modules/{cudaoptflow => cudalegacy}/src/cuda/needle_map.cu (100%) rename modules/{cudaoptflow => cudalegacy}/src/interpolate_frames.cpp (100%) rename modules/{cudaoptflow => cudalegacy}/src/needle_map.cpp (100%) diff --git 
a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index 328836c53..f0107499d 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -71,8 +71,9 @@ public: CV_EXPORTS Ptr createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null()); -//////////////////////////////////////////////////// +// // GMG +// /** @brief Background/Foreground Segmentation Algorithm. @@ -125,8 +126,9 @@ public: CV_EXPORTS Ptr createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8); -//////////////////////////////////////////////////// +// // FGD +// /** @brief The class discriminates between foreground and background pixels by building and maintaining a model of the background. @@ -180,6 +182,51 @@ struct CV_EXPORTS FGDParams CV_EXPORTS Ptr createBackgroundSubtractorFGD(const FGDParams& params = FGDParams()); +// +// Optical flow +// + +//! Calculates optical flow for 2 images using block matching algorithm */ +CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, + Size block_size, Size shift_size, Size max_range, bool use_previous, + GpuMat& velx, GpuMat& vely, GpuMat& buf, + Stream& stream = Stream::Null()); + +class CV_EXPORTS FastOpticalFlowBM +{ +public: + void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null()); + +private: + GpuMat buffer; + GpuMat extended_I0; + GpuMat extended_I1; +}; + +/** @brief Interpolates frames (images) using provided optical flow (displacement field). + +@param frame0 First frame (32-bit floating point images, single channel). +@param frame1 Second frame. Must have the same type and size as frame0 . +@param fu Forward horizontal displacement. +@param fv Forward vertical displacement. +@param bu Backward horizontal displacement. +@param bv Backward vertical displacement. +@param pos New frame position. +@param newFrame Output image. +@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6 +GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward +horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow, +interpolated backward vertical flow. +@param stream Stream for the asynchronous version. + */ +CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, + const GpuMat& fu, const GpuMat& fv, + const GpuMat& bu, const GpuMat& bv, + float pos, GpuMat& newFrame, GpuMat& buf, + Stream& stream = Stream::Null()); + +CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); + //! 
@} }} diff --git a/modules/cudaoptflow/src/bm.cpp b/modules/cudalegacy/src/bm.cpp similarity index 100% rename from modules/cudaoptflow/src/bm.cpp rename to modules/cudalegacy/src/bm.cpp diff --git a/modules/cudaoptflow/src/bm_fast.cpp b/modules/cudalegacy/src/bm_fast.cpp similarity index 100% rename from modules/cudaoptflow/src/bm_fast.cpp rename to modules/cudalegacy/src/bm_fast.cpp diff --git a/modules/cudaoptflow/src/cuda/bm.cu b/modules/cudalegacy/src/cuda/bm.cu similarity index 100% rename from modules/cudaoptflow/src/cuda/bm.cu rename to modules/cudalegacy/src/cuda/bm.cu diff --git a/modules/cudaoptflow/src/cuda/bm_fast.cu b/modules/cudalegacy/src/cuda/bm_fast.cu similarity index 100% rename from modules/cudaoptflow/src/cuda/bm_fast.cu rename to modules/cudalegacy/src/cuda/bm_fast.cu diff --git a/modules/cudaoptflow/src/cuda/needle_map.cu b/modules/cudalegacy/src/cuda/needle_map.cu similarity index 100% rename from modules/cudaoptflow/src/cuda/needle_map.cu rename to modules/cudalegacy/src/cuda/needle_map.cu diff --git a/modules/cudaoptflow/src/interpolate_frames.cpp b/modules/cudalegacy/src/interpolate_frames.cpp similarity index 100% rename from modules/cudaoptflow/src/interpolate_frames.cpp rename to modules/cudalegacy/src/interpolate_frames.cpp diff --git a/modules/cudaoptflow/src/needle_map.cpp b/modules/cudalegacy/src/needle_map.cpp similarity index 100% rename from modules/cudaoptflow/src/needle_map.cpp rename to modules/cudalegacy/src/needle_map.cpp diff --git a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp index f65b1447b..7882a8e62 100644 --- a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp +++ b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp @@ -347,47 +347,6 @@ private: GpuMat norm_buf; }; -//! Calculates optical flow for 2 images using block matching algorithm */ -CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, - Size block_size, Size shift_size, Size max_range, bool use_previous, - GpuMat& velx, GpuMat& vely, GpuMat& buf, - Stream& stream = Stream::Null()); - -class CV_EXPORTS FastOpticalFlowBM -{ -public: - void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null()); - -private: - GpuMat buffer; - GpuMat extended_I0; - GpuMat extended_I1; -}; - -/** @brief Interpolates frames (images) using provided optical flow (displacement field). - -@param frame0 First frame (32-bit floating point images, single channel). -@param frame1 Second frame. Must have the same type and size as frame0 . -@param fu Forward horizontal displacement. -@param fv Forward vertical displacement. -@param bu Backward horizontal displacement. -@param bv Backward vertical displacement. -@param pos New frame position. -@param newFrame Output image. -@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6 -GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward -horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow, -interpolated backward vertical flow. -@param stream Stream for the asynchronous version. 
- */ -CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, - const GpuMat& fu, const GpuMat& fv, - const GpuMat& bu, const GpuMat& bv, - float pos, GpuMat& newFrame, GpuMat& buf, - Stream& stream = Stream::Null()); - -CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); - //! @} }} // namespace cv { namespace cuda { diff --git a/modules/cudaoptflow/perf/perf_optflow.cpp b/modules/cudaoptflow/perf/perf_optflow.cpp index d22eb7e60..12612b062 100644 --- a/modules/cudaoptflow/perf/perf_optflow.cpp +++ b/modules/cudaoptflow/perf/perf_optflow.cpp @@ -46,91 +46,10 @@ using namespace std; using namespace testing; using namespace perf; -////////////////////////////////////////////////////// -// InterpolateFrames - typedef pair pair_string; DEF_PARAM_TEST_1(ImagePair, pair_string); -PERF_TEST_P(ImagePair, InterpolateFrames, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0); - frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat d_fu, d_fv; - cv::cuda::GpuMat d_bu, d_bv; - - cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - - d_flow(d_frame0, d_frame1, d_fu, d_fv); - d_flow(d_frame1, d_frame0, d_bu, d_bv); - - cv::cuda::GpuMat newFrame; - cv::cuda::GpuMat d_buf; - - TEST_CYCLE() cv::cuda::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv, 0.5f, newFrame, d_buf); - - CUDA_SANITY_CHECK(newFrame, 1e-4); - } - else - { - FAIL_NO_CPU(); - } -} - -////////////////////////////////////////////////////// -// CreateOpticalFlowNeedleMap - -PERF_TEST_P(ImagePair, CreateOpticalFlowNeedleMap, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0); - frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; - - cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - - d_flow(d_frame0, d_frame1, u, v); - - cv::cuda::GpuMat vertex, colors; - - TEST_CYCLE() cv::cuda::createOpticalFlowNeedleMap(u, v, vertex, colors); - - CUDA_SANITY_CHECK(vertex, 1e-6); - CUDA_SANITY_CHECK(colors); - } - else - { - FAIL_NO_CPU(); - } -} - ////////////////////////////////////////////////////// // BroxOpticalFlow @@ -383,72 +302,3 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1, CPU_SANITY_CHECK(flow); } } - -////////////////////////////////////////////////////// -// OpticalFlowBM - -PERF_TEST_P(ImagePair, OpticalFlowBM, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - declare.time(400); - - const cv::Mat frame0 = 
readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - const cv::Size block_size(16, 16); - const cv::Size shift_size(1, 1); - const cv::Size max_range(16, 16); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u, v, buf; - - TEST_CYCLE() cv::cuda::calcOpticalFlowBM(d_frame0, d_frame1, block_size, shift_size, max_range, false, u, v, buf); - - CUDA_SANITY_CHECK(u); - CUDA_SANITY_CHECK(v); - } - else - { - FAIL_NO_CPU(); - } -} - -PERF_TEST_P(ImagePair, DISABLED_FastOpticalFlowBM, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - declare.time(400); - - const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - const cv::Size block_size(16, 16); - const cv::Size shift_size(1, 1); - const cv::Size max_range(16, 16); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u, v; - - cv::cuda::FastOpticalFlowBM fastBM; - - TEST_CYCLE() fastBM(d_frame0, d_frame1, u, v, max_range.width, block_size.width); - - CUDA_SANITY_CHECK(u, 2); - CUDA_SANITY_CHECK(v, 2); - } - else - { - FAIL_NO_CPU(); - } -} diff --git a/modules/cudaoptflow/test/test_optflow.cpp b/modules/cudaoptflow/test/test_optflow.cpp index dce9cc59b..7a6e68310 100644 --- a/modules/cudaoptflow/test/test_optflow.cpp +++ b/modules/cudaoptflow/test/test_optflow.cpp @@ -378,122 +378,4 @@ INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowDual_TVL1, testing::Combine( ALL_DEVICES, testing::Values(Gamma(0.0), Gamma(1.0)))); -////////////////////////////////////////////////////// -// FastOpticalFlowBM - -namespace -{ - void FastOpticalFlowBM_gold(const cv::Mat_& I0, const cv::Mat_& I1, cv::Mat_& velx, cv::Mat_& vely, int search_window, int block_window) - { - velx.create(I0.size()); - vely.create(I0.size()); - - int search_radius = search_window / 2; - int block_radius = block_window / 2; - - for (int y = 0; y < I0.rows; ++y) - { - for (int x = 0; x < I0.cols; ++x) - { - int bestDist = std::numeric_limits::max(); - int bestDx = 0; - int bestDy = 0; - - for (int dy = -search_radius; dy <= search_radius; ++dy) - { - for (int dx = -search_radius; dx <= search_radius; ++dx) - { - int dist = 0; - - for (int by = -block_radius; by <= block_radius; ++by) - { - for (int bx = -block_radius; bx <= block_radius; ++bx) - { - int I0_val = I0(cv::borderInterpolate(y + by, I0.rows, cv::BORDER_DEFAULT), cv::borderInterpolate(x + bx, I0.cols, cv::BORDER_DEFAULT)); - int I1_val = I1(cv::borderInterpolate(y + dy + by, I0.rows, cv::BORDER_DEFAULT), cv::borderInterpolate(x + dx + bx, I0.cols, cv::BORDER_DEFAULT)); - - dist += std::abs(I0_val - I1_val); - } - } - - if (dist < bestDist) - { - bestDist = dist; - bestDx = dx; - bestDy = dy; - } - } - } - - velx(y, x) = (float) bestDx; - vely(y, x) = (float) bestDy; - } - } - } - - double calc_rmse(const cv::Mat_& flow1, const cv::Mat_& flow2) - { - double sum = 0.0; - - for (int y = 0; y < flow1.rows; ++y) - { - for (int x = 0; x < flow1.cols; ++x) - { - double diff = flow1(y, x) - flow2(y, x); - sum += diff * diff; - } - } - - return std::sqrt(sum / flow1.size().area()); - } -} - -struct FastOpticalFlowBM : 
testing::TestWithParam -{ -}; - -CUDA_TEST_P(FastOpticalFlowBM, Accuracy) -{ - const double MAX_RMSE = 0.6; - - int search_window = 15; - int block_window = 5; - - cv::cuda::DeviceInfo devInfo = GetParam(); - cv::cuda::setDevice(devInfo.deviceID()); - - cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - cv::Size smallSize(320, 240); - cv::Mat frame0_small; - cv::Mat frame1_small; - - cv::resize(frame0, frame0_small, smallSize); - cv::resize(frame1, frame1_small, smallSize); - - cv::cuda::GpuMat d_flowx; - cv::cuda::GpuMat d_flowy; - cv::cuda::FastOpticalFlowBM fastBM; - - fastBM(loadMat(frame0_small), loadMat(frame1_small), d_flowx, d_flowy, search_window, block_window); - - cv::Mat_ flowx; - cv::Mat_ flowy; - FastOpticalFlowBM_gold(frame0_small, frame1_small, flowx, flowy, search_window, block_window); - - double err; - - err = calc_rmse(flowx, cv::Mat(d_flowx)); - EXPECT_LE(err, MAX_RMSE); - - err = calc_rmse(flowy, cv::Mat(d_flowy)); - EXPECT_LE(err, MAX_RMSE); -} - -INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FastOpticalFlowBM, ALL_DEVICES); - #endif // HAVE_CUDA From 381216aa5411fd20faf897cc8900c0358e7dc695 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:36:15 +0300 Subject: [PATCH 69/98] refactor cudaoptflow public API: * use opaque algorithm interfaces * add stream support --- .../include/opencv2/cudaoptflow.hpp | 403 ++++++------ modules/cudaoptflow/src/brox.cpp | 196 ++++-- modules/cudaoptflow/src/cuda/pyrlk.cu | 12 +- modules/cudaoptflow/src/cuda/tvl1flow.cu | 35 +- modules/cudaoptflow/src/farneback.cpp | 617 ++++++++++-------- modules/cudaoptflow/src/pyrlk.cpp | 394 ++++++----- modules/cudaoptflow/src/tvl1flow.cpp | 526 ++++++++------- 7 files changed, 1258 insertions(+), 925 deletions(-) diff --git a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp index 7882a8e62..6ea75594d 100644 --- a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp +++ b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp @@ -61,49 +61,94 @@ namespace cv { namespace cuda { //! @addtogroup cudaoptflow //! @{ -/** @brief Class computing the optical flow for two images using Brox et al Optical Flow algorithm -(@cite Brox2004). : +// +// Interface +// + +/** @brief Base interface for dense optical flow algorithms. */ -class CV_EXPORTS BroxOpticalFlow +class CV_EXPORTS DenseOpticalFlow : public Algorithm { public: - BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) : - alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_), - inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_) - { - } + /** @brief Calculates a dense optical flow. - //! Compute optical flow - //! frame0 - source frame (supports only CV_32FC1 type) - //! frame1 - frame to track (with the same size and type as frame0) - //! u - flow horizontal component (along x axis) - //! v - flow vertical component (along y axis) - void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null()); - - //! flow smoothness - float alpha; - - //! gradient constancy importance - float gamma; - - //! pyramid scale factor - float scale_factor; - - //! 
number of lagged non-linearity iterations (inner loop) - int inner_iterations; - - //! number of warping iterations (number of pyramid levels) - int outer_iterations; - - //! number of linear system solver iterations - int solver_iterations; - - GpuMat buf; + @param I0 first input image. + @param I1 second input image of the same size and the same type as I0. + @param flow computed flow image that has the same size as I0 and type CV_32FC2. + @param stream Stream for the asynchronous version. + */ + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream = Stream::Null()) = 0; }; -/** @brief Class used for calculating an optical flow. +/** @brief Base interface for sparse optical flow algorithms. + */ +class CV_EXPORTS SparseOpticalFlow : public Algorithm +{ +public: + /** @brief Calculates a sparse optical flow. -The class can calculate an optical flow for a sparse feature set or dense optical flow using the + @param prevImg First input image. + @param nextImg Second input image of the same size and the same type as prevImg. + @param prevPts Vector of 2D points for which the flow needs to be found. + @param nextPts Output vector of 2D points containing the calculated new positions of input features in the second image. + @param status Output status vector. Each element of the vector is set to 1 if the + flow for the corresponding features has been found. Otherwise, it is set to 0. + @param err Optional output vector that contains error response for each point (inverse confidence). + @param stream Stream for the asynchronous version. + */ + virtual void calc(InputArray prevImg, InputArray nextImg, + InputArray prevPts, InputOutputArray nextPts, + OutputArray status, + OutputArray err = cv::noArray(), + Stream& stream = Stream::Null()) = 0; +}; + +// +// BroxOpticalFlow +// + +/** @brief Class computing the optical flow for two images using Brox et al Optical Flow algorithm (@cite Brox2004). + */ +class CV_EXPORTS BroxOpticalFlow : public DenseOpticalFlow +{ +public: + virtual double getFlowSmoothness() const = 0; + virtual void setFlowSmoothness(double alpha) = 0; + + virtual double getGradientConstancyImportance() const = 0; + virtual void setGradientConstancyImportance(double gamma) = 0; + + virtual double getPyramidScaleFactor() const = 0; + virtual void setPyramidScaleFactor(double scale_factor) = 0; + + //! number of lagged non-linearity iterations (inner loop) + virtual int getInnerIterations() const = 0; + virtual void setInnerIterations(int inner_iterations) = 0; + + //! number of warping iterations (number of pyramid levels) + virtual int getOuterIterations() const = 0; + virtual void setOuterIterations(int outer_iterations) = 0; + + //! number of linear system solver iterations + virtual int getSolverIterations() const = 0; + virtual void setSolverIterations(int solver_iterations) = 0; + + static Ptr create( + double alpha = 0.197, + double gamma = 50.0, + double scale_factor = 0.8, + int inner_iterations = 5, + int outer_iterations = 150, + int solver_iterations = 10); +}; + +// +// PyrLKOpticalFlow +// + +/** @brief Class used for calculating a sparse optical flow. + +The class can calculate an optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids. @sa calcOpticalFlowPyrLK @@ -112,158 +157,116 @@ iterative Lucas-Kanade method with pyramids. 
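A minimal usage sketch of this sparse interface, assuming d_prev and d_next are 8-bit images already uploaded to GpuMat and d_prevPts is a 1xN CV_32FC2 GpuMat holding the points to track (these variable names are illustrative assumptions, not part of the patch; the calls use only the create()/setter/calc() API declared in this header):

    // #include <opencv2/cudaoptflow.hpp>
    cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> lk = cv::cuda::SparsePyrLKOpticalFlow::create();
    lk->setWinSize(cv::Size(21, 21));        // same values as the create() defaults
    lk->setMaxLevel(3);
    cv::cuda::GpuMat d_nextPts, d_status;    // filled by calc()
    lk->calc(d_prev, d_next, d_prevPts, d_nextPts, d_status);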
- An example of the Lucas Kanade optical flow algorithm can be found at opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp */ -class CV_EXPORTS PyrLKOpticalFlow +class CV_EXPORTS SparsePyrLKOpticalFlow : public SparseOpticalFlow { public: - PyrLKOpticalFlow(); + virtual Size getWinSize() const = 0; + virtual void setWinSize(Size winSize) = 0; - /** @brief Calculate an optical flow for a sparse feature set. + virtual int getMaxLevel() const = 0; + virtual void setMaxLevel(int maxLevel) = 0; - @param prevImg First 8-bit input image (supports both grayscale and color images). - @param nextImg Second input image of the same size and the same type as prevImg . - @param prevPts Vector of 2D points for which the flow needs to be found. It must be one row matrix - with CV_32FC2 type. - @param nextPts Output vector of 2D points (with single-precision floating-point coordinates) - containing the calculated new positions of input features in the second image. When useInitialFlow - is true, the vector must have the same size as in the input. - @param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the - flow for the corresponding features has been found. Otherwise, it is set to 0. - @param err Output vector (CV_32FC1 type) that contains the difference between patches around the - original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not - needed. + virtual int getNumIters() const = 0; + virtual void setNumIters(int iters) = 0; - @sa calcOpticalFlowPyrLK - */ - void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, - GpuMat& status, GpuMat* err = 0); + virtual bool getUseInitialFlow() const = 0; + virtual void setUseInitialFlow(bool useInitialFlow) = 0; - /** @brief Calculate dense optical flow. - - @param prevImg First 8-bit grayscale input image. - @param nextImg Second input image of the same size and the same type as prevImg . - @param u Horizontal component of the optical flow of the same size as input images, 32-bit - floating-point, single-channel - @param v Vertical component of the optical flow of the same size as input images, 32-bit - floating-point, single-channel - @param err Output vector (CV_32FC1 type) that contains the difference between patches around the - original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not - needed. - */ - void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0); - - /** @brief Releases inner buffers memory. - */ - void releaseMemory(); - - Size winSize; - int maxLevel; - int iters; - bool useInitialFlow; - -private: - std::vector prevPyr_; - std::vector nextPyr_; - - GpuMat buf_; - - GpuMat uPyr_[2]; - GpuMat vPyr_[2]; + static Ptr create( + Size winSize = Size(21, 21), + int maxLevel = 3, + int iters = 30, + bool useInitialFlow = false); }; -/** @brief Class computing a dense optical flow using the Gunnar Farneback’s algorithm. : +/** @brief Class used for calculating a dense optical flow. + +The class can calculate an optical flow for a dense optical flow using the +iterative Lucas-Kanade method with pyramids. 
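A comparable sketch for this dense variant, under the same assumptions about d_prev and d_next (8-bit single-channel device images; names illustrative): calc() writes a single CV_32FC2 flow field, which can be split into horizontal and vertical components with cv::cuda::split from the cudaarithm module.

    cv::Ptr<cv::cuda::DensePyrLKOpticalFlow> dlk = cv::cuda::DensePyrLKOpticalFlow::create(cv::Size(13, 13));
    cv::cuda::GpuMat d_flow;                 // CV_32FC2, same size as the inputs
    dlk->calc(d_prev, d_next, d_flow);
    cv::cuda::GpuMat uv[2];
    cv::cuda::split(d_flow, uv);             // uv[0] = horizontal (u), uv[1] = vertical (v)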
*/ -class CV_EXPORTS FarnebackOpticalFlow +class CV_EXPORTS DensePyrLKOpticalFlow : public DenseOpticalFlow { public: - FarnebackOpticalFlow() - { - numLevels = 5; - pyrScale = 0.5; - fastPyramids = false; - winSize = 13; - numIters = 10; - polyN = 5; - polySigma = 1.1; - flags = 0; - } + virtual Size getWinSize() const = 0; + virtual void setWinSize(Size winSize) = 0; - int numLevels; - double pyrScale; - bool fastPyramids; - int winSize; - int numIters; - int polyN; - double polySigma; - int flags; + virtual int getMaxLevel() const = 0; + virtual void setMaxLevel(int maxLevel) = 0; - /** @brief Computes a dense optical flow using the Gunnar Farneback’s algorithm. + virtual int getNumIters() const = 0; + virtual void setNumIters(int iters) = 0; - @param frame0 First 8-bit gray-scale input image - @param frame1 Second 8-bit gray-scale input image - @param flowx Flow horizontal component - @param flowy Flow vertical component - @param s Stream + virtual bool getUseInitialFlow() const = 0; + virtual void setUseInitialFlow(bool useInitialFlow) = 0; - @sa calcOpticalFlowFarneback - */ - void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null()); - - /** @brief Releases unused auxiliary memory buffers. - */ - void releaseMemory() - { - frames_[0].release(); - frames_[1].release(); - pyrLevel_[0].release(); - pyrLevel_[1].release(); - M_.release(); - bufM_.release(); - R_[0].release(); - R_[1].release(); - blurredFrame_[0].release(); - blurredFrame_[1].release(); - pyramid0_.clear(); - pyramid1_.clear(); - } - -private: - void prepareGaussian( - int n, double sigma, float *g, float *xg, float *xxg, - double &ig11, double &ig03, double &ig33, double &ig55); - - void setPolynomialExpansionConsts(int n, double sigma); - - void updateFlow_boxFilter( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); - - void updateFlow_gaussianBlur( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); - - GpuMat frames_[2]; - GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; - std::vector pyramid0_, pyramid1_; + static Ptr create( + Size winSize = Size(13, 13), + int maxLevel = 3, + int iters = 30, + bool useInitialFlow = false); }; -// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method // -// see reference: -// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". -// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". -class CV_EXPORTS OpticalFlowDual_TVL1_CUDA +// FarnebackOpticalFlow +// + +/** @brief Class computing a dense optical flow using the Gunnar Farneback’s algorithm. 
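Because stream support is one of the stated goals of this refactoring, a hedged sketch of asynchronous use may help here; it again assumes d_prev and d_next are 8-bit single-channel GpuMats (illustrative names, default parameters from create()):

    cv::cuda::Stream stream;
    cv::Ptr<cv::cuda::FarnebackOpticalFlow> fb = cv::cuda::FarnebackOpticalFlow::create();
    cv::cuda::GpuMat d_flow;
    fb->calc(d_prev, d_next, d_flow, stream);   // work is enqueued on the stream
    stream.waitForCompletion();                 // block until d_flow is ready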
+ */ +class CV_EXPORTS FarnebackOpticalFlow : public DenseOpticalFlow { public: - OpticalFlowDual_TVL1_CUDA(); + virtual int getNumLevels() const = 0; + virtual void setNumLevels(int numLevels) = 0; - void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy); + virtual double getPyrScale() const = 0; + virtual void setPyrScale(double pyrScale) = 0; - void collectGarbage(); + virtual bool getFastPyramids() const = 0; + virtual void setFastPyramids(bool fastPyramids) = 0; + virtual int getWinSize() const = 0; + virtual void setWinSize(int winSize) = 0; + + virtual int getNumIters() const = 0; + virtual void setNumIters(int numIters) = 0; + + virtual int getPolyN() const = 0; + virtual void setPolyN(int polyN) = 0; + + virtual double getPolySigma() const = 0; + virtual void setPolySigma(double polySigma) = 0; + + virtual int getFlags() const = 0; + virtual void setFlags(int flags) = 0; + + static Ptr create( + int numLevels = 5, + double pyrScale = 0.5, + bool fastPyramids = false, + int winSize = 13, + int numIters = 10, + int polyN = 5, + double polySigma = 1.1, + int flags = 0); +}; + +// +// OpticalFlowDual_TVL1 +// + +/** @brief Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method. + * + * @sa C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". + * @sa Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". + */ +class CV_EXPORTS OpticalFlowDual_TVL1 : public DenseOpticalFlow +{ +public: /** * Time step of the numerical scheme. */ - double tau; + virtual double getTau() const = 0; + virtual void setTau(double tau) = 0; /** * Weight parameter for the data term, attachment parameter. @@ -271,7 +274,8 @@ public: * The smaller this parameter is, the smoother the solutions we obtain. * It depends on the range of motions of the images, so its value should be adapted to each image sequence. */ - double lambda; + virtual double getLambda() const = 0; + virtual void setLambda(double lambda) = 0; /** * Weight parameter for (u - v)^2, tightness parameter. @@ -279,20 +283,23 @@ public: * In theory, it should have a small value in order to maintain both parts in correspondence. * The method is stable for a large range of values of this parameter. */ + virtual double getGamma() const = 0; + virtual void setGamma(double gamma) = 0; - double gamma; /** - * parameter used for motion estimation. It adds a variable allowing for illumination variations - * Set this parameter to 1. if you have varying illumination. - * See: Chambolle et al, A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging - * Journal of Mathematical imaging and vision, may 2011 Vol 40 issue 1, pp 120-145 - */ - double theta; + * parameter used for motion estimation. It adds a variable allowing for illumination variations + * Set this parameter to 1. if you have varying illumination. + * See: Chambolle et al, A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging + * Journal of Mathematical imaging and vision, may 2011 Vol 40 issue 1, pp 120-145 + */ + virtual double getTheta() const = 0; + virtual void setTheta(double theta) = 0; /** * Number of scales used to create the pyramid of images. */ - int nscales; + virtual int getNumScales() const = 0; + virtual void setNumScales(int nscales) = 0; /** * Number of warpings per scale. @@ -300,51 +307,39 @@ public: * This is a parameter that assures the stability of the method. 
* It also affects the running time, so it is a compromise between speed and accuracy. */ - int warps; + virtual int getNumWarps() const = 0; + virtual void setNumWarps(int warps) = 0; /** * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time. * A small value will yield more accurate solutions at the expense of a slower convergence. */ - double epsilon; + virtual double getEpsilon() const = 0; + virtual void setEpsilon(double epsilon) = 0; /** * Stopping criterion iterations number used in the numerical scheme. */ - int iterations; + virtual int getNumIterations() const = 0; + virtual void setNumIterations(int iterations) = 0; - double scaleStep; + virtual double getScaleStep() const = 0; + virtual void setScaleStep(double scaleStep) = 0; - bool useInitialFlow; + virtual bool getUseInitialFlow() const = 0; + virtual void setUseInitialFlow(bool useInitialFlow) = 0; -private: - void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3); - - std::vector I0s; - std::vector I1s; - std::vector u1s; - std::vector u2s; - std::vector u3s; - - GpuMat I1x_buf; - GpuMat I1y_buf; - - GpuMat I1w_buf; - GpuMat I1wx_buf; - GpuMat I1wy_buf; - - GpuMat grad_buf; - GpuMat rho_c_buf; - - GpuMat p11_buf; - GpuMat p12_buf; - GpuMat p21_buf; - GpuMat p22_buf; - GpuMat p31_buf; - GpuMat p32_buf; - - GpuMat diff_buf; - GpuMat norm_buf; + static Ptr create( + double tau = 0.25, + double lambda = 0.15, + double theta = 0.3, + int nscales = 5, + int warps = 5, + double epsilon = 0.01, + int iterations = 300, + double scaleStep = 0.8, + double gamma = 0.0, + bool useInitialFlow = false); }; //! @} diff --git a/modules/cudaoptflow/src/brox.cpp b/modules/cudaoptflow/src/brox.cpp index 39eae9a8a..11c541906 100644 --- a/modules/cudaoptflow/src/brox.cpp +++ b/modules/cudaoptflow/src/brox.cpp @@ -47,84 +47,148 @@ using namespace cv::cuda; #if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_CUDALEGACY) || defined (CUDA_DISABLER) -void cv::cuda::BroxOpticalFlow::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); } +Ptr cv::cuda::BroxOpticalFlow::create(double, double, double, int, int, int) { throw_no_cuda(); return Ptr(); } #else -namespace -{ - size_t getBufSize(const NCVBroxOpticalFlowDescriptor& desc, const NCVMatrix& frame0, const NCVMatrix& frame1, - NCVMatrix& u, NCVMatrix& v, const cudaDeviceProp& devProp) +namespace { + + class BroxOpticalFlowImpl : public BroxOpticalFlow { - NCVMemStackAllocator gpuCounter(static_cast(devProp.textureAlignment)); + public: + BroxOpticalFlowImpl(double alpha, double gamma, double scale_factor, + int inner_iterations, int outer_iterations, int solver_iterations) : + alpha_(alpha), gamma_(gamma), scale_factor_(scale_factor), + inner_iterations_(inner_iterations), outer_iterations_(outer_iterations), + solver_iterations_(solver_iterations) + { + } + + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream); + + virtual double getFlowSmoothness() const { return alpha_; } + virtual void setFlowSmoothness(double alpha) { alpha_ = static_cast(alpha); } + + virtual double getGradientConstancyImportance() const { return gamma_; } + virtual void setGradientConstancyImportance(double gamma) { gamma_ = static_cast(gamma); } + + virtual double getPyramidScaleFactor() const { return scale_factor_; } + virtual void setPyramidScaleFactor(double scale_factor) { scale_factor_ = static_cast(scale_factor); } + + //! 
number of lagged non-linearity iterations (inner loop) + virtual int getInnerIterations() const { return inner_iterations_; } + virtual void setInnerIterations(int inner_iterations) { inner_iterations_ = inner_iterations; } + + //! number of warping iterations (number of pyramid levels) + virtual int getOuterIterations() const { return outer_iterations_; } + virtual void setOuterIterations(int outer_iterations) { outer_iterations_ = outer_iterations; } + + //! number of linear system solver iterations + virtual int getSolverIterations() const { return solver_iterations_; } + virtual void setSolverIterations(int solver_iterations) { solver_iterations_ = solver_iterations; } + + private: + //! flow smoothness + float alpha_; + + //! gradient constancy importance + float gamma_; + + //! pyramid scale factor + float scale_factor_; + + //! number of lagged non-linearity iterations (inner loop) + int inner_iterations_; + + //! number of warping iterations (number of pyramid levels) + int outer_iterations_; + + //! number of linear system solver iterations + int solver_iterations_; + }; + + static size_t getBufSize(const NCVBroxOpticalFlowDescriptor& desc, + const NCVMatrix& frame0, const NCVMatrix& frame1, + NCVMatrix& u, NCVMatrix& v, + size_t textureAlignment) + { + NCVMemStackAllocator gpuCounter(static_cast(textureAlignment)); ncvSafeCall( NCVBroxOpticalFlow(desc, gpuCounter, frame0, frame1, u, v, 0) ); return gpuCounter.maxSize(); } + + static void outputHandler(const String &msg) + { + CV_Error(cv::Error::GpuApiCallError, msg.c_str()); + } + + void BroxOpticalFlowImpl::calc(InputArray _I0, InputArray _I1, InputOutputArray _flow, Stream& stream) + { + const GpuMat frame0 = _I0.getGpuMat(); + const GpuMat frame1 = _I1.getGpuMat(); + + CV_Assert( frame0.type() == CV_32FC1 ); + CV_Assert( frame1.size() == frame0.size() && frame1.type() == frame0.type() ); + + ncvSetDebugOutputHandler(outputHandler); + + BufferPool pool(stream); + GpuMat u = pool.getBuffer(frame0.size(), CV_32FC1); + GpuMat v = pool.getBuffer(frame0.size(), CV_32FC1); + + NCVBroxOpticalFlowDescriptor desc; + desc.alpha = alpha_; + desc.gamma = gamma_; + desc.scale_factor = scale_factor_; + desc.number_of_inner_iterations = inner_iterations_; + desc.number_of_outer_iterations = outer_iterations_; + desc.number_of_solver_iterations = solver_iterations_; + + NCVMemSegment frame0MemSeg; + frame0MemSeg.begin.memtype = NCVMemoryTypeDevice; + frame0MemSeg.begin.ptr = const_cast(frame0.data); + frame0MemSeg.size = frame0.step * frame0.rows; + + NCVMemSegment frame1MemSeg; + frame1MemSeg.begin.memtype = NCVMemoryTypeDevice; + frame1MemSeg.begin.ptr = const_cast(frame1.data); + frame1MemSeg.size = frame1.step * frame1.rows; + + NCVMemSegment uMemSeg; + uMemSeg.begin.memtype = NCVMemoryTypeDevice; + uMemSeg.begin.ptr = u.ptr(); + uMemSeg.size = u.step * u.rows; + + NCVMemSegment vMemSeg; + vMemSeg.begin.memtype = NCVMemoryTypeDevice; + vMemSeg.begin.ptr = v.ptr(); + vMemSeg.size = v.step * v.rows; + + DeviceInfo devInfo; + size_t textureAlignment = devInfo.textureAlignment(); + + NCVMatrixReuse frame0Mat(frame0MemSeg, static_cast(textureAlignment), frame0.cols, frame0.rows, static_cast(frame0.step)); + NCVMatrixReuse frame1Mat(frame1MemSeg, static_cast(textureAlignment), frame1.cols, frame1.rows, static_cast(frame1.step)); + NCVMatrixReuse uMat(uMemSeg, static_cast(textureAlignment), u.cols, u.rows, static_cast(u.step)); + NCVMatrixReuse vMat(vMemSeg, static_cast(textureAlignment), v.cols, v.rows, static_cast(v.step)); + + size_t bufSize 
= getBufSize(desc, frame0Mat, frame1Mat, uMat, vMat, textureAlignment); + GpuMat buf = pool.getBuffer(1, static_cast(bufSize), CV_8UC1); + + NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast(textureAlignment), buf.ptr()); + + ncvSafeCall( NCVBroxOpticalFlow(desc, gpuAllocator, frame0Mat, frame1Mat, uMat, vMat, StreamAccessor::getStream(stream)) ); + + GpuMat flows[] = {u, v}; + cuda::merge(flows, 2, _flow, stream); + } } -namespace +Ptr cv::cuda::BroxOpticalFlow::create(double alpha, double gamma, double scale_factor, int inner_iterations, int outer_iterations, int solver_iterations) { - static void outputHandler(const String &msg) { CV_Error(cv::Error::GpuApiCallError, msg.c_str()); } -} - -void cv::cuda::BroxOpticalFlow::operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& s) -{ - ncvSetDebugOutputHandler(outputHandler); - - CV_Assert(frame0.type() == CV_32FC1); - CV_Assert(frame1.size() == frame0.size() && frame1.type() == frame0.type()); - - u.create(frame0.size(), CV_32FC1); - v.create(frame0.size(), CV_32FC1); - - cudaDeviceProp devProp; - cudaSafeCall( cudaGetDeviceProperties(&devProp, getDevice()) ); - - NCVBroxOpticalFlowDescriptor desc; - - desc.alpha = alpha; - desc.gamma = gamma; - desc.scale_factor = scale_factor; - desc.number_of_inner_iterations = inner_iterations; - desc.number_of_outer_iterations = outer_iterations; - desc.number_of_solver_iterations = solver_iterations; - - NCVMemSegment frame0MemSeg; - frame0MemSeg.begin.memtype = NCVMemoryTypeDevice; - frame0MemSeg.begin.ptr = const_cast(frame0.data); - frame0MemSeg.size = frame0.step * frame0.rows; - - NCVMemSegment frame1MemSeg; - frame1MemSeg.begin.memtype = NCVMemoryTypeDevice; - frame1MemSeg.begin.ptr = const_cast(frame1.data); - frame1MemSeg.size = frame1.step * frame1.rows; - - NCVMemSegment uMemSeg; - uMemSeg.begin.memtype = NCVMemoryTypeDevice; - uMemSeg.begin.ptr = u.ptr(); - uMemSeg.size = u.step * u.rows; - - NCVMemSegment vMemSeg; - vMemSeg.begin.memtype = NCVMemoryTypeDevice; - vMemSeg.begin.ptr = v.ptr(); - vMemSeg.size = v.step * v.rows; - - NCVMatrixReuse frame0Mat(frame0MemSeg, static_cast(devProp.textureAlignment), frame0.cols, frame0.rows, static_cast(frame0.step)); - NCVMatrixReuse frame1Mat(frame1MemSeg, static_cast(devProp.textureAlignment), frame1.cols, frame1.rows, static_cast(frame1.step)); - NCVMatrixReuse uMat(uMemSeg, static_cast(devProp.textureAlignment), u.cols, u.rows, static_cast(u.step)); - NCVMatrixReuse vMat(vMemSeg, static_cast(devProp.textureAlignment), v.cols, v.rows, static_cast(v.step)); - - cudaStream_t stream = StreamAccessor::getStream(s); - - size_t bufSize = getBufSize(desc, frame0Mat, frame1Mat, uMat, vMat, devProp); - - ensureSizeIsEnough(1, static_cast(bufSize), CV_8UC1, buf); - - NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast(devProp.textureAlignment), buf.ptr()); - - ncvSafeCall( NCVBroxOpticalFlow(desc, gpuAllocator, frame0Mat, frame1Mat, uMat, vMat, stream) ); + return makePtr(alpha, gamma, scale_factor, inner_iterations, outer_iterations, solver_iterations); } #endif /* HAVE_CUDA */ diff --git a/modules/cudaoptflow/src/cuda/pyrlk.cu b/modules/cudaoptflow/src/cuda/pyrlk.cu index d4606f228..7693551fc 100644 --- a/modules/cudaoptflow/src/cuda/pyrlk.cu +++ b/modules/cudaoptflow/src/cuda/pyrlk.cu @@ -472,16 +472,16 @@ namespace pyrlk } } - void loadConstants(int2 winSize, int iters) + void loadConstants(int2 winSize, int iters, cudaStream_t stream) { - cudaSafeCall( 
cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) ); - cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_x, &winSize.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_y, &winSize.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); - cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) ); - cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_x, &halfWin.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_y, &halfWin.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); - cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_iters, &iters, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, diff --git a/modules/cudaoptflow/src/cuda/tvl1flow.cu b/modules/cudaoptflow/src/cuda/tvl1flow.cu index 2b66c972b..66f0d664a 100644 --- a/modules/cudaoptflow/src/cuda/tvl1flow.cu +++ b/modules/cudaoptflow/src/cuda/tvl1flow.cu @@ -66,15 +66,16 @@ namespace tvl1flow dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x)); } - void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy) + void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); - centeredGradientKernel<<>>(src, dx, dy); + centeredGradientKernel<<>>(src, dx, dy); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } @@ -164,7 +165,10 @@ namespace tvl1flow rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val; } - void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho) + void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, + PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, + PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho, + cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y)); @@ -173,10 +177,11 @@ namespace tvl1flow bindTexture(&tex_I1x, I1x); bindTexture(&tex_I1y, I1y); - warpBackwardKernel<<>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho); + warpBackwardKernel<<>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } @@ -292,15 +297,17 @@ namespace tvl1flow PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, - float l_t, float theta, float gamma, bool calcError) + float l_t, float theta, float gamma, bool calcError, + cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y)); - estimateUKernel<<>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError); + 
estimateUKernel<<>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } @@ -346,15 +353,19 @@ namespace tvl1flow } } - void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, float gamma) + void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, + PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, + float taut, float gamma, + cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y)); - estimateDualVariablesKernel<<>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); + estimateDualVariablesKernel<<>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } diff --git a/modules/cudaoptflow/src/farneback.cpp b/modules/cudaoptflow/src/farneback.cpp index 6b7443263..b7fefeb19 100644 --- a/modules/cudaoptflow/src/farneback.cpp +++ b/modules/cudaoptflow/src/farneback.cpp @@ -42,23 +42,21 @@ #include "precomp.hpp" -#define MIN_SIZE 32 - -#define S(x) StreamAccessor::getStream(x) - -// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag -// leads to an inefficient code. It's for debug purposes only. -#define ENABLE_CUDA_RESIZE 1 - using namespace cv; using namespace cv::cuda; #if !defined HAVE_CUDA || defined(CUDA_DISABLER) -void cv::cuda::FarnebackOpticalFlow::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); } +Ptr cv::cuda::FarnebackOpticalFlow::create(int, double, bool, int, int, int, double, int) { throw_no_cuda(); return Ptr(); } #else +#define MIN_SIZE 32 + +// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag +// leads to an inefficient code. It's for debug purposes only. 
+#define ENABLE_CUDA_RESIZE 1 + namespace cv { namespace cuda { namespace device { namespace optflow_farneback { void setPolynomialExpansionConsts( @@ -76,8 +74,6 @@ namespace cv { namespace cuda { namespace device { namespace optflow_farneback void updateFlowGpu( const PtrStepSzf M, PtrStepSzf flowx, PtrStepSzf flowy, cudaStream_t stream); - /*void boxFilterGpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream);*/ - void boxFilter5Gpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream); void boxFilter5Gpu_CC11(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream); @@ -93,10 +89,93 @@ namespace cv { namespace cuda { namespace device { namespace optflow_farneback void gaussianBlur5Gpu_CC11( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderType, cudaStream_t stream); -}}}} // namespace cv { namespace cuda { namespace cudev { namespace optflow_farneback +}}}} namespace { + class FarnebackOpticalFlowImpl : public FarnebackOpticalFlow + { + public: + FarnebackOpticalFlowImpl(int numLevels, double pyrScale, bool fastPyramids, int winSize, + int numIters, int polyN, double polySigma, int flags) : + numLevels_(numLevels), pyrScale_(pyrScale), fastPyramids_(fastPyramids), winSize_(winSize), + numIters_(numIters), polyN_(polyN), polySigma_(polySigma), flags_(flags) + { + } + + virtual int getNumLevels() const { return numLevels_; } + virtual void setNumLevels(int numLevels) { numLevels_ = numLevels; } + + virtual double getPyrScale() const { return pyrScale_; } + virtual void setPyrScale(double pyrScale) { pyrScale_ = pyrScale; } + + virtual bool getFastPyramids() const { return fastPyramids_; } + virtual void setFastPyramids(bool fastPyramids) { fastPyramids_ = fastPyramids; } + + virtual int getWinSize() const { return winSize_; } + virtual void setWinSize(int winSize) { winSize_ = winSize; } + + virtual int getNumIters() const { return numIters_; } + virtual void setNumIters(int numIters) { numIters_ = numIters; } + + virtual int getPolyN() const { return polyN_; } + virtual void setPolyN(int polyN) { polyN_ = polyN; } + + virtual double getPolySigma() const { return polySigma_; } + virtual void setPolySigma(double polySigma) { polySigma_ = polySigma; } + + virtual int getFlags() const { return flags_; } + virtual void setFlags(int flags) { flags_ = flags; } + + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream); + + private: + int numLevels_; + double pyrScale_; + bool fastPyramids_; + int winSize_; + int numIters_; + int polyN_; + double polySigma_; + int flags_; + + private: + void prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55); + + void setPolynomialExpansionConsts(int n, double sigma); + + void updateFlow_boxFilter( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + + void updateFlow_gaussianBlur( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + + void calcImpl(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &stream); + + GpuMat frames_[2]; + GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; + std::vector pyramid0_, pyramid1_; + }; + + void FarnebackOpticalFlowImpl::calc(InputArray _frame0, InputArray _frame1, InputOutputArray _flow, Stream& stream) + { + 
const GpuMat frame0 = _frame0.getGpuMat(); + const GpuMat frame1 = _frame1.getGpuMat(); + + BufferPool pool(stream); + GpuMat flowx = pool.getBuffer(frame0.size(), CV_32FC1); + GpuMat flowy = pool.getBuffer(frame0.size(), CV_32FC1); + + calcImpl(frame0, frame1, flowx, flowy, stream); + + GpuMat flows[] = {flowx, flowy}; + cuda::merge(flows, 2, _flow, stream); + } + GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat& mat) { if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols) @@ -104,285 +183,287 @@ namespace return mat = GpuMat(rows, cols, type); } -} -void cv::cuda::FarnebackOpticalFlow::prepareGaussian( - int n, double sigma, float *g, float *xg, float *xxg, - double &ig11, double &ig03, double &ig33, double &ig55) -{ - double s = 0.; - for (int x = -n; x <= n; x++) - { - g[x] = (float)std::exp(-x*x/(2*sigma*sigma)); - s += g[x]; - } - - s = 1./s; - for (int x = -n; x <= n; x++) - { - g[x] = (float)(g[x]*s); - xg[x] = (float)(x*g[x]); - xxg[x] = (float)(x*x*g[x]); - } - - Mat_ G(6, 6); - G.setTo(0); - - for (int y = -n; y <= n; y++) + void FarnebackOpticalFlowImpl::prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55) { + double s = 0.; for (int x = -n; x <= n; x++) { - G(0,0) += g[y]*g[x]; - G(1,1) += g[y]*g[x]*x*x; - G(3,3) += g[y]*g[x]*x*x*x*x; - G(5,5) += g[y]*g[x]*x*x*y*y; - } - } - - //G[0][0] = 1.; - G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1); - G(4,4) = G(3,3); - G(3,4) = G(4,3) = G(5,5); - - // invG: - // [ x e e ] - // [ y ] - // [ y ] - // [ e z ] - // [ e z ] - // [ u ] - Mat_ invG = G.inv(DECOMP_CHOLESKY); - - ig11 = invG(1,1); - ig03 = invG(0,3); - ig33 = invG(3,3); - ig55 = invG(5,5); -} - - -void cv::cuda::FarnebackOpticalFlow::setPolynomialExpansionConsts(int n, double sigma) -{ - std::vector buf(n*6 + 3); - float* g = &buf[0] + n; - float* xg = g + n*2 + 1; - float* xxg = xg + n*2 + 1; - - if (sigma < FLT_EPSILON) - sigma = n*0.3; - - double ig11, ig03, ig33, ig55; - prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55); - - device::optflow_farneback::setPolynomialExpansionConsts(n, g, xg, xxg, static_cast(ig11), static_cast(ig03), static_cast(ig33), static_cast(ig55)); -} - - -void cv::cuda::FarnebackOpticalFlow::updateFlow_boxFilter( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) -{ - if (deviceSupports(FEATURE_SET_COMPUTE_12)) - device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, S(streams[0])); - else - device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, S(streams[0])); - swap(M, bufM); - - for (int i = 1; i < 5; ++i) - streams[i].waitForCompletion(); - device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0])); - - if (updateMatrices) - device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, S(streams[0])); -} - - -void cv::cuda::FarnebackOpticalFlow::updateFlow_gaussianBlur( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) -{ - if (deviceSupports(FEATURE_SET_COMPUTE_12)) - device::optflow_farneback::gaussianBlur5Gpu( - M, blockSize/2, bufM, BORDER_REPLICATE, S(streams[0])); - else - device::optflow_farneback::gaussianBlur5Gpu_CC11( - M, blockSize/2, bufM, BORDER_REPLICATE, S(streams[0])); - swap(M, bufM); - - device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0])); - 
- if (updateMatrices) - device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, S(streams[0])); -} - - -void cv::cuda::FarnebackOpticalFlow::operator ()( - const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s) -{ - CV_Assert(frame0.channels() == 1 && frame1.channels() == 1); - CV_Assert(frame0.size() == frame1.size()); - CV_Assert(polyN == 5 || polyN == 7); - CV_Assert(!fastPyramids || std::abs(pyrScale - 0.5) < 1e-6); - - Stream streams[5]; - if (S(s)) - streams[0] = s; - - Size size = frame0.size(); - GpuMat prevFlowX, prevFlowY, curFlowX, curFlowY; - - flowx.create(size, CV_32F); - flowy.create(size, CV_32F); - GpuMat flowx0 = flowx; - GpuMat flowy0 = flowy; - - // Crop unnecessary levels - double scale = 1; - int numLevelsCropped = 0; - for (; numLevelsCropped < numLevels; numLevelsCropped++) - { - scale *= pyrScale; - if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE) - break; - } - - frame0.convertTo(frames_[0], CV_32F, streams[0]); - frame1.convertTo(frames_[1], CV_32F, streams[1]); - - if (fastPyramids) - { - // Build Gaussian pyramids using pyrDown() - pyramid0_.resize(numLevelsCropped + 1); - pyramid1_.resize(numLevelsCropped + 1); - pyramid0_[0] = frames_[0]; - pyramid1_[0] = frames_[1]; - for (int i = 1; i <= numLevelsCropped; ++i) - { - cuda::pyrDown(pyramid0_[i - 1], pyramid0_[i], streams[0]); - cuda::pyrDown(pyramid1_[i - 1], pyramid1_[i], streams[1]); - } - } - - setPolynomialExpansionConsts(polyN, polySigma); - device::optflow_farneback::setUpdateMatricesConsts(); - - for (int k = numLevelsCropped; k >= 0; k--) - { - streams[0].waitForCompletion(); - - scale = 1; - for (int i = 0; i < k; i++) - scale *= pyrScale; - - double sigma = (1./scale - 1) * 0.5; - int smoothSize = cvRound(sigma*5) | 1; - smoothSize = std::max(smoothSize, 3); - - int width = cvRound(size.width*scale); - int height = cvRound(size.height*scale); - - if (fastPyramids) - { - width = pyramid0_[k].cols; - height = pyramid0_[k].rows; + g[x] = (float)std::exp(-x*x/(2*sigma*sigma)); + s += g[x]; } - if (k > 0) + s = 1./s; + for (int x = -n; x <= n; x++) { - curFlowX.create(height, width, CV_32F); - curFlowY.create(height, width, CV_32F); - } - else - { - curFlowX = flowx0; - curFlowY = flowy0; + g[x] = (float)(g[x]*s); + xg[x] = (float)(x*g[x]); + xxg[x] = (float)(x*x*g[x]); } - if (!prevFlowX.data) + Mat_ G(6, 6); + G.setTo(0); + + for (int y = -n; y <= n; y++) { - if (flags & OPTFLOW_USE_INITIAL_FLOW) + for (int x = -n; x <= n; x++) { - cuda::resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); - cuda::resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); - curFlowX.convertTo(curFlowX, curFlowX.depth(), scale, streams[0]); - curFlowY.convertTo(curFlowY, curFlowY.depth(), scale, streams[1]); + G(0,0) += g[y]*g[x]; + G(1,1) += g[y]*g[x]*x*x; + G(3,3) += g[y]*g[x]*x*x*x*x; + G(5,5) += g[y]*g[x]*x*x*y*y; + } + } + + //G[0][0] = 1.; + G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1); + G(4,4) = G(3,3); + G(3,4) = G(4,3) = G(5,5); + + // invG: + // [ x e e ] + // [ y ] + // [ y ] + // [ e z ] + // [ e z ] + // [ u ] + Mat_ invG = G.inv(DECOMP_CHOLESKY); + + ig11 = invG(1,1); + ig03 = invG(0,3); + ig33 = invG(3,3); + ig55 = invG(5,5); + } + + void FarnebackOpticalFlowImpl::setPolynomialExpansionConsts(int n, double sigma) + { + std::vector buf(n*6 + 3); + float* g = &buf[0] + n; + float* xg = g + n*2 + 1; + float* xxg = xg + n*2 + 1; + + if (sigma < FLT_EPSILON) + sigma = n*0.3; + + double ig11, ig03, 
ig33, ig55; + prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55); + + device::optflow_farneback::setPolynomialExpansionConsts(n, g, xg, xxg, static_cast(ig11), static_cast(ig03), static_cast(ig33), static_cast(ig55)); + } + + void FarnebackOpticalFlowImpl::updateFlow_boxFilter( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) + { + if (deviceSupports(FEATURE_SET_COMPUTE_12)) + device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, StreamAccessor::getStream(streams[0])); + else + device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, StreamAccessor::getStream(streams[0])); + swap(M, bufM); + + for (int i = 1; i < 5; ++i) + streams[i].waitForCompletion(); + device::optflow_farneback::updateFlowGpu(M, flowx, flowy, StreamAccessor::getStream(streams[0])); + + if (updateMatrices) + device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, StreamAccessor::getStream(streams[0])); + } + + void FarnebackOpticalFlowImpl::updateFlow_gaussianBlur( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) + { + if (deviceSupports(FEATURE_SET_COMPUTE_12)) + device::optflow_farneback::gaussianBlur5Gpu( + M, blockSize/2, bufM, BORDER_REPLICATE, StreamAccessor::getStream(streams[0])); + else + device::optflow_farneback::gaussianBlur5Gpu_CC11( + M, blockSize/2, bufM, BORDER_REPLICATE, StreamAccessor::getStream(streams[0])); + swap(M, bufM); + + device::optflow_farneback::updateFlowGpu(M, flowx, flowy, StreamAccessor::getStream(streams[0])); + + if (updateMatrices) + device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, StreamAccessor::getStream(streams[0])); + } + + void FarnebackOpticalFlowImpl::calcImpl(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &stream) + { + CV_Assert(frame0.channels() == 1 && frame1.channels() == 1); + CV_Assert(frame0.size() == frame1.size()); + CV_Assert(polyN_ == 5 || polyN_ == 7); + CV_Assert(!fastPyramids_ || std::abs(pyrScale_ - 0.5) < 1e-6); + + Stream streams[5]; + if (stream) + streams[0] = stream; + + Size size = frame0.size(); + GpuMat prevFlowX, prevFlowY, curFlowX, curFlowY; + + flowx.create(size, CV_32F); + flowy.create(size, CV_32F); + GpuMat flowx0 = flowx; + GpuMat flowy0 = flowy; + + // Crop unnecessary levels + double scale = 1; + int numLevelsCropped = 0; + for (; numLevelsCropped < numLevels_; numLevelsCropped++) + { + scale *= pyrScale_; + if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE) + break; + } + + frame0.convertTo(frames_[0], CV_32F, streams[0]); + frame1.convertTo(frames_[1], CV_32F, streams[1]); + + if (fastPyramids_) + { + // Build Gaussian pyramids using pyrDown() + pyramid0_.resize(numLevelsCropped + 1); + pyramid1_.resize(numLevelsCropped + 1); + pyramid0_[0] = frames_[0]; + pyramid1_[0] = frames_[1]; + for (int i = 1; i <= numLevelsCropped; ++i) + { + cuda::pyrDown(pyramid0_[i - 1], pyramid0_[i], streams[0]); + cuda::pyrDown(pyramid1_[i - 1], pyramid1_[i], streams[1]); + } + } + + setPolynomialExpansionConsts(polyN_, polySigma_); + device::optflow_farneback::setUpdateMatricesConsts(); + + for (int k = numLevelsCropped; k >= 0; k--) + { + streams[0].waitForCompletion(); + + scale = 1; + for (int i = 0; i < k; i++) + scale *= pyrScale_; + + double sigma = (1./scale - 1) * 0.5; + int smoothSize = cvRound(sigma*5) | 1; + smoothSize = std::max(smoothSize, 
3); + + int width = cvRound(size.width*scale); + int height = cvRound(size.height*scale); + + if (fastPyramids_) + { + width = pyramid0_[k].cols; + height = pyramid0_[k].rows; + } + + if (k > 0) + { + curFlowX.create(height, width, CV_32F); + curFlowY.create(height, width, CV_32F); } else { - curFlowX.setTo(0, streams[0]); - curFlowY.setTo(0, streams[1]); + curFlowX = flowx0; + curFlowY = flowy0; } - } - else - { - cuda::resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); - cuda::resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); - curFlowX.convertTo(curFlowX, curFlowX.depth(), 1./pyrScale, streams[0]); - curFlowY.convertTo(curFlowY, curFlowY.depth(), 1./pyrScale, streams[1]); - } - GpuMat M = allocMatFromBuf(5*height, width, CV_32F, M_); - GpuMat bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_); - GpuMat R[2] = - { - allocMatFromBuf(5*height, width, CV_32F, R_[0]), - allocMatFromBuf(5*height, width, CV_32F, R_[1]) - }; - - if (fastPyramids) - { - device::optflow_farneback::polynomialExpansionGpu(pyramid0_[k], polyN, R[0], S(streams[0])); - device::optflow_farneback::polynomialExpansionGpu(pyramid1_[k], polyN, R[1], S(streams[1])); - } - else - { - GpuMat blurredFrame[2] = + if (!prevFlowX.data) { - allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]), - allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1]) - }; - GpuMat pyrLevel[2] = - { - allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]), - allocMatFromBuf(height, width, CV_32F, pyrLevel_[1]) - }; - - Mat g = getGaussianKernel(smoothSize, sigma, CV_32F); - device::optflow_farneback::setGaussianBlurKernel(g.ptr(smoothSize/2), smoothSize/2); - - for (int i = 0; i < 2; i++) - { - device::optflow_farneback::gaussianBlurGpu( - frames_[i], smoothSize/2, blurredFrame[i], BORDER_REFLECT101, S(streams[i])); - cuda::resize(blurredFrame[i], pyrLevel[i], Size(width, height), 0.0, 0.0, INTER_LINEAR, streams[i]); - device::optflow_farneback::polynomialExpansionGpu(pyrLevel[i], polyN, R[i], S(streams[i])); + if (flags_ & OPTFLOW_USE_INITIAL_FLOW) + { + cuda::resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); + cuda::resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); + curFlowX.convertTo(curFlowX, curFlowX.depth(), scale, streams[0]); + curFlowY.convertTo(curFlowY, curFlowY.depth(), scale, streams[1]); + } + else + { + curFlowX.setTo(0, streams[0]); + curFlowY.setTo(0, streams[1]); + } } - } - - streams[1].waitForCompletion(); - device::optflow_farneback::updateMatricesGpu(curFlowX, curFlowY, R[0], R[1], M, S(streams[0])); - - if (flags & OPTFLOW_FARNEBACK_GAUSSIAN) - { - Mat g = getGaussianKernel(winSize, winSize/2*0.3f, CV_32F); - device::optflow_farneback::setGaussianBlurKernel(g.ptr(winSize/2), winSize/2); - } - for (int i = 0; i < numIters; i++) - { - if (flags & OPTFLOW_FARNEBACK_GAUSSIAN) - updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1, streams); else - updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1, streams); + { + cuda::resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); + cuda::resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); + curFlowX.convertTo(curFlowX, curFlowX.depth(), 1./pyrScale_, streams[0]); + curFlowY.convertTo(curFlowY, curFlowY.depth(), 1./pyrScale_, streams[1]); + } + + GpuMat M = allocMatFromBuf(5*height, width, CV_32F, M_); + GpuMat 
bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_); + GpuMat R[2] = + { + allocMatFromBuf(5*height, width, CV_32F, R_[0]), + allocMatFromBuf(5*height, width, CV_32F, R_[1]) + }; + + if (fastPyramids_) + { + device::optflow_farneback::polynomialExpansionGpu(pyramid0_[k], polyN_, R[0], StreamAccessor::getStream(streams[0])); + device::optflow_farneback::polynomialExpansionGpu(pyramid1_[k], polyN_, R[1], StreamAccessor::getStream(streams[1])); + } + else + { + GpuMat blurredFrame[2] = + { + allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]), + allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1]) + }; + GpuMat pyrLevel[2] = + { + allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]), + allocMatFromBuf(height, width, CV_32F, pyrLevel_[1]) + }; + + Mat g = getGaussianKernel(smoothSize, sigma, CV_32F); + device::optflow_farneback::setGaussianBlurKernel(g.ptr(smoothSize/2), smoothSize/2); + + for (int i = 0; i < 2; i++) + { + device::optflow_farneback::gaussianBlurGpu( + frames_[i], smoothSize/2, blurredFrame[i], BORDER_REFLECT101, StreamAccessor::getStream(streams[i])); + cuda::resize(blurredFrame[i], pyrLevel[i], Size(width, height), 0.0, 0.0, INTER_LINEAR, streams[i]); + device::optflow_farneback::polynomialExpansionGpu(pyrLevel[i], polyN_, R[i], StreamAccessor::getStream(streams[i])); + } + } + + streams[1].waitForCompletion(); + device::optflow_farneback::updateMatricesGpu(curFlowX, curFlowY, R[0], R[1], M, StreamAccessor::getStream(streams[0])); + + if (flags_ & OPTFLOW_FARNEBACK_GAUSSIAN) + { + Mat g = getGaussianKernel(winSize_, winSize_/2*0.3f, CV_32F); + device::optflow_farneback::setGaussianBlurKernel(g.ptr(winSize_/2), winSize_/2); + } + for (int i = 0; i < numIters_; i++) + { + if (flags_ & OPTFLOW_FARNEBACK_GAUSSIAN) + updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize_, i < numIters_-1, streams); + else + updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize_, i < numIters_-1, streams); + } + + prevFlowX = curFlowX; + prevFlowY = curFlowY; } - prevFlowX = curFlowX; - prevFlowY = curFlowY; + flowx = curFlowX; + flowy = curFlowY; + + if (!stream) + streams[0].waitForCompletion(); } +} - flowx = curFlowX; - flowy = curFlowY; - - if (!S(s)) - streams[0].waitForCompletion(); +Ptr cv::cuda::FarnebackOpticalFlow::create(int numLevels, double pyrScale, bool fastPyramids, int winSize, + int numIters, int polyN, double polySigma, int flags) +{ + return makePtr(numLevels, pyrScale, fastPyramids, winSize, + numIters, polyN, polySigma, flags); } #endif diff --git a/modules/cudaoptflow/src/pyrlk.cpp b/modules/cudaoptflow/src/pyrlk.cpp index 52ee91f2f..f4182743c 100644 --- a/modules/cudaoptflow/src/pyrlk.cpp +++ b/modules/cudaoptflow/src/pyrlk.cpp @@ -47,37 +47,54 @@ using namespace cv::cuda; #if !defined (HAVE_CUDA) || defined (CUDA_DISABLER) -cv::cuda::PyrLKOpticalFlow::PyrLKOpticalFlow() { throw_no_cuda(); } -void cv::cuda::PyrLKOpticalFlow::sparse(const GpuMat&, const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_no_cuda(); } -void cv::cuda::PyrLKOpticalFlow::dense(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_no_cuda(); } -void cv::cuda::PyrLKOpticalFlow::releaseMemory() {} +Ptr cv::cuda::SparsePyrLKOpticalFlow::create(Size, int, int, bool) { throw_no_cuda(); return Ptr(); } + +Ptr cv::cuda::DensePyrLKOpticalFlow::create(Size, int, int, bool) { throw_no_cuda(); return Ptr(); } #else /* !defined (HAVE_CUDA) */ namespace pyrlk { - void loadConstants(int2 winSize, int iters); + void 
loadConstants(int2 winSize, int iters, cudaStream_t stream); void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, - int level, dim3 block, dim3 patch, cudaStream_t stream = 0); + int level, dim3 block, dim3 patch, cudaStream_t stream); void sparse4(PtrStepSz I, PtrStepSz J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, - int level, dim3 block, dim3 patch, cudaStream_t stream = 0); + int level, dim3 block, dim3 patch, cudaStream_t stream); void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, - PtrStepSzf err, int2 winSize, cudaStream_t stream = 0); -} - -cv::cuda::PyrLKOpticalFlow::PyrLKOpticalFlow() -{ - winSize = Size(21, 21); - maxLevel = 3; - iters = 30; - useInitialFlow = false; + PtrStepSzf err, int2 winSize, cudaStream_t stream); } namespace { - void calcPatchSize(cv::Size winSize, dim3& block, dim3& patch) + class PyrLKOpticalFlowBase + { + public: + PyrLKOpticalFlowBase(Size winSize, int maxLevel, int iters, bool useInitialFlow); + + void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, + GpuMat& status, GpuMat* err, Stream& stream); + + void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, Stream& stream); + + protected: + Size winSize_; + int maxLevel_; + int iters_; + bool useInitialFlow_; + + private: + std::vector prevPyr_; + std::vector nextPyr_; + }; + + PyrLKOpticalFlowBase::PyrLKOpticalFlowBase(Size winSize, int maxLevel, int iters, bool useInitialFlow) : + winSize_(winSize), maxLevel_(maxLevel), iters_(iters), useInitialFlow_(useInitialFlow) + { + } + + void calcPatchSize(Size winSize, dim3& block, dim3& patch) { if (winSize.width > 32 && winSize.width > 2 * winSize.height) { @@ -95,156 +112,239 @@ namespace block.z = patch.z = 1; } -} -void cv::cuda::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err) -{ - if (prevPts.empty()) + void PyrLKOpticalFlowBase::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err, Stream& stream) { - nextPts.release(); - status.release(); - if (err) err->release(); - return; - } - - dim3 block, patch; - calcPatchSize(winSize, block, patch); - - CV_Assert(prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4); - CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type()); - CV_Assert(maxLevel >= 0); - CV_Assert(winSize.width > 2 && winSize.height > 2); - CV_Assert(patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6); - CV_Assert(prevPts.rows == 1 && prevPts.type() == CV_32FC2); - - if (useInitialFlow) - CV_Assert(nextPts.size() == prevPts.size() && nextPts.type() == CV_32FC2); - else - ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts); - - GpuMat temp1 = (useInitialFlow ? nextPts : prevPts).reshape(1); - GpuMat temp2 = nextPts.reshape(1); - cuda::multiply(temp1, Scalar::all(1.0 / (1 << maxLevel) / 2.0), temp2); - - ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status); - status.setTo(Scalar::all(1)); - - if (err) - ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err); - - // build the image pyramids. 
- - prevPyr_.resize(maxLevel + 1); - nextPyr_.resize(maxLevel + 1); - - int cn = prevImg.channels(); - - if (cn == 1 || cn == 4) - { - prevImg.convertTo(prevPyr_[0], CV_32F); - nextImg.convertTo(nextPyr_[0], CV_32F); - } - else - { - cuda::cvtColor(prevImg, buf_, COLOR_BGR2BGRA); - buf_.convertTo(prevPyr_[0], CV_32F); - - cuda::cvtColor(nextImg, buf_, COLOR_BGR2BGRA); - buf_.convertTo(nextPyr_[0], CV_32F); - } - - for (int level = 1; level <= maxLevel; ++level) - { - cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level]); - cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level]); - } - - pyrlk::loadConstants(make_int2(winSize.width, winSize.height), iters); - - for (int level = maxLevel; level >= 0; level--) - { - if (cn == 1) + if (prevPts.empty()) { - pyrlk::sparse1(prevPyr_[level], nextPyr_[level], - prevPts.ptr(), nextPts.ptr(), status.ptr(), level == 0 && err ? err->ptr() : 0, prevPts.cols, - level, block, patch); + nextPts.release(); + status.release(); + if (err) err->release(); + return; + } + + dim3 block, patch; + calcPatchSize(winSize_, block, patch); + + CV_Assert( prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4 ); + CV_Assert( prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type() ); + CV_Assert( maxLevel_ >= 0 ); + CV_Assert( winSize_.width > 2 && winSize_.height > 2 ); + CV_Assert( patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6 ); + CV_Assert( prevPts.rows == 1 && prevPts.type() == CV_32FC2 ); + + if (useInitialFlow_) + CV_Assert( nextPts.size() == prevPts.size() && nextPts.type() == prevPts.type() ); + else + ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts); + + GpuMat temp1 = (useInitialFlow_ ? nextPts : prevPts).reshape(1); + GpuMat temp2 = nextPts.reshape(1); + cuda::multiply(temp1, Scalar::all(1.0 / (1 << maxLevel_) / 2.0), temp2, 1, -1, stream); + + ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status); + status.setTo(Scalar::all(1), stream); + + if (err) + ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err); + + // build the image pyramids. + + BufferPool pool(stream); + + prevPyr_.resize(maxLevel_ + 1); + nextPyr_.resize(maxLevel_ + 1); + + int cn = prevImg.channels(); + + if (cn == 1 || cn == 4) + { + prevImg.convertTo(prevPyr_[0], CV_32F, stream); + nextImg.convertTo(nextPyr_[0], CV_32F, stream); } else { - pyrlk::sparse4(prevPyr_[level], nextPyr_[level], - prevPts.ptr(), nextPts.ptr(), status.ptr(), level == 0 && err ? err->ptr() : 0, prevPts.cols, - level, block, patch); + GpuMat buf = pool.getBuffer(prevImg.size(), CV_MAKE_TYPE(prevImg.depth(), 4)); + + cuda::cvtColor(prevImg, buf, COLOR_BGR2BGRA, 0, stream); + buf.convertTo(prevPyr_[0], CV_32F, stream); + + cuda::cvtColor(nextImg, buf, COLOR_BGR2BGRA, 0, stream); + buf.convertTo(nextPyr_[0], CV_32F, stream); + } + + for (int level = 1; level <= maxLevel_; ++level) + { + cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level], stream); + cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level], stream); + } + + pyrlk::loadConstants(make_int2(winSize_.width, winSize_.height), iters_, StreamAccessor::getStream(stream)); + + for (int level = maxLevel_; level >= 0; level--) + { + if (cn == 1) + { + pyrlk::sparse1(prevPyr_[level], nextPyr_[level], + prevPts.ptr(), nextPts.ptr(), + status.ptr(), + level == 0 && err ? err->ptr() : 0, prevPts.cols, + level, block, patch, + StreamAccessor::getStream(stream)); + } + else + { + pyrlk::sparse4(prevPyr_[level], nextPyr_[level], + prevPts.ptr(), nextPts.ptr(), + status.ptr(), + level == 0 && err ? 
err->ptr() : 0, prevPts.cols, + level, block, patch, + StreamAccessor::getStream(stream)); + } } } -} -void cv::cuda::PyrLKOpticalFlow::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err) -{ - CV_Assert(prevImg.type() == CV_8UC1); - CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type()); - CV_Assert(maxLevel >= 0); - CV_Assert(winSize.width > 2 && winSize.height > 2); - - if (err) - err->create(prevImg.size(), CV_32FC1); - - // build the image pyramids. - - prevPyr_.resize(maxLevel + 1); - nextPyr_.resize(maxLevel + 1); - - prevPyr_[0] = prevImg; - nextImg.convertTo(nextPyr_[0], CV_32F); - - for (int level = 1; level <= maxLevel; ++level) + void PyrLKOpticalFlowBase::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, Stream& stream) { - cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level]); - cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level]); + CV_Assert( prevImg.type() == CV_8UC1 ); + CV_Assert( prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type() ); + CV_Assert( maxLevel_ >= 0 ); + CV_Assert( winSize_.width > 2 && winSize_.height > 2 ); + + // build the image pyramids. + + prevPyr_.resize(maxLevel_ + 1); + nextPyr_.resize(maxLevel_ + 1); + + prevPyr_[0] = prevImg; + nextImg.convertTo(nextPyr_[0], CV_32F, stream); + + for (int level = 1; level <= maxLevel_; ++level) + { + cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level], stream); + cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level], stream); + } + + BufferPool pool(stream); + + GpuMat uPyr[] = { + pool.getBuffer(prevImg.size(), CV_32FC1), + pool.getBuffer(prevImg.size(), CV_32FC1), + }; + GpuMat vPyr[] = { + pool.getBuffer(prevImg.size(), CV_32FC1), + pool.getBuffer(prevImg.size(), CV_32FC1), + }; + + uPyr[0].setTo(Scalar::all(0), stream); + vPyr[0].setTo(Scalar::all(0), stream); + uPyr[1].setTo(Scalar::all(0), stream); + vPyr[1].setTo(Scalar::all(0), stream); + + int2 winSize2i = make_int2(winSize_.width, winSize_.height); + pyrlk::loadConstants(winSize2i, iters_, StreamAccessor::getStream(stream)); + + int idx = 0; + + for (int level = maxLevel_; level >= 0; level--) + { + int idx2 = (idx + 1) & 1; + + pyrlk::dense(prevPyr_[level], nextPyr_[level], + uPyr[idx], vPyr[idx], uPyr[idx2], vPyr[idx2], + PtrStepSzf(), winSize2i, + StreamAccessor::getStream(stream)); + + if (level > 0) + idx = idx2; + } + + uPyr[idx].copyTo(u, stream); + vPyr[idx].copyTo(v, stream); } - ensureSizeIsEnough(prevImg.size(), CV_32FC1, uPyr_[0]); - ensureSizeIsEnough(prevImg.size(), CV_32FC1, vPyr_[0]); - ensureSizeIsEnough(prevImg.size(), CV_32FC1, uPyr_[1]); - ensureSizeIsEnough(prevImg.size(), CV_32FC1, vPyr_[1]); - uPyr_[0].setTo(Scalar::all(0)); - vPyr_[0].setTo(Scalar::all(0)); - uPyr_[1].setTo(Scalar::all(0)); - vPyr_[1].setTo(Scalar::all(0)); - - int2 winSize2i = make_int2(winSize.width, winSize.height); - pyrlk::loadConstants(winSize2i, iters); - - PtrStepSzf derr = err ? *err : PtrStepSzf(); - - int idx = 0; - - for (int level = maxLevel; level >= 0; level--) + class SparsePyrLKOpticalFlowImpl : public SparsePyrLKOpticalFlow, private PyrLKOpticalFlowBase { - int idx2 = (idx + 1) & 1; + public: + SparsePyrLKOpticalFlowImpl(Size winSize, int maxLevel, int iters, bool useInitialFlow) : + PyrLKOpticalFlowBase(winSize, maxLevel, iters, useInitialFlow) + { + } - pyrlk::dense(prevPyr_[level], nextPyr_[level], uPyr_[idx], vPyr_[idx], uPyr_[idx2], vPyr_[idx2], - level == 0 ? 
derr : PtrStepSzf(), winSize2i); + virtual Size getWinSize() const { return winSize_; } + virtual void setWinSize(Size winSize) { winSize_ = winSize; } - if (level > 0) - idx = idx2; - } + virtual int getMaxLevel() const { return maxLevel_; } + virtual void setMaxLevel(int maxLevel) { maxLevel_ = maxLevel; } - uPyr_[idx].copyTo(u); - vPyr_[idx].copyTo(v); + virtual int getNumIters() const { return iters_; } + virtual void setNumIters(int iters) { iters_ = iters; } + + virtual bool getUseInitialFlow() const { return useInitialFlow_; } + virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; } + + virtual void calc(InputArray _prevImg, InputArray _nextImg, + InputArray _prevPts, InputOutputArray _nextPts, + OutputArray _status, + OutputArray _err, + Stream& stream) + { + const GpuMat prevImg = _prevImg.getGpuMat(); + const GpuMat nextImg = _nextImg.getGpuMat(); + const GpuMat prevPts = _prevPts.getGpuMat(); + GpuMat& nextPts = _nextPts.getGpuMatRef(); + GpuMat& status = _status.getGpuMatRef(); + GpuMat* err = _err.needed() ? &(_err.getGpuMatRef()) : NULL; + + sparse(prevImg, nextImg, prevPts, nextPts, status, err, stream); + } + }; + + class DensePyrLKOpticalFlowImpl : public DensePyrLKOpticalFlow, private PyrLKOpticalFlowBase + { + public: + DensePyrLKOpticalFlowImpl(Size winSize, int maxLevel, int iters, bool useInitialFlow) : + PyrLKOpticalFlowBase(winSize, maxLevel, iters, useInitialFlow) + { + } + + virtual Size getWinSize() const { return winSize_; } + virtual void setWinSize(Size winSize) { winSize_ = winSize; } + + virtual int getMaxLevel() const { return maxLevel_; } + virtual void setMaxLevel(int maxLevel) { maxLevel_ = maxLevel; } + + virtual int getNumIters() const { return iters_; } + virtual void setNumIters(int iters) { iters_ = iters; } + + virtual bool getUseInitialFlow() const { return useInitialFlow_; } + virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; } + + virtual void calc(InputArray _prevImg, InputArray _nextImg, InputOutputArray _flow, Stream& stream) + { + const GpuMat prevImg = _prevImg.getGpuMat(); + const GpuMat nextImg = _nextImg.getGpuMat(); + + BufferPool pool(stream); + GpuMat u = pool.getBuffer(prevImg.size(), CV_32FC1); + GpuMat v = pool.getBuffer(prevImg.size(), CV_32FC1); + + dense(prevImg, nextImg, u, v, stream); + + GpuMat flows[] = {u, v}; + cuda::merge(flows, 2, _flow, stream); + } + }; } -void cv::cuda::PyrLKOpticalFlow::releaseMemory() +Ptr cv::cuda::SparsePyrLKOpticalFlow::create(Size winSize, int maxLevel, int iters, bool useInitialFlow) { - prevPyr_.clear(); - nextPyr_.clear(); + return makePtr(winSize, maxLevel, iters, useInitialFlow); +} - buf_.release(); - - uPyr_[0].release(); - vPyr_[0].release(); - - uPyr_[1].release(); - vPyr_[1].release(); +Ptr cv::cuda::DensePyrLKOpticalFlow::create(Size winSize, int maxLevel, int iters, bool useInitialFlow) +{ + return makePtr(winSize, maxLevel, iters, useInitialFlow); } #endif /* !defined (HAVE_CUDA) */ diff --git a/modules/cudaoptflow/src/tvl1flow.cpp b/modules/cudaoptflow/src/tvl1flow.cpp index b8dfea56f..e2ef07b0d 100644 --- a/modules/cudaoptflow/src/tvl1flow.cpp +++ b/modules/cudaoptflow/src/tvl1flow.cpp @@ -44,256 +44,338 @@ #if !defined HAVE_CUDA || defined(CUDA_DISABLER) -cv::cuda::OpticalFlowDual_TVL1_CUDA::OpticalFlowDual_TVL1_CUDA() { throw_no_cuda(); } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); } -void 
cv::cuda::OpticalFlowDual_TVL1_CUDA::collectGarbage() {} -void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); } +Ptr cv::cuda::OpticalFlowDual_TVL1::create(double, double, double, int, int, double, int, double, double, bool) { throw_no_cuda(); return Ptr(); } #else using namespace cv; using namespace cv::cuda; -cv::cuda::OpticalFlowDual_TVL1_CUDA::OpticalFlowDual_TVL1_CUDA() -{ - tau = 0.25; - lambda = 0.15; - theta = 0.3; - nscales = 5; - warps = 5; - epsilon = 0.01; - iterations = 300; - scaleStep = 0.8; - gamma = 0.0; - useInitialFlow = false; -} - -void cv::cuda::OpticalFlowDual_TVL1_CUDA::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy) -{ - CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 ); - CV_Assert( I0.size() == I1.size() ); - CV_Assert( I0.type() == I1.type() ); - CV_Assert( !useInitialFlow || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) ); - CV_Assert( nscales > 0 ); - - // allocate memory for the pyramid structure - I0s.resize(nscales); - I1s.resize(nscales); - u1s.resize(nscales); - u2s.resize(nscales); - u3s.resize(nscales); - - I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0); - I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0); - - if (!useInitialFlow) - { - flowx.create(I0.size(), CV_32FC1); - flowy.create(I0.size(), CV_32FC1); - } - - u1s[0] = flowx; - u2s[0] = flowy; - if (gamma) - u3s[0].create(I0.size(), CV_32FC1); - - I1x_buf.create(I0.size(), CV_32FC1); - I1y_buf.create(I0.size(), CV_32FC1); - - I1w_buf.create(I0.size(), CV_32FC1); - I1wx_buf.create(I0.size(), CV_32FC1); - I1wy_buf.create(I0.size(), CV_32FC1); - - grad_buf.create(I0.size(), CV_32FC1); - rho_c_buf.create(I0.size(), CV_32FC1); - - p11_buf.create(I0.size(), CV_32FC1); - p12_buf.create(I0.size(), CV_32FC1); - p21_buf.create(I0.size(), CV_32FC1); - p22_buf.create(I0.size(), CV_32FC1); - if (gamma) - { - p31_buf.create(I0.size(), CV_32FC1); - p32_buf.create(I0.size(), CV_32FC1); - } - diff_buf.create(I0.size(), CV_32FC1); - - // create the scales - for (int s = 1; s < nscales; ++s) - { - cuda::resize(I0s[s-1], I0s[s], Size(), scaleStep, scaleStep); - cuda::resize(I1s[s-1], I1s[s], Size(), scaleStep, scaleStep); - - if (I0s[s].cols < 16 || I0s[s].rows < 16) - { - nscales = s; - break; - } - - if (useInitialFlow) - { - cuda::resize(u1s[s-1], u1s[s], Size(), scaleStep, scaleStep); - cuda::resize(u2s[s-1], u2s[s], Size(), scaleStep, scaleStep); - - cuda::multiply(u1s[s], Scalar::all(scaleStep), u1s[s]); - cuda::multiply(u2s[s], Scalar::all(scaleStep), u2s[s]); - } - else - { - u1s[s].create(I0s[s].size(), CV_32FC1); - u2s[s].create(I0s[s].size(), CV_32FC1); - } - if (gamma) - u3s[s].create(I0s[s].size(), CV_32FC1); - } - - if (!useInitialFlow) - { - u1s[nscales-1].setTo(Scalar::all(0)); - u2s[nscales-1].setTo(Scalar::all(0)); - } - if (gamma) - u3s[nscales - 1].setTo(Scalar::all(0)); - - // pyramidal structure for computing the optical flow - for (int s = nscales - 1; s >= 0; --s) - { - // compute the optical flow at the current scale - procOneScale(I0s[s], I1s[s], u1s[s], u2s[s], u3s[s]); - - // if this was the last scale, finish now - if (s == 0) - break; - - // otherwise, upsample the optical flow - - // zoom the optical flow for the next finer scale - cuda::resize(u1s[s], u1s[s - 1], I0s[s - 1].size()); - cuda::resize(u2s[s], u2s[s - 1], I0s[s - 1].size()); - if (gamma) - 
cuda::resize(u3s[s], u3s[s - 1], I0s[s - 1].size()); - - // scale the optical flow with the appropriate zoom factor - cuda::multiply(u1s[s - 1], Scalar::all(1/scaleStep), u1s[s - 1]); - cuda::multiply(u2s[s - 1], Scalar::all(1/scaleStep), u2s[s - 1]); - } -} - namespace tvl1flow { - void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy); - void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho); + void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream); + void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, + PtrStepSzf u1, PtrStepSzf u2, + PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, + PtrStepSzf grad, PtrStepSzf rho, + cudaStream_t stream); void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, - float l_t, float theta, float gamma, bool calcError); - void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, const float gamma); + float l_t, float theta, float gamma, bool calcError, + cudaStream_t stream); + void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, + PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, + float taut, float gamma, + cudaStream_t stream); } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3) +namespace { - using namespace tvl1flow; - - const double scaledEpsilon = epsilon * epsilon * I0.size().area(); - - CV_DbgAssert( I1.size() == I0.size() ); - CV_DbgAssert( I1.type() == I0.type() ); - CV_DbgAssert( u1.size() == I0.size() ); - CV_DbgAssert( u2.size() == u1.size() ); - - GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows)); - centeredGradient(I1, I1x, I1y); - - GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows)); - - GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows)); - - GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p31, p32; - if (gamma) + class OpticalFlowDual_TVL1_Impl : public OpticalFlowDual_TVL1 { - p31 = p31_buf(Rect(0, 0, I0.cols, I0.rows)); - p32 = p32_buf(Rect(0, 0, I0.cols, I0.rows)); - } - p11.setTo(Scalar::all(0)); - p12.setTo(Scalar::all(0)); - p21.setTo(Scalar::all(0)); - p22.setTo(Scalar::all(0)); - if (gamma) - { - p31.setTo(Scalar::all(0)); - p32.setTo(Scalar::all(0)); - } - - GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows)); - - const float l_t = static_cast(lambda * theta); - const float taut = static_cast(tau / theta); - - for (int warpings = 0; warpings < warps; ++warpings) - { - warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c); - - double error = std::numeric_limits::max(); - double prevError = 0.0; - for (int n = 0; error > scaledEpsilon && n < iterations; ++n) + public: 
+ OpticalFlowDual_TVL1_Impl(double tau, double lambda, double theta, int nscales, int warps, double epsilon, + int iterations, double scaleStep, double gamma, bool useInitialFlow) : + tau_(tau), lambda_(lambda), gamma_(gamma), theta_(theta), nscales_(nscales), warps_(warps), + epsilon_(epsilon), iterations_(iterations), scaleStep_(scaleStep), useInitialFlow_(useInitialFlow) { - // some tweaks to make sum operation less frequently - bool calcError = (epsilon > 0) && (n & 0x1) && (prevError < scaledEpsilon); - estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, diff, l_t, static_cast(theta), gamma, calcError); - if (calcError) + } + + virtual double getTau() const { return tau_; } + virtual void setTau(double tau) { tau_ = tau; } + + virtual double getLambda() const { return lambda_; } + virtual void setLambda(double lambda) { lambda_ = lambda; } + + virtual double getGamma() const { return gamma_; } + virtual void setGamma(double gamma) { gamma_ = gamma; } + + virtual double getTheta() const { return theta_; } + virtual void setTheta(double theta) { theta_ = theta; } + + virtual int getNumScales() const { return nscales_; } + virtual void setNumScales(int nscales) { nscales_ = nscales; } + + virtual int getNumWarps() const { return warps_; } + virtual void setNumWarps(int warps) { warps_ = warps; } + + virtual double getEpsilon() const { return epsilon_; } + virtual void setEpsilon(double epsilon) { epsilon_ = epsilon; } + + virtual int getNumIterations() const { return iterations_; } + virtual void setNumIterations(int iterations) { iterations_ = iterations; } + + virtual double getScaleStep() const { return scaleStep_; } + virtual void setScaleStep(double scaleStep) { scaleStep_ = scaleStep; } + + virtual bool getUseInitialFlow() const { return useInitialFlow_; } + virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; } + + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream); + + private: + double tau_; + double lambda_; + double gamma_; + double theta_; + int nscales_; + int warps_; + double epsilon_; + int iterations_; + double scaleStep_; + bool useInitialFlow_; + + private: + void calcImpl(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, Stream& stream); + void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3, Stream& stream); + + std::vector I0s; + std::vector I1s; + std::vector u1s; + std::vector u2s; + std::vector u3s; + + GpuMat I1x_buf; + GpuMat I1y_buf; + + GpuMat I1w_buf; + GpuMat I1wx_buf; + GpuMat I1wy_buf; + + GpuMat grad_buf; + GpuMat rho_c_buf; + + GpuMat p11_buf; + GpuMat p12_buf; + GpuMat p21_buf; + GpuMat p22_buf; + GpuMat p31_buf; + GpuMat p32_buf; + + GpuMat diff_buf; + GpuMat norm_buf; + }; + + void OpticalFlowDual_TVL1_Impl::calc(InputArray _frame0, InputArray _frame1, InputOutputArray _flow, Stream& stream) + { + const GpuMat frame0 = _frame0.getGpuMat(); + const GpuMat frame1 = _frame1.getGpuMat(); + + BufferPool pool(stream); + GpuMat flowx = pool.getBuffer(frame0.size(), CV_32FC1); + GpuMat flowy = pool.getBuffer(frame0.size(), CV_32FC1); + + calcImpl(frame0, frame1, flowx, flowy, stream); + + GpuMat flows[] = {flowx, flowy}; + cuda::merge(flows, 2, _flow, stream); + } + + void OpticalFlowDual_TVL1_Impl::calcImpl(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, Stream& stream) + { + CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 ); + CV_Assert( I0.size() == I1.size() ); + CV_Assert( I0.type() 
== I1.type() ); + CV_Assert( !useInitialFlow_ || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) ); + CV_Assert( nscales_ > 0 ); + + // allocate memory for the pyramid structure + I0s.resize(nscales_); + I1s.resize(nscales_); + u1s.resize(nscales_); + u2s.resize(nscales_); + u3s.resize(nscales_); + + I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0, stream); + I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0, stream); + + if (!useInitialFlow_) + { + flowx.create(I0.size(), CV_32FC1); + flowy.create(I0.size(), CV_32FC1); + } + + u1s[0] = flowx; + u2s[0] = flowy; + if (gamma_) + { + u3s[0].create(I0.size(), CV_32FC1); + } + + I1x_buf.create(I0.size(), CV_32FC1); + I1y_buf.create(I0.size(), CV_32FC1); + + I1w_buf.create(I0.size(), CV_32FC1); + I1wx_buf.create(I0.size(), CV_32FC1); + I1wy_buf.create(I0.size(), CV_32FC1); + + grad_buf.create(I0.size(), CV_32FC1); + rho_c_buf.create(I0.size(), CV_32FC1); + + p11_buf.create(I0.size(), CV_32FC1); + p12_buf.create(I0.size(), CV_32FC1); + p21_buf.create(I0.size(), CV_32FC1); + p22_buf.create(I0.size(), CV_32FC1); + if (gamma_) + { + p31_buf.create(I0.size(), CV_32FC1); + p32_buf.create(I0.size(), CV_32FC1); + } + diff_buf.create(I0.size(), CV_32FC1); + + // create the scales + for (int s = 1; s < nscales_; ++s) + { + cuda::resize(I0s[s-1], I0s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + cuda::resize(I1s[s-1], I1s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + + if (I0s[s].cols < 16 || I0s[s].rows < 16) { - error = cuda::sum(diff, norm_buf)[0]; - prevError = error; + nscales_ = s; + break; + } + + if (useInitialFlow_) + { + cuda::resize(u1s[s-1], u1s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + cuda::resize(u2s[s-1], u2s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + + cuda::multiply(u1s[s], Scalar::all(scaleStep_), u1s[s], 1, -1, stream); + cuda::multiply(u2s[s], Scalar::all(scaleStep_), u2s[s], 1, -1, stream); } else { - error = std::numeric_limits::max(); - prevError -= scaledEpsilon; + u1s[s].create(I0s[s].size(), CV_32FC1); + u2s[s].create(I0s[s].size(), CV_32FC1); + } + if (gamma_) + { + u3s[s].create(I0s[s].size(), CV_32FC1); + } + } + + if (!useInitialFlow_) + { + u1s[nscales_-1].setTo(Scalar::all(0), stream); + u2s[nscales_-1].setTo(Scalar::all(0), stream); + } + if (gamma_) + { + u3s[nscales_ - 1].setTo(Scalar::all(0), stream); + } + + // pyramidal structure for computing the optical flow + for (int s = nscales_ - 1; s >= 0; --s) + { + // compute the optical flow at the current scale + procOneScale(I0s[s], I1s[s], u1s[s], u2s[s], u3s[s], stream); + + // if this was the last scale, finish now + if (s == 0) + break; + + // otherwise, upsample the optical flow + + // zoom the optical flow for the next finer scale + cuda::resize(u1s[s], u1s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream); + cuda::resize(u2s[s], u2s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream); + if (gamma_) + { + cuda::resize(u3s[s], u3s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream); } - estimateDualVariables(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); + // scale the optical flow with the appropriate zoom factor + cuda::multiply(u1s[s - 1], Scalar::all(1/scaleStep_), u1s[s - 1], 1, -1, stream); + cuda::multiply(u2s[s - 1], Scalar::all(1/scaleStep_), u2s[s - 1], 1, -1, stream); + } + } + + void OpticalFlowDual_TVL1_Impl::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, 
GpuMat& u2, GpuMat& u3, Stream& _stream) + { + using namespace tvl1flow; + + cudaStream_t stream = StreamAccessor::getStream(_stream); + + const double scaledEpsilon = epsilon_ * epsilon_ * I0.size().area(); + + CV_DbgAssert( I1.size() == I0.size() ); + CV_DbgAssert( I1.type() == I0.type() ); + CV_DbgAssert( u1.size() == I0.size() ); + CV_DbgAssert( u2.size() == u1.size() ); + + GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows)); + centeredGradient(I1, I1x, I1y, stream); + + GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows)); + + GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows)); + + GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p31, p32; + if (gamma_) + { + p31 = p31_buf(Rect(0, 0, I0.cols, I0.rows)); + p32 = p32_buf(Rect(0, 0, I0.cols, I0.rows)); + } + p11.setTo(Scalar::all(0), _stream); + p12.setTo(Scalar::all(0), _stream); + p21.setTo(Scalar::all(0), _stream); + p22.setTo(Scalar::all(0), _stream); + if (gamma_) + { + p31.setTo(Scalar::all(0), _stream); + p32.setTo(Scalar::all(0), _stream); + } + + GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows)); + + const float l_t = static_cast(lambda_ * theta_); + const float taut = static_cast(tau_ / theta_); + + for (int warpings = 0; warpings < warps_; ++warpings) + { + warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c, stream); + + double error = std::numeric_limits::max(); + double prevError = 0.0; + for (int n = 0; error > scaledEpsilon && n < iterations_; ++n) + { + // some tweaks to make sum operation less frequently + bool calcError = (epsilon_ > 0) && (n & 0x1) && (prevError < scaledEpsilon); + estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, diff, l_t, static_cast(theta_), gamma_, calcError, stream); + if (calcError) + { + _stream.waitForCompletion(); + error = cuda::sum(diff, norm_buf)[0]; + prevError = error; + } + else + { + error = std::numeric_limits::max(); + prevError -= scaledEpsilon; + } + + estimateDualVariables(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma_, stream); + } } } } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::collectGarbage() +Ptr cv::cuda::OpticalFlowDual_TVL1::create( + double tau, double lambda, double theta, int nscales, int warps, + double epsilon, int iterations, double scaleStep, double gamma, bool useInitialFlow) { - I0s.clear(); - I1s.clear(); - u1s.clear(); - u2s.clear(); - u3s.clear(); - - I1x_buf.release(); - I1y_buf.release(); - - I1w_buf.release(); - I1wx_buf.release(); - I1wy_buf.release(); - - grad_buf.release(); - rho_c_buf.release(); - - p11_buf.release(); - p12_buf.release(); - p21_buf.release(); - p22_buf.release(); - if (gamma) - { - p31_buf.release(); - p32_buf.release(); - } - diff_buf.release(); - norm_buf.release(); + return makePtr(tau, lambda, theta, nscales, warps, + epsilon, iterations, scaleStep, gamma, useInitialFlow); } #endif // !defined HAVE_CUDA || defined(CUDA_DISABLER) From 2dc3b0f7f95135648d6a05f61e7e47787b3d63f2 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:36:31 +0300 Subject: [PATCH 70/98] fix cudaoptflow tests build --- modules/cudaoptflow/perf/perf_optflow.cpp | 81 ++++++++++++--------- 
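The test and perf updates listed below follow the convention established by the preceding patches: calc() writes one packed CV_32FC2 flow field, and callers split it into horizontal and vertical planes with cv::cuda::split() when separate channels are needed. A self-contained sketch of that pattern is shown here; the frame names are assumptions and the algorithm defaults are used only for illustration.

// Sketch only (not part of the patch): consume the packed two-channel flow from calc().
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudaoptflow.hpp>

int main()
{
    // Hypothetical 8-bit grayscale inputs (dense pyramidal LK expects CV_8UC1).
    cv::Mat frame0 = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
    cv::Mat frame1 = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);

    cv::cuda::GpuMat d_frame0(frame0), d_frame1(frame1);
    cv::cuda::GpuMat d_flow;

    cv::Ptr<cv::cuda::DensePyrLKOpticalFlow> alg = cv::cuda::DensePyrLKOpticalFlow::create();
    alg->calc(d_frame0, d_frame1, d_flow);   // d_flow ends up CV_32FC2: (u, v) per pixel

    cv::cuda::GpuMat planes[2];
    cv::cuda::split(d_flow, planes);         // planes[0] = u, planes[1] = v

    cv::Mat u, v;
    planes[0].download(u);
    planes[1].download(v);

    return 0;
}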
modules/cudaoptflow/perf/perf_precomp.hpp | 1 + modules/cudaoptflow/test/test_optflow.cpp | 87 ++++++++++++----------- modules/cudaoptflow/test/test_precomp.hpp | 1 + 4 files changed, 98 insertions(+), 72 deletions(-) diff --git a/modules/cudaoptflow/perf/perf_optflow.cpp b/modules/cudaoptflow/perf/perf_optflow.cpp index 12612b062..32040f282 100644 --- a/modules/cudaoptflow/perf/perf_optflow.cpp +++ b/modules/cudaoptflow/perf/perf_optflow.cpp @@ -71,13 +71,19 @@ PERF_TEST_P(ImagePair, BroxOpticalFlow, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; - cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); + cv::Ptr d_alg = + cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/, + 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - TEST_CYCLE() d_flow(d_frame0, d_frame1, u, v); + TEST_CYCLE() d_alg->calc(d_frame0, d_frame1, flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u, 1e-1); CUDA_SANITY_CHECK(v, 1e-1); @@ -129,17 +135,17 @@ PERF_TEST_P(ImagePair_Gray_NPts_WinSz_Levels_Iters, PyrLKOpticalFlowSparse, { const cv::cuda::GpuMat d_pts(pts.reshape(2, 1)); - cv::cuda::PyrLKOpticalFlow d_pyrLK; - d_pyrLK.winSize = cv::Size(winSize, winSize); - d_pyrLK.maxLevel = levels - 1; - d_pyrLK.iters = iters; + cv::Ptr d_pyrLK = + cv::cuda::SparsePyrLKOpticalFlow::create(cv::Size(winSize, winSize), + levels - 1, + iters); const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); cv::cuda::GpuMat nextPts; cv::cuda::GpuMat status; - TEST_CYCLE() d_pyrLK.sparse(d_frame0, d_frame1, d_pts, nextPts, status); + TEST_CYCLE() d_pyrLK->calc(d_frame0, d_frame1, d_pts, nextPts, status); CUDA_SANITY_CHECK(nextPts); CUDA_SANITY_CHECK(status); @@ -189,15 +195,20 @@ PERF_TEST_P(ImagePair_WinSz_Levels_Iters, PyrLKOpticalFlowDense, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; - cv::cuda::PyrLKOpticalFlow d_pyrLK; - d_pyrLK.winSize = cv::Size(winSize, winSize); - d_pyrLK.maxLevel = levels - 1; - d_pyrLK.iters = iters; + cv::Ptr d_pyrLK = + cv::cuda::DensePyrLKOpticalFlow::create(cv::Size(winSize, winSize), + levels - 1, + iters); - TEST_CYCLE() d_pyrLK.dense(d_frame0, d_frame1, u, v); + TEST_CYCLE() d_pyrLK->calc(d_frame0, d_frame1, flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u); CUDA_SANITY_CHECK(v); @@ -234,19 +245,19 @@ PERF_TEST_P(ImagePair, FarnebackOpticalFlow, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; - cv::cuda::FarnebackOpticalFlow d_farneback; - d_farneback.numLevels = numLevels; - d_farneback.pyrScale = pyrScale; - d_farneback.winSize = winSize; - d_farneback.numIters = numIters; - d_farneback.polyN = polyN; - d_farneback.polySigma = polySigma; - d_farneback.flags = flags; + cv::Ptr d_farneback = + cv::cuda::FarnebackOpticalFlow::create(numLevels, pyrScale, false, winSize, + numIters, polyN, polySigma, flags); - TEST_CYCLE() d_farneback(d_frame0, d_frame1, u, v); + TEST_CYCLE() 
d_farneback->calc(d_frame0, d_frame1, flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u, 1e-4); CUDA_SANITY_CHECK(v, 1e-4); @@ -279,12 +290,18 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; - cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg; + cv::Ptr d_alg = + cv::cuda::OpticalFlowDual_TVL1::create(); - TEST_CYCLE() d_alg(d_frame0, d_frame1, u, v); + TEST_CYCLE() d_alg->calc(d_frame0, d_frame1, flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u, 1e-1); CUDA_SANITY_CHECK(v, 1e-1); diff --git a/modules/cudaoptflow/perf/perf_precomp.hpp b/modules/cudaoptflow/perf/perf_precomp.hpp index 1dc00ae4b..d7761a587 100644 --- a/modules/cudaoptflow/perf/perf_precomp.hpp +++ b/modules/cudaoptflow/perf/perf_precomp.hpp @@ -55,6 +55,7 @@ #include "opencv2/ts/cuda_perf.hpp" #include "opencv2/cudaoptflow.hpp" +#include "opencv2/cudaarithm.hpp" #include "opencv2/video.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY diff --git a/modules/cudaoptflow/test/test_optflow.cpp b/modules/cudaoptflow/test/test_optflow.cpp index 7a6e68310..c5b2ad847 100644 --- a/modules/cudaoptflow/test/test_optflow.cpp +++ b/modules/cudaoptflow/test/test_optflow.cpp @@ -71,12 +71,18 @@ CUDA_TEST_P(BroxOpticalFlow, Regression) cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1); ASSERT_FALSE(frame1.empty()); - cv::cuda::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); + cv::Ptr brox = + cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/, + 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; - brox(loadMat(frame0), loadMat(frame1), u, v); + cv::cuda::GpuMat flow; + brox->calc(loadMat(frame0), loadMat(frame1), flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; std::string fname(cvtest::TS::ptr()->get_data_path()); if (devInfo.majorVersion() >= 2) @@ -133,12 +139,18 @@ CUDA_TEST_P(BroxOpticalFlow, OpticalFlowNan) cv::resize(frame0, r_frame0, cv::Size(1380,1000)); cv::resize(frame1, r_frame1, cv::Size(1380,1000)); - cv::cuda::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 5 /*inner_iterations*/, 150 /*outer_iterations*/, 10 /*solver_iterations*/); + cv::Ptr brox = + cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/, + 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; - brox(loadMat(r_frame0), loadMat(r_frame1), u, v); + cv::cuda::GpuMat flow; + brox->calc(loadMat(frame0), loadMat(frame1), flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; cv::Mat h_u, h_v; u.download(h_u); @@ -193,11 +205,12 @@ CUDA_TEST_P(PyrLKOpticalFlow, Sparse) cv::Mat pts_mat(1, (int) pts.size(), CV_32FC2, (void*) &pts[0]); d_pts.upload(pts_mat); - cv::cuda::PyrLKOpticalFlow pyrLK; + cv::Ptr pyrLK = + cv::cuda::SparsePyrLKOpticalFlow::create(); cv::cuda::GpuMat d_nextPts; 
cv::cuda::GpuMat d_status; - pyrLK.sparse(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status); + pyrLK->calc(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status); std::vector nextPts(d_nextPts.cols); cv::Mat nextPts_mat(1, d_nextPts.cols, CV_32FC2, (void*) &nextPts[0]); @@ -285,34 +298,30 @@ CUDA_TEST_P(FarnebackOpticalFlow, Accuracy) double polySigma = polyN <= 5 ? 1.1 : 1.5; - cv::cuda::FarnebackOpticalFlow farn; - farn.pyrScale = pyrScale; - farn.polyN = polyN; - farn.polySigma = polySigma; - farn.flags = flags; + cv::Ptr farn = + cv::cuda::FarnebackOpticalFlow::create(); + farn->setPyrScale(pyrScale); + farn->setPolyN(polyN); + farn->setPolySigma(polySigma); + farn->setFlags(flags); - cv::cuda::GpuMat d_flowx, d_flowy; - farn(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy); + cv::cuda::GpuMat d_flow; + farn->calc(loadMat(frame0), loadMat(frame1), d_flow); cv::Mat flow; if (useInitFlow) { - cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)}; - cv::merge(flowxy, 2, flow); + d_flow.download(flow); - farn.flags |= cv::OPTFLOW_USE_INITIAL_FLOW; - farn(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy); + farn->setFlags(farn->getFlags() | cv::OPTFLOW_USE_INITIAL_FLOW); + farn->calc(loadMat(frame0), loadMat(frame1), d_flow); } cv::calcOpticalFlowFarneback( - frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize, - farn.numIters, farn.polyN, farn.polySigma, farn.flags); + frame0, frame1, flow, farn->getPyrScale(), farn->getNumLevels(), farn->getWinSize(), + farn->getNumIters(), farn->getPolyN(), farn->getPolySigma(), farn->getFlags()); - std::vector flowxy; - cv::split(flow, flowxy); - - EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1); - EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1); + EXPECT_MAT_SIMILAR(flow, d_flow, 0.1); } INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FarnebackOpticalFlow, testing::Combine( @@ -352,26 +361,24 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy) cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE); ASSERT_FALSE(frame1.empty()); - cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg; - d_alg.iterations = 10; - d_alg.gamma = gamma; + cv::Ptr d_alg = + cv::cuda::OpticalFlowDual_TVL1::create(); + d_alg->setNumIterations(10); + d_alg->setGamma(gamma); - cv::cuda::GpuMat d_flowx, d_flowy; - d_alg(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy); + cv::cuda::GpuMat d_flow; + d_alg->calc(loadMat(frame0), loadMat(frame1), d_flow); cv::Ptr alg = cv::createOptFlow_DualTVL1(); alg->set("medianFiltering", 1); alg->set("innerIterations", 1); - alg->set("outerIterations", d_alg.iterations); + alg->set("outerIterations", d_alg->getNumIterations()); alg->set("gamma", gamma); cv::Mat flow; alg->calc(frame0, frame1, flow); - cv::Mat gold[2]; - cv::split(flow, gold); - EXPECT_MAT_SIMILAR(gold[0], d_flowx, 4e-3); - EXPECT_MAT_SIMILAR(gold[1], d_flowy, 4e-3); + EXPECT_MAT_SIMILAR(flow, d_flow, 4e-3); } INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowDual_TVL1, testing::Combine( diff --git a/modules/cudaoptflow/test/test_precomp.hpp b/modules/cudaoptflow/test/test_precomp.hpp index 54812022a..2dc36abf5 100644 --- a/modules/cudaoptflow/test/test_precomp.hpp +++ b/modules/cudaoptflow/test/test_precomp.hpp @@ -57,6 +57,7 @@ #include "opencv2/ts/cuda_test.hpp" #include "opencv2/cudaoptflow.hpp" +#include "opencv2/cudaarithm.hpp" #include "opencv2/video.hpp" #include "cvconfig.h" From 03ae1e5aae21bc12bfbefaf78b353a3b83c96ff4 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:36:44 +0300 Subject: [PATCH 71/98] fix 
superres module compilation --- modules/superres/src/optical_flow.cpp | 161 ++++++++++++++++---------- 1 file changed, 98 insertions(+), 63 deletions(-) diff --git a/modules/superres/src/optical_flow.cpp b/modules/superres/src/optical_flow.cpp index fcc9bef34..52fc2648e 100644 --- a/modules/superres/src/optical_flow.cpp +++ b/modules/superres/src/optical_flow.cpp @@ -341,7 +341,7 @@ namespace int iterations_; bool useInitialFlow_; - Ptr alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(DualTVL1, "DenseOpticalFlowExt.DualTVL1", @@ -514,7 +514,7 @@ namespace int outerIterations_; int solverIterations_; - BroxOpticalFlow alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(Brox_CUDA, "DenseOpticalFlowExt.Brox_CUDA", @@ -525,31 +525,40 @@ namespace obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)"); obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations")) - Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1), alg_(0.197f, 50.0f, 0.8f, 10, 77, 10) + Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1) { - alpha_ = alg_.alpha; - gamma_ = alg_.gamma; - scaleFactor_ = alg_.scale_factor; - innerIterations_ = alg_.inner_iterations; - outerIterations_ = alg_.outer_iterations; - solverIterations_ = alg_.solver_iterations; + alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10); + + alpha_ = alg_->getFlowSmoothness(); + gamma_ = alg_->getGradientConstancyImportance(); + scaleFactor_ = alg_->getPyramidScaleFactor(); + innerIterations_ = alg_->getInnerIterations(); + outerIterations_ = alg_->getOuterIterations(); + solverIterations_ = alg_->getSolverIterations(); } void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.alpha = static_cast(alpha_); - alg_.gamma = static_cast(gamma_); - alg_.scale_factor = static_cast(scaleFactor_); - alg_.inner_iterations = innerIterations_; - alg_.outer_iterations = outerIterations_; - alg_.solver_iterations = solverIterations_; + alg_->setFlowSmoothness(alpha_); + alg_->setGradientConstancyImportance(gamma_); + alg_->setPyramidScaleFactor(scaleFactor_); + alg_->setInnerIterations(innerIterations_); + alg_->setOuterIterations(outerIterations_); + alg_->setSolverIterations(solverIterations_); - alg_(input0, input1, dst1, dst2); + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); + + dst1 = flows[0]; + dst2 = flows[1]; } void Brox_CUDA::collectGarbage() { - alg_.buf.release(); + alg_ = cuda::BroxOpticalFlow::create(alpha_, gamma_, scaleFactor_, innerIterations_, outerIterations_, solverIterations_); GpuOpticalFlow::collectGarbage(); } } @@ -581,7 +590,7 @@ namespace int maxLevel_; int iterations_; - PyrLKOpticalFlow alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(PyrLK_CUDA, "DenseOpticalFlowExt.PyrLK_CUDA", @@ -591,24 +600,32 @@ namespace PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1) { - winSize_ = alg_.winSize.width; - maxLevel_ = alg_.maxLevel; - iterations_ = alg_.iters; + alg_ = cuda::DensePyrLKOpticalFlow::create(); + + winSize_ = alg_->getWinSize().width; + maxLevel_ = alg_->getMaxLevel(); + iterations_ = alg_->getNumIters(); } void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.winSize.width = winSize_; - alg_.winSize.height = winSize_; - alg_.maxLevel = maxLevel_; - alg_.iters = iterations_; + alg_->setWinSize(Size(winSize_, winSize_)); + alg_->setMaxLevel(maxLevel_); + 
alg_->setNumIters(iterations_); - alg_.dense(input0, input1, dst1, dst2); + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); + + dst1 = flows[0]; + dst2 = flows[1]; } void PyrLK_CUDA::collectGarbage() { - alg_.releaseMemory(); + alg_ = cuda::DensePyrLKOpticalFlow::create(); GpuOpticalFlow::collectGarbage(); } } @@ -644,7 +661,7 @@ namespace double polySigma_; int flags_; - FarnebackOpticalFlow alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(Farneback_CUDA, "DenseOpticalFlowExt.Farneback_CUDA", @@ -658,31 +675,40 @@ namespace Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1) { - pyrScale_ = alg_.pyrScale; - numLevels_ = alg_.numLevels; - winSize_ = alg_.winSize; - numIters_ = alg_.numIters; - polyN_ = alg_.polyN; - polySigma_ = alg_.polySigma; - flags_ = alg_.flags; + alg_ = cuda::FarnebackOpticalFlow::create(); + + pyrScale_ = alg_->getPyrScale(); + numLevels_ = alg_->getNumLevels(); + winSize_ = alg_->getWinSize(); + numIters_ = alg_->getNumIters(); + polyN_ = alg_->getPolyN(); + polySigma_ = alg_->getPolySigma(); + flags_ = alg_->getFlags(); } void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.pyrScale = pyrScale_; - alg_.numLevels = numLevels_; - alg_.winSize = winSize_; - alg_.numIters = numIters_; - alg_.polyN = polyN_; - alg_.polySigma = polySigma_; - alg_.flags = flags_; + alg_->setPyrScale(pyrScale_); + alg_->setNumLevels(numLevels_); + alg_->setWinSize(winSize_); + alg_->setNumIters(numIters_); + alg_->setPolyN(polyN_); + alg_->setPolySigma(polySigma_); + alg_->setFlags(flags_); - alg_(input0, input1, dst1, dst2); + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); + + dst1 = flows[0]; + dst2 = flows[1]; } void Farneback_CUDA::collectGarbage() { - alg_.releaseMemory(); + alg_ = cuda::FarnebackOpticalFlow::create(); GpuOpticalFlow::collectGarbage(); } } @@ -719,7 +745,7 @@ namespace int iterations_; bool useInitialFlow_; - OpticalFlowDual_TVL1_CUDA alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(DualTVL1_CUDA, "DenseOpticalFlowExt.DualTVL1_CUDA", @@ -734,33 +760,42 @@ namespace DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1) { - tau_ = alg_.tau; - lambda_ = alg_.lambda; - theta_ = alg_.theta; - nscales_ = alg_.nscales; - warps_ = alg_.warps; - epsilon_ = alg_.epsilon; - iterations_ = alg_.iterations; - useInitialFlow_ = alg_.useInitialFlow; + alg_ = cuda::OpticalFlowDual_TVL1::create(); + + tau_ = alg_->getTau(); + lambda_ = alg_->getLambda(); + theta_ = alg_->getTheta(); + nscales_ = alg_->getNumScales(); + warps_ = alg_->getNumWarps(); + epsilon_ = alg_->getEpsilon(); + iterations_ = alg_->getNumIterations(); + useInitialFlow_ = alg_->getUseInitialFlow(); } void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.tau = tau_; - alg_.lambda = lambda_; - alg_.theta = theta_; - alg_.nscales = nscales_; - alg_.warps = warps_; - alg_.epsilon = epsilon_; - alg_.iterations = iterations_; - alg_.useInitialFlow = useInitialFlow_; + alg_->setTau(tau_); + alg_->setLambda(lambda_); + alg_->setTheta(theta_); + alg_->setNumScales(nscales_); + alg_->setNumWarps(warps_); + alg_->setEpsilon(epsilon_); + alg_->setNumIterations(iterations_); + alg_->setUseInitialFlow(useInitialFlow_); - alg_(input0, input1, dst1, dst2); + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); + + dst1 = flows[0]; + dst2 = flows[1]; } void DualTVL1_CUDA::collectGarbage() { - 
alg_.collectGarbage(); + alg_ = cuda::OpticalFlowDual_TVL1::create(); GpuOpticalFlow::collectGarbage(); } } From 62f8240b004fe2d1386be8003082b42673fa7c55 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:36:51 +0300 Subject: [PATCH 72/98] fix videostab module compilation --- .../opencv2/videostab/optical_flow.hpp | 4 +-- modules/videostab/src/optical_flow.cpp | 36 +++++++++++++------ 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/modules/videostab/include/opencv2/videostab/optical_flow.hpp b/modules/videostab/include/opencv2/videostab/optical_flow.hpp index a34a82e3f..41d195354 100644 --- a/modules/videostab/include/opencv2/videostab/optical_flow.hpp +++ b/modules/videostab/include/opencv2/videostab/optical_flow.hpp @@ -121,7 +121,7 @@ public: cuda::GpuMat &status); private: - cuda::PyrLKOpticalFlow optFlowEstimator_; + Ptr optFlowEstimator_; cuda::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_; }; @@ -136,7 +136,7 @@ public: OutputArray errors); private: - cuda::PyrLKOpticalFlow optFlowEstimator_; + Ptr optFlowEstimator_; cuda::GpuMat frame0_, frame1_, flowX_, flowY_, errors_; }; diff --git a/modules/videostab/src/optical_flow.cpp b/modules/videostab/src/optical_flow.cpp index d8a059c1f..32c8133a7 100644 --- a/modules/videostab/src/optical_flow.cpp +++ b/modules/videostab/src/optical_flow.cpp @@ -45,6 +45,10 @@ #include "opencv2/videostab/optical_flow.hpp" #include "opencv2/videostab/ring_buffer.hpp" +#ifdef HAVE_OPENCV_CUDAARITHM + #include "opencv2/cudaarithm.hpp" +#endif + namespace cv { namespace videostab @@ -63,6 +67,7 @@ void SparsePyrLkOptFlowEstimator::run( SparsePyrLkOptFlowEstimatorGpu::SparsePyrLkOptFlowEstimatorGpu() { CV_Assert(cuda::getCudaEnabledDeviceCount() > 0); + optFlowEstimator_ = cuda::SparsePyrLKOpticalFlow::create(); } @@ -91,9 +96,9 @@ void SparsePyrLkOptFlowEstimatorGpu::run( const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1, cuda::GpuMat &status, cuda::GpuMat &errors) { - optFlowEstimator_.winSize = winSize_; - optFlowEstimator_.maxLevel = maxLevel_; - optFlowEstimator_.sparse(frame0, frame1, points0, points1, status, &errors); + optFlowEstimator_->setWinSize(winSize_); + optFlowEstimator_->setMaxLevel(maxLevel_); + optFlowEstimator_->calc(frame0, frame1, points0, points1, status, errors); } @@ -101,15 +106,16 @@ void SparsePyrLkOptFlowEstimatorGpu::run( const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1, cuda::GpuMat &status) { - optFlowEstimator_.winSize = winSize_; - optFlowEstimator_.maxLevel = maxLevel_; - optFlowEstimator_.sparse(frame0, frame1, points0, points1, status); + optFlowEstimator_->setWinSize(winSize_); + optFlowEstimator_->setMaxLevel(maxLevel_); + optFlowEstimator_->calc(frame0, frame1, points0, points1, status); } DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu() { CV_Assert(cuda::getCudaEnabledDeviceCount() > 0); + optFlowEstimator_ = cuda::DensePyrLKOpticalFlow::create(); } @@ -120,16 +126,24 @@ void DensePyrLkOptFlowEstimatorGpu::run( frame0_.upload(frame0.getMat()); frame1_.upload(frame1.getMat()); - optFlowEstimator_.winSize = winSize_; - optFlowEstimator_.maxLevel = maxLevel_; + optFlowEstimator_->setWinSize(winSize_); + optFlowEstimator_->setMaxLevel(maxLevel_); if (errors.needed()) { - optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_); - errors_.download(errors.getMatRef()); + CV_Error(Error::StsNotImplemented, 
"DensePyrLkOptFlowEstimatorGpu doesn't support errors calculation"); } else - optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_); + { + cuda::GpuMat flow; + optFlowEstimator_->calc(frame0_, frame1_, flow); + + cuda::GpuMat flows[2]; + cuda::split(flow, flows); + + flowX_ = flows[0]; + flowY_ = flows[1]; + } flowX_.download(flowX.getMatRef()); flowY_.download(flowY.getMatRef()); From b3a743f09a94bddcde0344ae37f1754c887a1685 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:37:09 +0300 Subject: [PATCH 73/98] fix gpu samples compilation --- samples/gpu/farneback_optical_flow.cpp | 19 +++++--- samples/gpu/optical_flow.cpp | 61 +++++++++----------------- samples/gpu/pyrlk_optical_flow.cpp | 24 ++-------- 3 files changed, 35 insertions(+), 69 deletions(-) diff --git a/samples/gpu/farneback_optical_flow.cpp b/samples/gpu/farneback_optical_flow.cpp index b8ed55ea6..798b108a6 100644 --- a/samples/gpu/farneback_optical_flow.cpp +++ b/samples/gpu/farneback_optical_flow.cpp @@ -7,6 +7,7 @@ #include "opencv2/highgui.hpp" #include "opencv2/video.hpp" #include "opencv2/cudaoptflow.hpp" +#include "opencv2/cudaarithm.hpp" using namespace std; using namespace cv; @@ -70,8 +71,8 @@ int main(int argc, char **argv) if (frameL.empty() || frameR.empty()) return -1; GpuMat d_frameL(frameL), d_frameR(frameR); - GpuMat d_flowx, d_flowy; - FarnebackOpticalFlow d_calc; + GpuMat d_flow; + Ptr d_calc = cuda::FarnebackOpticalFlow::create(); Mat flowxy, flowx, flowy, image; bool running = true, gpuMode = true; @@ -86,17 +87,21 @@ int main(int argc, char **argv) if (gpuMode) { tc0 = getTickCount(); - d_calc(d_frameL, d_frameR, d_flowx, d_flowy); + d_calc->calc(d_frameL, d_frameR, d_flow); tc1 = getTickCount(); - d_flowx.download(flowx); - d_flowy.download(flowy); + + GpuMat planes[2]; + cuda::split(d_flow, planes); + + planes[0].download(flowx); + planes[1].download(flowy); } else { tc0 = getTickCount(); calcOpticalFlowFarneback( - frameL, frameR, flowxy, d_calc.pyrScale, d_calc.numLevels, d_calc.winSize, - d_calc.numIters, d_calc.polyN, d_calc.polySigma, d_calc.flags); + frameL, frameR, flowxy, d_calc->getPyrScale(), d_calc->getNumLevels(), d_calc->getWinSize(), + d_calc->getNumIters(), d_calc->getPolyN(), d_calc->getPolySigma(), d_calc->getFlags()); tc1 = getTickCount(); Mat planes[] = {flowx, flowy}; diff --git a/samples/gpu/optical_flow.cpp b/samples/gpu/optical_flow.cpp index 7d625de85..b1b3c8de1 100644 --- a/samples/gpu/optical_flow.cpp +++ b/samples/gpu/optical_flow.cpp @@ -5,6 +5,7 @@ #include #include "opencv2/highgui.hpp" #include "opencv2/cudaoptflow.hpp" +#include "opencv2/cudaarithm.hpp" using namespace std; using namespace cv; @@ -122,10 +123,13 @@ static void drawOpticalFlow(const Mat_& flowx, const Mat_& flowy, } } -static void showFlow(const char* name, const GpuMat& d_flowx, const GpuMat& d_flowy) +static void showFlow(const char* name, const GpuMat& d_flow) { - Mat flowx(d_flowx); - Mat flowy(d_flowy); + GpuMat planes[2]; + cuda::split(d_flow, planes); + + Mat flowx(planes[0]); + Mat flowy(planes[1]); Mat out; drawOpticalFlow(flowx, flowy, out, 10); @@ -171,14 +175,12 @@ int main(int argc, const char* argv[]) GpuMat d_frame0(frame0); GpuMat d_frame1(frame1); - GpuMat d_flowx(frame0.size(), CV_32FC1); - GpuMat d_flowy(frame0.size(), CV_32FC1); + GpuMat d_flow(frame0.size(), CV_32FC2); - BroxOpticalFlow brox(0.197f, 50.0f, 0.8f, 10, 77, 10); - PyrLKOpticalFlow lk; lk.winSize = Size(7, 7); - FarnebackOpticalFlow farn; - OpticalFlowDual_TVL1_CUDA tvl1; - FastOpticalFlowBM 
fastBM; + Ptr brox = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10); + Ptr lk = cuda::DensePyrLKOpticalFlow::create(Size(7, 7)); + Ptr farn = cuda::FarnebackOpticalFlow::create(); + Ptr tvl1 = cuda::OpticalFlowDual_TVL1::create(); { GpuMat d_frame0f; @@ -189,68 +191,45 @@ int main(int argc, const char* argv[]) const int64 start = getTickCount(); - brox(d_frame0f, d_frame1f, d_flowx, d_flowy); + brox->calc(d_frame0f, d_frame1f, d_flow); const double timeSec = (getTickCount() - start) / getTickFrequency(); cout << "Brox : " << timeSec << " sec" << endl; - showFlow("Brox", d_flowx, d_flowy); + showFlow("Brox", d_flow); } { const int64 start = getTickCount(); - lk.dense(d_frame0, d_frame1, d_flowx, d_flowy); + lk->calc(d_frame0, d_frame1, d_flow); const double timeSec = (getTickCount() - start) / getTickFrequency(); cout << "LK : " << timeSec << " sec" << endl; - showFlow("LK", d_flowx, d_flowy); + showFlow("LK", d_flow); } { const int64 start = getTickCount(); - farn(d_frame0, d_frame1, d_flowx, d_flowy); + farn->calc(d_frame0, d_frame1, d_flow); const double timeSec = (getTickCount() - start) / getTickFrequency(); cout << "Farn : " << timeSec << " sec" << endl; - showFlow("Farn", d_flowx, d_flowy); + showFlow("Farn", d_flow); } { const int64 start = getTickCount(); - tvl1(d_frame0, d_frame1, d_flowx, d_flowy); + tvl1->calc(d_frame0, d_frame1, d_flow); const double timeSec = (getTickCount() - start) / getTickFrequency(); cout << "TVL1 : " << timeSec << " sec" << endl; - showFlow("TVL1", d_flowx, d_flowy); - } - - { - const int64 start = getTickCount(); - - GpuMat buf; - calcOpticalFlowBM(d_frame0, d_frame1, Size(7, 7), Size(1, 1), Size(21, 21), false, d_flowx, d_flowy, buf); - - const double timeSec = (getTickCount() - start) / getTickFrequency(); - cout << "BM : " << timeSec << " sec" << endl; - - showFlow("BM", d_flowx, d_flowy); - } - - { - const int64 start = getTickCount(); - - fastBM(d_frame0, d_frame1, d_flowx, d_flowy); - - const double timeSec = (getTickCount() - start) / getTickFrequency(); - cout << "Fast BM : " << timeSec << " sec" << endl; - - showFlow("Fast BM", d_flowx, d_flowy); + showFlow("TVL1", d_flow); } imshow("Frame 0", frame0); diff --git a/samples/gpu/pyrlk_optical_flow.cpp b/samples/gpu/pyrlk_optical_flow.cpp index febc28f28..9074c47b6 100644 --- a/samples/gpu/pyrlk_optical_flow.cpp +++ b/samples/gpu/pyrlk_optical_flow.cpp @@ -186,12 +186,8 @@ int main(int argc, const char* argv[]) // Sparse - PyrLKOpticalFlow d_pyrLK; - - d_pyrLK.winSize.width = winSize; - d_pyrLK.winSize.height = winSize; - d_pyrLK.maxLevel = maxLevel; - d_pyrLK.iters = iters; + Ptr d_pyrLK = cuda::SparsePyrLKOpticalFlow::create( + Size(winSize, winSize), maxLevel, iters); GpuMat d_frame0(frame0); GpuMat d_frame1(frame1); @@ -199,7 +195,7 @@ int main(int argc, const char* argv[]) GpuMat d_nextPts; GpuMat d_status; - d_pyrLK.sparse(useGray ? d_frame0Gray : d_frame0, useGray ? d_frame1Gray : d_frame1, d_prevPts, d_nextPts, d_status); + d_pyrLK->calc(useGray ? d_frame0Gray : d_frame0, useGray ? 
d_frame1Gray : d_frame1, d_prevPts, d_nextPts, d_status); // Draw arrows @@ -216,20 +212,6 @@ int main(int argc, const char* argv[]) imshow("PyrLK [Sparse]", frame0); - // Dense - - GpuMat d_u; - GpuMat d_v; - - d_pyrLK.dense(d_frame0Gray, d_frame1Gray, d_u, d_v); - - // Draw flow field - - Mat flowField; - getFlowField(Mat(d_u), Mat(d_v), flowField); - - imshow("PyrLK [Dense] Flow Field", flowField); - waitKey(); return 0; From 63ff39f9f3e1e486002266b75410ad988be9e9ec Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 31 Dec 2014 15:37:25 +0300 Subject: [PATCH 74/98] remove obsolete gpu optical flow samples --- samples/gpu/brox_optical_flow.cpp | 270 ------------------------------ samples/gpu/performance/tests.cpp | 81 --------- 2 files changed, 351 deletions(-) delete mode 100644 samples/gpu/brox_optical_flow.cpp diff --git a/samples/gpu/brox_optical_flow.cpp b/samples/gpu/brox_optical_flow.cpp deleted file mode 100644 index 638aade45..000000000 --- a/samples/gpu/brox_optical_flow.cpp +++ /dev/null @@ -1,270 +0,0 @@ -#include -#include -#include -#include - -#include "opencv2/core.hpp" -#include "opencv2/core/utility.hpp" -#include "opencv2/highgui.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/cudaoptflow.hpp" -#include "opencv2/cudaarithm.hpp" - -using namespace std; -using namespace cv; -using namespace cv::cuda; - -void getFlowField(const Mat& u, const Mat& v, Mat& flowField); - -int main(int argc, const char* argv[]) -{ - try - { - const char* keys = - "{ h help | | print help message }" - "{ l left | | specify left image }" - "{ r right | | specify right image }" - "{ s scale | 0.8 | set pyramid scale factor }" - "{ a alpha | 0.197 | set alpha }" - "{ g gamma | 50.0 | set gamma }" - "{ i inner | 10 | set number of inner iterations }" - "{ o outer | 77 | set number of outer iterations }" - "{ si solver | 10 | set number of basic solver iterations }" - "{ t time_step | 0.1 | set frame interpolation time step }"; - - CommandLineParser cmd(argc, argv, keys); - - if (cmd.has("help") || !cmd.check()) - { - cmd.printMessage(); - cmd.printErrors(); - return 0; - } - - string frame0Name = cmd.get("left"); - string frame1Name = cmd.get("right"); - float scale = cmd.get("scale"); - float alpha = cmd.get("alpha"); - float gamma = cmd.get("gamma"); - int inner_iterations = cmd.get("inner"); - int outer_iterations = cmd.get("outer"); - int solver_iterations = cmd.get("solver"); - float timeStep = cmd.get("time_step"); - - if (frame0Name.empty() || frame1Name.empty()) - { - cerr << "Missing input file names" << endl; - return -1; - } - - Mat frame0Color = imread(frame0Name); - Mat frame1Color = imread(frame1Name); - - if (frame0Color.empty() || frame1Color.empty()) - { - cout << "Can't load input images" << endl; - return -1; - } - - cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice()); - - cout << "OpenCV / NVIDIA Computer Vision" << endl; - cout << "Optical Flow Demo: Frame Interpolation" << endl; - cout << "=========================================" << endl; - - namedWindow("Forward flow"); - namedWindow("Backward flow"); - - namedWindow("Interpolated frame"); - - cout << "Press:" << endl; - cout << "\tESC to quit" << endl; - cout << "\t'a' to move to the previous frame" << endl; - cout << "\t's' to move to the next frame\n" << endl; - - frame0Color.convertTo(frame0Color, CV_32F, 1.0 / 255.0); - frame1Color.convertTo(frame1Color, CV_32F, 1.0 / 255.0); - - Mat frame0Gray, frame1Gray; - - cv::cvtColor(frame0Color, frame0Gray, COLOR_BGR2GRAY); - cv::cvtColor(frame1Color, 
frame1Gray, COLOR_BGR2GRAY); - - GpuMat d_frame0(frame0Gray); - GpuMat d_frame1(frame1Gray); - - cout << "Estimating optical flow" << endl; - - BroxOpticalFlow d_flow(alpha, gamma, scale, inner_iterations, outer_iterations, solver_iterations); - - cout << "\tForward..." << endl; - - GpuMat d_fu, d_fv; - - d_flow(d_frame0, d_frame1, d_fu, d_fv); - - Mat flowFieldForward; - getFlowField(Mat(d_fu), Mat(d_fv), flowFieldForward); - - cout << "\tBackward..." << endl; - - GpuMat d_bu, d_bv; - - d_flow(d_frame1, d_frame0, d_bu, d_bv); - - Mat flowFieldBackward; - getFlowField(Mat(d_bu), Mat(d_bv), flowFieldBackward); - - cout << "Interpolating..." << endl; - - // first frame color components - GpuMat d_b, d_g, d_r; - - // second frame color components - GpuMat d_bt, d_gt, d_rt; - - // prepare color components on host and copy them to device memory - Mat channels[3]; - cv::split(frame0Color, channels); - - d_b.upload(channels[0]); - d_g.upload(channels[1]); - d_r.upload(channels[2]); - - cv::split(frame1Color, channels); - - d_bt.upload(channels[0]); - d_gt.upload(channels[1]); - d_rt.upload(channels[2]); - - // temporary buffer - GpuMat d_buf; - - // intermediate frame color components (GPU memory) - GpuMat d_rNew, d_gNew, d_bNew; - - GpuMat d_newFrame; - - vector frames; - frames.reserve(static_cast(1.0f / timeStep) + 2); - - frames.push_back(frame0Color); - - // compute interpolated frames - for (float timePos = timeStep; timePos < 1.0f; timePos += timeStep) - { - // interpolate blue channel - interpolateFrames(d_b, d_bt, d_fu, d_fv, d_bu, d_bv, timePos, d_bNew, d_buf); - - // interpolate green channel - interpolateFrames(d_g, d_gt, d_fu, d_fv, d_bu, d_bv, timePos, d_gNew, d_buf); - - // interpolate red channel - interpolateFrames(d_r, d_rt, d_fu, d_fv, d_bu, d_bv, timePos, d_rNew, d_buf); - - GpuMat channels3[] = {d_bNew, d_gNew, d_rNew}; - cuda::merge(channels3, 3, d_newFrame); - - frames.push_back(Mat(d_newFrame)); - - cout << setprecision(4) << timePos * 100.0f << "%\r"; - } - - frames.push_back(frame1Color); - - cout << setw(5) << "100%" << endl; - - cout << "Done" << endl; - - imshow("Forward flow", flowFieldForward); - imshow("Backward flow", flowFieldBackward); - - int currentFrame = 0; - - imshow("Interpolated frame", frames[currentFrame]); - - for(;;) - { - int key = toupper(waitKey(10) & 0xff); - - switch (key) - { - case 27: - return 0; - - case 'A': - if (currentFrame > 0) - --currentFrame; - - imshow("Interpolated frame", frames[currentFrame]); - break; - - case 'S': - if (currentFrame < static_cast(frames.size()) - 1) - ++currentFrame; - - imshow("Interpolated frame", frames[currentFrame]); - break; - } - } - } - catch (const exception& ex) - { - cerr << ex.what() << endl; - return -1; - } - catch (...) - { - cerr << "Unknow error" << endl; - return -1; - } -} - -template inline T clamp (T x, T a, T b) -{ - return ((x) > (a) ? ((x) < (b) ? 
(x) : (b)) : (a)); -} - -template inline T mapValue(T x, T a, T b, T c, T d) -{ - x = clamp(x, a, b); - return c + (d - c) * (x - a) / (b - a); -} - -void getFlowField(const Mat& u, const Mat& v, Mat& flowField) -{ - float maxDisplacement = 1.0f; - - for (int i = 0; i < u.rows; ++i) - { - const float* ptr_u = u.ptr(i); - const float* ptr_v = v.ptr(i); - - for (int j = 0; j < u.cols; ++j) - { - float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j])); - - if (d > maxDisplacement) - maxDisplacement = d; - } - } - - flowField.create(u.size(), CV_8UC4); - - for (int i = 0; i < flowField.rows; ++i) - { - const float* ptr_u = u.ptr(i); - const float* ptr_v = v.ptr(i); - - - Vec4b* row = flowField.ptr(i); - - for (int j = 0; j < flowField.cols; ++j) - { - row[j][0] = 0; - row[j][1] = static_cast (mapValue (-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f)); - row[j][2] = static_cast (mapValue ( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f)); - row[j][3] = 255; - } - } -} diff --git a/samples/gpu/performance/tests.cpp b/samples/gpu/performance/tests.cpp index 14910f9a3..9e8bfd28c 100644 --- a/samples/gpu/performance/tests.cpp +++ b/samples/gpu/performance/tests.cpp @@ -1187,87 +1187,6 @@ TEST(GoodFeaturesToTrack) CUDA_OFF; } -TEST(PyrLKOpticalFlow) -{ - Mat frame0 = imread(abspath("../data/rubberwhale1.png")); - if (frame0.empty()) throw runtime_error("can't open ../data/rubberwhale1.png"); - - Mat frame1 = imread(abspath("../data/rubberwhale2.png")); - if (frame1.empty()) throw runtime_error("can't open ../data/rubberwhale2.png"); - - Mat gray_frame; - cvtColor(frame0, gray_frame, COLOR_BGR2GRAY); - - for (int points = 1000; points <= 8000; points *= 2) - { - SUBTEST << points; - - vector pts; - goodFeaturesToTrack(gray_frame, pts, points, 0.01, 0.0); - - vector nextPts; - vector status; - - vector err; - - calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err); - - CPU_ON; - calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err); - CPU_OFF; - - cuda::PyrLKOpticalFlow d_pyrLK; - - cuda::GpuMat d_frame0(frame0); - cuda::GpuMat d_frame1(frame1); - - cuda::GpuMat d_pts; - Mat pts_mat(1, (int)pts.size(), CV_32FC2, (void*)&pts[0]); - d_pts.upload(pts_mat); - - cuda::GpuMat d_nextPts; - cuda::GpuMat d_status; - cuda::GpuMat d_err; - - d_pyrLK.sparse(d_frame0, d_frame1, d_pts, d_nextPts, d_status, &d_err); - - CUDA_ON; - d_pyrLK.sparse(d_frame0, d_frame1, d_pts, d_nextPts, d_status, &d_err); - CUDA_OFF; - } -} - - -TEST(FarnebackOpticalFlow) -{ - const string datasets[] = {"../data/rubberwhale", "../data/basketball"}; - for (size_t i = 0; i < sizeof(datasets)/sizeof(*datasets); ++i) { - for (int fastPyramids = 0; fastPyramids < 2; ++fastPyramids) { - for (int useGaussianBlur = 0; useGaussianBlur < 2; ++useGaussianBlur) { - - SUBTEST << "dataset=" << datasets[i] << ", fastPyramids=" << fastPyramids << ", useGaussianBlur=" << useGaussianBlur; - Mat frame0 = imread(abspath(datasets[i] + "1.png"), IMREAD_GRAYSCALE); - Mat frame1 = imread(abspath(datasets[i] + "2.png"), IMREAD_GRAYSCALE); - if (frame0.empty()) throw runtime_error("can't open " + datasets[i] + "1.png"); - if (frame1.empty()) throw runtime_error("can't open " + datasets[i] + "2.png"); - - cuda::FarnebackOpticalFlow calc; - calc.fastPyramids = fastPyramids != 0; - calc.flags |= useGaussianBlur ? 
OPTFLOW_FARNEBACK_GAUSSIAN : 0; - - cuda::GpuMat d_frame0(frame0), d_frame1(frame1), d_flowx, d_flowy; - CUDA_ON; - calc(d_frame0, d_frame1, d_flowx, d_flowy); - CUDA_OFF; - - Mat flow; - CPU_ON; - calcOpticalFlowFarneback(frame0, frame1, flow, calc.pyrScale, calc.numLevels, calc.winSize, calc.numIters, calc.polyN, calc.polySigma, calc.flags); - CPU_OFF; - - }}} -} - #ifdef HAVE_OPENCV_BGSEGM TEST(MOG) From 710617034ba4840d3d007e3444a03d84b6f5e423 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Mon, 12 Jan 2015 11:26:07 +0300 Subject: [PATCH 75/98] remove unused function from pyrlk_optical_flow sample --- samples/gpu/pyrlk_optical_flow.cpp | 38 ------------------------------ 1 file changed, 38 deletions(-) diff --git a/samples/gpu/pyrlk_optical_flow.cpp b/samples/gpu/pyrlk_optical_flow.cpp index 9074c47b6..f13487b62 100644 --- a/samples/gpu/pyrlk_optical_flow.cpp +++ b/samples/gpu/pyrlk_optical_flow.cpp @@ -77,44 +77,6 @@ template inline T mapValue(T x, T a, T b, T c, T d) return c + (d - c) * (x - a) / (b - a); } -static void getFlowField(const Mat& u, const Mat& v, Mat& flowField) -{ - float maxDisplacement = 1.0f; - - for (int i = 0; i < u.rows; ++i) - { - const float* ptr_u = u.ptr(i); - const float* ptr_v = v.ptr(i); - - for (int j = 0; j < u.cols; ++j) - { - float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j])); - - if (d > maxDisplacement) - maxDisplacement = d; - } - } - - flowField.create(u.size(), CV_8UC4); - - for (int i = 0; i < flowField.rows; ++i) - { - const float* ptr_u = u.ptr(i); - const float* ptr_v = v.ptr(i); - - - Vec4b* row = flowField.ptr(i); - - for (int j = 0; j < flowField.cols; ++j) - { - row[j][0] = 0; - row[j][1] = static_cast (mapValue (-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f)); - row[j][2] = static_cast (mapValue ( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f)); - row[j][3] = 255; - } - } -} - int main(int argc, const char* argv[]) { const char* keys = From df697f65209713976715944b735c67a6a1012c2e Mon Sep 17 00:00:00 2001 From: Yan Wang Date: Thu, 22 Jan 2015 16:36:16 +0800 Subject: [PATCH 76/98] Optimize runLBPClassifierStumpSimple by built-in mad24. 
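The change below relies on OpenCL's mad24(a, b, c) built-in, which computes a * b + c using a fast 24-bit integer multiply; that is safe here because tile indices and local work-group sizes comfortably fit in 24 bits. The following is only an illustrative C++ reference model of that arithmetic identity, with made-up values standing in for tileIdx / ntiles.x, local_size_y, ly and ystep; it is not the kernel code itself.

#include <cassert>
#include <cstdint>

// Reference model of OpenCL's mad24: valid when a and b fit in 24 bits.
static inline std::int32_t mad24_ref(std::int32_t a, std::int32_t b, std::int32_t c)
{
    return a * b + c; // the device can lower this to a single 24-bit mad instruction
}

int main()
{
    // Made-up values standing in for tileIdx / ntiles.x, local_size_y, ly and ystep.
    int tileRow = 7, localSizeY = 8, ly = 3, ystep = 2;

    int iyOld = ((tileRow) * localSizeY + ly) * ystep;      // expression before the patch
    int iyNew = mad24_ref(tileRow, localSizeY, ly) * ystep; // expression after the patch

    assert(iyOld == iyNew); // identical result, cheaper multiply on the GPU
    return iyOld == iyNew ? 0 : 1;
}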
Signed-off-by: Yan Wang --- modules/objdetect/src/opencl/cascadedetect.cl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/objdetect/src/opencl/cascadedetect.cl b/modules/objdetect/src/opencl/cascadedetect.cl index ce676426f..465fa0c65 100644 --- a/modules/objdetect/src/opencl/cascadedetect.cl +++ b/modules/objdetect/src/opencl/cascadedetect.cl @@ -397,8 +397,8 @@ __kernel void runLBPClassifierStumpSimple( for( tileIdx = groupIdx; tileIdx < totalTiles; tileIdx += ngroups ) { - int iy = ((tileIdx / ntiles.x)*local_size_y + ly)*ystep; - int ix = ((tileIdx % ntiles.x)*local_size_x + lx)*ystep; + int iy = mad24((tileIdx / ntiles.x), local_size_y, ly) * ystep; + int ix = mad24((tileIdx % ntiles.x), local_size_x, lx) * ystep; if( ix < worksize.x && iy < worksize.y ) { From b416e8715d923e6a792afd7c0ebe683f931079b2 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 15 Jan 2015 11:18:46 +0300 Subject: [PATCH 77/98] move labeling routines from cuda to cudalegacy --- modules/cuda/include/opencv2/cuda.hpp | 23 ------------------ .../cudalegacy/include/opencv2/cudalegacy.hpp | 24 +++++++++++++++++++ .../perf/perf_labeling.cpp | 0 .../src/cuda/ccomponetns.cu | 0 .../{cuda => cudalegacy}/src/graphcuts.cpp | 0 .../test/test_labeling.cpp | 0 modules/stitching/CMakeLists.txt | 2 +- .../opencv2/stitching/detail/seam_finders.hpp | 2 +- modules/stitching/src/precomp.hpp | 4 ++-- modules/stitching/src/seam_finders.cpp | 2 +- 10 files changed, 29 insertions(+), 28 deletions(-) rename modules/{cuda => cudalegacy}/perf/perf_labeling.cpp (100%) rename modules/{cuda => cudalegacy}/src/cuda/ccomponetns.cu (100%) rename modules/{cuda => cudalegacy}/src/graphcuts.cpp (100%) rename modules/{cuda => cudalegacy}/test/test_labeling.cpp (100%) diff --git a/modules/cuda/include/opencv2/cuda.hpp b/modules/cuda/include/opencv2/cuda.hpp index c6004296b..b8f2ed715 100644 --- a/modules/cuda/include/opencv2/cuda.hpp +++ b/modules/cuda/include/opencv2/cuda.hpp @@ -58,29 +58,6 @@ namespace cv { namespace cuda { -//////////////////////////// Labeling //////////////////////////// - -//! @addtogroup cuda -//! @{ - -//!performs labeling via graph cuts of a 2D regular 4-connected graph. -CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, - GpuMat& buf, Stream& stream = Stream::Null()); - -//!performs labeling via graph cuts of a 2D regular 8-connected graph. -CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight, - GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, - GpuMat& labels, - GpuMat& buf, Stream& stream = Stream::Null()); - -//! compute mask for Generalized Flood fill componetns labeling. -CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null()); - -//! performs connected componnents labeling. -CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null()); - -//! @} - //////////////////////////// Calib3d //////////////////////////// //! @addtogroup cuda_calib3d diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index f0107499d..18f55eba7 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -63,6 +63,10 @@ namespace cv { namespace cuda { //! 
@addtogroup cudalegacy //! @{ +// +// ImagePyramid +// + class CV_EXPORTS ImagePyramid : public Algorithm { public: @@ -227,6 +231,26 @@ CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); +// +// Labeling +// + +//!performs labeling via graph cuts of a 2D regular 4-connected graph. +CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, + GpuMat& buf, Stream& stream = Stream::Null()); + +//!performs labeling via graph cuts of a 2D regular 8-connected graph. +CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight, + GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, + GpuMat& labels, + GpuMat& buf, Stream& stream = Stream::Null()); + +//! compute mask for Generalized Flood fill componetns labeling. +CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null()); + +//! performs connected componnents labeling. +CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null()); + //! @} }} diff --git a/modules/cuda/perf/perf_labeling.cpp b/modules/cudalegacy/perf/perf_labeling.cpp similarity index 100% rename from modules/cuda/perf/perf_labeling.cpp rename to modules/cudalegacy/perf/perf_labeling.cpp diff --git a/modules/cuda/src/cuda/ccomponetns.cu b/modules/cudalegacy/src/cuda/ccomponetns.cu similarity index 100% rename from modules/cuda/src/cuda/ccomponetns.cu rename to modules/cudalegacy/src/cuda/ccomponetns.cu diff --git a/modules/cuda/src/graphcuts.cpp b/modules/cudalegacy/src/graphcuts.cpp similarity index 100% rename from modules/cuda/src/graphcuts.cpp rename to modules/cudalegacy/src/graphcuts.cpp diff --git a/modules/cuda/test/test_labeling.cpp b/modules/cudalegacy/test/test_labeling.cpp similarity index 100% rename from modules/cuda/test/test_labeling.cpp rename to modules/cudalegacy/test/test_labeling.cpp diff --git a/modules/stitching/CMakeLists.txt b/modules/stitching/CMakeLists.txt index 36d4452c7..827675405 100644 --- a/modules/stitching/CMakeLists.txt +++ b/modules/stitching/CMakeLists.txt @@ -5,4 +5,4 @@ if(HAVE_CUDA) endif() ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect - OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_xfeatures2d) + OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_cudalegacy opencv_xfeatures2d) diff --git a/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp b/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp index e4f7816bb..37029215e 100644 --- a/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp @@ -249,7 +249,7 @@ private: }; -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDALEGACY class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder { public: diff --git a/modules/stitching/src/precomp.hpp b/modules/stitching/src/precomp.hpp index c70d9f9a4..70636b684 100644 --- a/modules/stitching/src/precomp.hpp +++ b/modules/stitching/src/precomp.hpp @@ -83,8 +83,8 @@ # include "opencv2/cudafeatures2d.hpp" #endif -#ifdef HAVE_OPENCV_CUDA -# include 
"opencv2/cuda.hpp" +#ifdef HAVE_OPENCV_CUDALEGACY +# include "opencv2/cudalegacy.hpp" #endif #ifdef HAVE_OPENCV_XFEATURES2D diff --git a/modules/stitching/src/seam_finders.cpp b/modules/stitching/src/seam_finders.cpp index 4d5c8d163..8a673ede0 100644 --- a/modules/stitching/src/seam_finders.cpp +++ b/modules/stitching/src/seam_finders.cpp @@ -1321,7 +1321,7 @@ void GraphCutSeamFinder::find(const std::vector &src, const std::vector &src, const std::vector &corners, std::vector &masks) { From f1e17853861a151ca3c0ab96be1e6e21af550b70 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 15 Jan 2015 14:28:49 +0300 Subject: [PATCH 78/98] move vstab related CUDA routines to vstab module --- modules/cuda/include/opencv2/cuda.hpp | 14 --- modules/cuda/src/global_motion.cpp | 96 ------------------- modules/cuda/test/test_global_motion.cpp | 90 ----------------- modules/videostab/CMakeLists.txt | 7 +- .../src/cuda/global_motion.cu | 0 modules/videostab/src/global_motion.cpp | 33 ++++++- modules/videostab/src/wobble_suppression.cpp | 37 ++++++- 7 files changed, 67 insertions(+), 210 deletions(-) delete mode 100644 modules/cuda/src/global_motion.cpp delete mode 100644 modules/cuda/test/test_global_motion.cpp rename modules/{cuda => videostab}/src/cuda/global_motion.cu (100%) diff --git a/modules/cuda/include/opencv2/cuda.hpp b/modules/cuda/include/opencv2/cuda.hpp index b8f2ed715..1a5d17d46 100644 --- a/modules/cuda/include/opencv2/cuda.hpp +++ b/modules/cuda/include/opencv2/cuda.hpp @@ -93,20 +93,6 @@ CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& c //! @} -//////////////////////////// VStab //////////////////////////// - -//! @addtogroup cuda -//! @{ - -//! removes points (CV_32FC2, single row matrix) with zero mask value -CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask); - -CV_EXPORTS void calcWobbleSuppressionMaps( - int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, - GpuMat &mapx, GpuMat &mapy); - -//! @} - }} // namespace cv { namespace cuda { #endif /* __OPENCV_CUDA_HPP__ */ diff --git a/modules/cuda/src/global_motion.cpp b/modules/cuda/src/global_motion.cpp deleted file mode 100644 index 4f847c924..000000000 --- a/modules/cuda/src/global_motion.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -using namespace cv; -using namespace cv::cuda; - -#if !defined HAVE_CUDA || defined(CUDA_DISABLER) - -void cv::cuda::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); } -void cv::cuda::calcWobbleSuppressionMaps( - int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); } - -#else - -namespace cv { namespace cuda { namespace device { namespace globmotion { - - int compactPoints(int N, float *points0, float *points1, const uchar *mask); - - void calcWobbleSuppressionMaps( - int left, int idx, int right, int width, int height, - const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy); - -}}}} - -void cv::cuda::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask) -{ - CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1); - CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U); - CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols); - - int npoints = points0.cols; - int remaining = cv::cuda::device::globmotion::compactPoints( - npoints, (float*)points0.data, (float*)points1.data, mask.data); - - points0 = points0.colRange(0, remaining); - points1 = points1.colRange(0, remaining); -} - - -void cv::cuda::calcWobbleSuppressionMaps( - int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, - GpuMat &mapx, GpuMat &mapy) -{ - CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous()); - CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous()); - - mapx.create(size, CV_32F); - mapy.create(size, CV_32F); - - cv::cuda::device::globmotion::calcWobbleSuppressionMaps( - left, idx, right, size.width, size.height, - ml.ptr(), mr.ptr(), mapx, mapy); -} - -#endif diff --git a/modules/cuda/test/test_global_motion.cpp b/modules/cuda/test/test_global_motion.cpp deleted file mode 100644 index 633fe647c..000000000 --- a/modules/cuda/test/test_global_motion.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" - -#ifdef HAVE_CUDA - -using namespace std; -using namespace cv; - -struct CompactPoints : testing::TestWithParam -{ - virtual void SetUp() { cuda::setDevice(GetParam().deviceID()); } -}; - -CUDA_TEST_P(CompactPoints, CanCompactizeSmallInput) -{ - Mat src0(1, 3, CV_32FC2); - src0.at(0,0) = Point2f(0,0); - src0.at(0,1) = Point2f(0,1); - src0.at(0,2) = Point2f(0,2); - - Mat src1(1, 3, CV_32FC2); - src1.at(0,0) = Point2f(1,0); - src1.at(0,1) = Point2f(1,1); - src1.at(0,2) = Point2f(1,2); - - Mat mask(1, 3, CV_8U); - mask.at(0,0) = 1; - mask.at(0,1) = 0; - mask.at(0,2) = 1; - - cuda::GpuMat dsrc0(src0), dsrc1(src1), dmask(mask); - cuda::compactPoints(dsrc0, dsrc1, dmask); - - dsrc0.download(src0); - dsrc1.download(src1); - - ASSERT_EQ(2, src0.cols); - ASSERT_EQ(2, src1.cols); - - ASSERT_TRUE(src0.at(0,0) == Point2f(0,0)); - ASSERT_TRUE(src0.at(0,1) == Point2f(0,2)); - - ASSERT_TRUE(src1.at(0,0) == Point2f(1,0)); - ASSERT_TRUE(src1.at(0,1) == Point2f(1,2)); -} - -INSTANTIATE_TEST_CASE_P(CUDA_GlobalMotion, CompactPoints, ALL_DEVICES); - -#endif // HAVE_CUDA diff --git a/modules/videostab/CMakeLists.txt b/modules/videostab/CMakeLists.txt index e252bdbf5..f57a5d215 100644 --- a/modules/videostab/CMakeLists.txt +++ b/modules/videostab/CMakeLists.txt @@ -1,3 +1,8 @@ set(the_description "Video stabilization") + +if(HAVE_CUDA) + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter) +endif() + ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video opencv_photo opencv_calib3d - OPTIONAL opencv_cuda opencv_cudawarping opencv_cudaoptflow opencv_videoio) + OPTIONAL opencv_cudawarping opencv_cudaoptflow opencv_videoio) diff --git a/modules/cuda/src/cuda/global_motion.cu b/modules/videostab/src/cuda/global_motion.cu similarity index 100% rename from modules/cuda/src/cuda/global_motion.cu rename to 
modules/videostab/src/cuda/global_motion.cu diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp index 4875bef23..27e2bc3f4 100644 --- a/modules/videostab/src/global_motion.cpp +++ b/modules/videostab/src/global_motion.cpp @@ -47,8 +47,34 @@ #include "opencv2/opencv_modules.hpp" #include "clp.hpp" -#ifdef HAVE_OPENCV_CUDA -# include "opencv2/cuda.hpp" +#if !defined HAVE_CUDA || defined(CUDA_DISABLER) + +namespace cv { namespace cuda { + static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); } +}} + +#else + +namespace cv { namespace cuda { namespace device { namespace globmotion { + int compactPoints(int N, float *points0, float *points1, const uchar *mask); +}}}} + +namespace cv { namespace cuda { + static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask) + { + CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1); + CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U); + CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols); + + int npoints = points0.cols; + int remaining = cv::cuda::device::globmotion::compactPoints( + npoints, (float*)points0.data, (float*)points1.data, mask.data); + + points0 = points0.colRange(0, remaining); + points1 = points1.colRange(0, remaining); + } +}} + #endif namespace cv @@ -736,8 +762,7 @@ Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok); } - -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr estimator) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator) diff --git a/modules/videostab/src/wobble_suppression.cpp b/modules/videostab/src/wobble_suppression.cpp index e2635d5e0..d05fd05cc 100644 --- a/modules/videostab/src/wobble_suppression.cpp +++ b/modules/videostab/src/wobble_suppression.cpp @@ -48,10 +48,38 @@ # include "opencv2/cudawarping.hpp" #endif -#ifdef HAVE_OPENCV_CUDA -# include "opencv2/cuda.hpp" -#endif +#if !defined HAVE_CUDA || defined(CUDA_DISABLER) +namespace cv { namespace cuda { + static void calcWobbleSuppressionMaps(int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); } +}} + +#else + +namespace cv { namespace cuda { namespace device { namespace globmotion { + void calcWobbleSuppressionMaps( + int left, int idx, int right, int width, int height, + const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy); +}}}} + +namespace cv { namespace cuda { + static void calcWobbleSuppressionMaps( + int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, + GpuMat &mapx, GpuMat &mapy) + { + CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous()); + CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous()); + + mapx.create(size, CV_32F); + mapy.create(size, CV_32F); + + cv::cuda::device::globmotion::calcWobbleSuppressionMaps( + left, idx, right, size.width, size.height, + ml.ptr(), mr.ptr(), mapx, mapy); + } +}} + +#endif namespace cv { @@ -121,8 +149,7 @@ void MoreAccurateMotionWobbleSuppressor::suppress(int idx, const Mat &frame, Mat remap(frame, result, mapx_, mapy_, INTER_LINEAR, BORDER_REPLICATE); } - -#if defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAWARPING) +#if 
defined(HAVE_OPENCV_CUDAWARPING) void MoreAccurateMotionWobbleSuppressorGpu::suppress(int idx, const cuda::GpuMat &frame, cuda::GpuMat &result) { CV_Assert(motions_ && stabilizationMotions_); From ce0e595a4f6b674d0d7898e80bba28e81f5f009b Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 15 Jan 2015 16:09:53 +0300 Subject: [PATCH 79/98] move the rest cuda functions to cuda legacy --- modules/cuda/include/opencv2/cuda.hpp | 28 ---------------- modules/cudalegacy/CMakeLists.txt | 3 +- .../cudalegacy/include/opencv2/cudalegacy.hpp | 32 +++++++++++++++++++ .../perf/perf_calib3d.cpp | 6 ++++ modules/{cuda => cudalegacy}/src/calib3d.cpp | 2 +- .../{cuda => cudalegacy}/src/cuda/calib3d.cu | 0 modules/cudalegacy/src/precomp.hpp | 4 +++ .../test/test_calib3d.cpp | 4 ++- modules/cudalegacy/test/test_precomp.hpp | 2 ++ samples/gpu/performance/tests.cpp | 2 +- 10 files changed, 51 insertions(+), 32 deletions(-) rename modules/{cuda => cudalegacy}/perf/perf_calib3d.cpp (98%) rename modules/{cuda => cudalegacy}/src/calib3d.cpp (99%) rename modules/{cuda => cudalegacy}/src/cuda/calib3d.cu (100%) rename modules/{cuda => cudalegacy}/test/test_calib3d.cpp (98%) diff --git a/modules/cuda/include/opencv2/cuda.hpp b/modules/cuda/include/opencv2/cuda.hpp index 1a5d17d46..40483f485 100644 --- a/modules/cuda/include/opencv2/cuda.hpp +++ b/modules/cuda/include/opencv2/cuda.hpp @@ -63,34 +63,6 @@ namespace cv { namespace cuda { //! @addtogroup cuda_calib3d //! @{ -CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, - GpuMat& dst, Stream& stream = Stream::Null()); - -CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, - const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, - Stream& stream = Stream::Null()); - -/** @brief Finds the object pose from 3D-2D point correspondences. - -@param object Single-row matrix of object points. -@param image Single-row matrix of image points. -@param camera_mat 3x3 matrix of intrinsic camera parameters. -@param dist_coef Distortion coefficients. See undistortPoints for details. -@param rvec Output 3D rotation vector. -@param tvec Output 3D translation vector. -@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an -initial transformation guess. It is not supported for now. -@param num_iters Maximum number of RANSAC iterations. -@param max_dist Euclidean distance threshold to detect whether point is inlier or not. -@param min_inlier_count Flag to indicate that the function must stop if greater or equal number -of inliers is achieved. It is not supported for now. -@param inliers Output vector of inlier indices. - */ -CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, - const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, - int num_iters=100, float max_dist=8.0, int min_inlier_count=100, - std::vector* inliers=NULL); - //! 
@} }} // namespace cv { namespace cuda { diff --git a/modules/cudalegacy/CMakeLists.txt b/modules/cudalegacy/CMakeLists.txt index 0ef6e359b..7fe342e11 100644 --- a/modules/cudalegacy/CMakeLists.txt +++ b/modules/cudalegacy/CMakeLists.txt @@ -6,4 +6,5 @@ set(the_description "CUDA-accelerated Computer Vision (legacy)") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4130 /wd4324 /wd4512 /wd4310 -Wundef -Wmissing-declarations -Wuninitialized -Wshadow) -ocv_define_module(cudalegacy opencv_core opencv_video OPTIONAL opencv_objdetect opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc) +ocv_define_module(cudalegacy opencv_core opencv_video + OPTIONAL opencv_objdetect opencv_imgproc opencv_calib3d opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc) diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index 18f55eba7..c27a1161f 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -251,6 +251,38 @@ CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Sc //! performs connected componnents labeling. CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null()); +// +// Calib3d +// + +CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, + GpuMat& dst, Stream& stream = Stream::Null()); + +CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, + const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, + Stream& stream = Stream::Null()); + +/** @brief Finds the object pose from 3D-2D point correspondences. + +@param object Single-row matrix of object points. +@param image Single-row matrix of image points. +@param camera_mat 3x3 matrix of intrinsic camera parameters. +@param dist_coef Distortion coefficients. See undistortPoints for details. +@param rvec Output 3D rotation vector. +@param tvec Output 3D translation vector. +@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an +initial transformation guess. It is not supported for now. +@param num_iters Maximum number of RANSAC iterations. +@param max_dist Euclidean distance threshold to detect whether point is inlier or not. +@param min_inlier_count Flag to indicate that the function must stop if greater or equal number +of inliers is achieved. It is not supported for now. +@param inliers Output vector of inlier indices. + */ +CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, + const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, + int num_iters=100, float max_dist=8.0, int min_inlier_count=100, + std::vector* inliers=NULL); + //! 
@} }} diff --git a/modules/cuda/perf/perf_calib3d.cpp b/modules/cudalegacy/perf/perf_calib3d.cpp similarity index 98% rename from modules/cuda/perf/perf_calib3d.cpp rename to modules/cudalegacy/perf/perf_calib3d.cpp index 4148b5191..0e708ce4f 100644 --- a/modules/cuda/perf/perf_calib3d.cpp +++ b/modules/cudalegacy/perf/perf_calib3d.cpp @@ -42,6 +42,10 @@ #include "perf_precomp.hpp" +#ifdef HAVE_OPENCV_CALIB3D + +#include "opencv2/calib3d.hpp" + using namespace std; using namespace testing; using namespace perf; @@ -133,3 +137,5 @@ PERF_TEST_P(Count, Calib3D_SolvePnPRansac, CPU_SANITY_CHECK(tvec, 1e-6); } } + +#endif diff --git a/modules/cuda/src/calib3d.cpp b/modules/cudalegacy/src/calib3d.cpp similarity index 99% rename from modules/cuda/src/calib3d.cpp rename to modules/cudalegacy/src/calib3d.cpp index eaadb9343..7d8c816fa 100644 --- a/modules/cuda/src/calib3d.cpp +++ b/modules/cudalegacy/src/calib3d.cpp @@ -45,7 +45,7 @@ using namespace cv; using namespace cv::cuda; -#if !defined HAVE_CUDA || defined(CUDA_DISABLER) +#if !defined HAVE_CUDA || !defined HAVE_OPENCV_CALIB3D || defined(CUDA_DISABLER) void cv::cuda::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); } diff --git a/modules/cuda/src/cuda/calib3d.cu b/modules/cudalegacy/src/cuda/calib3d.cu similarity index 100% rename from modules/cuda/src/cuda/calib3d.cu rename to modules/cudalegacy/src/cuda/calib3d.cu diff --git a/modules/cudalegacy/src/precomp.hpp b/modules/cudalegacy/src/precomp.hpp index a15e9f03c..9eda7e01d 100644 --- a/modules/cudalegacy/src/precomp.hpp +++ b/modules/cudalegacy/src/precomp.hpp @@ -56,6 +56,10 @@ # include "opencv2/objdetect.hpp" #endif +#ifdef HAVE_OPENCV_CALIB3D +# include "opencv2/calib3d.hpp" +#endif + #ifdef HAVE_OPENCV_CUDAARITHM # include "opencv2/cudaarithm.hpp" #endif diff --git a/modules/cuda/test/test_calib3d.cpp b/modules/cudalegacy/test/test_calib3d.cpp similarity index 98% rename from modules/cuda/test/test_calib3d.cpp rename to modules/cudalegacy/test/test_calib3d.cpp index 7208b10ba..7a73f817e 100644 --- a/modules/cuda/test/test_calib3d.cpp +++ b/modules/cudalegacy/test/test_calib3d.cpp @@ -42,7 +42,9 @@ #include "test_precomp.hpp" -#ifdef HAVE_CUDA +#if defined HAVE_CUDA && defined HAVE_OPENCV_CALIB3D + +#include "opencv2/calib3d.hpp" using namespace cvtest; diff --git a/modules/cudalegacy/test/test_precomp.hpp b/modules/cudalegacy/test/test_precomp.hpp index 5169ef2bb..727b40d83 100644 --- a/modules/cudalegacy/test/test_precomp.hpp +++ b/modules/cudalegacy/test/test_precomp.hpp @@ -74,6 +74,8 @@ #include "opencv2/core/private.cuda.hpp" +#include "opencv2/opencv_modules.hpp" + #include "cvconfig.h" #include "NCVTest.hpp" diff --git a/samples/gpu/performance/tests.cpp b/samples/gpu/performance/tests.cpp index 9e8bfd28c..b4bf4cfbe 100644 --- a/samples/gpu/performance/tests.cpp +++ b/samples/gpu/performance/tests.cpp @@ -3,7 +3,7 @@ #include "opencv2/highgui.hpp" #include "opencv2/calib3d.hpp" #include "opencv2/video.hpp" -#include "opencv2/cuda.hpp" +#include "opencv2/cudalegacy.hpp" #include "opencv2/cudaimgproc.hpp" #include "opencv2/cudaarithm.hpp" #include "opencv2/cudawarping.hpp" From 124ac15f1e7ce710677a89bbff842aa790c2de17 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 15 Jan 2015 16:14:13 +0300 Subject: [PATCH 80/98] remove cuda module --- cmake/OpenCVModule.cmake | 2 +- modules/cuda/CMakeLists.txt | 9 -- modules/cuda/doc/introduction.markdown | 85 ------------------- modules/cuda/include/opencv2/cuda.hpp | 70 --------------- 
modules/cuda/perf/perf_main.cpp | 47 ---------- modules/cuda/perf/perf_precomp.hpp | 64 -------------- modules/cuda/src/precomp.hpp | 60 ------------- modules/cuda/test/test_main.cpp | 45 ---------- modules/cuda/test/test_precomp.hpp | 66 -------------- modules/stitching/CMakeLists.txt | 2 +- modules/stitching/src/stitcher.cpp | 4 +- .../opencv2/videostab/global_motion.hpp | 4 +- .../opencv2/videostab/wobble_suppression.hpp | 2 +- modules/videostab/src/global_motion.cpp | 2 +- samples/cpp/stitching_detailed.cpp | 4 +- samples/cpp/videostab.cpp | 8 +- samples/gpu/CMakeLists.txt | 2 +- samples/gpu/performance/performance.cpp | 1 + samples/gpu/performance/performance.h | 1 - 19 files changed, 15 insertions(+), 463 deletions(-) delete mode 100644 modules/cuda/CMakeLists.txt delete mode 100644 modules/cuda/doc/introduction.markdown delete mode 100644 modules/cuda/include/opencv2/cuda.hpp delete mode 100644 modules/cuda/perf/perf_main.cpp delete mode 100644 modules/cuda/perf/perf_precomp.hpp delete mode 100644 modules/cuda/src/precomp.hpp delete mode 100644 modules/cuda/test/test_main.cpp delete mode 100644 modules/cuda/test/test_precomp.hpp diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index 708578d8b..fa0291963 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -109,7 +109,7 @@ endmacro() # Usage: # ocv_add_module( [INTERNAL|BINDINGS] [REQUIRED] [] [OPTIONAL ]) # Example: -# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cuda) +# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cudev) macro(ocv_add_module _name) ocv_debug_message("ocv_add_module(" ${_name} ${ARGN} ")") string(TOLOWER "${_name}" name) diff --git a/modules/cuda/CMakeLists.txt b/modules/cuda/CMakeLists.txt deleted file mode 100644 index d668ea8b0..000000000 --- a/modules/cuda/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -if(IOS OR (NOT HAVE_CUDA AND NOT BUILD_CUDA_STUBS)) - ocv_module_disable(cuda) -endif() - -set(the_description "CUDA-accelerated Computer Vision") - -ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 /wd4515 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter) - -ocv_define_module(cuda opencv_calib3d opencv_cudaarithm opencv_cudawarping OPTIONAL opencv_cudalegacy) diff --git a/modules/cuda/doc/introduction.markdown b/modules/cuda/doc/introduction.markdown deleted file mode 100644 index ebe8c21af..000000000 --- a/modules/cuda/doc/introduction.markdown +++ /dev/null @@ -1,85 +0,0 @@ -CUDA Module Introduction {#cuda_intro} -======================== - -General Information -------------------- - -The OpenCV CUDA module is a set of classes and functions to utilize CUDA computational capabilities. -It is implemented using NVIDIA\* CUDA\* Runtime API and supports only NVIDIA GPUs. The OpenCV CUDA -module includes utility functions, low-level vision primitives, and high-level algorithms. The -utility functions and low-level primitives provide a powerful infrastructure for developing fast -vision algorithms taking advantage of CUDA whereas the high-level functionality includes some -state-of-the-art algorithms (such as stereo correspondence, face and people detectors, and others) -ready to be used by the application developers. - -The CUDA module is designed as a host-level API. This means that if you have pre-compiled OpenCV -CUDA binaries, you are not required to have the CUDA Toolkit installed or write any extra code to -make use of the CUDA. 
- -The OpenCV CUDA module is designed for ease of use and does not require any knowledge of CUDA. -Though, such a knowledge will certainly be useful to handle non-trivial cases or achieve the highest -performance. It is helpful to understand the cost of various operations, what the GPU does, what the -preferred data formats are, and so on. The CUDA module is an effective instrument for quick -implementation of CUDA-accelerated computer vision algorithms. However, if your algorithm involves -many simple operations, then, for the best possible performance, you may still need to write your -own kernels to avoid extra write and read operations on the intermediate results. - -To enable CUDA support, configure OpenCV using CMake with WITH\_CUDA=ON . When the flag is set and -if CUDA is installed, the full-featured OpenCV CUDA module is built. Otherwise, the module is still -built but at runtime all functions from the module throw Exception with CV\_GpuNotSupported error -code, except for cuda::getCudaEnabledDeviceCount(). The latter function returns zero GPU count in -this case. Building OpenCV without CUDA support does not perform device code compilation, so it does -not require the CUDA Toolkit installed. Therefore, using the cuda::getCudaEnabledDeviceCount() -function, you can implement a high-level algorithm that will detect GPU presence at runtime and -choose an appropriate implementation (CPU or GPU) accordingly. - -Compilation for Different NVIDIA\* Platforms --------------------------------------------- - -NVIDIA\* compiler enables generating binary code (cubin and fatbin) and intermediate code (PTX). -Binary code often implies a specific GPU architecture and generation, so the compatibility with -other GPUs is not guaranteed. PTX is targeted for a virtual platform that is defined entirely by the -set of capabilities or features. Depending on the selected virtual platform, some of the -instructions are emulated or disabled, even if the real hardware supports all the features. - -At the first call, the PTX code is compiled to binary code for the particular GPU using a JIT -compiler. When the target GPU has a compute capability (CC) lower than the PTX code, JIT fails. By -default, the OpenCV CUDA module includes: - -\* - Binaries for compute capabilities 1.3 and 2.0 (controlled by CUDA\_ARCH\_BIN in CMake) - -\* - PTX code for compute capabilities 1.1 and 1.3 (controlled by CUDA\_ARCH\_PTX in CMake) - -This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer -platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the -PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw -Exception. For platforms where JIT compilation is performed first, the run is slow. - -On a GPU with CC 1.0, you can still compile the CUDA module and most of the functions will run -flawlessly. To achieve this, add "1.0" to the list of binaries, for example, -CUDA\_ARCH\_BIN="1.0 1.3 2.0" . The functions that cannot be run on CC 1.0 GPUs throw an exception. - -You can always determine at runtime whether the OpenCV GPU-built binaries (or PTX code) are -compatible with your GPU. The function cuda::DeviceInfo::isCompatible returns the compatibility -status (true/false). - -Utilizing Multiple GPUs ------------------------ - -In the current version, each of the OpenCV CUDA algorithms can use only a single GPU. So, to utilize -multiple GPUs, you have to manually distribute the work between GPUs. 
Switching active devie can be -done using cuda::setDevice() function. For more details please read Cuda C Programming Guide. - -While developing algorithms for multiple GPUs, note a data passing overhead. For primitive functions -and small images, it can be significant, which may eliminate all the advantages of having multiple -GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo -Block Matching algorithm has been successfully parallelized using the following algorithm: - -1. Split each image of the stereo pair into two horizontal overlapping stripes. -2. Process each pair of stripes (from the left and right images) on a separate Fermi\* GPU. -3. Merge the results into a single disparity map. - -With this algorithm, a dual GPU gave a 180% performance increase comparing to the single Fermi GPU. -For a source code example, see . diff --git a/modules/cuda/include/opencv2/cuda.hpp b/modules/cuda/include/opencv2/cuda.hpp deleted file mode 100644 index 40483f485..000000000 --- a/modules/cuda/include/opencv2/cuda.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#ifndef __OPENCV_CUDA_HPP__ -#define __OPENCV_CUDA_HPP__ - -#ifndef __cplusplus -# error cuda.hpp header must be compiled as C++ -#endif - -#include "opencv2/core/cuda.hpp" - -/** - @addtogroup cuda - @{ - @defgroup cuda_calib3d Camera Calibration and 3D Reconstruction - @} - */ - -namespace cv { namespace cuda { - -//////////////////////////// Calib3d //////////////////////////// - -//! @addtogroup cuda_calib3d -//! @{ - -//! @} - -}} // namespace cv { namespace cuda { - -#endif /* __OPENCV_CUDA_HPP__ */ diff --git a/modules/cuda/perf/perf_main.cpp b/modules/cuda/perf/perf_main.cpp deleted file mode 100644 index f01a2768d..000000000 --- a/modules/cuda/perf/perf_main.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "perf_precomp.hpp" - -using namespace perf; - -CV_PERF_TEST_CUDA_MAIN(cuda) diff --git a/modules/cuda/perf/perf_precomp.hpp b/modules/cuda/perf/perf_precomp.hpp deleted file mode 100644 index f810968cb..000000000 --- a/modules/cuda/perf/perf_precomp.hpp +++ /dev/null @@ -1,64 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. 
-// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#ifdef __GNUC__ -# pragma GCC diagnostic ignored "-Wmissing-declarations" -# if defined __clang__ || defined __APPLE__ -# pragma GCC diagnostic ignored "-Wmissing-prototypes" -# pragma GCC diagnostic ignored "-Wextra" -# endif -#endif - -#ifndef __OPENCV_PERF_PRECOMP_HPP__ -#define __OPENCV_PERF_PRECOMP_HPP__ - -#include "opencv2/ts.hpp" -#include "opencv2/ts/cuda_perf.hpp" - -#include "opencv2/cuda.hpp" -#include "opencv2/calib3d.hpp" - -#ifdef GTEST_CREATE_SHARED_LIBRARY -#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined -#endif - -#endif diff --git a/modules/cuda/src/precomp.hpp b/modules/cuda/src/precomp.hpp deleted file mode 100644 index 7feeadddc..000000000 --- a/modules/cuda/src/precomp.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
-// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#ifndef __OPENCV_PRECOMP_H__ -#define __OPENCV_PRECOMP_H__ - -#include "opencv2/cuda.hpp" -#include "opencv2/cudaarithm.hpp" -#include "opencv2/cudawarping.hpp" -#include "opencv2/calib3d.hpp" - -#include "opencv2/core/private.cuda.hpp" -#include "opencv2/core/utility.hpp" - -#include "opencv2/opencv_modules.hpp" - -#ifdef HAVE_OPENCV_CUDALEGACY -# include "opencv2/cudalegacy/private.hpp" -#endif - -#endif /* __OPENCV_PRECOMP_H__ */ diff --git a/modules/cuda/test/test_main.cpp b/modules/cuda/test/test_main.cpp deleted file mode 100644 index 04f4fcf6e..000000000 --- a/modules/cuda/test/test_main.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" - -CV_CUDA_TEST_MAIN("gpu") diff --git a/modules/cuda/test/test_precomp.hpp b/modules/cuda/test/test_precomp.hpp deleted file mode 100644 index e3b33017a..000000000 --- a/modules/cuda/test/test_precomp.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#ifdef __GNUC__ -# pragma GCC diagnostic ignored "-Wmissing-declarations" -# if defined __clang__ || defined __APPLE__ -# pragma GCC diagnostic ignored "-Wmissing-prototypes" -# pragma GCC diagnostic ignored "-Wextra" -# endif -#endif - -#ifndef __OPENCV_TEST_PRECOMP_HPP__ -#define __OPENCV_TEST_PRECOMP_HPP__ - -#include - -#include "opencv2/ts.hpp" -#include "opencv2/ts/cuda_test.hpp" - -#include "opencv2/cuda.hpp" -#include "opencv2/core.hpp" -#include "opencv2/core/opengl.hpp" -#include "opencv2/calib3d.hpp" - -#include "cvconfig.h" - -#endif diff --git a/modules/stitching/CMakeLists.txt b/modules/stitching/CMakeLists.txt index 827675405..8650f7280 100644 --- a/modules/stitching/CMakeLists.txt +++ b/modules/stitching/CMakeLists.txt @@ -5,4 +5,4 @@ if(HAVE_CUDA) endif() ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect - OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_cudalegacy opencv_xfeatures2d) + OPTIONAL opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_cudalegacy opencv_xfeatures2d) diff --git a/modules/stitching/src/stitcher.cpp b/modules/stitching/src/stitcher.cpp index 43efbd388..c515c192f 100644 --- a/modules/stitching/src/stitcher.cpp +++ b/modules/stitching/src/stitcher.cpp @@ -56,7 +56,7 @@ Stitcher Stitcher::createDefault(bool try_use_gpu) stitcher.setFeaturesMatcher(makePtr(try_use_gpu)); stitcher.setBundleAdjuster(makePtr()); -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_CUDA if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0) { #ifdef HAVE_OPENCV_XFEATURES2D @@ -544,7 +544,7 @@ Ptr createStitcher(bool try_use_gpu) stitcher->setFeaturesMatcher(makePtr(try_use_gpu)); stitcher->setBundleAdjuster(makePtr()); - #ifdef HAVE_OPENCV_CUDA + #ifdef HAVE_CUDA if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0) { #ifdef HAVE_OPENCV_NONFREE diff --git a/modules/videostab/include/opencv2/videostab/global_motion.hpp b/modules/videostab/include/opencv2/videostab/global_motion.hpp index 547f1b282..5d51e4234 100644 --- a/modules/videostab/include/opencv2/videostab/global_motion.hpp +++ b/modules/videostab/include/opencv2/videostab/global_motion.hpp @@ -249,7 +249,7 @@ private: std::vector pointsPrevGood_, pointsGood_; }; -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase { @@ -280,7 +280,7 @@ private: std::vector rejectionStatus_; }; -#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) /** @brief Computes motion between two frames assuming that all the intermediate motions are known. 
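The hunks above all follow one pattern: the umbrella HAVE_OPENCV_CUDA guard is replaced by the guard of the specific CUDA module that is actually used, usually combined with a runtime device-count check. A minimal sketch of that pattern (illustrative only, not taken from the patch; resizeHalf and the cudawarping calls are just an example of a guarded GPU path with a CPU fallback):

    #include "opencv2/opencv_modules.hpp"
    #include "opencv2/imgproc.hpp"
    #ifdef HAVE_OPENCV_CUDAWARPING
    #  include "opencv2/cudawarping.hpp"
    #endif

    // Use the CUDA path only if the specific module was built and a CUDA
    // device is actually present; otherwise fall back to the CPU code path.
    static void resizeHalf(const cv::Mat& src, cv::Mat& dst)
    {
    #ifdef HAVE_OPENCV_CUDAWARPING
        if (cv::cuda::getCudaEnabledDeviceCount() > 0)
        {
            cv::cuda::GpuMat d_src(src), d_dst;
            cv::cuda::resize(d_src, d_dst, cv::Size(), 0.5, 0.5);
            d_dst.download(dst);
            return;
        }
    #endif
        cv::resize(src, dst, cv::Size(), 0.5, 0.5);
    }
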
diff --git a/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp b/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp index 6701d7810..3f0a9432b 100644 --- a/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp +++ b/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp @@ -119,7 +119,7 @@ private: Mat_ mapx_, mapy_; }; -#if defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_OPENCV_CUDAWARPING) class CV_EXPORTS MoreAccurateMotionWobbleSuppressorGpu : public MoreAccurateMotionWobbleSuppressorBase { public: diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp index 27e2bc3f4..409662203 100644 --- a/modules/videostab/src/global_motion.cpp +++ b/modules/videostab/src/global_motion.cpp @@ -837,7 +837,7 @@ Mat KeypointBasedMotionEstimatorGpu::estimate(const cuda::GpuMat &frame0, const return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok); } -#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) Mat getMotion(int from, int to, const std::vector &motions) diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp index 0d9b4a683..2d6351ae1 100644 --- a/samples/cpp/stitching_detailed.cpp +++ b/samples/cpp/stitching_detailed.cpp @@ -673,7 +673,7 @@ int main(int argc, char* argv[]) seam_finder = makePtr(); else if (seam_find_type == "gc_color") { -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDALEGACY if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0) seam_finder = makePtr(GraphCutSeamFinderBase::COST_COLOR); else @@ -682,7 +682,7 @@ int main(int argc, char* argv[]) } else if (seam_find_type == "gc_colorgrad") { -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDALEGACY if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0) seam_finder = makePtr(GraphCutSeamFinderBase::COST_COLOR_GRAD); else diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp index 703acba41..2eea9b902 100644 --- a/samples/cpp/videostab.cpp +++ b/samples/cpp/videostab.cpp @@ -217,7 +217,7 @@ public: outlierRejector = tblor; } -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) if (gpu) { Ptr kbest = makePtr(est); @@ -258,7 +258,7 @@ public: outlierRejector = tblor; } -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) if (gpu) { Ptr kbest = makePtr(est); @@ -343,7 +343,6 @@ int main(int argc, const char **argv) return 0; } -#ifdef HAVE_OPENCV_CUDA if (arg("gpu") == "yes") { cout << "initializing GPU..."; cout.flush(); @@ -352,7 +351,6 @@ int main(int argc, const char **argv) deviceTmp.upload(hostTmp); cout << endl; } -#endif StabilizerBase *stabilizer = 0; @@ -421,7 +419,7 @@ int main(int argc, const char **argv) { Ptr ws = makePtr(); if (arg("gpu") == "yes") -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDAWARPING ws = makePtr(); #else throw runtime_error("OpenCV is built without CUDA support"); diff --git a/samples/gpu/CMakeLists.txt b/samples/gpu/CMakeLists.txt index 10c91991c..8741f1170 100644 --- a/samples/gpu/CMakeLists.txt +++ b/samples/gpu/CMakeLists.txt @@ -1,6 +1,6 @@ SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_imgcodecs 
opencv_videoio opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_features2d - opencv_calib3d opencv_cuda opencv_superres + opencv_calib3d opencv_superres opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudafeatures2d opencv_cudaoptflow opencv_cudabgsegm opencv_cudastereo opencv_cudalegacy opencv_cudaobjdetect) diff --git a/samples/gpu/performance/performance.cpp b/samples/gpu/performance/performance.cpp index 082a6383c..cef979954 100644 --- a/samples/gpu/performance/performance.cpp +++ b/samples/gpu/performance/performance.cpp @@ -2,6 +2,7 @@ #include #include #include "performance.h" +#include "opencv2/core/cuda.hpp" using namespace std; using namespace cv; diff --git a/samples/gpu/performance/performance.h b/samples/gpu/performance/performance.h index d2386284d..98889f342 100644 --- a/samples/gpu/performance/performance.h +++ b/samples/gpu/performance/performance.h @@ -7,7 +7,6 @@ #include #include #include -#include "opencv2/cuda.hpp" #define TAB " " From a0691289f9b27e95e2eb28931ab8d24029dc0869 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 15 Jan 2015 16:18:48 +0300 Subject: [PATCH 81/98] fix cudalegacy sanity test --- modules/cudalegacy/perf/perf_bgsegm.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/modules/cudalegacy/perf/perf_bgsegm.cpp b/modules/cudalegacy/perf/perf_bgsegm.cpp index a37c4fde1..436791049 100644 --- a/modules/cudalegacy/perf/perf_bgsegm.cpp +++ b/modules/cudalegacy/perf/perf_bgsegm.cpp @@ -122,13 +122,6 @@ PERF_TEST_P(Video, FGDStatModel, d_fgd->apply(d_frame, foreground); } - -#ifdef HAVE_OPENCV_CUDAIMGPROC - cv::cuda::GpuMat background3, background; - d_fgd->getBackgroundImage(background3); - cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA); - CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE); -#endif } else { From 2660eee96110076fa7e5e8e13cba7cd6ba903c78 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 15 Jan 2015 19:00:54 +0300 Subject: [PATCH 82/98] restore CUDA module introduction put it into core documentation --- modules/core/doc/cuda.markdown | 85 ++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 modules/core/doc/cuda.markdown diff --git a/modules/core/doc/cuda.markdown b/modules/core/doc/cuda.markdown new file mode 100644 index 000000000..ebe8c21af --- /dev/null +++ b/modules/core/doc/cuda.markdown @@ -0,0 +1,85 @@ +CUDA Module Introduction {#cuda_intro} +======================== + +General Information +------------------- + +The OpenCV CUDA module is a set of classes and functions to utilize CUDA computational capabilities. +It is implemented using NVIDIA\* CUDA\* Runtime API and supports only NVIDIA GPUs. The OpenCV CUDA +module includes utility functions, low-level vision primitives, and high-level algorithms. The +utility functions and low-level primitives provide a powerful infrastructure for developing fast +vision algorithms taking advantage of CUDA whereas the high-level functionality includes some +state-of-the-art algorithms (such as stereo correspondence, face and people detectors, and others) +ready to be used by the application developers. + +The CUDA module is designed as a host-level API. This means that if you have pre-compiled OpenCV +CUDA binaries, you are not required to have the CUDA Toolkit installed or write any extra code to +make use of the CUDA. + +The OpenCV CUDA module is designed for ease of use and does not require any knowledge of CUDA. 
+Such knowledge, though, will certainly be useful to handle non-trivial cases or achieve the highest
+performance. It is helpful to understand the cost of various operations, what the GPU does, what the
+preferred data formats are, and so on. The CUDA module is an effective instrument for quick
+implementation of CUDA-accelerated computer vision algorithms. However, if your algorithm involves
+many simple operations, then, for the best possible performance, you may still need to write your
+own kernels to avoid extra write and read operations on the intermediate results.
+
+To enable CUDA support, configure OpenCV using CMake with WITH\_CUDA=ON . When the flag is set and
+if CUDA is installed, the full-featured OpenCV CUDA module is built. Otherwise, the module is still
+built but at runtime all functions from the module throw Exception with CV\_GpuNotSupported error
+code, except for cuda::getCudaEnabledDeviceCount(). The latter function returns zero GPU count in
+this case. Building OpenCV without CUDA support does not perform device code compilation, so it does
+not require the CUDA Toolkit installed. Therefore, using the cuda::getCudaEnabledDeviceCount()
+function, you can implement a high-level algorithm that will detect GPU presence at runtime and
+choose an appropriate implementation (CPU or GPU) accordingly.
+
+Compilation for Different NVIDIA\* Platforms
+--------------------------------------------
+
+The NVIDIA\* compiler can generate binary code (cubin and fatbin) and intermediate code (PTX).
+Binary code often implies a specific GPU architecture and generation, so the compatibility with
+other GPUs is not guaranteed. PTX is targeted for a virtual platform that is defined entirely by the
+set of capabilities or features. Depending on the selected virtual platform, some of the
+instructions are emulated or disabled, even if the real hardware supports all the features.
+
+At the first call, the PTX code is compiled to binary code for the particular GPU using a JIT
+compiler. When the target GPU has a compute capability (CC) lower than the PTX code, JIT fails. By
+default, the OpenCV CUDA module includes:
+
+\*
+    Binaries for compute capabilities 1.3 and 2.0 (controlled by CUDA\_ARCH\_BIN in CMake)
+
+\*
+    PTX code for compute capabilities 1.1 and 1.3 (controlled by CUDA\_ARCH\_PTX in CMake)
+
+This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer
+platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the
+PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
+Exception. For platforms where JIT compilation is performed first, the run is slow.
+
+On a GPU with CC 1.0, you can still compile the CUDA module and most of the functions will run
+flawlessly. To achieve this, add "1.0" to the list of binaries, for example,
+CUDA\_ARCH\_BIN="1.0 1.3 2.0" . The functions that cannot be run on CC 1.0 GPUs throw an exception.
+
+You can always determine at runtime whether the OpenCV GPU-built binaries (or PTX code) are
+compatible with your GPU. The function cuda::DeviceInfo::isCompatible returns the compatibility
+status (true/false).
+
+Utilizing Multiple GPUs
+-----------------------
+
+In the current version, each of the OpenCV CUDA algorithms can use only a single GPU. So, to utilize
+multiple GPUs, you have to manually distribute the work between GPUs. Switching the active device can
+be done using the cuda::setDevice() function.
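A minimal sketch of the multi-GPU pattern described above (illustrative only, not part of this patch; it only uses the core CUDA API already referenced in this document, and the per-device work is a placeholder):

    #include "opencv2/core/cuda.hpp"

    // Iterate over the CUDA devices visible at runtime, skip incompatible
    // ones, and select each device in turn with cuda::setDevice().
    int main()
    {
        int ndevices = cv::cuda::getCudaEnabledDeviceCount();
        if (ndevices == 0)
            return 0; // built without CUDA or no CUDA device: use the CPU path

        for (int dev = 0; dev < ndevices; ++dev)
        {
            cv::cuda::DeviceInfo info(dev);
            if (!info.isCompatible())
                continue; // binaries/PTX were not built for this GPU

            cv::cuda::setDevice(dev);
            // ... upload this device's share of the data and run the
            // CUDA-accelerated algorithm here (placeholder) ...
        }
        return 0;
    }
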
For more details please read Cuda C Programming Guide. + +While developing algorithms for multiple GPUs, note a data passing overhead. For primitive functions +and small images, it can be significant, which may eliminate all the advantages of having multiple +GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo +Block Matching algorithm has been successfully parallelized using the following algorithm: + +1. Split each image of the stereo pair into two horizontal overlapping stripes. +2. Process each pair of stripes (from the left and right images) on a separate Fermi\* GPU. +3. Merge the results into a single disparity map. + +With this algorithm, a dual GPU gave a 180% performance increase comparing to the single Fermi GPU. +For a source code example, see . From 6ebc95470bc6c9f301b55ddd3ffe5f5759372344 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Fri, 16 Jan 2015 11:11:40 +0300 Subject: [PATCH 83/98] fix videostab compilation without CUDA --- modules/videostab/src/global_motion.cpp | 2 ++ modules/videostab/src/wobble_suppression.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp index 409662203..d73751e39 100644 --- a/modules/videostab/src/global_motion.cpp +++ b/modules/videostab/src/global_motion.cpp @@ -47,6 +47,8 @@ #include "opencv2/opencv_modules.hpp" #include "clp.hpp" +#include "opencv2/core/private.cuda.hpp" + #if !defined HAVE_CUDA || defined(CUDA_DISABLER) namespace cv { namespace cuda { diff --git a/modules/videostab/src/wobble_suppression.cpp b/modules/videostab/src/wobble_suppression.cpp index d05fd05cc..98fb69900 100644 --- a/modules/videostab/src/wobble_suppression.cpp +++ b/modules/videostab/src/wobble_suppression.cpp @@ -44,6 +44,8 @@ #include "opencv2/videostab/wobble_suppression.hpp" #include "opencv2/videostab/ring_buffer.hpp" +#include "opencv2/core/private.cuda.hpp" + #ifdef HAVE_OPENCV_CUDAWARPING # include "opencv2/cudawarping.hpp" #endif From 8914fa51ff71c3c3ec463d10a683960e3b62fc7c Mon Sep 17 00:00:00 2001 From: Ben Hagen Date: Thu, 22 Jan 2015 14:17:43 +0100 Subject: [PATCH 84/98] Do not blacklist GStreamer on Apple --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f4fd3323..10e019dac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,7 +131,7 @@ OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON) OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 ) OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS)) -OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) ) +OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT ANDROID) ) OCV_OPTION(WITH_GSTREAMER_0_10 "Enable Gstreamer 0.10 support (instead of 1.x)" OFF ) OCV_OPTION(WITH_GTK "Include GTK support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_GTK_2_X "Use GTK version 2" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) From c9e07bbc95ef1ed018f6d0922bb3b93bfc5e5463 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 22 Jan 2015 16:36:16 +0300 Subject: [PATCH 85/98] exclude compactPoints and calcWobbleSuppressionMaps from compilation if they are not needed --- modules/videostab/src/global_motion.cpp | 49 ++++++++--------- modules/videostab/src/wobble_suppression.cpp | 55 
+++++++++----------- 2 files changed, 49 insertions(+), 55 deletions(-) diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp index d73751e39..d840e895d 100644 --- a/modules/videostab/src/global_motion.cpp +++ b/modules/videostab/src/global_motion.cpp @@ -49,34 +49,31 @@ #include "opencv2/core/private.cuda.hpp" -#if !defined HAVE_CUDA || defined(CUDA_DISABLER) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) + #if !defined HAVE_CUDA || defined(CUDA_DISABLER) + namespace cv { namespace cuda { + static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); } + }} + #else + namespace cv { namespace cuda { namespace device { namespace globmotion { + int compactPoints(int N, float *points0, float *points1, const uchar *mask); + }}}} + namespace cv { namespace cuda { + static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask) + { + CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1); + CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U); + CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols); -namespace cv { namespace cuda { - static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); } -}} - -#else - -namespace cv { namespace cuda { namespace device { namespace globmotion { - int compactPoints(int N, float *points0, float *points1, const uchar *mask); -}}}} - -namespace cv { namespace cuda { - static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask) - { - CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1); - CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U); - CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols); - - int npoints = points0.cols; - int remaining = cv::cuda::device::globmotion::compactPoints( - npoints, (float*)points0.data, (float*)points1.data, mask.data); - - points0 = points0.colRange(0, remaining); - points1 = points1.colRange(0, remaining); - } -}} + int npoints = points0.cols; + int remaining = cv::cuda::device::globmotion::compactPoints( + npoints, (float*)points0.data, (float*)points1.data, mask.data); + points0 = points0.colRange(0, remaining); + points1 = points1.colRange(0, remaining); + } + }} + #endif #endif namespace cv diff --git a/modules/videostab/src/wobble_suppression.cpp b/modules/videostab/src/wobble_suppression.cpp index 98fb69900..86067fbcc 100644 --- a/modules/videostab/src/wobble_suppression.cpp +++ b/modules/videostab/src/wobble_suppression.cpp @@ -50,37 +50,34 @@ # include "opencv2/cudawarping.hpp" #endif -#if !defined HAVE_CUDA || defined(CUDA_DISABLER) +#if defined(HAVE_OPENCV_CUDAWARPING) + #if !defined HAVE_CUDA || defined(CUDA_DISABLER) + namespace cv { namespace cuda { + static void calcWobbleSuppressionMaps(int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); } + }} + #else + namespace cv { namespace cuda { namespace device { namespace globmotion { + void calcWobbleSuppressionMaps( + int left, int idx, int right, int width, int height, + const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy); + }}}} + namespace cv { namespace cuda { + static void calcWobbleSuppressionMaps( + int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, + GpuMat &mapx, GpuMat &mapy) + { + CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous()); + CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && 
mr.isContinuous()); -namespace cv { namespace cuda { - static void calcWobbleSuppressionMaps(int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); } -}} - -#else - -namespace cv { namespace cuda { namespace device { namespace globmotion { - void calcWobbleSuppressionMaps( - int left, int idx, int right, int width, int height, - const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy); -}}}} - -namespace cv { namespace cuda { - static void calcWobbleSuppressionMaps( - int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, - GpuMat &mapx, GpuMat &mapy) - { - CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous()); - CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous()); - - mapx.create(size, CV_32F); - mapy.create(size, CV_32F); - - cv::cuda::device::globmotion::calcWobbleSuppressionMaps( - left, idx, right, size.width, size.height, - ml.ptr(), mr.ptr(), mapx, mapy); - } -}} + mapx.create(size, CV_32F); + mapy.create(size, CV_32F); + cv::cuda::device::globmotion::calcWobbleSuppressionMaps( + left, idx, right, size.width, size.height, + ml.ptr(), mr.ptr(), mapx, mapy); + } + }} + #endif #endif namespace cv From dcb0c68ed3ce496e3abb7e46a3f624170872e779 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 23 Jan 2015 17:26:43 +0300 Subject: [PATCH 86/98] fix for -m32 --- modules/core/src/system.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index b6f3466f7..2590f215a 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -293,14 +293,13 @@ struct HWFeatures #else asm volatile ( - "pushl %%eax\n\t" - "pushl %%edx\n\t" + "pushl %%ebx\n\t" "movl $7,%%eax\n\t" "movl $0,%%ecx\n\t" "cpuid\n\t" - "popl %%edx\n\t" - "popl %%eax\n\t" - : "=b"(cpuid_data[1]), "=c"(cpuid_data[2]) + "movl %%ebx, %0\n\t" + "popl %%ebx\n\t" + : "=r"(cpuid_data[1]), "=c"(cpuid_data[2]) : : "cc" ); From 58ad952b1addb2732179a81942959665ab030d1e Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 2 Jan 2015 01:59:36 +0300 Subject: [PATCH 87/98] UMat: added USAGE_ALLOCATE_SHARED_MEMORY --- modules/core/include/opencv2/core/mat.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index 2b4148624..522b8b815 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -376,9 +376,10 @@ enum UMatUsageFlags { USAGE_DEFAULT = 0, - // default allocation policy is platform and usage specific + // buffer allocation policy is platform and usage specific USAGE_ALLOCATE_HOST_MEMORY = 1 << 0, USAGE_ALLOCATE_DEVICE_MEMORY = 1 << 1, + USAGE_ALLOCATE_SHARED_MEMORY = 1 << 2, // It is not equal to: USAGE_ALLOCATE_HOST_MEMORY | USAGE_ALLOCATE_DEVICE_MEMORY __UMAT_USAGE_FLAGS_32BIT = 0x7fffffff // Binary compatibility hint }; From fa23a01775019929baea649f9e616e00e5060242 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Fri, 23 Jan 2015 19:17:00 +0300 Subject: [PATCH 88/98] fix FindCUDA CMake module: do not unset variables if CUDA_TOOLKIT_TARGET_DIR is not defined --- cmake/FindCUDA.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/FindCUDA.cmake b/cmake/FindCUDA.cmake index ccfc4b93b..f7502c7f2 100644 --- a/cmake/FindCUDA.cmake +++ b/cmake/FindCUDA.cmake @@ -538,7 +538,7 @@ if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL 
"${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") endif() if(NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}" OR - NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}") + (DEFINED CUDA_TOOLKIT_TARGET_DIR AND NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")) cuda_unset_include_and_libraries() endif() From 0a07d780e0024b2a9b927d16bf3ff8513082a3ba Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 2 Jan 2015 03:33:40 +0300 Subject: [PATCH 89/98] ocl: OpenCL SVM support --- CMakeLists.txt | 1 + cmake/OpenCVDetectOpenCL.cmake | 4 + cmake/templates/cvconfig.h.in | 1 + modules/core/include/opencv2/core/mat.hpp | 4 +- modules/core/include/opencv2/core/ocl.hpp | 20 +- .../opencv2/core/opencl/opencl_svm.hpp | 81 ++ .../core/opencl/runtime/opencl_core.hpp | 12 + .../core/opencl/runtime/opencl_svm_20.hpp | 52 + .../opencl/runtime/opencl_svm_definitions.hpp | 42 + .../runtime/opencl_svm_hsa_extension.hpp | 166 +++ modules/core/src/matmul.cpp | 10 + modules/core/src/matrix.cpp | 3 +- modules/core/src/ocl.cpp | 1232 +++++++++++++++-- .../core/src/opencl/runtime/opencl_core.cpp | 67 + modules/core/src/umatrix.cpp | 8 +- 15 files changed, 1542 insertions(+), 161 deletions(-) create mode 100644 modules/core/include/opencv2/core/opencl/opencl_svm.hpp create mode 100644 modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp create mode 100644 modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp create mode 100644 modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f4fd3323..07d8ec6f2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -162,6 +162,7 @@ OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF) OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" ON IF (NOT IOS) ) +OCV_OPTION(WITH_OPENCL_SVM "Include OpenCL Shared Virtual Memory support" OFF ) # experimental OCV_OPTION(WITH_OPENCLAMDFFT "Include AMD OpenCL FFT library support" ON IF (NOT ANDROID AND NOT IOS) ) OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON IF (NOT ANDROID AND NOT IOS) ) OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF WIN32 ) diff --git a/cmake/OpenCVDetectOpenCL.cmake b/cmake/OpenCVDetectOpenCL.cmake index f732546e5..ce76ad173 100644 --- a/cmake/OpenCVDetectOpenCL.cmake +++ b/cmake/OpenCVDetectOpenCL.cmake @@ -26,6 +26,10 @@ if(OPENCL_FOUND) set(HAVE_OPENCL 1) + if(WITH_OPENCL_SVM) + set(HAVE_OPENCL_SVM 1) + endif() + if(HAVE_OPENCL_STATIC) set(OPENCL_LIBRARIES "${OPENCL_LIBRARY}") else() diff --git a/cmake/templates/cvconfig.h.in b/cmake/templates/cvconfig.h.in index 3eea4fafe..f8c1c4035 100644 --- a/cmake/templates/cvconfig.h.in +++ b/cmake/templates/cvconfig.h.in @@ -122,6 +122,7 @@ /* OpenCL Support */ #cmakedefine HAVE_OPENCL #cmakedefine HAVE_OPENCL_STATIC +#cmakedefine HAVE_OPENCL_SVM /* OpenEXR codec */ #cmakedefine HAVE_OPENEXR diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index 522b8b815..8b0d94f6e 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -415,7 +415,7 @@ public: const size_t dstofs[], const size_t dststep[], bool sync) const; // default implementation returns DummyBufferPoolController - virtual BufferPoolController* 
getBufferPoolController() const; + virtual BufferPoolController* getBufferPoolController(const char* id = NULL) const; }; @@ -481,7 +481,7 @@ struct CV_EXPORTS UMatData int refcount; uchar* data; uchar* origdata; - size_t size, capacity; + size_t size; int flags; void* handle; diff --git a/modules/core/include/opencv2/core/ocl.hpp b/modules/core/include/opencv2/core/ocl.hpp index 3b023fb09..f87e15ee6 100644 --- a/modules/core/include/opencv2/core/ocl.hpp +++ b/modules/core/include/opencv2/core/ocl.hpp @@ -56,6 +56,8 @@ CV_EXPORTS_W bool haveAmdFft(); CV_EXPORTS_W void setUseOpenCL(bool flag); CV_EXPORTS_W void finish(); +CV_EXPORTS bool haveSVM(); + class CV_EXPORTS Context; class CV_EXPORTS Device; class CV_EXPORTS Kernel; @@ -248,7 +250,10 @@ public: void* ptr() const; friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device); -protected: + + bool useSVM() const; + void setUseSVM(bool enabled); + struct Impl; Impl* p; }; @@ -666,8 +671,17 @@ protected: CV_EXPORTS MatAllocator* getOpenCLAllocator(); -CV_EXPORTS_W bool isPerformanceCheckBypassed(); -#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::isPerformanceCheckBypassed() || (condition)) + +#ifdef __OPENCV_BUILD +namespace internal { + +CV_EXPORTS bool isPerformanceCheckBypassed(); +#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::internal::isPerformanceCheckBypassed() || (condition)) + +CV_EXPORTS bool isCLBuffer(UMat& u); + +} // namespace internal +#endif //! @} diff --git a/modules/core/include/opencv2/core/opencl/opencl_svm.hpp b/modules/core/include/opencv2/core/opencl/opencl_svm.hpp new file mode 100644 index 000000000..e9f7ba023 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/opencl_svm.hpp @@ -0,0 +1,81 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OPENCL_SVM_HPP__ +#define __OPENCV_CORE_OPENCL_SVM_HPP__ + +// +// Internal usage only (binary compatibility is not guaranteed) +// +#ifndef __OPENCV_BUILD +#error Internal header file +#endif + +#if defined(HAVE_OPENCL) && defined(HAVE_OPENCL_SVM) +#include "runtime/opencl_core.hpp" +#include "runtime/opencl_svm_20.hpp" +#include "runtime/opencl_svm_hsa_extension.hpp" + +namespace cv { namespace ocl { namespace svm { + +struct SVMCapabilities +{ + enum Value + { + SVM_COARSE_GRAIN_BUFFER = (1 << 0), + SVM_FINE_GRAIN_BUFFER = (1 << 1), + SVM_FINE_GRAIN_SYSTEM = (1 << 2), + SVM_ATOMICS = (1 << 3), + }; + int value_; + + SVMCapabilities(int capabilities = 0) : value_(capabilities) { } + operator int() const { return value_; } + + inline bool isNoSVMSupport() const { return value_ == 0; } + inline bool isSupportCoarseGrainBuffer() const { return (value_ & SVM_COARSE_GRAIN_BUFFER) != 0; } + inline bool isSupportFineGrainBuffer() const { return (value_ & SVM_FINE_GRAIN_BUFFER) != 0; } + inline bool isSupportFineGrainSystem() const { return (value_ & SVM_FINE_GRAIN_SYSTEM) != 0; } + inline bool isSupportAtomics() const { return (value_ & SVM_ATOMICS) != 0; } +}; + +CV_EXPORTS const SVMCapabilities getSVMCapabilitites(const ocl::Context& context); + +struct SVMFunctions +{ + clSVMAllocAMD_fn fn_clSVMAlloc; + clSVMFreeAMD_fn fn_clSVMFree; + clSetKernelArgSVMPointerAMD_fn fn_clSetKernelArgSVMPointer; + //clSetKernelExecInfoAMD_fn fn_clSetKernelExecInfo; + //clEnqueueSVMFreeAMD_fn fn_clEnqueueSVMFree; + clEnqueueSVMMemcpyAMD_fn fn_clEnqueueSVMMemcpy; + clEnqueueSVMMemFillAMD_fn fn_clEnqueueSVMMemFill; + clEnqueueSVMMapAMD_fn fn_clEnqueueSVMMap; + clEnqueueSVMUnmapAMD_fn fn_clEnqueueSVMUnmap; + + 
inline SVMFunctions() + : fn_clSVMAlloc(NULL), fn_clSVMFree(NULL), + fn_clSetKernelArgSVMPointer(NULL), /*fn_clSetKernelExecInfo(NULL),*/ + /*fn_clEnqueueSVMFree(NULL),*/ fn_clEnqueueSVMMemcpy(NULL), fn_clEnqueueSVMMemFill(NULL), + fn_clEnqueueSVMMap(NULL), fn_clEnqueueSVMUnmap(NULL) + { + // nothing + } + + inline bool isValid() const + { + return fn_clSVMAlloc != NULL && fn_clSVMFree && fn_clSetKernelArgSVMPointer && + /*fn_clSetKernelExecInfo && fn_clEnqueueSVMFree &&*/ fn_clEnqueueSVMMemcpy && + fn_clEnqueueSVMMemFill && fn_clEnqueueSVMMap && fn_clEnqueueSVMUnmap; + } +}; + +// We should guarantee that SVMFunctions lifetime is not less than context's lifetime +CV_EXPORTS const SVMFunctions* getSVMFunctions(const ocl::Context& context); + +CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags); + +}}} //namespace cv::ocl::svm +#endif + +#endif // __OPENCV_CORE_OPENCL_SVM_HPP__ +/* End of file. */ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp index b19563cbc..bd30f813d 100644 --- a/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp @@ -62,6 +62,18 @@ #endif #endif +#ifdef HAVE_OPENCL_SVM +#define clSVMAlloc clSVMAlloc_ +#define clSVMFree clSVMFree_ +#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_ +#define clSetKernelExecInfo clSetKernelExecInfo_ +#define clEnqueueSVMFree clEnqueueSVMFree_ +#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_ +#define clEnqueueSVMMemFill clEnqueueSVMMemFill_ +#define clEnqueueSVMMap clEnqueueSVMMap_ +#define clEnqueueSVMUnmap clEnqueueSVMUnmap_ +#endif + #include "autogenerated/opencl_core.hpp" #endif // HAVE_OPENCL_STATIC diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp new file mode 100644 index 000000000..7f0ff91d1 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp @@ -0,0 +1,52 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__ + +#if defined(HAVE_OPENCL_SVM) +#include "opencl_core.hpp" + +#include "opencl_svm_definitions.hpp" + +#ifndef HAVE_OPENCL_STATIC + +#undef clSVMAlloc +#define clSVMAlloc clSVMAlloc_pfn +#undef clSVMFree +#define clSVMFree clSVMFree_pfn +#undef clSetKernelArgSVMPointer +#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_pfn +#undef clSetKernelExecInfo +//#define clSetKernelExecInfo clSetKernelExecInfo_pfn +#undef clEnqueueSVMFree +//#define clEnqueueSVMFree clEnqueueSVMFree_pfn +#undef clEnqueueSVMMemcpy +#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_pfn +#undef clEnqueueSVMMemFill +#define clEnqueueSVMMemFill clEnqueueSVMMemFill_pfn +#undef clEnqueueSVMMap +#define clEnqueueSVMMap clEnqueueSVMMap_pfn +#undef clEnqueueSVMUnmap +#define clEnqueueSVMUnmap clEnqueueSVMUnmap_pfn + +extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment); +extern CL_RUNTIME_EXPORT void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value); +//extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const 
void* param_value); +//extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMFree)(cl_command_queue command_queue, cl_uint num_svm_pointers, void* svm_pointers[], +// void (CL_CALLBACK *pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), void* user_data, +// cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); + +#endif // HAVE_OPENCL_STATIC + +#endif // HAVE_OPENCL_SVM + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp new file mode 100644 index 000000000..a4fd5fc81 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp @@ -0,0 +1,42 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__ + +#if defined(HAVE_OPENCL_SVM) +#if defined(CL_VERSION_2_0) + +// OpenCL 2.0 contains SVM definitions + +#else + +typedef cl_bitfield cl_device_svm_capabilities; +typedef cl_bitfield cl_svm_mem_flags; +typedef cl_uint cl_kernel_exec_info; + +// +// TODO Add real values after OpenCL 2.0 release +// + +#ifndef CL_DEVICE_SVM_CAPABILITIES +#define CL_DEVICE_SVM_CAPABILITIES 0x1053 + +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS (1 << 3) +#endif + +#ifndef CL_MEM_SVM_FINE_GRAIN_BUFFER +#define CL_MEM_SVM_FINE_GRAIN_BUFFER (1 << 10) +#endif + +#ifndef CL_MEM_SVM_ATOMICS +#define CL_MEM_SVM_ATOMICS (1 << 11) +#endif + + +#endif // CL_VERSION_2_0 +#endif // HAVE_OPENCL_SVM + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp new file mode 100644 index 000000000..9e50408f0 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp @@ -0,0 +1,166 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__ + +#if defined(HAVE_OPENCL_SVM) +#include "opencl_core.hpp" + +#ifndef CL_DEVICE_SVM_CAPABILITIES_AMD +// +// Part of the file is an extract from the 
cl_ext.h file from AMD APP SDK package. +// Below is the original copyright. +// +/******************************************************************************* + * Copyright (c) 2008-2013 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. + ******************************************************************************/ + +/******************************************* + * Shared Virtual Memory (SVM) extension + *******************************************/ +typedef cl_bitfield cl_device_svm_capabilities_amd; +typedef cl_bitfield cl_svm_mem_flags_amd; +typedef cl_uint cl_kernel_exec_info_amd; + +/* cl_device_info */ +#define CL_DEVICE_SVM_CAPABILITIES_AMD 0x1053 +#define CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT_AMD 0x1054 + +/* cl_device_svm_capabilities_amd */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_AMD (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER_AMD (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM_AMD (1 << 2) +#define CL_DEVICE_SVM_ATOMICS_AMD (1 << 3) + +/* cl_svm_mem_flags_amd */ +#define CL_MEM_SVM_FINE_GRAIN_BUFFER_AMD (1 << 10) +#define CL_MEM_SVM_ATOMICS_AMD (1 << 11) + +/* cl_mem_info */ +#define CL_MEM_USES_SVM_POINTER_AMD 0x1109 + +/* cl_kernel_exec_info_amd */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS_AMD 0x11B6 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM_AMD 0x11B7 + +/* cl_command_type */ +#define CL_COMMAND_SVM_FREE_AMD 0x1209 +#define CL_COMMAND_SVM_MEMCPY_AMD 0x120A +#define CL_COMMAND_SVM_MEMFILL_AMD 0x120B +#define CL_COMMAND_SVM_MAP_AMD 0x120C +#define CL_COMMAND_SVM_UNMAP_AMD 0x120D + +typedef CL_API_ENTRY void* +(CL_API_CALL * clSVMAllocAMD_fn)( + cl_context /* context */, + cl_svm_mem_flags_amd /* flags */, + size_t /* size */, + unsigned int /* alignment */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY void +(CL_API_CALL * clSVMFreeAMD_fn)( + cl_context /* context */, + void* /* svm_pointer */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMFreeAMD_fn)( + cl_command_queue /* command_queue */, + cl_uint /* num_svm_pointers */, + void** /* svm_pointers */, + void (CL_CALLBACK *)( /*pfn_free_func*/ + cl_command_queue /* queue */, + cl_uint /* num_svm_pointers */, + void** /* svm_pointers */, + void* /* user_data */), + void* /* user_data */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * 
clEnqueueSVMMemcpyAMD_fn)( + cl_command_queue /* command_queue */, + cl_bool /* blocking_copy */, + void* /* dst_ptr */, + const void* /* src_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMemFillAMD_fn)( + cl_command_queue /* command_queue */, + void* /* svm_ptr */, + const void* /* pattern */, + size_t /* pattern_size */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMapAMD_fn)( + cl_command_queue /* command_queue */, + cl_bool /* blocking_map */, + cl_map_flags /* map_flags */, + void* /* svm_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMUnmapAMD_fn)( + cl_command_queue /* command_queue */, + void* /* svm_ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clSetKernelArgSVMPointerAMD_fn)( + cl_kernel /* kernel */, + cl_uint /* arg_index */, + const void * /* arg_value */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clSetKernelExecInfoAMD_fn)( + cl_kernel /* kernel */, + cl_kernel_exec_info_amd /* param_name */, + size_t /* param_value_size */, + const void * /* param_value */ +) CL_EXT_SUFFIX__VERSION_1_2; + +#endif + +#endif // HAVE_OPENCL_SVM + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__ diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index 6c8bad244..feffc8d32 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -721,6 +721,16 @@ static bool ocl_gemm_amdblas( InputArray matA, InputArray matB, double alpha, return false; UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat(); + if (!ocl::internal::isCLBuffer(A) || !ocl::internal::isCLBuffer(B) || !ocl::internal::isCLBuffer(D)) + { + return false; + } + if (haveC) + { + UMat C = matC.getUMat(); + if (!ocl::internal::isCLBuffer(C)) + return false; + } if (haveC) ctrans ? 
transpose(matC, D) : matC.copyTo(D); else diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 3bd9f2c62..e1e9caa83 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -159,8 +159,9 @@ void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t s memcpy(ptrs[1], ptrs[0], planesz); } -BufferPoolController* MatAllocator::getBufferPoolController() const +BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const { + (void)id; static DummyBufferPoolController dummy; return &dummy; } diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp index c18d8ba61..efe3b936d 100644 --- a/modules/core/src/ocl.cpp +++ b/modules/core/src/ocl.cpp @@ -48,6 +48,8 @@ #define CV_OPENCL_ALWAYS_SHOW_BUILD_LOG 0 #define CV_OPENCL_SHOW_RUN_ERRORS 0 +#define CV_OPENCL_SHOW_SVM_ERROR_LOG 1 +#define CV_OPENCL_SHOW_SVM_LOG 0 #include "opencv2/core/bufferpool.hpp" #ifndef LOG_BUFFER_POOL @@ -111,6 +113,20 @@ static size_t getConfigurationParameterForSize(const char* name, size_t defaultV CV_ErrorNoReturn(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str())); } +#if CV_OPENCL_SHOW_SVM_LOG +// TODO add timestamp logging +#define CV_OPENCL_SVM_TRACE_P printf("line %d (ocl.cpp): ", __LINE__); printf +#else +#define CV_OPENCL_SVM_TRACE_P(...) +#endif + +#if CV_OPENCL_SHOW_SVM_ERROR_LOG +// TODO add timestamp logging +#define CV_OPENCL_SVM_TRACE_ERROR_P printf("Error on line %d (ocl.cpp): ", __LINE__); printf +#else +#define CV_OPENCL_SVM_TRACE_ERROR_P(...) +#endif + #include "opencv2/core/opencl/runtime/opencl_clamdblas.hpp" #include "opencv2/core/opencl/runtime/opencl_clamdfft.hpp" @@ -920,6 +936,7 @@ OCL_FUNC(cl_int, clGetSupportedImageFormats, cl_uint * num_image_formats), (context, flags, image_type, num_entries, image_formats, num_image_formats)) + /* OCL_FUNC(cl_int, clGetMemObjectInfo, (cl_mem memobj, @@ -1342,6 +1359,12 @@ static bool isRaiseError() #define CV_OclDbgAssert(expr) do { if (isRaiseError()) { CV_Assert(expr); } else { (void)(expr); } } while ((void)0, 0) #endif +#ifdef HAVE_OPENCL_SVM +#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp" +#include "opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp" +#include "opencv2/core/opencl/opencl_svm.hpp" +#endif + namespace cv { namespace ocl { struct UMat2D @@ -1627,6 +1650,15 @@ bool haveAmdFft() #endif +bool haveSVM() +{ +#ifdef HAVE_OPENCL_SVM + return true; +#else + return false; +#endif +} + void finish() { Queue::getDefault().finish(); @@ -2357,12 +2389,86 @@ not_found: } #endif +#ifdef HAVE_OPENCL_SVM +namespace svm { + +enum AllocatorFlags { // don't use first 16 bits + OPENCL_SVM_COARSE_GRAIN_BUFFER = 1 << 16, // clSVMAlloc + SVM map/unmap + OPENCL_SVM_FINE_GRAIN_BUFFER = 2 << 16, // clSVMAlloc + OPENCL_SVM_FINE_GRAIN_SYSTEM = 3 << 16, // direct access + OPENCL_SVM_BUFFER_MASK = 3 << 16, + OPENCL_SVM_BUFFER_MAP = 4 << 16 +}; + +static bool checkForceSVMUmatUsage() +{ + static bool initialized = false; + static bool force = false; + if (!initialized) + { + force = getBoolParameter("OPENCV_OPENCL_SVM_FORCE_UMAT_USAGE", false); + initialized = true; + } + return force; +} +static bool checkDisableSVMUMatUsage() +{ + static bool initialized = false; + static bool force = false; + if (!initialized) + { + force = getBoolParameter("OPENCV_OPENCL_SVM_DISABLE_UMAT_USAGE", false); + initialized = true; + } + return force; +} +static bool checkDisableSVM() +{ + static bool initialized = false; + static bool force = 
false; + if (!initialized) + { + force = getBoolParameter("OPENCV_OPENCL_SVM_DISABLE", false); + initialized = true; + } + return force; +} +// see SVMCapabilities +static unsigned int getSVMCapabilitiesMask() +{ + static bool initialized = false; + static unsigned int mask = 0; + if (!initialized) + { + const char* envValue = getenv("OPENCV_OPENCL_SVM_CAPABILITIES_MASK"); + if (envValue == NULL) + { + return ~0U; // all bits 1 + } + mask = atoi(envValue); + initialized = true; + } + return mask; +} +} // namespace +#endif + struct Context::Impl { - Impl() + static Context::Impl* get(Context& context) { return context.p; } + + void __init() { refcount = 1; handle = 0; +#ifdef HAVE_OPENCL_SVM + svmInitialized = false; +#endif + } + + Impl() + { + __init(); } void setDefault() @@ -2401,8 +2507,7 @@ struct Context::Impl Impl(int dtype0) { - refcount = 1; - handle = 0; + __init(); cl_int retval = 0; cl_platform_id pl = (cl_platform_id)Platform::getDefault().ptr(); @@ -2419,7 +2524,7 @@ struct Context::Impl AutoBuffer dlistbuf(nd0*2+1); cl_device_id* dlist = (cl_device_id*)(void**)dlistbuf; cl_device_id* dlist_new = dlist + nd0; - CV_OclDbgAssert(clGetDeviceIDs( pl, dtype, nd0, dlist, &nd0 ) == CL_SUCCESS); + CV_OclDbgAssert(clGetDeviceIDs( pl, dtype, nd0, dlist, &nd0 ) == CL_SUCCESS); String name0; for(i = 0; i < nd0; i++) @@ -2496,6 +2601,144 @@ struct Context::Impl }; typedef std::map phash_t; phash_t phash; + +#ifdef HAVE_OPENCL_SVM + bool svmInitialized; + bool svmAvailable; + bool svmEnabled; + svm::SVMCapabilities svmCapabilities; + svm::SVMFunctions svmFunctions; + + void svmInit() + { + CV_Assert(handle != NULL); + const Device& device = devices[0]; + cl_device_svm_capabilities deviceCaps = 0; + CV_Assert(((void)0, CL_DEVICE_SVM_CAPABILITIES == CL_DEVICE_SVM_CAPABILITIES_AMD)); // Check assumption + cl_int status = clGetDeviceInfo((cl_device_id)device.ptr(), CL_DEVICE_SVM_CAPABILITIES, sizeof(deviceCaps), &deviceCaps, NULL); + if (status != CL_SUCCESS) + { + CV_OPENCL_SVM_TRACE_ERROR_P("CL_DEVICE_SVM_CAPABILITIES via clGetDeviceInfo failed: %d\n", status); + goto noSVM; + } + CV_OPENCL_SVM_TRACE_P("CL_DEVICE_SVM_CAPABILITIES returned: 0x%x\n", (int)deviceCaps); + CV_Assert(((void)0, CL_DEVICE_SVM_COARSE_GRAIN_BUFFER == CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_AMD)); // Check assumption + svmCapabilities.value_ = + ((deviceCaps & CL_DEVICE_SVM_COARSE_GRAIN_BUFFER) ? svm::SVMCapabilities::SVM_COARSE_GRAIN_BUFFER : 0) | + ((deviceCaps & CL_DEVICE_SVM_FINE_GRAIN_BUFFER) ? svm::SVMCapabilities::SVM_FINE_GRAIN_BUFFER : 0) | + ((deviceCaps & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM) ? svm::SVMCapabilities::SVM_FINE_GRAIN_SYSTEM : 0) | + ((deviceCaps & CL_DEVICE_SVM_ATOMICS) ? 
svm::SVMCapabilities::SVM_ATOMICS : 0); + svmCapabilities.value_ &= svm::getSVMCapabilitiesMask(); + if (svmCapabilities.value_ == 0) + { + CV_OPENCL_SVM_TRACE_ERROR_P("svmCapabilities is empty\n"); + goto noSVM; + } + try + { + // Try OpenCL 2.0 + CV_OPENCL_SVM_TRACE_P("Try SVM from OpenCL 2.0 ...\n"); + void* ptr = clSVMAlloc(handle, CL_MEM_READ_WRITE, 100, 0); + if (!ptr) + { + CV_OPENCL_SVM_TRACE_ERROR_P("clSVMAlloc returned NULL...\n"); + CV_ErrorNoReturn(Error::StsBadArg, "clSVMAlloc returned NULL"); + } + try + { + bool error = false; + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + if (CL_SUCCESS != clEnqueueSVMMap(q, CL_TRUE, CL_MAP_WRITE, ptr, 100, 0, NULL, NULL)) + { + CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMMap failed...\n"); + CV_ErrorNoReturn(Error::StsBadArg, "clEnqueueSVMMap FAILED"); + } + clFinish(q); + try + { + ((int*)ptr)[0] = 100; + } + catch (...) + { + CV_OPENCL_SVM_TRACE_ERROR_P("SVM buffer access test FAILED\n"); + error = true; + } + if (CL_SUCCESS != clEnqueueSVMUnmap(q, ptr, 0, NULL, NULL)) + { + CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMUnmap failed...\n"); + CV_ErrorNoReturn(Error::StsBadArg, "clEnqueueSVMUnmap FAILED"); + } + clFinish(q); + if (error) + { + CV_ErrorNoReturn(Error::StsBadArg, "OpenCL SVM buffer access test was FAILED"); + } + } + catch (...) + { + CV_OPENCL_SVM_TRACE_ERROR_P("OpenCL SVM buffer access test was FAILED\n"); + clSVMFree(handle, ptr); + throw; + } + clSVMFree(handle, ptr); + svmFunctions.fn_clSVMAlloc = clSVMAlloc; + svmFunctions.fn_clSVMFree = clSVMFree; + svmFunctions.fn_clSetKernelArgSVMPointer = clSetKernelArgSVMPointer; + //svmFunctions.fn_clSetKernelExecInfo = clSetKernelExecInfo; + //svmFunctions.fn_clEnqueueSVMFree = clEnqueueSVMFree; + svmFunctions.fn_clEnqueueSVMMemcpy = clEnqueueSVMMemcpy; + svmFunctions.fn_clEnqueueSVMMemFill = clEnqueueSVMMemFill; + svmFunctions.fn_clEnqueueSVMMap = clEnqueueSVMMap; + svmFunctions.fn_clEnqueueSVMUnmap = clEnqueueSVMUnmap; + } + catch (...) 
+ { + CV_OPENCL_SVM_TRACE_P("clSVMAlloc failed, trying HSA extension...\n"); + try + { + // Try HSA extension + String extensions = device.extensions(); + if (extensions.find("cl_amd_svm") == String::npos) + { + CV_OPENCL_SVM_TRACE_P("Device extension doesn't have cl_amd_svm: %s\n", extensions.c_str()); + goto noSVM; + } + cl_platform_id p = NULL; + status = clGetDeviceInfo((cl_device_id)device.ptr(), CL_DEVICE_PLATFORM, sizeof(cl_platform_id), &p, NULL); + CV_Assert(status == CL_SUCCESS); + svmFunctions.fn_clSVMAlloc = (clSVMAllocAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSVMAllocAMD"); + svmFunctions.fn_clSVMFree = (clSVMFreeAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSVMFreeAMD"); + svmFunctions.fn_clSetKernelArgSVMPointer = (clSetKernelArgSVMPointerAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSetKernelArgSVMPointerAMD"); + //svmFunctions.fn_clSetKernelExecInfo = (clSetKernelExecInfoAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSetKernelExecInfoAMD"); + //svmFunctions.fn_clEnqueueSVMFree = (clEnqueueSVMFreeAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMFreeAMD"); + svmFunctions.fn_clEnqueueSVMMemcpy = (clEnqueueSVMMemcpyAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMMemcpyAMD"); + svmFunctions.fn_clEnqueueSVMMemFill = (clEnqueueSVMMemFillAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMMemFillAMD"); + svmFunctions.fn_clEnqueueSVMMap = (clEnqueueSVMMapAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMMapAMD"); + svmFunctions.fn_clEnqueueSVMUnmap = (clEnqueueSVMUnmapAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMUnmapAMD"); + CV_Assert(svmFunctions.isValid()); + } + catch (...) + { + CV_OPENCL_SVM_TRACE_P("Something is totally wrong\n"); + goto noSVM; + } + } + + svmAvailable = true; + svmEnabled = !svm::checkDisableSVM(); + svmInitialized = true; + CV_OPENCL_SVM_TRACE_P("OpenCV OpenCL SVM support initialized\n"); + return; + noSVM: + CV_OPENCL_SVM_TRACE_P("OpenCL SVM is not detected\n"); + svmAvailable = false; + svmEnabled = false; + svmCapabilities.value_ = 0; + svmInitialized = true; + svmFunctions.fn_clSVMAlloc = NULL; + return; + } +#endif }; @@ -2610,6 +2853,71 @@ Program Context::getProg(const ProgramSource& prog, return p ? 
p->getProg(prog, buildopts, errmsg) : Program(); } + + +#ifdef HAVE_OPENCL_SVM +bool Context::useSVM() const +{ + Context::Impl* i = p; + CV_Assert(i); + if (!i->svmInitialized) + i->svmInit(); + return i->svmEnabled; +} +void Context::setUseSVM(bool enabled) +{ + Context::Impl* i = p; + CV_Assert(i); + if (!i->svmInitialized) + i->svmInit(); + if (enabled && !i->svmAvailable) + { + CV_ErrorNoReturn(Error::StsError, "OpenCL Shared Virtual Memory (SVM) is not supported by OpenCL device"); + } + i->svmEnabled = enabled; +} +#else +bool Context::useSVM() const { return false; } +void Context::setUseSVM(bool enabled) { CV_Assert(!enabled); } +#endif + +#ifdef HAVE_OPENCL_SVM +namespace svm { + +const SVMCapabilities getSVMCapabilitites(const ocl::Context& context) +{ + Context::Impl* i = context.p; + CV_Assert(i); + if (!i->svmInitialized) + i->svmInit(); + return i->svmCapabilities; +} + +CV_EXPORTS const SVMFunctions* getSVMFunctions(const ocl::Context& context) +{ + Context::Impl* i = context.p; + CV_Assert(i); + CV_Assert(i->svmInitialized); // getSVMCapabilitites() must be called first + CV_Assert(i->svmFunctions.fn_clSVMAlloc != NULL); + return &i->svmFunctions; +} + +CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags) +{ + if (checkForceSVMUmatUsage()) + return true; + if (checkDisableSVMUMatUsage()) + return false; + if ((usageFlags & USAGE_ALLOCATE_SHARED_MEMORY) != 0) + return true; + return false; // don't use SVM by default +} + +} // namespace cv::ocl::svm +#endif // HAVE_OPENCL_SVM + + + void initializeContextFromHandle(Context& ctx, void* platform, void* _context, void* _device) { cl_context context = (cl_context)_context; @@ -2979,12 +3287,33 @@ int Kernel::set(int i, const KernelArg& arg) return -1; } +#ifdef HAVE_OPENCL_SVM + if ((arg.m->u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + const Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + uchar*& svmDataPtr = (uchar*&)arg.m->u->handle; + CV_OPENCL_SVM_TRACE_P("clSetKernelArgSVMPointer: %p\n", svmDataPtr); +#if 1 // TODO + cl_int status = svmFns->fn_clSetKernelArgSVMPointer(p->handle, (cl_uint)i, svmDataPtr); +#else + cl_int status = svmFns->fn_clSetKernelArgSVMPointer(p->handle, (cl_uint)i, &svmDataPtr); +#endif + CV_Assert(status == CL_SUCCESS); + } + else +#endif + { + CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i, sizeof(h), &h) == CL_SUCCESS); + } + if (ptronly) - CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i++, sizeof(h), &h) == CL_SUCCESS); + { + i++; + } else if( arg.m->dims <= 2 ) { UMat2D u2d(*arg.m); - CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i, sizeof(h), &h) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+1), sizeof(u2d.step), &u2d.step) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+2), sizeof(u2d.offset), &u2d.offset) == CL_SUCCESS); i += 3; @@ -3000,7 +3329,6 @@ int Kernel::set(int i, const KernelArg& arg) else { UMat3D u3d(*arg.m); - CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i, sizeof(h), &h) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+1), sizeof(u3d.slicestep), &u3d.slicestep) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+2), sizeof(u3d.step), &u3d.step) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+3), sizeof(u3d.offset), &u3d.offset) == CL_SUCCESS); @@ -3433,39 +3761,55 @@ ProgramSource::hash_t ProgramSource::hash() const //////////////////////////////////////////// OpenCLAllocator 
////////////////////////////////////////////////// +template class OpenCLBufferPool { protected: ~OpenCLBufferPool() { } public: - virtual cl_mem allocate(size_t size, CV_OUT size_t& capacity) = 0; - virtual void release(cl_mem handle, size_t capacity) = 0; + virtual T allocate(size_t size) = 0; + virtual void release(T buffer) = 0; }; -class OpenCLBufferPoolImpl : public BufferPoolController, public OpenCLBufferPool +template +class OpenCLBufferPoolBaseImpl : public BufferPoolController, public OpenCLBufferPool { -public: - struct BufferEntry - { - cl_mem clBuffer_; - size_t capacity_; - }; +private: + inline Derived& derived() { return *static_cast(this); } protected: Mutex mutex_; size_t currentReservedSize; size_t maxReservedSize; - std::list reservedEntries_; // LRU order + std::list allocatedEntries_; // Allocated and used entries + std::list reservedEntries_; // LRU order. Allocated, but not used entries + + // synchronized + bool _findAndRemoveEntryFromAllocatedList(CV_OUT BufferEntry& entry, T buffer) + { + typename std::list::iterator i = allocatedEntries_.begin(); + for (; i != allocatedEntries_.end(); ++i) + { + BufferEntry& e = *i; + if (e.clBuffer_ == buffer) + { + entry = e; + allocatedEntries_.erase(i); + return true; + } + } + return false; + } // synchronized bool _findAndRemoveEntryFromReservedList(CV_OUT BufferEntry& entry, const size_t size) { if (reservedEntries_.empty()) return false; - std::list::iterator i = reservedEntries_.begin(); - std::list::iterator result_pos = reservedEntries_.end(); - BufferEntry result = {NULL, 0}; + typename std::list::iterator i = reservedEntries_.begin(); + typename std::list::iterator result_pos = reservedEntries_.end(); + BufferEntry result; size_t minDiff = (size_t)(-1); for (; i != reservedEntries_.end(); ++i) { @@ -3489,6 +3833,7 @@ protected: reservedEntries_.erase(result_pos); entry = result; currentReservedSize -= entry.capacity_; + allocatedEntries_.push_back(entry); return true; } return false; @@ -3503,7 +3848,7 @@ protected: const BufferEntry& entry = reservedEntries_.back(); CV_DbgAssert(currentReservedSize >= entry.capacity_); currentReservedSize -= entry.capacity_; - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); reservedEntries_.pop_back(); } } @@ -3523,72 +3868,45 @@ protected: return 1024*1024; } - void _allocateBufferEntry(BufferEntry& entry, size_t size) - { - CV_DbgAssert(entry.clBuffer_ == NULL); - entry.capacity_ = alignSize(size, (int)_allocationGranularity(size)); - Context& ctx = Context::getDefault(); - cl_int retval = CL_SUCCESS; - entry.clBuffer_ = clCreateBuffer((cl_context)ctx.ptr(), CL_MEM_READ_WRITE, entry.capacity_, 0, &retval); - CV_Assert(retval == CL_SUCCESS); - CV_Assert(entry.clBuffer_ != NULL); - if(retval == CL_SUCCESS) - { - CV_IMPL_ADD(CV_IMPL_OCL); - } - LOG_BUFFER_POOL("OpenCL allocate %lld (0x%llx) bytes: %p\n", - (long long)entry.capacity_, (long long)entry.capacity_, entry.clBuffer_); - } - - void _releaseBufferEntry(const BufferEntry& entry) - { - CV_Assert(entry.capacity_ != 0); - CV_Assert(entry.clBuffer_ != NULL); - LOG_BUFFER_POOL("OpenCL release buffer: %p, %lld (0x%llx) bytes\n", - entry.clBuffer_, (long long)entry.capacity_, (long long)entry.capacity_); - clReleaseMemObject(entry.clBuffer_); - } public: - OpenCLBufferPoolImpl() - : currentReservedSize(0), maxReservedSize(0) + OpenCLBufferPoolBaseImpl() + : currentReservedSize(0), + maxReservedSize(0) { - int poolSize = ocl::Device::getDefault().isIntel() ? 
1 << 27 : 0; - maxReservedSize = getConfigurationParameterForSize("OPENCV_OPENCL_BUFFERPOOL_LIMIT", poolSize); + // nothing } - virtual ~OpenCLBufferPoolImpl() + virtual ~OpenCLBufferPoolBaseImpl() { freeAllReservedBuffers(); CV_Assert(reservedEntries_.empty()); } public: - virtual cl_mem allocate(size_t size, CV_OUT size_t& capacity) + virtual T allocate(size_t size) { - BufferEntry entry = {NULL, 0}; - if (maxReservedSize > 0) + AutoLock locker(mutex_); + BufferEntry entry; + if (maxReservedSize > 0 && _findAndRemoveEntryFromReservedList(entry, size)) { - AutoLock locker(mutex_); - if (_findAndRemoveEntryFromReservedList(entry, size)) - { - CV_DbgAssert(size <= entry.capacity_); - LOG_BUFFER_POOL("Reuse reserved buffer: %p\n", entry.clBuffer_); - capacity = entry.capacity_; - return entry.clBuffer_; - } - } - _allocateBufferEntry(entry, size); - capacity = entry.capacity_; - return entry.clBuffer_; - } - virtual void release(cl_mem handle, size_t capacity) - { - BufferEntry entry = {handle, capacity}; - if (maxReservedSize == 0 || entry.capacity_ > maxReservedSize / 8) - { - _releaseBufferEntry(entry); + CV_DbgAssert(size <= entry.capacity_); + LOG_BUFFER_POOL("Reuse reserved buffer: %p\n", entry.clBuffer_); + } + else + { + derived()._allocateBufferEntry(entry, size); + } + return entry.clBuffer_; + } + virtual void release(T buffer) + { + AutoLock locker(mutex_); + BufferEntry entry; + CV_Assert(_findAndRemoveEntryFromAllocatedList(entry, buffer)); + if (maxReservedSize == 0 || entry.capacity_ > maxReservedSize / 8) + { + derived()._releaseBufferEntry(entry); } else { - AutoLock locker(mutex_); reservedEntries_.push_front(entry); currentReservedSize += entry.capacity_; _checkSizeOfReservedEntries(); @@ -3604,7 +3922,7 @@ public: maxReservedSize = size; if (maxReservedSize < oldMaxReservedSize) { - std::list::iterator i = reservedEntries_.begin(); + typename std::list::iterator i = reservedEntries_.begin(); for (; i != reservedEntries_.end();) { const BufferEntry& entry = *i; @@ -3612,7 +3930,7 @@ public: { CV_DbgAssert(currentReservedSize >= entry.capacity_); currentReservedSize -= entry.capacity_; - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); i = reservedEntries_.erase(i); continue; } @@ -3624,16 +3942,123 @@ public: virtual void freeAllReservedBuffers() { AutoLock locker(mutex_); - std::list::const_iterator i = reservedEntries_.begin(); + typename std::list::const_iterator i = reservedEntries_.begin(); for (; i != reservedEntries_.end(); ++i) { const BufferEntry& entry = *i; - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); } reservedEntries_.clear(); } }; +struct CLBufferEntry +{ + cl_mem clBuffer_; + size_t capacity_; + CLBufferEntry() : clBuffer_((cl_mem)NULL), capacity_(0) { } +}; + +class OpenCLBufferPoolImpl : public OpenCLBufferPoolBaseImpl +{ +public: + typedef struct CLBufferEntry BufferEntry; +protected: + int createFlags_; +public: + OpenCLBufferPoolImpl(int createFlags = 0) + : createFlags_(createFlags) + { + } + + void _allocateBufferEntry(BufferEntry& entry, size_t size) + { + CV_DbgAssert(entry.clBuffer_ == NULL); + entry.capacity_ = alignSize(size, (int)_allocationGranularity(size)); + Context& ctx = Context::getDefault(); + cl_int retval = CL_SUCCESS; + entry.clBuffer_ = clCreateBuffer((cl_context)ctx.ptr(), CL_MEM_READ_WRITE|createFlags_, entry.capacity_, 0, &retval); + CV_Assert(retval == CL_SUCCESS); + CV_Assert(entry.clBuffer_ != NULL); + if(retval == CL_SUCCESS) + { + CV_IMPL_ADD(CV_IMPL_OCL); + } + 
LOG_BUFFER_POOL("OpenCL allocate %lld (0x%llx) bytes: %p\n", + (long long)entry.capacity_, (long long)entry.capacity_, entry.clBuffer_); + allocatedEntries_.push_back(entry); + } + + void _releaseBufferEntry(const BufferEntry& entry) + { + CV_Assert(entry.capacity_ != 0); + CV_Assert(entry.clBuffer_ != NULL); + LOG_BUFFER_POOL("OpenCL release buffer: %p, %lld (0x%llx) bytes\n", + entry.clBuffer_, (long long)entry.capacity_, (long long)entry.capacity_); + clReleaseMemObject(entry.clBuffer_); + } +}; + +#ifdef HAVE_OPENCL_SVM +struct CLSVMBufferEntry +{ + void* clBuffer_; + size_t capacity_; + CLSVMBufferEntry() : clBuffer_(NULL), capacity_(0) { } +}; +class OpenCLSVMBufferPoolImpl : public OpenCLBufferPoolBaseImpl +{ +public: + typedef struct CLSVMBufferEntry BufferEntry; +public: + OpenCLSVMBufferPoolImpl() + { + } + + void _allocateBufferEntry(BufferEntry& entry, size_t size) + { + CV_DbgAssert(entry.clBuffer_ == NULL); + entry.capacity_ = alignSize(size, (int)_allocationGranularity(size)); + + Context& ctx = Context::getDefault(); + const svm::SVMCapabilities svmCaps = svm::getSVMCapabilitites(ctx); + bool isFineGrainBuffer = svmCaps.isSupportFineGrainBuffer(); + cl_svm_mem_flags memFlags = CL_MEM_READ_WRITE | + (isFineGrainBuffer ? CL_MEM_SVM_FINE_GRAIN_BUFFER : 0); + + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_OPENCL_SVM_TRACE_P("clSVMAlloc: %d\n", (int)entry.capacity_); + void *buf = svmFns->fn_clSVMAlloc((cl_context)ctx.ptr(), memFlags, entry.capacity_, 0); + CV_Assert(buf); + + entry.clBuffer_ = buf; + { + CV_IMPL_ADD(CV_IMPL_OCL); + } + LOG_BUFFER_POOL("OpenCL SVM allocate %lld (0x%llx) bytes: %p\n", + (long long)entry.capacity_, (long long)entry.capacity_, entry.clBuffer_); + allocatedEntries_.push_back(entry); + } + + void _releaseBufferEntry(const BufferEntry& entry) + { + CV_Assert(entry.capacity_ != 0); + CV_Assert(entry.clBuffer_ != NULL); + LOG_BUFFER_POOL("OpenCL release SVM buffer: %p, %lld (0x%llx) bytes\n", + entry.clBuffer_, (long long)entry.capacity_, (long long)entry.capacity_); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + CV_OPENCL_SVM_TRACE_P("clSVMFree: %p\n", entry.clBuffer_); + svmFns->fn_clSVMFree((cl_context)ctx.ptr(), entry.clBuffer_); + } +}; +#endif + + + #if defined _MSC_VER #pragma warning(disable:4127) // conditional expression is constant #endif @@ -3697,12 +4122,37 @@ private: class OpenCLAllocator : public MatAllocator { mutable OpenCLBufferPoolImpl bufferPool; + mutable OpenCLBufferPoolImpl bufferPoolHostPtr; +#ifdef HAVE_OPENCL_SVM + mutable OpenCLSVMBufferPoolImpl bufferPoolSVM; +#endif + enum AllocatorFlags { - ALLOCATOR_FLAGS_BUFFER_POOL_USED = 1 << 0 + ALLOCATOR_FLAGS_BUFFER_POOL_USED = 1 << 0, + ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED = 1 << 1 +#ifdef HAVE_OPENCL_SVM + ,ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED = 1 << 2 +#endif }; public: - OpenCLAllocator() { matStdAllocator = Mat::getStdAllocator(); } + OpenCLAllocator() + : bufferPool(0), + bufferPoolHostPtr(CL_MEM_ALLOC_HOST_PTR) + { + size_t defaultPoolSize, poolSize; + defaultPoolSize = ocl::Device::getDefault().isIntel() ? 
1 << 27 : 0; + poolSize = getConfigurationParameterForSize("OPENCV_OPENCL_BUFFERPOOL_LIMIT", defaultPoolSize); + bufferPool.setMaxReservedSize(poolSize); + poolSize = getConfigurationParameterForSize("OPENCV_OPENCL_HOST_PTR_BUFFERPOOL_LIMIT", defaultPoolSize); + bufferPoolHostPtr.setMaxReservedSize(poolSize); +#ifdef HAVE_OPENCL_SVM + poolSize = getConfigurationParameterForSize("OPENCV_OPENCL_SVM_BUFFERPOOL_LIMIT", defaultPoolSize); + bufferPoolSVM.setMaxReservedSize(poolSize); +#endif + + matStdAllocator = Mat::getStdAllocator(); + } UMatData* defaultAllocate(int dims, const int* sizes, int type, void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const @@ -3739,33 +4189,47 @@ public: } Context& ctx = Context::getDefault(); + int createFlags = 0, flags0 = 0; getBestFlags(ctx, flags, usageFlags, createFlags, flags0); - size_t capacity = 0; void* handle = NULL; int allocatorFlags = 0; + +#ifdef HAVE_OPENCL_SVM + const svm::SVMCapabilities svmCaps = svm::getSVMCapabilitites(ctx); + if (ctx.useSVM() && svm::useSVM(usageFlags) && !svmCaps.isNoSVMSupport()) + { + allocatorFlags = ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED; + handle = bufferPoolSVM.allocate(total); + + // this property is constant, so single buffer pool can be used here + bool isFineGrainBuffer = svmCaps.isSupportFineGrainBuffer(); + allocatorFlags |= isFineGrainBuffer ? svm::OPENCL_SVM_FINE_GRAIN_BUFFER : svm::OPENCL_SVM_COARSE_GRAIN_BUFFER; + } + else +#endif if (createFlags == 0) { - handle = bufferPool.allocate(total, capacity); - if (!handle) - return defaultAllocate(dims, sizes, type, data, step, flags, usageFlags); allocatorFlags = ALLOCATOR_FLAGS_BUFFER_POOL_USED; + handle = bufferPool.allocate(total); + } + else if (createFlags == CL_MEM_ALLOC_HOST_PTR) + { + allocatorFlags = ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED; + handle = bufferPoolHostPtr.allocate(total); } else { - capacity = total; - cl_int retval = 0; - handle = clCreateBuffer((cl_context)ctx.ptr(), - CL_MEM_READ_WRITE|createFlags, total, 0, &retval); - if( !handle || retval != CL_SUCCESS ) - return defaultAllocate(dims, sizes, type, data, step, flags, usageFlags); - CV_IMPL_ADD(CV_IMPL_OCL) + CV_Assert(handle != NULL); // Unsupported, throw } + + if (!handle) + return defaultAllocate(dims, sizes, type, data, step, flags, usageFlags); + UMatData* u = new UMatData(this); u->data = 0; u->size = total; - u->capacity = capacity; u->handle = handle; u->flags = flags0; u->allocatorFlags_ = allocatorFlags; @@ -3788,22 +4252,81 @@ public: getBestFlags(ctx, accessFlags, usageFlags, createFlags, flags0); cl_context ctx_handle = (cl_context)ctx.ptr(); - cl_int retval = 0; - int tempUMatFlags = UMatData::TEMP_UMAT; - u->handle = clCreateBuffer(ctx_handle, CL_MEM_USE_HOST_PTR|CL_MEM_READ_WRITE, - u->size, u->origdata, &retval); - if((!u->handle || retval != CL_SUCCESS) && !(accessFlags & ACCESS_FAST)) - { - u->handle = clCreateBuffer(ctx_handle, CL_MEM_COPY_HOST_PTR|CL_MEM_READ_WRITE|createFlags, - u->size, u->origdata, &retval); - tempUMatFlags = UMatData::TEMP_COPIED_UMAT; + int allocatorFlags = 0; + int tempUMatFlags = 0; + void* handle = NULL; + cl_int retval = CL_SUCCESS; +#ifdef HAVE_OPENCL_SVM + svm::SVMCapabilities svmCaps = svm::getSVMCapabilitites(ctx); + bool useSVM = ctx.useSVM() && svm::useSVM(usageFlags); + if (useSVM && svmCaps.isSupportFineGrainSystem()) + { + allocatorFlags = svm::OPENCL_SVM_FINE_GRAIN_SYSTEM; + tempUMatFlags = UMatData::TEMP_UMAT; + handle = u->origdata; + CV_OPENCL_SVM_TRACE_P("Use fine grain system: %d (%p)\n", (int)u->size, 
handle); } - if(!u->handle || retval != CL_SUCCESS) + else if (useSVM && (svmCaps.isSupportFineGrainBuffer() || svmCaps.isSupportCoarseGrainBuffer())) + { + if (!(accessFlags & ACCESS_FAST)) // memcpy used + { + bool isFineGrainBuffer = svmCaps.isSupportFineGrainBuffer(); + + cl_svm_mem_flags memFlags = createFlags | + (isFineGrainBuffer ? CL_MEM_SVM_FINE_GRAIN_BUFFER : 0); + + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_OPENCL_SVM_TRACE_P("clSVMAlloc + copy: %d\n", (int)u->size); + handle = svmFns->fn_clSVMAlloc((cl_context)ctx.ptr(), memFlags, u->size, 0); + CV_Assert(handle); + + cl_command_queue q = NULL; + if (!isFineGrainBuffer) + { + q = (cl_command_queue)Queue::getDefault().ptr(); + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_TRUE, CL_MAP_WRITE, + handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + + } + memcpy(handle, u->origdata, u->size); + if (!isFineGrainBuffer) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, handle, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + + tempUMatFlags = UMatData::TEMP_UMAT | UMatData::TEMP_COPIED_UMAT; + allocatorFlags |= isFineGrainBuffer ? svm::OPENCL_SVM_FINE_GRAIN_BUFFER + : svm::OPENCL_SVM_COARSE_GRAIN_BUFFER; + } + } + else +#endif + { + tempUMatFlags = UMatData::TEMP_UMAT; + handle = clCreateBuffer(ctx_handle, CL_MEM_USE_HOST_PTR|createFlags, + u->size, u->origdata, &retval); + if((!handle || retval < 0) && !(accessFlags & ACCESS_FAST)) + { + handle = clCreateBuffer(ctx_handle, CL_MEM_COPY_HOST_PTR|CL_MEM_READ_WRITE|createFlags, + u->size, u->origdata, &retval); + tempUMatFlags |= UMatData::TEMP_COPIED_UMAT; + } + } + if(!handle || retval != CL_SUCCESS) return false; + u->handle = handle; u->prevAllocator = u->currAllocator; u->currAllocator = this; u->flags |= tempUMatFlags; + u->allocatorFlags_ = allocatorFlags; } if(accessFlags & ACCESS_WRITE) u->markHostCopyObsolete(true); @@ -3848,34 +4371,93 @@ public: CV_Assert(u->urefcount >= 0); CV_Assert(u->refcount >= 0); - // TODO: !!! 
when we add Shared Virtual Memory Support, - // this function (as well as the others) should be corrected CV_Assert(u->handle != 0 && u->urefcount == 0); if(u->tempUMat()) { // UMatDataAutoLock lock(u); + if( u->hostCopyObsolete() && u->refcount > 0 ) { - cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - if( u->tempCopiedUMat() ) +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) { - AlignedDataPtr alignedPtr(u->origdata, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); - CV_OclDbgAssert(clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, - u->size, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + if( u->tempCopiedUMat() ) + { + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER); + bool isFineGrainBuffer = (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER; + cl_command_queue q = NULL; + if (!isFineGrainBuffer) + { + CV_DbgAssert(((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0)); + q = (cl_command_queue)Queue::getDefault().ptr(); + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_READ, + u->handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + clFinish(q); + memcpy(u->origdata, u->handle, u->size); + if (!isFineGrainBuffer) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + } + else + { + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM); + // nothing + } } else +#endif { - cl_int retval = 0; - void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE, - (CL_MAP_READ | CL_MAP_WRITE), - 0, u->size, 0, 0, 0, &retval); - CV_OclDbgAssert(retval == CL_SUCCESS); - CV_OclDbgAssert(clEnqueueUnmapMemObject(q, (cl_mem)u->handle, data, 0, 0, 0) == CL_SUCCESS); - CV_OclDbgAssert(clFinish(q) == CL_SUCCESS); + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + if( u->tempCopiedUMat() ) + { + AlignedDataPtr alignedPtr(u->origdata, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); + CV_OclDbgAssert(clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, + u->size, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS); + } + else + { + // TODO Is it really needed for clCreateBuffer with CL_MEM_USE_HOST_PTR? 
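+                    // The blocking map/unmap pair below is intended to make the OpenCL runtime
+                    // synchronize the device-side content of this CL_MEM_USE_HOST_PTR buffer back
+                    // into the user-provided pointer (u->origdata) before the memory object is released.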
+ cl_int retval = 0; + void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE, + (CL_MAP_READ | CL_MAP_WRITE), + 0, u->size, 0, 0, 0, &retval); + CV_OclDbgAssert(retval == CL_SUCCESS); + CV_OclDbgAssert(clEnqueueUnmapMemObject(q, (cl_mem)u->handle, data, 0, 0, 0) == CL_SUCCESS); + CV_OclDbgAssert(clFinish(q) == CL_SUCCESS); + } + } + u->markHostCopyObsolete(false); + } +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + if( u->tempCopiedUMat() ) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_OPENCL_SVM_TRACE_P("clSVMFree: %p\n", u->handle); + svmFns->fn_clSVMFree((cl_context)ctx.ptr(), u->handle); } } - u->markHostCopyObsolete(false); - clReleaseMemObject((cl_mem)u->handle); + else +#endif + { + clReleaseMemObject((cl_mem)u->handle); + } u->handle = 0; u->currAllocator = u->prevAllocator; if(u->data && u->copyOnMap() && !(u->flags & UMatData::USER_ALLOCATED)) @@ -3894,14 +4476,42 @@ public: } if (u->allocatorFlags_ & ALLOCATOR_FLAGS_BUFFER_POOL_USED) { - bufferPool.release((cl_mem)u->handle, u->capacity); + bufferPool.release((cl_mem)u->handle); } + else if (u->allocatorFlags_ & ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED) + { + bufferPoolHostPtr.release((cl_mem)u->handle); + } +#ifdef HAVE_OPENCL_SVM + else if (u->allocatorFlags_ & ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED) + { + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + //nothing + } + else if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) != 0) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + } + bufferPoolSVM.release((void*)u->handle); + } +#endif else { clReleaseMemObject((cl_mem)u->handle); } u->handle = 0; - u->capacity = 0; delete u; } } @@ -3925,13 +4535,41 @@ public: { if( !u->copyOnMap() ) { + // TODO + // because there can be other map requests for the same UMat with different access flags, + // we use the universal (read-write) access mode. 
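+            // For SVM-backed buffers no clEnqueueMapBuffer call is needed: u->handle is already a
+            // host-accessible pointer. Coarse-grain SVM buffers only require an explicit
+            // clEnqueueSVMMap before the host is allowed to touch the memory.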
+#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_READ | CL_MAP_WRITE, + u->handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + u->allocatorFlags_ |= svm::OPENCL_SVM_BUFFER_MAP; + } + } + clFinish(q); + u->data = (uchar*)u->handle; + u->markHostCopyObsolete(false); + u->markDeviceMemMapped(true); + return; + } +#endif if (u->data) // FIXIT Workaround for UMat synchronization issue { //CV_Assert(u->hostCopyObsolete() == false); return; } - // because there can be other map requests for the same UMat with different access flags, - // we use the universal (read-write) access mode. + cl_int retval = 0; u->data = (uchar*)clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE, (CL_MAP_READ | CL_MAP_WRITE), @@ -3943,6 +4581,7 @@ public: return; } + // TODO Is it really a good idea and was it tested well? // if map failed, switch to copy-on-map mode for the particular buffer u->flags |= UMatData::COPY_ON_MAP; } @@ -3957,6 +4596,9 @@ public: if( (accessFlags & ACCESS_READ) != 0 && u->hostCopyObsolete() ) { AlignedDataPtr alignedPtr(u->data, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); +#ifdef HAVE_OPENCL_SVM + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == 0); +#endif CV_Assert( clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, u->size, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS ); u->markHostCopyObsolete(false); @@ -3983,6 +4625,31 @@ public: { CV_Assert(u->data != NULL); u->markDeviceMemMapped(false); +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) != 0); + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + clFinish(q); + u->allocatorFlags_ &= ~svm::OPENCL_SVM_BUFFER_MAP; + } + } + u->data = 0; + u->markDeviceCopyObsolete(false); + u->markHostCopyObsolete(false); + return; + } +#endif CV_Assert( (retval = clEnqueueUnmapMemObject(q, (cl_mem)u->handle, u->data, 0, 0, 0)) == CL_SUCCESS ); if (Device::getDefault().isAMD()) @@ -3995,6 +4662,9 @@ public: else if( u->copyOnMap() && u->deviceCopyObsolete() ) { AlignedDataPtr alignedPtr(u->data, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); +#ifdef HAVE_OPENCL_SVM + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == 0); +#endif CV_Assert( (retval = clEnqueueWriteBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, u->size, alignedPtr.getAlignedPtr(), 0, 0, 0)) == CL_SUCCESS ); } @@ -4102,17 +4772,78 @@ public: srcrawofs, new_srcofs, new_srcstep, dstrawofs, new_dstofs, new_dststep); - AlignedDataPtr alignedPtr((uchar*)dstptr, sz[0] * dststep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); - if( iscontinuous ) +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & 
svm::OPENCL_SVM_BUFFER_MASK) != 0) { - CV_Assert( clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, - srcrawofs, total, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS ); + CV_DbgAssert(u->data == NULL || u->data == u->handle); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0); + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_READ, + u->handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + clFinish(q); + if( iscontinuous ) + { + memcpy(dstptr, (uchar*)u->handle + srcrawofs, total); + } + else + { + // This code is from MatAllocator::download() + int isz[CV_MAX_DIM]; + uchar* srcptr = (uchar*)u->handle; + for( int i = 0; i < dims; i++ ) + { + CV_Assert( sz[i] <= (size_t)INT_MAX ); + if( sz[i] == 0 ) + return; + if( srcofs ) + srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1); + isz[i] = (int)sz[i]; + } + + Mat src(dims, isz, CV_8U, srcptr, srcstep); + Mat dst(dims, isz, CV_8U, dstptr, dststep); + + const Mat* arrays[] = { &src, &dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t j, planesz = it.size; + + for( j = 0; j < it.nplanes; j++, ++it ) + memcpy(ptrs[1], ptrs[0], planesz); + } + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + clFinish(q); + } } else +#endif { - CV_Assert( clEnqueueReadBufferRect(q, (cl_mem)u->handle, CL_TRUE, - new_srcofs, new_dstofs, new_sz, new_srcstep[0], new_srcstep[1], - new_dststep[0], new_dststep[1], alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS ); + AlignedDataPtr alignedPtr((uchar*)dstptr, sz[0] * dststep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); + if( iscontinuous ) + { + CV_Assert( clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, + srcrawofs, total, alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } + else + { + CV_Assert( clEnqueueReadBufferRect(q, (cl_mem)u->handle, CL_TRUE, + new_srcofs, new_dstofs, new_sz, new_srcstep[0], new_srcstep[1], + new_dststep[0], new_dststep[1], alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } } } @@ -4153,20 +4884,91 @@ public: CV_Assert( u->handle != 0 ); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - AlignedDataPtr alignedPtr((uchar*)srcptr, sz[0] * srcstep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); - if( iscontinuous ) +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) { - CV_Assert( clEnqueueWriteBuffer(q, (cl_mem)u->handle, - CL_TRUE, dstrawofs, total, srcptr, 0, 0, 0) == CL_SUCCESS ); + CV_DbgAssert(u->data == NULL || u->data == u->handle); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0); + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_WRITE, + u->handle, u->size, + 0, NULL, NULL); + 
CV_Assert(status == CL_SUCCESS); + } + clFinish(q); + if( iscontinuous ) + { + memcpy((uchar*)u->handle + dstrawofs, srcptr, total); + } + else + { + // This code is from MatAllocator::upload() + int isz[CV_MAX_DIM]; + uchar* dstptr = (uchar*)u->handle; + for( int i = 0; i < dims; i++ ) + { + CV_Assert( sz[i] <= (size_t)INT_MAX ); + if( sz[i] == 0 ) + return; + if( dstofs ) + dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1); + isz[i] = (int)sz[i]; + } + + Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep); + Mat dst(dims, isz, CV_8U, dstptr, dststep); + + const Mat* arrays[] = { &src, &dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t j, planesz = it.size; + + for( j = 0; j < it.nplanes; j++, ++it ) + memcpy(ptrs[1], ptrs[0], planesz); + } + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + clFinish(q); + } } else +#endif { - CV_Assert( clEnqueueWriteBufferRect(q, (cl_mem)u->handle, CL_TRUE, - new_dstofs, new_srcofs, new_sz, new_dststep[0], new_dststep[1], - new_srcstep[0], new_srcstep[1], srcptr, 0, 0, 0) == CL_SUCCESS ); + AlignedDataPtr alignedPtr((uchar*)srcptr, sz[0] * srcstep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); + if( iscontinuous ) + { + CV_Assert( clEnqueueWriteBuffer(q, (cl_mem)u->handle, + CL_TRUE, dstrawofs, total, alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } + else + { + CV_Assert( clEnqueueWriteBufferRect(q, (cl_mem)u->handle, CL_TRUE, + new_dstofs, new_srcofs, new_sz, new_dststep[0], new_dststep[1], + new_srcstep[0], new_srcstep[1], alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } } - u->markHostCopyObsolete(true); +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + // nothing + } + else +#endif + { + u->markHostCopyObsolete(true); + } u->markDeviceCopyObsolete(false); } @@ -4198,7 +5000,17 @@ public: { download(src, dst->data + dstrawofs, dims, sz, srcofs, srcstep, dststep); dst->markHostCopyObsolete(false); - dst->markDeviceCopyObsolete(true); +#ifdef HAVE_OPENCL_SVM + if ((dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + // nothing + } + else +#endif + { + dst->markDeviceCopyObsolete(true); + } return; } @@ -4206,26 +5018,110 @@ public: CV_Assert(dst->refcount == 0); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - cl_int retval; - if( iscontinuous ) + cl_int retval = CL_SUCCESS; +#ifdef HAVE_OPENCL_SVM + if ((src->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0 || + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) { - CV_Assert( (retval = clEnqueueCopyBuffer(q, (cl_mem)src->handle, (cl_mem)dst->handle, - srcrawofs, dstrawofs, total, 0, 0, 0)) == CL_SUCCESS ); + if ((src->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0 && + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + if( iscontinuous ) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMemcpy: %p <-- %p (%d)\n", + (uchar*)dst->handle + dstrawofs, (uchar*)src->handle + srcrawofs, 
(int)total); + cl_int status = svmFns->fn_clEnqueueSVMMemcpy(q, CL_TRUE, + (uchar*)dst->handle + dstrawofs, (uchar*)src->handle + srcrawofs, + total, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + else + { + clFinish(q); + // This code is from MatAllocator::download()/upload() + int isz[CV_MAX_DIM]; + uchar* srcptr = (uchar*)src->handle; + for( int i = 0; i < dims; i++ ) + { + CV_Assert( sz[i] <= (size_t)INT_MAX ); + if( sz[i] == 0 ) + return; + if( srcofs ) + srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1); + isz[i] = (int)sz[i]; + } + Mat m_src(dims, isz, CV_8U, srcptr, srcstep); + + uchar* dstptr = (uchar*)dst->handle; + for( int i = 0; i < dims; i++ ) + { + if( dstofs ) + dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1); + } + Mat m_dst(dims, isz, CV_8U, dstptr, dststep); + + const Mat* arrays[] = { &m_src, &m_dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t j, planesz = it.size; + + for( j = 0; j < it.nplanes; j++, ++it ) + memcpy(ptrs[1], ptrs[0], planesz); + } + } + else + { + if ((src->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + map(src, ACCESS_READ); + upload(dst, src->data + srcrawofs, dims, sz, dstofs, dststep, srcstep); + unmap(src); + } + else + { + map(dst, ACCESS_WRITE); + download(src, dst->data + dstrawofs, dims, sz, srcofs, srcstep, dststep); + unmap(dst); + } + } } else +#endif { - CV_Assert( (retval = clEnqueueCopyBufferRect(q, (cl_mem)src->handle, (cl_mem)dst->handle, - new_srcofs, new_dstofs, new_sz, - new_srcstep[0], new_srcstep[1], - new_dststep[0], new_dststep[1], - 0, 0, 0)) == CL_SUCCESS ); + if( iscontinuous ) + { + CV_Assert( (retval = clEnqueueCopyBuffer(q, (cl_mem)src->handle, (cl_mem)dst->handle, + srcrawofs, dstrawofs, total, 0, 0, 0)) == CL_SUCCESS ); + } + else + { + CV_Assert( (retval = clEnqueueCopyBufferRect(q, (cl_mem)src->handle, (cl_mem)dst->handle, + new_srcofs, new_dstofs, new_sz, + new_srcstep[0], new_srcstep[1], + new_dststep[0], new_dststep[1], + 0, 0, 0)) == CL_SUCCESS ); + } } - if(retval == CL_SUCCESS) + if (retval == CL_SUCCESS) { CV_IMPL_ADD(CV_IMPL_OCL) } - dst->markHostCopyObsolete(true); +#ifdef HAVE_OPENCL_SVM + if ((dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + // nothing + } + else +#endif + { + dst->markHostCopyObsolete(true); + } dst->markDeviceCopyObsolete(false); if( _sync ) @@ -4234,7 +5130,23 @@ public: } } - BufferPoolController* getBufferPoolController() const { return &bufferPool; } + BufferPoolController* getBufferPoolController(const char* id) const { +#ifdef HAVE_OPENCL_SVM + if ((svm::checkForceSVMUmatUsage() && (id == NULL || strcmp(id, "OCL") == 0)) || (id != NULL && strcmp(id, "SVM") == 0)) + { + return &bufferPoolSVM; + } +#endif + if (id != NULL && strcmp(id, "HOST_ALLOC") == 0) + { + return &bufferPoolHostPtr; + } + if (id != NULL && strcmp(id, "OCL") != 0) + { + CV_ErrorNoReturn(cv::Error::StsBadArg, "getBufferPoolController(): unknown BufferPool ID\n"); + } + return &bufferPool; + } MatAllocator* matStdAllocator; }; @@ -4818,7 +5730,7 @@ void* Image2D::ptr() const return p ? 
p->handle : 0; } -bool isPerformanceCheckBypassed() +bool internal::isPerformanceCheckBypassed() { static bool initialized = false; static bool value = false; @@ -4830,4 +5742,22 @@ bool isPerformanceCheckBypassed() return value; } +bool internal::isCLBuffer(UMat& u) +{ + void* h = u.handle(ACCESS_RW); + if (!h) + return true; + CV_DbgAssert(u.u->currAllocator == getOpenCLAllocator()); +#if 1 + if ((u.u->allocatorFlags_ & 0xffff0000) != 0) // OpenCL SVM flags are stored here + return false; +#else + cl_mem_object_type type = 0; + cl_int ret = clGetMemObjectInfo((cl_mem)h, CL_MEM_TYPE, sizeof(type), &type, NULL); + if (ret != CL_SUCCESS || type != CL_MEM_OBJECT_BUFFER) + return false; +#endif + return true; +} + }} diff --git a/modules/core/src/opencl/runtime/opencl_core.cpp b/modules/core/src/opencl/runtime/opencl_core.cpp index 93f6aae5d..5dd174709 100644 --- a/modules/core/src/opencl/runtime/opencl_core.cpp +++ b/modules/core/src/opencl/runtime/opencl_core.cpp @@ -182,6 +182,65 @@ static void* opencl_check_fn(int ID); #define CUSTOM_FUNCTION_ID 1000 +#ifdef HAVE_OPENCL_SVM +#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp" +#define SVM_FUNCTION_ID_START CUSTOM_FUNCTION_ID +#define SVM_FUNCTION_ID_END CUSTOM_FUNCTION_ID + 100 + +enum OPENCL_FN_SVM_ID +{ + OPENCL_FN_clSVMAlloc = SVM_FUNCTION_ID_START, + OPENCL_FN_clSVMFree, + OPENCL_FN_clSetKernelArgSVMPointer, + OPENCL_FN_clSetKernelExecInfo, + OPENCL_FN_clEnqueueSVMFree, + OPENCL_FN_clEnqueueSVMMemcpy, + OPENCL_FN_clEnqueueSVMMemFill, + OPENCL_FN_clEnqueueSVMMap, + OPENCL_FN_clEnqueueSVMUnmap, +}; + +void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment) = + opencl_fn4::switch_fn; +static const struct DynamicFnEntry _clSVMAlloc_definition = { "clSVMAlloc", (void**)&clSVMAlloc}; +void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer) = + opencl_fn2::switch_fn; +static const struct DynamicFnEntry _clSVMFree_definition = { "clSVMFree", (void**)&clSVMFree}; +cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value) = + opencl_fn3::switch_fn; +static const struct DynamicFnEntry _clSetKernelArgSVMPointer_definition = { "clSetKernelArgSVMPointer", (void**)&clSetKernelArgSVMPointer}; +//void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value) = +// opencl_fn4::switch_fn; +//static const struct DynamicFnEntry _clSetKernelExecInfo_definition = { "clSetKernelExecInfo", (void**)&clSetKernelExecInfo}; +//cl_int (CL_API_CALL *clEnqueueSVMFree)(...) 
= +// opencl_fn8::switch_fn; +//static const struct DynamicFnEntry _clEnqueueSVMFree_definition = { "clEnqueueSVMFree", (void**)&clEnqueueSVMFree}; +cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn8::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMMemcpy_definition = { "clEnqueueSVMMemcpy", (void**)&clEnqueueSVMMemcpy}; +cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn8::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMMemFill_definition = { "clEnqueueSVMMemFill", (void**)&clEnqueueSVMMemFill}; +cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn8::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMMap_definition = { "clEnqueueSVMMap", (void**)&clEnqueueSVMMap}; +cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn5::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMUnmap_definition = { "clEnqueueSVMUnmap", (void**)&clEnqueueSVMUnmap}; + +static const struct DynamicFnEntry* opencl_svm_fn_list[] = { + &_clSVMAlloc_definition, + &_clSVMFree_definition, + &_clSetKernelArgSVMPointer_definition, + NULL/*&_clSetKernelExecInfo_definition*/, + NULL/*&_clEnqueueSVMFree_definition*/, + &_clEnqueueSVMMemcpy_definition, + &_clEnqueueSVMMemFill_definition, + &_clEnqueueSVMMap_definition, + &_clEnqueueSVMUnmap_definition, +}; +#endif // HAVE_OPENCL_SVM + // // END OF CUSTOM FUNCTIONS HERE // @@ -194,6 +253,14 @@ static void* opencl_check_fn(int ID) assert(ID >= 0 && ID < (int)(sizeof(opencl_fn_list)/sizeof(opencl_fn_list[0]))); e = opencl_fn_list[ID]; } +#ifdef HAVE_OPENCL_SVM + else if (ID >= SVM_FUNCTION_ID_START && ID < SVM_FUNCTION_ID_END) + { + ID = ID - SVM_FUNCTION_ID_START; + assert(ID >= 0 && ID < (int)(sizeof(opencl_svm_fn_list)/sizeof(opencl_svm_fn_list[0]))); + e = opencl_svm_fn_list[ID]; + } +#endif else { CV_ErrorNoReturn(cv::Error::StsBadArg, "Invalid function ID"); diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp index ffc20777b..1b42f1ee1 100644 --- a/modules/core/src/umatrix.cpp +++ b/modules/core/src/umatrix.cpp @@ -55,7 +55,7 @@ UMatData::UMatData(const MatAllocator* allocator) prevAllocator = currAllocator = allocator; urefcount = refcount = 0; data = origdata = 0; - size = 0; capacity = 0; + size = 0; flags = 0; handle = 0; userdata = 0; @@ -67,7 +67,7 @@ UMatData::~UMatData() prevAllocator = currAllocator = 0; urefcount = refcount = 0; data = origdata = 0; - size = 0; capacity = 0; + size = 0; flags = 0; handle = 0; userdata = 0; @@ -221,7 +221,7 @@ UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const temp_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags); temp_u->refcount = 1; } - UMat::getStdAllocator()->allocate(temp_u, accessFlags, usageFlags); + UMat::getStdAllocator()->allocate(temp_u, accessFlags, usageFlags); // TODO result is not checked hdr.flags = flags; setSize(hdr, dims, 
size.p, step.p); finalizeHdr(hdr); @@ -575,7 +575,7 @@ Mat UMat::getMat(int accessFlags) const { if(!u) return Mat(); - u->currAllocator->map(u, accessFlags | ACCESS_READ); + u->currAllocator->map(u, accessFlags | ACCESS_READ); // TODO Support ACCESS_WRITE without unnecessary data transfers CV_Assert(u->data != 0); Mat hdr(dims, size.p, type(), u->data + offset, step.p); hdr.flags = flags; From 53c946045406f5d0196fd434221f78dcd335def2 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Wed, 31 Dec 2014 14:18:50 +0300 Subject: [PATCH 90/98] Reorganized ML module documentation - fixed one BiB record - moved algorithms overview to separate page - added docs for some enumerations - fixed some small documentation errors --- doc/opencv.bib | 10 +- modules/ml/doc/ml_intro.markdown | 488 ++++++++ modules/ml/include/opencv2/ml.hpp | 1943 ++++++++++------------------- 3 files changed, 1172 insertions(+), 1269 deletions(-) create mode 100644 modules/ml/doc/ml_intro.markdown diff --git a/doc/opencv.bib b/doc/opencv.bib index 067a1aa00..be53dbaa5 100644 --- a/doc/opencv.bib +++ b/doc/opencv.bib @@ -106,11 +106,11 @@ year = {1998}, publisher = {Citeseer} } -@ARTICLE{Breiman84, - author = {Olshen, LBJFR and Stone, Charles J}, - title = {Classification and regression trees}, - year = {1984}, - journal = {Wadsworth International Group} +@book{Breiman84, + title={Classification and regression trees}, + author={Breiman, Leo and Friedman, Jerome and Stone, Charles J and Olshen, Richard A}, + year={1984}, + publisher={CRC press} } @INCOLLECTION{Brox2004, author = {Brox, Thomas and Bruhn, Andres and Papenberg, Nils and Weickert, Joachim}, diff --git a/modules/ml/doc/ml_intro.markdown b/modules/ml/doc/ml_intro.markdown new file mode 100644 index 000000000..5e3c3d2cf --- /dev/null +++ b/modules/ml/doc/ml_intro.markdown @@ -0,0 +1,488 @@ +Machine Learning Overview {#ml_intro} +========================= + +[TOC] + +Training Data {#ml_intro_data} +============= + +In machine learning algorithms there is notion of training data. Training data includes several +components: + +- A set of training samples. Each training sample is a vector of values (in Computer Vision it's + sometimes referred to as feature vector). Usually all the vectors have the same number of + components (features); OpenCV ml module assumes that. Each feature can be ordered (i.e. its + values are floating-point numbers that can be compared with each other and strictly ordered, + i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be + integers, strings etc.). +- Optional set of responses corresponding to the samples. Training data with no responses is used + in unsupervised learning algorithms that learn structure of the supplied data based on distances + between different samples. Training data with responses is used in supervised learning + algorithms, which learn the function mapping samples to responses. Usually the responses are + scalar values, ordered (when we deal with regression problem) or categorical (when we deal with + classification problem; in this case the responses are often called "labels"). Some algorithms, + most noticeably Neural networks, can handle not only scalar, but also multi-dimensional or + vector responses. +- Another optional component is the mask of missing measurements. Most algorithms require all the + components in all the training samples be valid, but some other algorithms, such as decision + tress, can handle the cases of missing measurements. 
+- In the case of classification problem user may want to give different weights to different + classes. This is useful, for example, when: + - user wants to shift prediction accuracy towards lower false-alarm rate or higher hit-rate. + - user wants to compensate for significantly different amounts of training samples from + different classes. +- In addition to that, each training sample may be given a weight, if user wants the algorithm to + pay special attention to certain training samples and adjust the training model accordingly. +- Also, user may wish not to use the whole training data at once, but rather use parts of it, e.g. + to do parameter optimization via cross-validation procedure. + +As you can see, training data can have rather complex structure; besides, it may be very big and/or +not entirely available, so there is need to make abstraction for this concept. In OpenCV ml there is +cv::ml::TrainData class for that. + +@sa cv::ml::TrainData + +Normal Bayes Classifier {#ml_intro_bayes} +======================= + +This simple classification model assumes that feature vectors from each class are normally +distributed (though, not necessarily independently distributed). So, the whole data distribution +function is assumed to be a Gaussian mixture, one component per class. Using the training data the +algorithm estimates mean vectors and covariance matrices for every class, and then it uses them for +prediction. + +@sa cv::ml::NormalBayesClassifier + +K-Nearest Neighbors {#ml_intro_knn} +=================== + +The algorithm caches all training samples and predicts the response for a new sample by analyzing a +certain number (__K__) of the nearest neighbors of the sample using voting, calculating weighted +sum, and so on. The method is sometimes referred to as "learning by example" because for prediction +it looks for the feature vector with a known response that is closest to the given vector. + +@sa cv::ml::KNearest + +Support Vector Machines {#ml_intro_svm} +======================= + +Originally, support vector machines (SVM) was a technique for building an optimal binary (2-class) +classifier. Later the technique was extended to regression and clustering problems. SVM is a partial +case of kernel-based methods. It maps feature vectors into a higher-dimensional space using a kernel +function and builds an optimal linear discriminating function in this space or an optimal hyper- +plane that fits into the training data. In case of SVM, the kernel is not defined explicitly. +Instead, a distance between any 2 points in the hyper-space needs to be defined. + +The solution is optimal, which means that the margin between the separating hyper-plane and the +nearest feature vectors from both classes (in case of 2-class classifier) is maximal. The feature +vectors that are the closest to the hyper-plane are called _support vectors_, which means that the +position of other vectors does not affect the hyper-plane (the decision function). + +SVM implementation in OpenCV is based on @cite LibSVM + +@sa cv::ml::SVM + +Prediction with SVM {#ml_intro_svm_predict} +------------------- + +StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get +the raw response from SVM (in the case of regression, 1-class or 2-class classification problem). + +Decision Trees {#ml_intro_trees} +============== + +The ML classes discussed in this section implement Classification and Regression Tree algorithms +described in @cite Breiman84 . 
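+
+As a quick illustration, a minimal train/predict round trip with a single decision tree might look
+like the sketch below (this is only a sketch: the `samples`, `responses` and `testSample` matrices
+are assumed to be prepared by the caller, one row per sample):
+
+@code{.cpp}
+using namespace cv;
+using namespace cv::ml;
+
+// samples:   CV_32F matrix of feature vectors, one row per training sample
+// responses: CV_32S class labels (or CV_32F values for regression), one row per sample
+Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, responses);
+
+Ptr<DTrees> dtree = DTrees::create();
+dtree->setMaxDepth(10);       // limit the depth of the tree
+dtree->setMinSampleCount(2);  // do not split nodes with too few samples
+dtree->setCVFolds(0);         // disable built-in cross-validation pruning
+dtree->train(data);
+
+// testSample: a single 1 x nfeatures CV_32F row
+float prediction = dtree->predict(testSample);
+@endcode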
+ +The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's +also a base class for RTrees and Boost. + +A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used +either for classification or for regression. For classification, each tree leaf is marked with a +class label; multiple leaves may have the same label. For regression, a constant is also assigned to +each tree leaf, so the approximation function is piecewise constant. + +@sa cv::ml::DTrees + +Predicting with Decision Trees {#ml_intro_trees_predict} +------------------------------ + +To reach a leaf node and to obtain a response for the input feature vector, the prediction procedure +starts with the root node. From each non-leaf node the procedure goes to the left (selects the left +child node as the next observed node) or to the right based on the value of a certain variable whose +index is stored in the observed node. The following variables are possible: + +- __Ordered variables.__ The variable value is compared with a threshold that is also stored in + the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it + goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the + left, else to the right. + +- __Categorical variables.__ A discrete variable value is tested to see whether it belongs to a + certain subset of values (also stored in the node) from a limited set of values the variable + could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For + example, if the color is green or red, go to the left, else to the right. + +So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is used. +This pair is called a _split_ (split on the variable variable_index ). Once a leaf node is reached, +the value assigned to this node is used as the output of the prediction procedure. + +Sometimes, certain features of the input vector are missed (for example, in the darkness it is +difficult to determine the object color), and the prediction procedure may get stuck in the certain +node (in the mentioned example, if the node is split by color). To avoid such situations, decision +trees use so-called _surrogate splits_. That is, in addition to the best "primary" split, every tree +node may also be split to one or more other variables with nearly the same results. + +Training Decision Trees {#ml_intro_trees_train} +----------------------- + +The tree is built recursively, starting from the root node. All training data (feature vectors and +responses) is used to split the root node. In each node the optimum decision rule (the best +"primary" split) is found based on some criteria. In machine learning, gini "purity" criteria are +used for classification, and sum of squared errors is used for regression. Then, if necessary, the +surrogate splits are found. They resemble the results of the primary split on the training data. All +the data is divided using the primary and the surrogate splits (like it is done in the prediction +procedure) between the left and the right child node. Then, the procedure recursively splits both +left and right nodes. At each node the recursive procedure may stop (that is, stop splitting the +node further) in one of the following cases: + +- Depth of the constructed tree branch has reached the specified maximum value. 
+- Number of training samples in the node is less than the specified threshold when it is not
+  statistically representative to split the node further.
+- All the samples in the node belong to the same class or, in case of regression, the variation is
+  too small.
+- The best found split does not give any noticeable improvement compared to a random choice.
+
+When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That is,
+some branches of the tree that may lead to the model overfitting are cut off. Normally, this
+procedure is only applied to standalone decision trees. Usually tree ensembles build trees that are
+small enough and use their own protection schemes against overfitting.
+
+Variable Importance {#ml_intro_trees_var}
+-------------------
+
+Besides the prediction that is an obvious use of decision trees, the tree can also be used for
+various data analyses. One of the key properties of the constructed decision tree algorithms is an
+ability to compute the importance (relative decisive power) of each variable. For example, in a spam
+filter that uses a set of words occurring in the message as a feature vector, the variable importance
+rating can be used to determine the most "spam-indicating" words and thus help keep the dictionary
+size reasonable.
+
+Importance of each variable is computed over all the splits on this variable in the tree, primary
+and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits must be
+enabled in the training parameters, even if there is no missing data.
+
+Boosting {#ml_intro_boost}
+========
+
+A common machine learning task is supervised learning. In supervised learning, the goal is to learn
+the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output \f$y\f$ .
+Predicting the qualitative output is called _classification_, while predicting the quantitative
+output is called _regression_.
+
+Boosting is a powerful learning concept that provides a solution to the supervised classification
+learning task. It combines the performance of many "weak" classifiers to produce a powerful
+committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be
+very simple and computationally inexpensive. However, many of them smartly combine results to a
+strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and
+Neural Networks.
+
+Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest
+decision trees with only a single split node per tree (called stumps) are sufficient.
+
+The boosted model is based on \f$N\f$ training examples \f$\{(x_i,y_i)\}_1^N\f$ with \f$x_i \in{R^K}\f$
+and \f$y_i \in{-1, +1}\f$ . \f$x_i\f$ is a \f$K\f$ -component vector. Each component encodes a
+feature relevant to the learning task at hand. The desired two-class output is encoded as -1 and +1.
+
+Different variants of boosting are known as Discrete AdaBoost, Real AdaBoost, LogitBoost, and Gentle
+AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this
+chapter focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below.
+Initially the same weight is assigned to each sample (step 2). Then, a weak classifier
+\f$f_m(x)\f$ is trained on the weighted training data (step 3a). Its weighted training error and
+scaling factor \f$c_m\f$ is computed (step 3b).
The weights are increased for training samples that
+have been misclassified (step 3c). All weights are then normalized, and the process of finding the
+next weak classifier continues for another \f$M-1\f$ times. The final classifier \f$F(x)\f$ is the
+sign of the weighted sum over the individual weak classifiers (step 4).
+
+__Two-class Discrete AdaBoost Algorithm__
+
+- Given \f$N\f$ examples \f$\{(x_i,y_i)\}_1^N\f$ with \f$x_i \in{R^K}, y_i \in{-1, +1}\f$ .
+
+- Assign weights as \f$w_i = 1/N, i = 1,...,N\f$ .
+
+- Repeat for \f$m = 1,2,...,M\f$ :
+
+    - Fit the classifier \f$f_m(x) \in{-1,1}\f$, using weights \f$w_i\f$ on the training data.
+
+    - Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], c_m = log((1 - err_m)/err_m)\f$ .
+
+    - Set \f$w_i \Leftarrow w_i exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and
+      renormalize so that \f$\Sigma_i w_i = 1\f$ .
+
+- Classify new samples _x_ using the formula: \f$\textrm{sign} (\Sigma_{m=1}^{M} c_m f_m(x))\f$ .
+
+@note Similar to the classical boosting methods, the current implementation supports two-class
+classifiers only. For M \> 2 classes, there is the __AdaBoost.MH__ algorithm (described in
+@cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training set.
+
+To reduce computation time for boosted models without substantially losing accuracy, the influence
+trimming technique can be employed. As the training algorithm proceeds and the number of trees in
+the ensemble is increased, a larger number of the training samples are classified correctly and with
+increasing confidence, thereby those samples receive smaller weights on the subsequent iterations.
+Examples with a very low relative weight have a small impact on the weak classifier training. Thus,
+such examples may be excluded during the weak classifier training without having much effect on the
+induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples
+with the summary fraction weight_trim_rate of the total weight mass are used in the weak classifier
+training. Note that the weights for __all__ training examples are recomputed at each training
+iteration. Examples deleted at a particular iteration may be used again for learning some of the
+weak classifiers further @cite FHT98 .
+
+@sa cv::ml::Boost
+
+Prediction with Boost {#ml_intro_boost_predict}
+---------------------
+StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get
+the raw sum from the Boost classifier.
+
+Random Trees {#ml_intro_rtrees}
+============
+
+Random trees have been introduced by Leo Breiman and Adele Cutler:
+ . The algorithm can deal with both
+classification and regression problems. Random trees is a collection (ensemble) of tree predictors
+that is called _forest_ further in this section (the term has been also introduced by L. Breiman).
+The classification works as follows: the random trees classifier takes the input feature vector,
+classifies it with every tree in the forest, and outputs the class label that received the majority
+of "votes". In case of a regression, the classifier response is the average of the responses over
+all the trees in the forest.
+
+All the trees are trained with the same parameters but on different training sets. These sets are
+generated from the original training set using the bootstrap procedure: for each training set, you
+randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen
+with replacement.
+
+Random Trees {#ml_intro_rtrees}
+============
+
+Random trees have been introduced by Leo Breiman and Adele Cutler:
+ . The algorithm can deal with both
+classification and regression problems. Random trees is a collection (ensemble) of tree predictors
+that is called _forest_ further in this section (the term has been also introduced by L. Breiman).
+The classification works as follows: the random trees classifier takes the input feature vector,
+classifies it with every tree in the forest, and outputs the class label that received the majority
+of "votes". In case of a regression, the classifier response is the average of the responses over
+all the trees in the forest.
+
+All the trees are trained with the same parameters but on different training sets. These sets are
+generated from the original training set using the bootstrap procedure: for each training set, you
+randomly select the same number of vectors as in the original set (= N). The vectors are chosen
+with replacement. That is, some vectors will occur more than once and some will be absent. At each
+node of each trained tree, not all the variables are used to find the best split, but a random
+subset of them. With each node a new subset is generated. However, its size is fixed for all the
+nodes and all the trees. It is a training parameter set to \f$\sqrt{number\_of\_variables}\f$ by
+default. None of the built trees are pruned.
+
+In random trees there is no need for any accuracy estimation procedures, such as cross-validation or
+bootstrap, or a separate test set to get an estimate of the training error. The error is estimated
+internally during the training. When the training set for the current tree is drawn by sampling with
+replacement, some vectors are left out (so-called _oob (out-of-bag) data_ ). The size of oob data is
+about N/3 . The classification error is estimated by using this oob-data as follows:
+
+- Get a prediction for each vector, which is oob relative to the i-th tree, using the i-th
+  tree itself.
+
+- After all the trees have been trained, for each vector that has ever been oob, find the
+  class-winner for it (the class that received the majority of votes in the trees where
+  the vector was oob) and compare it to the ground-truth response.
+
+- Compute the classification error estimate as a ratio of the number of misclassified oob vectors
+  to all the vectors in the original data. In case of regression, the oob error is computed as the
+  sum of squared differences between the predicted and the true responses of the oob vectors,
+  divided by the total number of vectors.
+
+For a usage example of random trees, see the letter_recog.cpp sample in the OpenCV distribution.
+
+@sa cv::ml::RTrees
+
+__References:__
+
+- _Machine Learning_, Wald I, July 2002.
+
+- _Looking Inside the Black Box_, Wald II, July 2002.
+
+- _Software for the Masses_, Wald III, July 2002.
+
+- And other articles from the web site
+  
+
+Expectation Maximization {#ml_intro_em}
+========================
+
+The Expectation Maximization (EM) algorithm estimates the parameters of the multivariate probability
+density function in the form of a Gaussian mixture distribution with a specified number of mixtures.
+
+Consider a set of N feature vectors { \f$x_1, x_2,...,x_{N}\f$ } from a d-dimensional Euclidean
+space drawn from a Gaussian mixture:
+
+\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f]
+
+\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} \exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f]
+
+where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the mean
+\f$a_k\f$ and covariance matrix \f$S_k\f$, \f$\pi_k\f$ is the weight of the k-th mixture. Given the
+number of mixtures \f$M\f$ and the samples \f$x_i\f$, \f$i=1..N\f$ the algorithm finds the
+maximum-likelihood estimates (MLE) of all the mixture parameters, that is, \f$a_k\f$, \f$S_k\f$ and
+\f$\pi_k\f$ :
+
+\f[L(x, \theta )= \log p(x, \theta )= \sum _{i=1}^{N} \log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f]
+
+\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbb{R} ^d,S_k=S_k^T>0,S_k \in \mathbb{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f]
+
+The EM algorithm is an iterative procedure. Each iteration includes two steps. At the first step
+(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{ki}\f$ in
+the formula below) of sample i belonging to mixture k using the currently available mixture
+parameter estimates:
+
+\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f]
+
+At the second step (Maximization step or M-step), the mixture parameter estimates are refined using
+the computed probabilities:
+
+\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f]
+
+Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$ can
+be provided. Another alternative when \f$p_{i,k}\f$ are unknown is to use a simpler clustering
+algorithm to pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$ . The k-means
+algorithm is often used for that purpose.
+
+One of the main problems of the EM algorithm is a large number of parameters to estimate. The
+majority of the parameters reside in covariance matrices, which contain \f$d \times d\f$ elements
+each, where \f$d\f$ is the feature space dimensionality. However, in many practical problems, the
+covariance matrices are close to diagonal or even to \f$\mu_k*I\f$ , where \f$I\f$ is an identity
+matrix and \f$\mu_k\f$ is a mixture-dependent "scale" parameter. So, a robust computation scheme
+could start with harder constraints on the covariance matrices and then use the estimated parameters
+as an input for a less constrained optimization problem (often a diagonal covariance matrix is
+already a good enough approximation).
+
+@sa cv::ml::EM
+
+References:
+- Bilmes98 J. A. Bilmes. _A Gentle Tutorial of the EM Algorithm and its Application to Parameter
+Estimation for Gaussian Mixture and Hidden Markov Models_. Technical Report TR-97-021,
+International Computer Science Institute and Computer Science Division, University of California
+at Berkeley, April 1998.
+
+Neural Networks {#ml_intro_ann}
+===============
+
+ML implements feed-forward artificial neural networks or, more particularly, multi-layer perceptrons
+(MLP), the most commonly used type of neural networks. An MLP consists of the input layer, the
+output layer, and one or more hidden layers. Each layer of the MLP includes one or more neurons
+directionally linked with the neurons from the previous and the next layer. The example below
+represents a 3-layer perceptron with three inputs, two outputs, and the hidden layer including five
+neurons:
+
+![image](pics/mlp.png)
+
+All the neurons in MLP are similar. Each of them has several input links (it takes the output values
+from several neurons in the previous layer as input) and several output links (it passes the
+response to several neurons in the next layer). The values retrieved from the previous layer are
+summed up with certain weights, individual for each neuron, plus the bias term. The sum is
+transformed using the activation function \f$f\f$ that may also be different for different neurons.
+
+![image](pics/neuron_model.png)
+
+In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$ , the outputs \f$y_i\f$ of the
+layer \f$n+1\f$ are computed as:
+
+\f[u_i = \sum _j (w^{n+1}_{i,j}*x_j) + w^{n+1}_{i,bias}\f]
+
+\f[y_i = f(u_i)\f]
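+
+For a single neuron this is just a weighted sum passed through the activation function. The
+following tiny sketch only illustrates the formulas above (plain C++, not part of the ML API); it
+assumes the symmetrical sigmoid with \f$\alpha = \beta = 1\f$ described below:
+@code{.cpp}
+#include <cmath>
+#include <vector>
+
+double neuronOutput(const std::vector<double>& x, const std::vector<double>& w, double bias)
+{
+    double u = bias;                  // w^{n+1}_{i,bias}
+    for (size_t j = 0; j < x.size(); ++j)
+        u += w[j] * x[j];             // sum_j w^{n+1}_{i,j} * x_j
+    return (1.0 - std::exp(-u)) / (1.0 + std::exp(-u)); // f(u): symmetrical sigmoid
+}
+@endcode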
+
+Different activation functions may be used. ML implements three standard functions:
+
+- Identity function ( cv::ml::ANN_MLP::IDENTITY ): \f$f(x)=x\f$
+
+- Symmetrical sigmoid ( cv::ml::ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha
+  x})/(1+e^{-\alpha x})\f$, which is the default choice for MLP. The standard sigmoid with
+  \f$\beta =1, \alpha =1\f$ is shown below:
+
+  ![image](pics/sigmoid_bipolar.png)
+
+- Gaussian function ( cv::ml::ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not
+  completely supported at the moment.
+
+In ML, all the neurons have the same activation functions, with the same free parameters (
+\f$\alpha, \beta\f$ ) that are specified by the user and are not altered by the training algorithms.
+
+So, the whole trained network works as follows:
+
+1. Take the feature vector as input. The vector size is equal to the size of the input layer.
+2. Pass values as input to the first hidden layer.
+3. Compute outputs of the hidden layer using the weights and the activation functions.
+4. Pass outputs further downstream until you compute the output layer.
+
+So, to compute the network, you need to know all the weights \f$w^{n+1}_{i,j}\f$ . The weights are
+computed by the training algorithm. The algorithm takes a training set, multiple input vectors with
+the corresponding output vectors, and iteratively adjusts the weights to enable the network to give
+the desired response to the provided input vectors.
+
+The larger the network size (the number of hidden layers and their sizes) is, the higher the
+potential network flexibility. The error on the training set could be made arbitrarily small. But at
+the same time the learned network also "learns" the noise present in the training set, so the error
+on the test set usually starts increasing after the network size reaches a limit. Besides, the
+larger networks are trained much longer than the smaller ones, so it is reasonable to pre-process
+the data, using cv::PCA or a similar technique, and train a smaller network on only essential
+features.
+
+Another MLP feature is an inability to handle categorical data as is. However, there is a
+workaround. If a certain feature in the input or output (in case of an n-class classifier for
+\f$n>2\f$ ) layer is categorical and can take \f$M>2\f$ different values, it makes sense to
+represent it as a binary tuple of M elements, where the i-th element is 1 if and only if the
+feature is equal to the i-th value out of M possible. It increases the size of the input/output
+layer but speeds up the training algorithm convergence and at the same time enables "fuzzy" values
+of such variables, that is, a tuple of probabilities instead of a fixed value.
+
+ML implements two algorithms for training MLPs. The first algorithm is a classical random
+sequential back-propagation algorithm. The second (default) one is a batch RPROP algorithm.
+
+@sa cv::ml::ANN_MLP
+
+Logistic Regression {#ml_intro_lr}
+===================
+
+ML implements logistic regression, which is a probabilistic classification technique. Logistic
+Regression is a binary classification algorithm which is closely related to Support Vector Machines
+(SVM). Like SVM, Logistic Regression can be extended to work on multi-class classification problems
+like digit recognition (i.e. recognizing digits like 0, 1, 2, 3, ... from the given images). This
+version of Logistic Regression supports both binary and multi-class classifications (for multi-class
+it creates multiple 2-class classifiers). In order to train the logistic regression classifier,
+Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see
+). Logistic Regression is a
+discriminative classifier (see for more details).
+Logistic Regression is implemented as a C++ class in LogisticRegression.
+
+In Logistic Regression, we try to optimize the training parameter \f$\theta\f$ such that the
+hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is achieved. We have \f$h_\theta(x) = g(\theta^T x)\f$,
+where \f$g(z) = \frac{1}{1+e^{-z}}\f$ is the logistic or sigmoid function. The term "Logistic" in
+Logistic Regression refers to this function. For given data of a binary classification problem of
+classes 0 and 1, one can determine that the given data instance belongs to class 1 if \f$h_\theta(x)
+\geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ .
+
+In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
+training error and ensuring high training accuracy. cv::ml::LogisticRegression::Params is the
+structure that defines parameters that are required to train a Logistic Regression classifier.
+
+The learning rate is determined by cv::ml::LogisticRegression::Params.alpha. It determines how fast
+we approach the solution. It is a positive real number.
+
+Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in
+LogisticRegression. The number of iterations these optimization algorithms have to run is specified
+by cv::ml::LogisticRegression::Params.num_iters. The number of iterations can be thought of as the
+number of steps taken, while the learning rate specifies whether each step is a long one or a short
+one. These two parameters define how fast we arrive at a possible solution.
+
+In order to compensate for overfitting, regularization is performed, which can be enabled by setting
+cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can
+specify what kind of regularization has to be performed by setting
+cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2 values.
+
+LogisticRegression provides a choice of two training methods: Batch Gradient Descent and Mini-Batch
+Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to either
+BATCH or MINI_BATCH. If the training method is set to MINI_BATCH, the size of the mini batch has to
+be set to a positive integer using cv::ml::LogisticRegression::Params::mini_batch_size.
+
+A sample set of training parameters for the Logistic Regression classifier can be initialized as
+follows:
+@code{.cpp}
+using namespace cv::ml;
+LogisticRegression::Params params;
+params.alpha = 0.5;
+params.num_iters = 10000;
+params.norm = LogisticRegression::REG_L2;
+params.regularized = 1;
+params.train_method = LogisticRegression::MINI_BATCH;
+params.mini_batch_size = 10;
+@endcode
+
+@sa cv::ml::LogisticRegression
diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp
index 619664ba3..9dca486af 100644
--- a/modules/ml/include/opencv2/ml.hpp
+++ b/modules/ml/include/opencv2/ml.hpp
@@ -56,455 +56,16 @@
 /** @defgroup ml Machine Learning
-   @{
-@defgroup ml_stat Statistical Models
-@defgroup ml_bayes Normal Bayes Classifier
-This simple classification model assumes that feature vectors from each class are normally
-distributed (though, not necessarily independently distributed). 
So, the whole data distribution -function is assumed to be a Gaussian mixture, one component per class. Using the training data the -algorithm estimates mean vectors and covariance matrices for every class, and then it uses them for -prediction. + The Machine Learning Library (MLL) is a set of classes and functions for statistical + classification, regression, and clustering of data. -@defgroup ml_knearest K-Nearest Neighbors + Most of the classification and regression algorithms are implemented as C++ classes. As the + algorithms have different sets of features (like an ability to handle missing measurements or + categorical input variables), there is a little common ground between the classes. This common + ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from. -The algorithm caches all training samples and predicts the response for a new sample by analyzing a -certain number (**K**) of the nearest neighbors of the sample using voting, calculating weighted -sum, and so on. The method is sometimes referred to as "learning by example" because for prediction -it looks for the feature vector with a known response that is closest to the given vector. - -@defgroup ml_svm Support Vector Machines - -Originally, support vector machines (SVM) was a technique for building an optimal binary (2-class) -classifier. Later the technique was extended to regression and clustering problems. SVM is a partial -case of kernel-based methods. It maps feature vectors into a higher-dimensional space using a kernel -function and builds an optimal linear discriminating function in this space or an optimal -hyper-plane that fits into the training data. In case of SVM, the kernel is not defined explicitly. -Instead, a distance between any 2 points in the hyper-space needs to be defined. - -The solution is optimal, which means that the margin between the separating hyper-plane and the -nearest feature vectors from both classes (in case of 2-class classifier) is maximal. The feature -vectors that are the closest to the hyper-plane are called *support vectors*, which means that the -position of other vectors does not affect the hyper-plane (the decision function). - -SVM implementation in OpenCV is based on @cite LibSVM . - -Prediction with SVM -------------------- - -StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get -the raw response from SVM (in the case of regression, 1-class or 2-class classification problem). - -@defgroup ml_decsiontrees Decision Trees - -The ML classes discussed in this section implement Classification and Regression Tree algorithms -described in @cite Breiman84 . - -The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's -also a base class for RTrees and Boost. - -A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used -either for classification or for regression. For classification, each tree leaf is marked with a -class label; multiple leaves may have the same label. For regression, a constant is also assigned to -each tree leaf, so the approximation function is piecewise constant. - -Predicting with Decision Trees ------------------------------- - -To reach a leaf node and to obtain a response for the input feature vector, the prediction procedure -starts with the root node. 
From each non-leaf node the procedure goes to the left (selects the left -child node as the next observed node) or to the right based on the value of a certain variable whose -index is stored in the observed node. The following variables are possible: - -- **Ordered variables.** The variable value is compared with a threshold that is also stored in - the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it - goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the - left, else to the right. - -- **Categorical variables.** A discrete variable value is tested to see whether it belongs to a - certain subset of values (also stored in the node) from a limited set of values the variable - could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For - example, if the color is green or red, go to the left, else to the right. - -So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is -used. This pair is called a *split* (split on the variable variable_index ). Once a leaf node is -reached, the value assigned to this node is used as the output of the prediction procedure. - -Sometimes, certain features of the input vector are missed (for example, in the darkness it is -difficult to determine the object color), and the prediction procedure may get stuck in the certain -node (in the mentioned example, if the node is split by color). To avoid such situations, decision -trees use so-called *surrogate splits*. That is, in addition to the best "primary" split, every tree -node may also be split to one or more other variables with nearly the same results. - -Training Decision Trees ------------------------ - -The tree is built recursively, starting from the root node. All training data (feature vectors and -responses) is used to split the root node. In each node the optimum decision rule (the best -"primary" split) is found based on some criteria. In machine learning, gini "purity" criteria are -used for classification, and sum of squared errors is used for regression. Then, if necessary, the -surrogate splits are found. They resemble the results of the primary split on the training data. All -the data is divided using the primary and the surrogate splits (like it is done in the prediction -procedure) between the left and the right child node. Then, the procedure recursively splits both -left and right nodes. At each node the recursive procedure may stop (that is, stop splitting the -node further) in one of the following cases: - -- Depth of the constructed tree branch has reached the specified maximum value. -- Number of training samples in the node is less than the specified threshold when it is not - statistically representative to split the node further. -- All the samples in the node belong to the same class or, in case of regression, the variation is - too small. -- The best found split does not give any noticeable improvement compared to a random choice. - -When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That is, -some branches of the tree that may lead to the model overfitting are cut off. Normally, this -procedure is only applied to standalone decision trees. Usually tree ensembles build trees that are -small enough and use their own protection schemes against overfitting. 
- -Variable Importance -------------------- - -Besides the prediction that is an obvious use of decision trees, the tree can be also used for -various data analyses. One of the key properties of the constructed decision tree algorithms is an -ability to compute the importance (relative decisive power) of each variable. For example, in a spam -filter that uses a set of words occurred in the message as a feature vector, the variable importance -rating can be used to determine the most "spam-indicating" words and thus help keep the dictionary -size reasonable. - -Importance of each variable is computed over all the splits on this variable in the tree, primary -and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits must be -enabled in the training parameters, even if there is no missing data. - -@defgroup ml_boost Boosting - -A common machine learning task is supervised learning. In supervised learning, the goal is to learn -the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output \f$y\f$ . Predicting the -qualitative output is called *classification*, while predicting the quantitative output is called -*regression*. - -Boosting is a powerful learning concept that provides a solution to the supervised classification -learning task. It combines the performance of many "weak" classifiers to produce a powerful -committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be -very simple and computationally inexpensive. However, many of them smartly combine results to a -strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and -Neural Networks. - -Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest -decision trees with only a single split node per tree (called stumps ) are sufficient. - -The boosted model is based on \f$N\f$ training examples \f${(x_i,y_i)}1N\f$ with \f$x_i \in{R^K}\f$ and -\f$y_i \in{-1, +1}\f$ . \f$x_i\f$ is a \f$K\f$ -component vector. Each component encodes a feature relevant to -the learning task at hand. The desired two-class output is encoded as -1 and +1. - -Different variants of boosting are known as Discrete Adaboost, Real AdaBoost, LogitBoost, and Gentle -AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this chapter -focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below. Initially the -same weight is assigned to each sample (step 2). Then, a weak classifier \f$f_{m(x)}\f$ is trained on -the weighted training data (step 3a). Its weighted training error and scaling factor \f$c_m\f$ is -computed (step 3b). The weights are increased for training samples that have been misclassified -(step 3c). All weights are then normalized, and the process of finding the next weak classifier -continues for another \f$M\f$ -1 times. The final classifier \f$F(x)\f$ is the sign of the weighted sum over -the individual weak classifiers (step 4). - -**Two-class Discrete AdaBoost Algorithm** - -- Set \f$N\f$ examples \f${(x_i,y_i)}1N\f$ with \f$x_i \in{R^K}, y_i \in{-1, +1}\f$ . - -- Assign weights as \f$w_i = 1/N, i = 1,...,N\f$ . - -- Repeat for \f$m = 1,2,...,M\f$ : - - 3.1. Fit the classifier \f$f_m(x) \in{-1,1}\f$, using weights \f$w_i\f$ on the training data. - - 3.2. Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], c_m = log((1 - err_m)/err_m)\f$ . - - 3.3. 
Set \f$w_i \Leftarrow w_i exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and renormalize - so that \f$\Sigma i w_i = 1\f$ . - -1. Classify new samples *x* using the formula: \f$\textrm{sign} (\Sigma m = 1M c_m f_m(x))\f$ . - -@note Similar to the classical boosting methods, the current implementation supports two-class -classifiers only. For M \> 2 classes, there is the **AdaBoost.MH** algorithm (described in -@cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training set. -To reduce computation time for boosted models without substantially losing accuracy, the influence -trimming technique can be employed. As the training algorithm proceeds and the number of trees in -the ensemble is increased, a larger number of the training samples are classified correctly and with -increasing confidence, thereby those samples receive smaller weights on the subsequent iterations. -Examples with a very low relative weight have a small impact on the weak classifier training. Thus, -such examples may be excluded during the weak classifier training without having much effect on the -induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples -with the summary fraction weight_trim_rate of the total weight mass are used in the weak -classifier training. Note that the weights for **all** training examples are recomputed at each -training iteration. Examples deleted at a particular iteration may be used again for learning some -of the weak classifiers further @cite FHT98 . - -Prediction with Boost ---------------------- -StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get -the raw sum from Boost classifier. - -@defgroup ml_randomtrees Random Trees - -Random trees have been introduced by Leo Breiman and Adele Cutler: - . The algorithm can deal with both -classification and regression problems. Random trees is a collection (ensemble) of tree predictors -that is called *forest* further in this section (the term has been also introduced by L. Breiman). -The classification works as follows: the random trees classifier takes the input feature vector, -classifies it with every tree in the forest, and outputs the class label that received the majority -of "votes". In case of a regression, the classifier response is the average of the responses over -all the trees in the forest. - -All the trees are trained with the same parameters but on different training sets. These sets are -generated from the original training set using the bootstrap procedure: for each training set, you -randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen -with replacement. That is, some vectors will occur more than once and some will be absent. At each -node of each trained tree, not all the variables are used to find the best split, but a random -subset of them. With each node a new subset is generated. However, its size is fixed for all the -nodes and all the trees. It is a training parameter set to \f$\sqrt{number_of_variables}\f$ by -default. None of the built trees are pruned. - -In random trees there is no need for any accuracy estimation procedures, such as cross-validation or -bootstrap, or a separate test set to get an estimate of the training error. The error is estimated -internally during the training. When the training set for the current tree is drawn by sampling with -replacement, some vectors are left out (so-called *oob (out-of-bag) data* ). 
The size of oob data is -about N/3 . The classification error is estimated by using this oob-data as follows: - -- Get a prediction for each vector, which is oob relative to the i-th tree, using the very i-th - tree. - -- After all the trees have been trained, for each vector that has ever been oob, find the - class-*winner* for it (the class that has got the majority of votes in the trees where the - vector was oob) and compare it to the ground-truth response. - -- Compute the classification error estimate as a ratio of the number of misclassified oob vectors - to all the vectors in the original data. In case of regression, the oob-error is computed as the - squared error for oob vectors difference divided by the total number of vectors. - -For the random trees usage example, please, see letter_recog.cpp sample in OpenCV distribution. - -**References:** - -- *Machine Learning*, Wald I, July 2002. - -- *Looking Inside the Black Box*, Wald II, July 2002. - -- *Software for the Masses*, Wald III, July 2002. - -- And other articles from the web site - - -@defgroup ml_em Expectation Maximization - -The Expectation Maximization(EM) algorithm estimates the parameters of the multivariate probability -density function in the form of a Gaussian mixture distribution with a specified number of mixtures. - -Consider the set of the N feature vectors { \f$x_1, x_2,...,x_{N}\f$ } from a d-dimensional Euclidean -space drawn from a Gaussian mixture: - -\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f] - -\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f] - -where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the mean \f$a_k\f$ -and covariance matrix \f$S_k\f$, \f$\pi_k\f$ is the weight of the k-th mixture. Given the number of mixtures -\f$M\f$ and the samples \f$x_i\f$, \f$i=1..N\f$ the algorithm finds the maximum-likelihood estimates (MLE) of -all the mixture parameters, that is, \f$a_k\f$, \f$S_k\f$ and \f$\pi_k\f$ : - -\f[L(x, \theta )=logp(x, \theta )= \sum _{i=1}^{N}log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f] - -\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbbm{R} ^d,S_k=S_k^T>0,S_k \in \mathbbm{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f] - -The EM algorithm is an iterative procedure. Each iteration includes two steps. At the first step -(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{i,k}\f$ in the -formula below) of sample i to belong to mixture k using the currently available mixture parameter -estimates: - -\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f] - -At the second step (Maximization step or M-step), the mixture parameter estimates are refined using -the computed probabilities: - -\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f] - -Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$ can be -provided. Another alternative when \f$p_{i,k}\f$ are unknown is to use a simpler clustering algorithm to -pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$ . 
Often (including machine learning) -the k-means algorithm is used for that purpose. - -One of the main problems of the EM algorithm is a large number of parameters to estimate. The -majority of the parameters reside in covariance matrices, which are \f$d \times d\f$ elements each where -\f$d\f$ is the feature space dimensionality. However, in many practical problems, the covariance -matrices are close to diagonal or even to \f$\mu_k*I\f$ , where \f$I\f$ is an identity matrix and \f$\mu_k\f$ is -a mixture-dependent "scale" parameter. So, a robust computation scheme could start with harder -constraints on the covariance matrices and then use the estimated parameters as an input for a less -constrained optimization problem (often a diagonal covariance matrix is already a good enough -approximation). - -References: -- Bilmes98 J. A. Bilmes. *A Gentle Tutorial of the EM Algorithm and its Application to Parameter - Estimation for Gaussian Mixture and Hidden Markov Models*. Technical Report TR-97-021, - International Computer Science Institute and Computer Science Division, University of California - at Berkeley, April 1998. - -@defgroup ml_neural Neural Networks - -ML implements feed-forward artificial neural networks or, more particularly, multi-layer perceptrons -(MLP), the most commonly used type of neural networks. MLP consists of the input layer, output -layer, and one or more hidden layers. Each layer of MLP includes one or more neurons directionally -linked with the neurons from the previous and the next layer. The example below represents a 3-layer -perceptron with three inputs, two outputs, and the hidden layer including five neurons: - -![image](pics/mlp.png) - -All the neurons in MLP are similar. Each of them has several input links (it takes the output values -from several neurons in the previous layer as input) and several output links (it passes the -response to several neurons in the next layer). The values retrieved from the previous layer are -summed up with certain weights, individual for each neuron, plus the bias term. The sum is -transformed using the activation function \f$f\f$ that may be also different for different neurons. - -![image](pics/neuron_model.png) - -In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$ , the outputs \f$y_i\f$ of the layer \f$n+1\f$ are -computed as: - -\f[u_i = \sum _j (w^{n+1}_{i,j}*x_j) + w^{n+1}_{i,bias}\f] - -\f[y_i = f(u_i)\f] - -Different activation functions may be used. ML implements three standard functions: - -- Identity function ( ANN_MLP::IDENTITY ): \f$f(x)=x\f$ - -- Symmetrical sigmoid ( ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x}\f$ - ), which is the default choice for MLP. The standard sigmoid with \f$\beta =1, \alpha =1\f$ is shown - below: - - ![image](pics/sigmoid_bipolar.png) - -- Gaussian function ( ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not completely - supported at the moment. - -In ML, all the neurons have the same activation functions, with the same free parameters ( -\f$\alpha, \beta\f$ ) that are specified by user and are not altered by the training algorithms. - -So, the whole trained network works as follows: - -1. Take the feature vector as input. The vector size is equal to the size of the input layer. -2. Pass values as input to the first hidden layer. -3. Compute outputs of the hidden layer using the weights and the activation functions. -4. Pass outputs further downstream until you compute the output layer. 
- -So, to compute the network, you need to know all the weights \f$w^{n+1)}_{i,j}\f$ . The weights are -computed by the training algorithm. The algorithm takes a training set, multiple input vectors with -the corresponding output vectors, and iteratively adjusts the weights to enable the network to give -the desired response to the provided input vectors. - -The larger the network size (the number of hidden layers and their sizes) is, the more the potential -network flexibility is. The error on the training set could be made arbitrarily small. But at the -same time the learned network also "learns" the noise present in the training set, so the error on -the test set usually starts increasing after the network size reaches a limit. Besides, the larger -networks are trained much longer than the smaller ones, so it is reasonable to pre-process the data, -using PCA::operator() or similar technique, and train a smaller network on only essential features. - -Another MLP feature is an inability to handle categorical data as is. However, there is a -workaround. If a certain feature in the input or output (in case of n -class classifier for \f$n>2\f$ ) -layer is categorical and can take \f$M>2\f$ different values, it makes sense to represent it as a binary -tuple of M elements, where the i -th element is 1 if and only if the feature is equal to the i -th -value out of M possible. It increases the size of the input/output layer but speeds up the training -algorithm convergence and at the same time enables "fuzzy" values of such variables, that is, a -tuple of probabilities instead of a fixed value. - -ML implements two algorithms for training MLP's. The first algorithm is a classical random -sequential back-propagation algorithm. The second (default) one is a batch RPROP algorithm. - -@defgroup ml_lr Logistic Regression - -ML implements logistic regression, which is a probabilistic classification technique. Logistic -Regression is a binary classification algorithm which is closely related to Support Vector Machines -(SVM). Like SVM, Logistic Regression can be extended to work on multi-class classification problems -like digit recognition (i.e. recognizing digitis like 0,1 2, 3,... from the given images). This -version of Logistic Regression supports both binary and multi-class classifications (for multi-class -it creates a multiple 2-class classifiers). In order to train the logistic regression classifier, -Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see ). -Logistic Regression is a discriminative classifier (see for more details). -Logistic Regression is implemented as a C++ class in LogisticRegression. - -In Logistic Regression, we try to optimize the training paramater \f$\theta\f$ such that the hypothesis -\f$0 \leq h_\theta(x) \leq 1\f$ is acheived. We have \f$h_\theta(x) = g(h_\theta(x))\f$ and -\f$g(z) = \frac{1}{1+e^{-z}}\f$ as the logistic or sigmoid function. The term "Logistic" in Logistic -Regression refers to this function. For given data of a binary classification problem of classes 0 -and 1, one can determine that the given data instance belongs to class 1 if \f$h_\theta(x) \geq 0.5\f$ -or class 0 if \f$h_\theta(x) < 0.5\f$ . - -In Logistic Regression, choosing the right parameters is of utmost importance for reducing the -training error and ensuring high training accuracy. LogisticRegression::Params is the structure that -defines parameters that are required to train a Logistic Regression classifier. 
The learning rate is -determined by LogisticRegression::Params.alpha. It determines how faster we approach the solution. -It is a positive real number. Optimization algorithms like Batch Gradient Descent and Mini-Batch -Gradient Descent are supported in LogisticRegression. It is important that we mention the number of -iterations these optimization algorithms have to run. The number of iterations are mentioned by -LogisticRegression::Params.num_iters. The number of iterations can be thought as number of steps -taken and learning rate specifies if it is a long step or a short step. These two parameters define -how fast we arrive at a possible solution. In order to compensate for overfitting regularization is -performed, which can be enabled by setting LogisticRegression::Params.regularized to a positive -integer (greater than zero). One can specify what kind of regularization has to be performed by -setting LogisticRegression::Params.norm to LogisticRegression::REG_L1 or -LogisticRegression::REG_L2 values. LogisticRegression provides a choice of 2 training methods with -Batch Gradient Descent or the Mini-Batch Gradient Descent. To specify this, set -LogisticRegression::Params.train_method to either LogisticRegression::BATCH or -LogisticRegression::MINI_BATCH. If LogisticRegression::Params is set to -LogisticRegression::MINI_BATCH, the size of the mini batch has to be to a postive integer using -LogisticRegression::Params.mini_batch_size. - -A sample set of training parameters for the Logistic Regression classifier can be initialized as -follows: -@code - LogisticRegression::Params params; - params.alpha = 0.5; - params.num_iters = 10000; - params.norm = LogisticRegression::REG_L2; - params.regularized = 1; - params.train_method = LogisticRegression::MINI_BATCH; - params.mini_batch_size = 10; -@endcode - -@defgroup ml_data Training Data - -In machine learning algorithms there is notion of training data. Training data includes several -components: - -- A set of training samples. Each training sample is a vector of values (in Computer Vision it's - sometimes referred to as feature vector). Usually all the vectors have the same number of - components (features); OpenCV ml module assumes that. Each feature can be ordered (i.e. its - values are floating-point numbers that can be compared with each other and strictly ordered, - i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be - integers, strings etc.). -- Optional set of responses corresponding to the samples. Training data with no responses is used - in unsupervised learning algorithms that learn structure of the supplied data based on distances - between different samples. Training data with responses is used in supervised learning - algorithms, which learn the function mapping samples to responses. Usually the responses are - scalar values, ordered (when we deal with regression problem) or categorical (when we deal with - classification problem; in this case the responses are often called "labels"). Some algorithms, - most noticeably Neural networks, can handle not only scalar, but also multi-dimensional or - vector responses. -- Another optional component is the mask of missing measurements. Most algorithms require all the - components in all the training samples be valid, but some other algorithms, such as decision - tress, can handle the cases of missing measurements. -- In the case of classification problem user may want to give different weights to different - classes. 
This is useful, for example, when - - user wants to shift prediction accuracy towards lower false-alarm rate or higher hit-rate. - - user wants to compensate for significantly different amounts of training samples from - different classes. -- In addition to that, each training sample may be given a weight, if user wants the algorithm to - pay special attention to certain training samples and adjust the training model accordingly. -- Also, user may wish not to use the whole training data at once, but rather use parts of it, e.g. - to do parameter optimization via cross-validation procedure. - -As you can see, training data can have rather complex structure; besides, it may be very big and/or -not entirely available, so there is need to make abstraction for this concept. In OpenCV ml there is -cv::ml::TrainData class for that. - - @} + See detailed overview here: @ref ml_intro. */ namespace cv @@ -516,83 +77,62 @@ namespace ml //! @addtogroup ml //! @{ -/* Variable type */ -enum +/** @brief Variable types */ +enum VariableTypes { - VAR_NUMERICAL =0, - VAR_ORDERED =0, - VAR_CATEGORICAL =1 + VAR_NUMERICAL =0, //!< same as VAR_ORDERED + VAR_ORDERED =0, //!< ordered variables + VAR_CATEGORICAL =1 //!< categorical variables }; -enum +/** @brief %Error types */ +enum ErrorTypes { TEST_ERROR = 0, TRAIN_ERROR = 1 }; -enum +/** @brief Sample types */ +enum SampleTypes { - ROW_SAMPLE = 0, - COL_SAMPLE = 1 + ROW_SAMPLE = 0, //!< each training sample is a row of samples + COL_SAMPLE = 1 //!< each training sample occupies a column of samples }; -//! @addtogroup ml_svm -//! @{ - /** @brief The structure represents the logarithmic grid range of statmodel parameters. It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate being computed by cross-validation. -- member double ParamGrid::minVal -Minimum value of the statmodel parameter. -- member double ParamGrid::maxVal -Maximum value of the statmodel parameter. -- member double ParamGrid::logStep -Logarithmic step for iterating the statmodel parameter. -The grid determines the following iteration sequence of the statmodel parameter values: - -\f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f] - -where \f$n\f$ is the maximal index satisfying - -\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f] - -The grid is logarithmic, so logStep must always be greater then 1. */ class CV_EXPORTS_W_MAP ParamGrid { public: - /** @brief The constructors. - - The full constructor initializes corresponding members. The default constructor creates a dummy - grid: - @code - ParamGrid::ParamGrid() - { - minVal = maxVal = 0; - logStep = 1; - } - @endcode - */ + /** @brief Default constructor */ ParamGrid(); + /** @brief Constructor with parameters */ ParamGrid(double _minVal, double _maxVal, double _logStep); - CV_PROP_RW double minVal; - CV_PROP_RW double maxVal; + CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0. + CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0. + /** @brief Logarithmic step for iterating the statmodel parameter. + + The grid determines the following iteration sequence of the statmodel parameter values: + \f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f] + where \f$n\f$ is the maximal index satisfying + \f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f] + The grid is logarithmic, so logStep must always be greater then 1. Default value is 1. 
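+
+    For example (an illustrative sketch, not an excerpt from the library documentation),
+    ParamGrid(1, 1000, 10) enumerates the values 1, 10 and 100, since \f$1 \cdot 10^3\f$ is not
+    less than maxVal:
+    @code
+    cv::ml::ParamGrid grid(1, 1000, 10); // minVal=1, maxVal=1000, logStep=10
+    @endcode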
+ */ CV_PROP_RW double logStep; }; -//! @} ml_svm - -//! @addtogroup ml_data -//! @{ - /** @brief Class encapsulating training data. Please note that the class only specifies the interface of training data, but not implementation. -All the statistical model classes in ml take Ptr\. In other words, you can create your -own class derived from TrainData and supply smart pointer to the instance of this class into -StatModel::train. +All the statistical model classes in _ml_ module accepts Ptr\ as parameter. In other +words, you can create your own class derived from TrainData and pass smart pointer to the instance +of this class into StatModel::train. + +@sa @ref ml_intro_data */ class CV_EXPORTS TrainData { @@ -614,14 +154,14 @@ public: /** @brief Returns matrix of train samples @param layout The requested layout. If it's different from the initial one, the matrix is - transposed. + transposed. See ml::SampleTypes. @param compressSamples if true, the function returns only the training samples (specified by - sampleIdx) + sampleIdx) @param compressVars if true, the function returns the shorter training samples, containing only - the active variables. + the active variables. - In current implementation the function tries to avoid physical data copying and returns the matrix - stored inside TrainData (unless the transposition or compression is needed). + In current implementation the function tries to avoid physical data copying and returns the + matrix stored inside TrainData (unless the transposition or compression is needed). */ virtual Mat getTrainSamples(int layout=ROW_SAMPLE, bool compressSamples=true, @@ -629,15 +169,15 @@ public: /** @brief Returns the vector of responses - The function returns ordered or the original categorical responses. Usually it's used in regression - algorithms. + The function returns ordered or the original categorical responses. Usually it's used in + regression algorithms. */ virtual Mat getTrainResponses() const = 0; /** @brief Returns the vector of normalized categorical responses - The function returns vector of responses. Each response is integer from 0 to \-1. The actual label value can be retrieved then from the class label vector, see + The function returns vector of responses. Each response is integer from `0` to `-1`. The actual label value can be retrieved then from the class label vector, see TrainData::getClassLabels. */ virtual Mat getTrainNormCatResponses() const = 0; @@ -668,14 +208,18 @@ public: virtual Mat getCatOfs() const = 0; virtual Mat getCatMap() const = 0; + /** @brief Splits the training data into the training and test parts + @sa TrainData::setTrainTestSplitRatio + */ virtual void setTrainTestSplit(int count, bool shuffle=true) = 0; /** @brief Splits the training data into the training and test parts - The function selects a subset of specified relative size and then returns it as the training set. If - the function is not called, all the data is used for training. Please, note that for each of - TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test subset can be - retrieved and processed as well. + The function selects a subset of specified relative size and then returns it as the training + set. If the function is not called, all the data is used for training. Please, note that for + each of TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test + subset can be retrieved and processed as well. 
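+
+    A minimal usage sketch (the file name is illustrative only; getTestResponses follows the
+    getTrain\*/getTest\* correspondence mentioned above):
+    @code
+    Ptr<TrainData> data = TrainData::loadFromCSV("samples.csv", 0);
+    data->setTrainTestSplitRatio(0.8);            // 80% of the samples are used for training
+    Mat testResponses = data->getTestResponses(); // responses of the remaining 20%
+    @endcode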
+ @sa TrainData::setTrainTestSplit */ virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0; virtual void shuffleTrainTest() = 0; @@ -686,23 +230,28 @@ public: @param filename The input file name @param headerLineCount The number of lines in the beginning to skip; besides the header, the - function also skips empty lines and lines staring with '\#' - @param responseStartIdx Index of the first output variable. If -1, the function considers the last - variable as the response - @param responseEndIdx Index of the last output variable + 1. If -1, then there is single response - variable at responseStartIdx. - @param varTypeSpec The optional text string that specifies the variables' types. It has the format ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]. That is, variables from n1 to n2 (inclusive range), n3, n4 to n5 ... are considered ordered and n6, n7 to n8 ... are considered as categorical. The range [n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8] should cover all the variables. If varTypeSpec is not specified, then algorithm uses the following rules: - # all input variables are considered ordered by default. If some column contains has - non-numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding - variable is considered categorical. - # if there are several output variables, they are all considered as ordered. Error is - reported when non-numerical values are used. - # if there is a single output variable, then if its values are non-numerical or are all - integers, then it's considered categorical. Otherwise, it's considered ordered. + function also skips empty lines and lines staring with `#` + @param responseStartIdx Index of the first output variable. If -1, the function considers the + last variable as the response + @param responseEndIdx Index of the last output variable + 1. If -1, then there is single + response variable at responseStartIdx. + @param varTypeSpec The optional text string that specifies the variables' types. It has the + format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2` + (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are + considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` + should cover all the variables. If varTypeSpec is not specified, then algorithm uses the + following rules: + - all input variables are considered ordered by default. If some column contains has non- + numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding + variable is considered categorical. + - if there are several output variables, they are all considered as ordered. Error is + reported when non-numerical values are used. + - if there is a single output variable, then if its values are non-numerical or are all + integers, then it's considered categorical. Otherwise, it's considered ordered. @param delimiter The character used to separate values in each line. @param missch The character used to specify missing measurements. It should not be a digit. - Although it's a non-numerical value, it surely does not affect the decision of whether the - variable ordered or categorical. + Although it's a non-numerical value, it surely does not affect the decision of whether the + variable ordered or categorical. 
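+
+    For instance, a file with one header line, the responses stored in the last column and '?'
+    used for missing measurements can be loaded with the default arguments (the file name below is
+    illustrative only):
+    @code
+    Ptr<TrainData> data = TrainData::loadFromCSV("train.csv", 1);
+    @endcode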
*/ static Ptr loadFromCSV(const String& filename, int headerLineCount, @@ -711,76 +260,66 @@ public: const String& varTypeSpec=String(), char delimiter=',', char missch='?'); + /** @brief Creates training data from in-memory arrays. @param samples matrix of samples. It should have CV_32F type. - @param layout it's either ROW_SAMPLE, which means that each training sample is a row of samples, - or COL_SAMPLE, which means that each training sample occupies a column of samples. + @param layout see ml::SampleTypes. @param responses matrix of responses. If the responses are scalar, they should be stored as a - single row or as a single column. The matrix should have type CV_32F or CV_32S (in the former - case the responses are considered as ordered by default; in the latter case - as categorical) + single row or as a single column. The matrix should have type CV_32F or CV_32S (in the + former case the responses are considered as ordered by default; in the latter case - as + categorical) @param varIdx vector specifying which variables to use for training. It can be an integer vector - (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of active - variables. - @param sampleIdx vector specifying which samples to use for training. It can be an integer vector - (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask of training - samples. + (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of + active variables. + @param sampleIdx vector specifying which samples to use for training. It can be an integer + vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask + of training samples. @param sampleWeights optional vector with weights for each sample. It should have CV_32F type. - @param varType optional vector of type CV_8U and size \ + - \, containing types of each input and output variable. The - ordered variables are denoted by value VAR_ORDERED, and categorical - by VAR_CATEGORICAL. + @param varType optional vector of type CV_8U and size ` + + `, containing types of each input and output variable. See + ml::VariableTypes. */ static Ptr create(InputArray samples, int layout, InputArray responses, InputArray varIdx=noArray(), InputArray sampleIdx=noArray(), InputArray sampleWeights=noArray(), InputArray varType=noArray()); }; -//! @} ml_data - -//! @addtogroup ml_stat -//! @{ - /** @brief Base class for statistical models in OpenCV ML. */ class CV_EXPORTS_W StatModel : public Algorithm { public: - enum { UPDATE_MODEL = 1, RAW_OUTPUT=1, COMPRESSED_INPUT=2, PREPROCESSED_INPUT=4 }; + /** Predict options */ + enum Flags { + UPDATE_MODEL = 1, + RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label + COMPRESSED_INPUT=2, + PREPROCESSED_INPUT=4 + }; virtual void clear(); - /** @brief Returns the number of variables in training samples - - The method must be overwritten in the derived classes. - */ + /** @brief Returns the number of variables in training samples */ virtual int getVarCount() const = 0; - /** @brief Returns true if the model is trained - - The method must be overwritten in the derived classes. - */ + /** @brief Returns true if the model is trained */ virtual bool isTrained() const = 0; - /** @brief Returns true if the model is classifier - - The method must be overwritten in the derived classes. 
- */ + /** @brief Returns true if the model is classifier */ virtual bool isClassifier() const = 0; /** @brief Trains the statistical model @param trainData training data that can be loaded from file using TrainData::loadFromCSV or - created with TrainData::create. + created with TrainData::create. @param flags optional flags, depending on the model. Some of the models can be updated with the - new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). - - There are 2 instance methods and 2 static (class) template methods. The first two train the already - created model (the very first method must be overwritten in the derived classes). And the latter two - variants are convenience methods that construct empty model and then call its train method. + new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). */ virtual bool train( const Ptr& trainData, int flags=0 ); - /** @overload + + /** @brief Trains the statistical model + @param samples training samples - @param layout ROW_SAMPLE (training samples are the matrix rows) or COL_SAMPLE (training samples - are the matrix columns) + @param layout See ml::SampleTypes. @param responses vector of responses associated with the training samples. */ virtual bool train( InputArray samples, int layout, InputArray responses ); @@ -789,14 +328,14 @@ public: @param data the training data @param test if true, the error is computed over the test subset of the data, otherwise it's - computed over the training subset of the data. Please note that if you loaded a completely - different dataset to evaluate already trained classifier, you will probably want not to set the - test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so that the - error is computed for the whole new set. Yes, this sounds a bit confusing. + computed over the training subset of the data. Please note that if you loaded a completely + different dataset to evaluate already trained classifier, you will probably want not to set + the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so + that the error is computed for the whole new set. Yes, this sounds a bit confusing. @param resp the optional output responses. - The method uses StatModel::predict to compute the error. For regression models the error is computed - as RMS, for classifiers - as a percent of missclassified samples (0%-100%). + The method uses StatModel::predict to compute the error. For regression models the error is + computed as RMS, for classifiers - as a percent of missclassified samples (0%-100%). */ virtual float calcError( const Ptr& data, bool test, OutputArray resp ) const; @@ -804,20 +343,18 @@ public: @param samples The input samples, floating-point matrix @param results The optional output matrix of results. - @param flags The optional flags, model-dependent. Some models, such as Boost, SVM recognize - StatModel::RAW_OUTPUT flag, which makes the method return the raw results (the sum), not the - class label. + @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags. */ virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0; /** @brief Loads model from the file - This is static template method of StatModel. It's usage is following (in the case of SVM): : - - Ptr svm = StatModel::load("my_svm_model.xml"); - - In order to make this method work, the derived class must overwrite - Algorithm::read(const FileNode& fn). 
+ This is a static template method of StatModel. Its usage is as follows (in the case of SVM):
+ @code
+ Ptr<SVM> svm = StatModel::load<SVM>("my_svm_model.xml");
+ @endcode
+ In order to make this method work, the derived class must overwrite Algorithm::read(const
+ FileNode& fn).
*/
template<typename _Tp> static Ptr<_Tp> load(const String& filename)
{
@@ -828,10 +365,13 @@ public:
}
/** @brief Loads model from a String
+ @param strModel The string variable containing the model you want to load.
This is static template method of StatModel. It's usage is following (in the case of SVM):
- Ptr<SVM> svm = StatModel::loadFromString<SVM>(myStringModel);
+ @code
+ Ptr<SVM> svm = StatModel::loadFromString<SVM>(myStringModel);
+ @endcode
*/
template<typename _Tp> static Ptr<_Tp> loadFromString(const String& strModel)
{
@@ -841,12 +381,30 @@ public:
return model->isTrained() ? model : Ptr<_Tp>();
}
+
+ /** @brief Creates a new statistical model and trains it
+
+ @param data training data that can be loaded from file using TrainData::loadFromCSV or
+ created with TrainData::create.
+ @param p model parameters
+ @param flags optional flags, depending on the model. Some of the models can be updated with the
+ new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
+ */
template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, const typename _Tp::Params& p, int flags=0)
{
Ptr<_Tp> model = _Tp::create(p);
return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();
}
+ /** @brief Creates a new statistical model and trains it
+
+ @param samples training samples
+ @param layout See ml::SampleTypes.
+ @param responses vector of responses associated with the training samples.
+ @param p model parameters
+ @param flags optional flags, depending on the model. Some of the models can be updated with the
+ new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
+ */
template<typename _Tp> static Ptr<_Tp> train(InputArray samples, int layout, InputArray responses,
const typename _Tp::Params& p, int flags=0)
{
@@ -863,16 +421,13 @@ public:
virtual String getDefaultModelName() const = 0;
};
-//! @} ml_stat
-
/****************************************************************************************\
*                                 Normal Bayes Classifier                                *
\****************************************************************************************/
-//! @addtogroup ml_bayes
-//! @{
-
/** @brief Bayes classifier for normally distributed data.
+
+@sa @ref ml_intro_bayes
*/
class CV_EXPORTS_W NormalBayesClassifier : public StatModel
{
public:
@@ -884,10 +439,11 @@ public:
};
/** @brief Predicts the response for sample(s).
- The method estimates the most probable classes for input vectors. Input vectors (one or more) are
- stored as rows of the matrix inputs. In case of multiple input vectors, there should be one output
- vector outputs. The predicted class for a single input vector is returned by the method. The vector
- outputProbs contains the output probabilities corresponding to each element of result.
+ The method estimates the most probable classes for input vectors. Input vectors (one or more)
+ are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one
+ output vector outputs. The predicted class for a single input vector is returned by the method.
+ The vector outputProbs contains the output probabilities corresponding to each element of
+ result.
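The static load/train templates documented above can be chained into a create-train-save-reload round trip. A sketch, assuming the Params-based API of this patch and the tdata object from the earlier examples; the file name is the one used in the documentation itself:

@code
// Construct, train and return the model in one call; an empty Ptr signals a training failure.
Ptr<SVM> svm = StatModel::train<SVM>(tdata, SVM::Params());

if (!svm.empty())
{
    svm->save("my_svm_model.xml");                           // Algorithm::save
    Ptr<SVM> restored = StatModel::load<SVM>("my_svm_model.xml");
}
@endcode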
*/ virtual float predictProb( InputArray inputs, OutputArray outputs, OutputArray outputProbs, int flags=0 ) const = 0; @@ -897,33 +453,24 @@ public: /** @brief Creates empty model @param params The model parameters. There is none so far, the structure is used as a placeholder - for possible extensions. + for possible extensions. - Use StatModel::train to train the model, - StatModel::train\(traindata, params) to create and train the model, - StatModel::load\(filename) to load the pre-trained model. + Use StatModel::train to train the model: + @code + StatModel::train(traindata, params); // to create and train the model + StatModel::load(filename); // load the pre-trained model + @endcode */ static Ptr create(const Params& params=Params()); }; -//! @} ml_bayes - /****************************************************************************************\ * K-Nearest Neighbour Classifier * \****************************************************************************************/ -//! @addtogroup ml_knearest -//! @{ +/** @brief The class implements K-Nearest Neighbors model -/** @brief The class implements K-Nearest Neighbors model as described in the beginning of this section. - -@note - - (Python) An example of digit recognition using KNearest can be found at - opencv_source/samples/python2/digits.py - - (Python) An example of grid search digit recognition using KNearest can be found at - opencv_source/samples/python2/digits_adjust.py - - (Python) An example of video digit recognition using KNearest can be found at - opencv_source/samples/python2/digits_video.py +@sa @ref ml_intro_knn */ class CV_EXPORTS_W KNearest : public StatModel { @@ -931,12 +478,13 @@ public: class CV_EXPORTS_W_MAP Params { public: + /** @brief Constructor with parameters */ Params(int defaultK=10, bool isclassifier_=true, int Emax_=INT_MAX, int algorithmType_=BRUTE_FORCE); - CV_PROP_RW int defaultK; - CV_PROP_RW bool isclassifier; - CV_PROP_RW int Emax; // for implementation with KDTree - CV_PROP_RW int algorithmType; + CV_PROP_RW int defaultK; //!< default number of neighbors to use in predict method + CV_PROP_RW bool isclassifier; //!< whether classification or regression model should be trained + CV_PROP_RW int Emax; //!< for implementation with KDTree + CV_PROP_RW int algorithmType; //!< See KNearest::Types }; virtual void setParams(const Params& p) = 0; virtual Params getParams() const = 0; @@ -944,17 +492,17 @@ public: /** @brief Finds the neighbors and predicts responses for input vectors. @param samples Input samples stored by rows. It is a single-precision floating-point matrix of - \ \* k size. + ` * k` size. @param k Number of used nearest neighbors. Should be greater than 1. @param results Vector with results of prediction (regression or classification) for each input - sample. It is a single-precision floating-point vector with \ elements. - @param neighborResponses Optional output values for corresponding neighbors. It is a - single-precision floating-point matrix of \ \* k size. - @param dist Optional output distances from the input vectors to the corresponding neighbors. It is - a single-precision floating-point matrix of \ \* k size. + sample. It is a single-precision floating-point vector with `` elements. + @param neighborResponses Optional output values for corresponding neighbors. It is a single- + precision floating-point matrix of ` * k` size. + @param dist Optional output distances from the input vectors to the corresponding neighbors. 
It + is a single-precision floating-point matrix of ` * k` size. - For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. In - case of regression, the predicted result is a mean value of the particular vector's neighbor + For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. + In case of regression, the predicted result is a mean value of the particular vector's neighbor responses. In case of classification, the class is determined by voting. For each input vector, the neighbors are sorted by their distances to the vector. @@ -962,8 +510,8 @@ public: In case of C++ interface you can use output pointers to empty matrices and the function will allocate memory itself. - If only a single input vector is passed, all output matrices are optional and the predicted value is - returned by the method. + If only a single input vector is passed, all output matrices are optional and the predicted + value is returned by the method. The function is parallelized with the TBB library. */ @@ -972,122 +520,77 @@ public: OutputArray neighborResponses=noArray(), OutputArray dist=noArray() ) const = 0; - enum { BRUTE_FORCE=1, KDTREE=2 }; + enum Types { BRUTE_FORCE=1, KDTREE=2 }; /** @brief Creates the empty model - @param params The model parameters: default number of neighbors to use in predict method (in - KNearest::findNearest this number must be passed explicitly) and the flag on whether - classification or regression model should be trained. + @param params The model parameters - The static method creates empty KNearest classifier. It should be then trained using train method - (see StatModel::train). Alternatively, you can load boost model from file using - StatModel::load\(filename). + The static method creates empty %KNearest classifier. It should be then trained using train + method (see StatModel::train). Alternatively, you can load boost model from file using: + `StatModel::load(filename)` */ static Ptr create(const Params& params=Params()); }; -//! @} ml_knearest - /****************************************************************************************\ * Support Vector Machines * \****************************************************************************************/ -//! @addtogroup ml_svm -//! @{ - /** @brief Support Vector Machines. -@note - - (Python) An example of digit recognition using SVM can be found at - opencv_source/samples/python2/digits.py - - (Python) An example of grid search digit recognition using SVM can be found at - opencv_source/samples/python2/digits_adjust.py - - (Python) An example of video digit recognition using SVM can be found at - opencv_source/samples/python2/digits_video.py +@sa @ref ml_intro_svm */ class CV_EXPORTS_W SVM : public StatModel { public: - /** @brief SVM training parameters. + /** @brief %SVM training parameters. - The structure must be initialized and passed to the training method of SVM. + The structure must be initialized and passed to the training method of %SVM. */ class CV_EXPORTS_W_MAP Params { public: + /** @brief Default constructor */ Params(); - /** @brief The constructors - - @param svm_type Type of a SVM formulation. Possible values are: - - **SVM::C_SVC** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows - imperfect separation of classes with penalty multiplier C for outliers. - - **SVM::NU_SVC** \f$\nu\f$-Support Vector Classification. n-class classification with possible - imperfect separation. 
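Returning to KNearest::findNearest documented a few hunks above, a small sketch under the Params-based API of this patch; trainSamples, trainLabels and querySamples are hypothetical CV_32F matrices with one sample per row:

@code
Ptr<KNearest> knn = KNearest::create(KNearest::Params(/*defaultK=*/3));
knn->train(trainSamples, ROW_SAMPLE, trainLabels);

Mat results, neighborResponses, dists;
knn->findNearest(querySamples, 3, results, neighborResponses, dists);
// results.at<float>(i): voted class (or mean response) for query row i
// neighborResponses, dists: querySamples.rows x 3 matrices
@endcode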
Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother - the decision boundary) is used instead of C. - - **SVM::ONE_CLASS** Distribution Estimation (One-class SVM). All the training data are from - the same class, SVM builds a boundary that separates the class from the rest of the feature - space. - - **SVM::EPS_SVR** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors - from the training set and the fitting hyper-plane must be less than p. For outliers the - penalty multiplier C is used. - - **SVM::NU_SVR** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. - See @cite LibSVM for details. - @param kernel_type Type of a SVM kernel. Possible values are: - - **SVM::LINEAR** Linear kernel. No mapping is done, linear discrimination (or regression) is - done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. - - **SVM::POLY** Polynomial kernel: - \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. - - **SVM::RBF** Radial basis function (RBF), a good choice in most cases. - \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. - - **SVM::SIGMOID** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. - - **SVM::CHI2** Exponential Chi2 kernel, similar to the RBF kernel: - \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. - - **SVM::INTER** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. - @param degree Parameter degree of a kernel function (POLY). - @param gamma Parameter \f$\gamma\f$ of a kernel function (POLY / RBF / SIGMOID / CHI2). - @param coef0 Parameter coef0 of a kernel function (POLY / SIGMOID). - @param Cvalue Parameter C of a SVM optimization problem (C_SVC / EPS_SVR / NU_SVR). - @param nu Parameter \f$\nu\f$ of a SVM optimization problem (NU_SVC / ONE_CLASS / NU_SVR). - @param p Parameter \f$\epsilon\f$ of a SVM optimization problem (EPS_SVR). - @param classWeights Optional weights in the C_SVC problem , assigned to particular classes. They - are multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these - weights affect the misclassification penalty for different classes. The larger weight, the larger - penalty on misclassification of data from the corresponding class. - @param termCrit Termination criteria of the iterative SVM training procedure which solves a - partial case of constrained quadratic optimization problem. You can specify tolerance and/or the - maximum number of iterations. - - The default constructor initialize the structure with following values: - @code - SVMParams::SVMParams() : - svmType(SVM::C_SVC), kernelType(SVM::RBF), degree(0), - gamma(1), coef0(0), C(1), nu(0), p(0), classWeights(0) - { - termCrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, FLT_EPSILON ); - } - @endcode - A comparison of different kernels on the following 2D test case with four classes. Four C_SVC SVMs - have been trained (one against rest) with auto_train. Evaluation on three different kernels (CHI2, - INTER, RBF). The color depicts the class with max score. Bright means max-score \> 0, dark means - max-score \< 0. - - ![image](pics/SVM_Comparison.png) - */ + /** @brief Constructor with parameters */ Params( int svm_type, int kernel_type, double degree, double gamma, double coef0, double Cvalue, double nu, double p, const Mat& classWeights, TermCriteria termCrit ); + /** Type of a %SVM formulation. See SVM::Types. 
Default value is SVM::C_SVC. */ CV_PROP_RW int svmType; + /** Type of a %SVM kernel. See SVM::KernelTypes. Default value is SVM::RBF. */ CV_PROP_RW int kernelType; - CV_PROP_RW double gamma, coef0, degree; + /** Parameter \f$\gamma\f$ of a kernel function (SVM::POLY / SVM::RBF / SVM::SIGMOID / + SVM::CHI2). Default value is 1. */ + CV_PROP_RW double gamma; + /** Parameter coef0 of a kernel function (SVM::POLY / SVM::SIGMOID). Default value is 0. */ + CV_PROP_RW double coef0; + /** Parameter degree of a kernel function (SVM::POLY). Default value is 0. */ + CV_PROP_RW double degree; - CV_PROP_RW double C; // for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR - CV_PROP_RW double nu; // for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR - CV_PROP_RW double p; // for CV_SVM_EPS_SVR - CV_PROP_RW Mat classWeights; // for CV_SVM_C_SVC - CV_PROP_RW TermCriteria termCrit; // termination criteria + /** Parameter C of a %SVM optimization problem (SVM::C_SVC / SVM::EPS_SVR / SVM::NU_SVR). + Default value is 0. */ + CV_PROP_RW double C; + /** Parameter \f$\nu\f$ of a %SVM optimization problem (SVM::NU_SVC / SVM::ONE_CLASS / + SVM::NU_SVR). Default value is 0. */ + CV_PROP_RW double nu; + /** Parameter \f$\epsilon\f$ of a %SVM optimization problem (SVM::EPS_SVR). Default value is 0. */ + CV_PROP_RW double p; + + /** Optional weights in the SVM::C_SVC problem , assigned to particular classes. They are + multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these + weights affect the misclassification penalty for different classes. The larger weight, the + larger penalty on misclassification of data from the corresponding class. Default value is + empty Mat.*/ + CV_PROP_RW Mat classWeights; + /** Termination criteria of the iterative %SVM training procedure which solves a partial + case of constrained quadratic optimization problem. You can specify tolerance and/or the + maximum number of iterations. Default value is TermCriteria( + TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON );*/ + CV_PROP_RW TermCriteria termCrit; }; class CV_EXPORTS Kernel : public Algorithm @@ -1097,49 +600,99 @@ public: virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0; }; - // SVM type - enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 }; + //! %SVM type + enum Types { + /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows + imperfect separation of classes with penalty multiplier C for outliers. */ + C_SVC=100, + /** \f$\nu\f$-Support Vector Classification. n-class classification with possible + imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother + the decision boundary) is used instead of C. */ + NU_SVC=101, + /** Distribution Estimation (One-class %SVM). All the training data are from + the same class, %SVM builds a boundary that separates the class from the rest of the feature + space. */ + ONE_CLASS=102, + /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors + from the training set and the fitting hyper-plane must be less than p. For outliers the + penalty multiplier C is used. */ + EPS_SVR=103, + /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. + See @cite LibSVM for details. 
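With the constructor documentation now moved onto the individual fields, configuring an SVM reads as plain member assignments. A configuration sketch, assuming the Params-based API of this patch and the tdata object from the earlier examples:

@code
SVM::Params params;
params.svmType    = SVM::C_SVC;
params.kernelType = SVM::RBF;
params.gamma      = 0.5;
params.C          = 2.5;
params.termCrit   = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON);

Ptr<SVM> svm = SVM::create(params);
svm->train(tdata);   // tdata: Ptr<TrainData> with CV_32F samples
@endcode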
*/ + NU_SVR=104 + }; - // SVM kernel type - enum { CUSTOM=-1, LINEAR=0, POLY=1, RBF=2, SIGMOID=3, CHI2=4, INTER=5 }; + /** @brief %SVM kernel type - // SVM params type - enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 }; + A comparison of different kernels on the following 2D test case with four classes. Four + SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three + different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score. + Bright means max-score \> 0, dark means max-score \< 0. + ![image](pics/SVM_Comparison.png) + */ + enum KernelTypes { + CUSTOM=-1, + /** Linear kernel. No mapping is done, linear discrimination (or regression) is + done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */ + LINEAR=0, + /** Polynomial kernel: + \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */ + POLY=1, + /** Radial basis function (RBF), a good choice in most cases. + \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */ + RBF=2, + /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */ + SIGMOID=3, + /** Exponential Chi2 kernel, similar to the RBF kernel: + \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */ + CHI2=4, + /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */ + INTER=5 + }; - /** @brief Trains an SVM with optimal parameters. + //! %SVM params type + enum ParamTypes { + C=0, + GAMMA=1, + P=2, + NU=3, + COEF=4, + DEGREE=5 + }; + + /** @brief Trains an %SVM with optimal parameters. @param data the training data that can be constructed using TrainData::create or - TrainData::loadFromCSV. + TrainData::loadFromCSV. @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One - subset is used to test the model, the others form the train set. So, the SVM algorithm is executed - kFold times. - @param Cgrid - @param gammaGrid - @param pGrid - @param nuGrid - @param coeffGrid - @param degreeGrid Iteration grid for the corresponding SVM parameter. + subset is used to test the model, the others form the train set. So, the %SVM algorithm is + executed kFold times. + @param Cgrid grid for C + @param gammaGrid grid for gamma + @param pGrid grid for p + @param nuGrid grid for nu + @param coeffGrid grid for coeff + @param degreeGrid grid for degree @param balanced If true and the problem is 2-class classification then the method creates more - balanced cross-validation subsets that is proportions between classes in subsets are close to such - proportion in the whole train dataset. + balanced cross-validation subsets that is proportions between classes in subsets are close + to such proportion in the whole train dataset. - The method trains the SVM model automatically by choosing the optimal parameters C, gamma, p, nu, - coef0, degree from SVM::Params. Parameters are considered optimal when the cross-validation estimate - of the test set error is minimal. + The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, + nu, coef0, degree from SVM::Params. Parameters are considered optimal when the cross-validation + estimate of the test set error is minimal. - If there is no need to optimize a parameter, the corresponding grid step should be set to any value - less than or equal to 1. 
For example, to avoid optimization in gamma, set gammaGrid.step = 0, - gammaGrid.minVal, gamma_grid.maxVal as arbitrary numbers. In this case, the value params.gamma is - taken for gamma. + If there is no need to optimize a parameter, the corresponding grid step should be set to any + value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step + = 0`, `gammaGrid.minVal`, `gamma_grid.maxVal` as arbitrary numbers. In this case, the value + `params.gamma` is taken for gamma. - And, finally, if the optimization in a parameter is required but the corresponding grid is unknown, - you may call the function SVM::getDefaulltGrid. To generate a grid, for example, for gamma, call - SVM::getDefaulltGrid(SVM::GAMMA). + And, finally, if the optimization in a parameter is required but the corresponding grid is + unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for + gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`. - This function works for the classification (params.svmType=SVM::C_SVC or - params.svmType=SVM::NU_SVC) as well as for the regression (params.svmType=SVM::EPS_SVR or - params.svmType=SVM::NU_SVR). If params.svmType=SVM::ONE_CLASS, no optimization is made and the - usual SVM with parameters specified in params is executed. + This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the + regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and + the usual %SVM with parameters specified in params is executed. */ virtual bool trainAuto( const Ptr& data, int kFold = 10, ParamGrid Cgrid = SVM::getDefaultGrid(SVM::C), @@ -1152,14 +705,14 @@ public: /** @brief Retrieves all the support vectors - The method returns all the support vector as floating-point matrix, where support vectors are stored - as matrix rows. + The method returns all the support vector as floating-point matrix, where support vectors are + stored as matrix rows. */ CV_WRAP virtual Mat getSupportVectors() const = 0; virtual void setParams(const Params& p, const Ptr& customKernel=Ptr()) = 0; - /** @brief Returns the current SVM parameters. + /** @brief Returns the current %SVM parameters. This function may be used to get the optimal parameters obtained while automatically training SVM::trainAuto. @@ -1170,101 +723,102 @@ public: /** @brief Retrieves the decision function @param i the index of the decision function. If the problem solved is regression, 1-class or - 2-class classification, then there will be just one decision function and the index should always - be 0. Otherwise, in the case of N-class classification, there will be N\*(N-1)/2 decision - functions. + 2-class classification, then there will be just one decision function and the index should + always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$ + decision functions. @param alpha the optional output vector for weights, corresponding to different support vectors. - In the case of linear SVM all the alpha's will be 1's. - @param svidx the optional output vector of indices of support vectors within the matrix of support - vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear SVM each - decision function consists of a single "compressed" support vector. + In the case of linear %SVM all the alpha's will be 1's. + @param svidx the optional output vector of indices of support vectors within the matrix of + support vectors (which can be retrieved by SVM::getSupportVectors). 
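A sketch of SVM::trainAuto with the default grids, followed by retrieval of the optimized parameters, the support vectors and the decision function described in this hunk; as before, tdata is assumed to be a prepared Ptr<TrainData> for a 2-class problem:

@code
Ptr<SVM> svm = SVM::create(SVM::Params());    // C_SVC + RBF by default
svm->trainAuto(tdata, 10,
               SVM::getDefaultGrid(SVM::C),
               SVM::getDefaultGrid(SVM::GAMMA),
               SVM::getDefaultGrid(SVM::P),
               SVM::getDefaultGrid(SVM::NU),
               SVM::getDefaultGrid(SVM::COEF),
               SVM::getDefaultGrid(SVM::DEGREE),
               true);                          // balanced folds for 2-class classification

SVM::Params best = svm->getParams();           // C, gamma, ... chosen by cross-validation
Mat sv = svm->getSupportVectors();             // one support vector per row

Mat alpha, svidx;
double rho = svm->getDecisionFunction(0, alpha, svidx);
@endcode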
In the case of linear + %SVM each decision function consists of a single "compressed" support vector. - The method returns rho parameter of the decision function, a scalar subtracted from the weighted sum - of kernel responses. + The method returns rho parameter of the decision function, a scalar subtracted from the weighted + sum of kernel responses. */ virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0; - /** @brief Generates a grid for SVM parameters. + /** @brief Generates a grid for %SVM parameters. - @param param_id SVM parameters IDs that must be one of the following: - - **SVM::C** - - **SVM::GAMMA** - - **SVM::P** - - **SVM::NU** - - **SVM::COEF** - - **SVM::DEGREE** - The grid is generated for the parameter with this ID. + @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is + generated for the parameter with this ID. - The function generates a grid for the specified parameter of the SVM algorithm. The grid may be + The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be passed to the function SVM::trainAuto. */ static ParamGrid getDefaultGrid( int param_id ); /** @brief Creates empty model - @param p SVM parameters + @param p %SVM parameters @param customKernel the optional custom kernel to use. It must implement SVM::Kernel interface. - Use StatModel::train to train the model, StatModel::train\(traindata, params) to create and - train the model, StatModel::load\(filename) to load the pre-trained model. Since SVM has - several parameters, you may want to find the best parameters for your problem. It can be done with - SVM::trainAuto. + Use StatModel::train to train the model: + @code + StatModel::train(traindata, params); // to create and train the model + // or + StatModel::load(filename); // to load the pre-trained model. + @endcode + Since %SVM has several parameters, you may want to find the best parameters for your problem. It + can be done with SVM::trainAuto. */ static Ptr create(const Params& p=Params(), const Ptr& customKernel=Ptr()); }; -//! @} ml_svm - /****************************************************************************************\ * Expectation - Maximization * \****************************************************************************************/ -//! @addtogroup ml_em -//! @{ +/** @brief The class implements the Expectation Maximization algorithm. -/** @brief The class implements the EM algorithm as described in the beginning of this section. +@sa @ref ml_intro_em */ class CV_EXPORTS_W EM : public StatModel { public: - // Type of covariation matrices - enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL}; + //! Type of covariation matrices + enum Types { + /** A scaled identity matrix \f$\mu_k * I\f$. There is the only + parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, + when the constraint is relevant, or as a first step in the optimization (for example in case + when the data is preprocessed with PCA). The results of such preliminary estimation may be + passed again to the optimization procedure, this time with + covMatType=EM::COV_MAT_DIAGONAL. */ + COV_MAT_SPHERICAL=0, + /** A diagonal matrix with positive diagonal elements. The number of + free parameters is d for each matrix. This is most commonly used option yielding good + estimation results. */ + COV_MAT_DIAGONAL=1, + /** A symmetric positively defined matrix. 
The number of free + parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless + there is pretty accurate initial estimation of the parameters and/or a huge number of + training samples. */ + COV_MAT_GENERIC=2, + COV_MAT_DEFAULT=COV_MAT_DIAGONAL + }; - // Default parameters + //! Default parameters enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; - // The initial step + //! The initial step enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; - /** @brief The class describes EM training parameters. + /** @brief The class describes %EM training parameters. */ class CV_EXPORTS_W_MAP Params { public: /** @brief The constructor - @param nclusters The number of mixture components in the Gaussian mixture model. Default value of - the parameter is EM::DEFAULT_NCLUSTERS=5. Some of EM implementation could determine the optimal - number of mixtures within a specified value range, but that is not the case in ML yet. - @param covMatType Constraint on covariance matrices which defines type of matrices. Possible - values are: - - **EM::COV_MAT_SPHERICAL** A scaled identity matrix \f$\mu_k * I\f$. There is the only - parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, - when the constraint is relevant, or as a first step in the optimization (for example in case - when the data is preprocessed with PCA). The results of such preliminary estimation may be - passed again to the optimization procedure, this time with - covMatType=EM::COV_MAT_DIAGONAL. - - **EM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of - free parameters is d for each matrix. This is most commonly used option yielding good - estimation results. - - **EM::COV_MAT_GENERIC** A symmetric positively defined matrix. The number of free - parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless - there is pretty accurate initial estimation of the parameters and/or a huge number of - training samples. - @param termCrit The termination criteria of the EM algorithm. The EM algorithm can be terminated - by the number of iterations termCrit.maxCount (number of M-steps) or when relative change of - likelihood logarithm is less than termCrit.epsilon. Default maximum number of iterations is - EM::DEFAULT_MAX_ITERS=100. + @param nclusters The number of mixture components in the Gaussian mixture model. Default + value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could + determine the optimal number of mixtures within a specified value range, but that is not + the case in ML yet. + @param covMatType Constraint on covariance matrices which defines type of matrices. See + EM::Types. + @param termCrit The termination criteria of the %EM algorithm. The %EM algorithm can be + terminated by the number of iterations termCrit.maxCount (number of M-steps) or when + relative change of likelihood logarithm is less than termCrit.epsilon. Default maximum + number of iterations is EM::DEFAULT_MAX_ITERS=100. */ explicit Params(int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, @@ -1283,62 +837,61 @@ public: virtual Mat getWeights() const = 0; /** @brief Returns the cluster centers (means of the Gaussian mixture) - Returns matrix with the number of rows equal to the number of mixtures and number of columns equal - to the space dimensionality. 
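A configuration sketch for EM::Params together with the accessors documented above; it assumes `em` is a model already trained with one of the static EM::train methods shown further below:

@code
// Three mixture components, diagonal covariances, at most 300 iterations or eps = 1e-6.
EM::Params emParams(3, EM::COV_MAT_DIAGONAL,
                    TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 300, 1e-6));

// Once a model `em` has been trained (see EM::train below):
Mat means   = em->getMeans();     // nclusters x dims
Mat weights = em->getWeights();   // mixing coefficients
std::vector<Mat> covs;
em->getCovs(covs);                // one dims x dims matrix per component
@endcode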
+ Returns matrix with the number of rows equal to the number of mixtures and number of columns + equal to the space dimensionality. */ virtual Mat getMeans() const = 0; /** @brief Returns covariation matrices - Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, each - matrix is a square floating-point matrix NxN, where N is the space dimensionality. + Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, + each matrix is a square floating-point matrix NxN, where N is the space dimensionality. */ virtual void getCovs(std::vector& covs) const = 0; - /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component for the - given sample. + /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component + for the given sample. - @param sample A sample for classification. It should be a one-channel matrix of \f$1 \times dims\f$ or - \f$dims \times 1\f$ size. - @param probs Optional output matrix that contains posterior probabilities of each component given - the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. + @param sample A sample for classification. It should be a one-channel matrix of + \f$1 \times dims\f$ or \f$dims \times 1\f$ size. + @param probs Optional output matrix that contains posterior probabilities of each component + given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. - The method returns a two-element double vector. Zero element is a likelihood logarithm value for the - sample. First element is an index of the most probable mixture component for the given sample. + The method returns a two-element double vector. Zero element is a likelihood logarithm value for + the sample. First element is an index of the most probable mixture component for the given + sample. */ CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; virtual bool train( const Ptr& trainData, int flags=0 ) = 0; - /** @brief Static methods that estimate the Gaussian mixture parameters from a samples set + /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + + This variation starts with Expectation step. Initial values of the model parameters will be + estimated by the k-means algorithm. + + Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take + responses (class labels or function values) as input. Instead, it computes the *Maximum + Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the + parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in + covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each + sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most + probable mixture component for each sample). + + The trained model can be used further for prediction, just like any other classifier. The + trained model is similar to the NormalBayesClassifier. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a - one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it - will be converted to the inner matrix of such type for the further computing. + one-channel matrix, each row of which is a sample. 
If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for - each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian - mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 - type. + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. @param params The Gaussian mixture params, see EM::Params description - @return true if the Gaussian mixture model was trained successfully, otherwise it returns - false. - - Starts with Expectation step. Initial values of the model parameters will be estimated by the - k-means algorithm. - - Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take - responses (class labels or function values) as input. Instead, it computes the *Maximum Likelihood - Estimate* of the Gaussian mixture parameters from an input sample set, stores all the parameters - inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in covs[k], \f$\pi_k\f$ in weights , - and optionally computes the output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). - - The trained model can be used further for prediction, just like any other classifier. The trained - model is similar to the NormalBayesClassifier. */ static Ptr train(InputArray samples, OutputArray logLikelihoods=noArray(), @@ -1346,30 +899,32 @@ public: OutputArray probs=noArray(), const Params& params=Params()); - /** Starts with Expectation step. You need to provide initial means \f$a_k\f$ of mixture - components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices + /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + + This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of + mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices \f$S_k\f$ of mixture components. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a - one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it - will be converted to the inner matrix of such type for the further computing. + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of - \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be converted to the - inner matrix of such type for the further computing. 
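The static EM::train documented above behaves as unsupervised clustering. A sketch, assuming a hypothetical N x dims matrix `samples` and a 1 x dims row `query`:

@code
Mat logLikelihoods, labels, probs;
Ptr<EM> em = EM::train(samples, logLikelihoods, labels, probs,
                       EM::Params(3, EM::COV_MAT_DIAGONAL));
// labels: N x 1 CV_32SC1, index of the most probable component per sample

Vec2d res = em->predict2(query, noArray());
double logL        = res[0];            // likelihood logarithm for the query
int    componentId = cvRound(res[1]);   // most probable mixture component
@endcode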
+ \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be + converted to the inner matrix of such type for the further computing. @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of - covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices do not - have CV_64F type they will be converted to the inner matrices of such type for the further - computing. + covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices + do not have CV_64F type they will be converted to the inner matrices of such type for the + further computing. @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel - floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. + floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for - each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian - mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 - type. + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. @param params The Gaussian mixture params, see EM::Params description */ static Ptr train_startWithE(InputArray samples, InputArray means0, @@ -1380,21 +935,23 @@ public: OutputArray probs=noArray(), const Params& params=Params()); - /** Starts with Maximization step. You need to provide initial probabilities \f$p_{i,k}\f$ to - use this option. + /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + + This variation starts with Maximization step. You need to provide initial probabilities + \f$p_{i,k}\f$ to use this option. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a - one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it - will be converted to the inner matrix of such type for the further computing. + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. @param probs0 @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for - each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. 
+ \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian - mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 - type. + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. @param params The Gaussian mixture params, see EM::Params description */ static Ptr train_startWithM(InputArray samples, InputArray probs0, @@ -1403,9 +960,9 @@ public: OutputArray probs=noArray(), const Params& params=Params()); - /** @brief Creates empty EM model + /** @brief Creates empty %EM model - @param params EM parameters + @param params %EM parameters The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you can use one of the EM::train\* methods or load it from file using StatModel::load\(filename). @@ -1413,194 +970,154 @@ public: static Ptr create(const Params& params=Params()); }; -//! @} ml_em - /****************************************************************************************\ * Decision Tree * \****************************************************************************************/ -//! @addtogroup ml_decsiontrees -//! @{ +/** @brief The class represents a single decision tree or a collection of decision trees. -/** @brief The class represents a single decision tree or a collection of decision trees. The current public -interface of the class allows user to train only a single decision tree, however the class is -capable of storing multiple decision trees and using them for prediction (by summing responses or -using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) use this -capability to implement decision tree ensembles. - */ +The current public interface of the class allows user to train only a single decision tree, however +the class is capable of storing multiple decision trees and using them for prediction (by summing +responses or using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) +use this capability to implement decision tree ensembles. + +@sa @ref ml_intro_trees +*/ class CV_EXPORTS_W DTrees : public StatModel { public: - enum { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; + /** Predict options */ + enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; - /** @brief The structure contains all the decision tree training parameters. You can initialize it by default - constructor and then override any parameters directly before training, or the structure may be fully - initialized using the advanced variant of the constructor. + /** @brief The structure contains all the decision tree training parameters. + + You can initialize it by default constructor and then override any parameters directly before + training, or the structure may be fully initialized using the advanced variant of the + constructor. */ class CV_EXPORTS_W_MAP Params { public: + /** @brief Default constructor. */ Params(); - /** @brief The constructors - - @param maxDepth The maximum possible depth of the tree. That is the training algorithms attempts - to split a node while its depth is less than maxDepth. The root node has zero depth. 
The actual - depth may be smaller if the other termination criteria are met (see the outline of the training - procedure in the beginning of the section), and/or if the tree is pruned. - @param minSampleCount If the number of samples in a node is less than this parameter then the node - will not be split. - @param regressionAccuracy Termination criteria for regression trees. If all absolute differences - between an estimated value in a node and values of train samples in this node are less than this - parameter then the node will not be split further. - @param useSurrogates If true then surrogate splits will be built. These splits allow to work with - missing data and compute variable importance correctly. - - @note currently it's not implemented. - - @param maxCategories Cluster possible values of a categorical variable into K\<=maxCategories - clusters to find a suboptimal split. If a discrete variable, on which the training procedure - tries to make a split, takes more than maxCategories values, the precise best subset estimation - may take a very long time because the algorithm is exponential. Instead, many decision trees - engines (including our implementation) try to find sub-optimal split in this case by clustering - all the samples into maxCategories clusters that is some categories are merged together. The - clustering is applied only in n \> 2-class classification problems for categorical variables - with N \> max_categories possible values. In case of regression and 2-class classification the - optimal split can be found efficiently without employing clustering, thus the parameter is not - used in these cases. - - @param CVFolds If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold - cross-validation procedure where K is equal to CVFolds. - - @param use1SERule If true then a pruning will be harsher. This will make a tree more compact and - more resistant to the training data noise but a bit less accurate. - - @param truncatePrunedTree If true then pruned branches are physically removed from the tree. - Otherwise they are retained and it is possible to get results from the original unpruned (or - pruned less aggressively) tree. - - @param priors The array of a priori class probabilities, sorted by the class label value. The - parameter can be used to tune the decision tree preferences toward a certain class. For example, - if you want to detect some rare anomaly occurrence, the training base will likely contain much - more normal cases than anomalies, so a very good classification performance will be achieved - just by considering every case as normal. To avoid this, the priors can be specified, where the - anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the - misclassified anomalies becomes much bigger, and the tree is adjusted properly. You can also - think about this parameter as weights of prediction categories which determine relative weights - that you give to misclassification. That is, if the weight of the first category is 1 and the - weight of the second category is 10, then each mistake in predicting the second category is - equivalent to making 10 mistakes in predicting the first category. 
- - The default constructor initializes all the parameters with the default values tuned for the - standalone classification tree: - @code - DTrees::Params::Params() - { - maxDepth = INT_MAX; - minSampleCount = 10; - regressionAccuracy = 0.01f; - useSurrogates = false; - maxCategories = 10; - CVFolds = 10; - use1SERule = true; - truncatePrunedTree = true; - priors = Mat(); - } - @endcode - */ + /** @brief Constructor with parameters */ Params( int maxDepth, int minSampleCount, double regressionAccuracy, bool useSurrogates, int maxCategories, int CVFolds, bool use1SERule, bool truncatePrunedTree, const Mat& priors ); + /** @brief Cluster possible values of a categorical variable into K\<=maxCategories clusters + to find a suboptimal split. + + If a discrete variable, on which the training procedure tries to make a split, takes more + than maxCategories values, the precise best subset estimation may take a very long time + because the algorithm is exponential. Instead, many decision trees engines (including our + implementation) try to find sub-optimal split in this case by clustering all the samples + into maxCategories clusters that is some categories are merged together. The clustering is + applied only in n \> 2-class classification problems for categorical variables with N \> + max_categories possible values. In case of regression and 2-class classification the optimal + split can be found efficiently without employing clustering, thus the parameter is not used + in these cases. Default value is 10.*/ CV_PROP_RW int maxCategories; + /** @brief The maximum possible depth of the tree. + + That is the training algorithms attempts to split a node while its depth is less than + maxDepth. The root node has zero depth. The actual depth may be smaller if the other + termination criteria are met (see the outline of the training procedure @ref ml_intro_trees + "here"), and/or if the tree is pruned. Default value is INT_MAX.*/ CV_PROP_RW int maxDepth; + /** If the number of samples in a node is less than this parameter then the node will not be + split. Default value is 10.*/ CV_PROP_RW int minSampleCount; + /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold + cross-validation procedure where K is equal to CVFolds. Default value is 10.*/ CV_PROP_RW int CVFolds; + /** @brief If true then surrogate splits will be built. + + These splits allow to work with missing data and compute variable importance correctly. + @note currently it's not implemented. Default value is false.*/ CV_PROP_RW bool useSurrogates; + /** If true then a pruning will be harsher. This will make a tree more compact and more + resistant to the training data noise but a bit less accurate. Default value is true.*/ CV_PROP_RW bool use1SERule; + /** If true then pruned branches are physically removed from the tree. Otherwise they are + retained and it is possible to get results from the original unpruned (or pruned less + aggressively) tree. Default value is true.*/ CV_PROP_RW bool truncatePrunedTree; + /** @brief Termination criteria for regression trees. + + If all absolute differences between an estimated value in a node and values of train samples + in this node are less than this parameter then the node will not be split further. Default + value is 0.01f*/ CV_PROP_RW float regressionAccuracy; + /** @brief The array of a priori class probabilities, sorted by the class label value. + + The parameter can be used to tune the decision tree preferences toward a certain class. 
For + example, if you want to detect some rare anomaly occurrence, the training base will likely + contain much more normal cases than anomalies, so a very good classification performance + will be achieved just by considering every case as normal. To avoid this, the priors can be + specified, where the anomaly probability is artificially increased (up to 0.5 or even + greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is + adjusted properly. + + You can also think about this parameter as weights of prediction categories which determine + relative weights that you give to misclassification. That is, if the weight of the first + category is 1 and the weight of the second category is 10, then each mistake in predicting + the second category is equivalent to making 10 mistakes in predicting the first category. + Default value is empty Mat.*/ CV_PROP_RW Mat priors; }; - /** @brief The class represents a decision tree node. It has public members: - - - member double value - Value at the node: a class label in case of classification or estimated function value in case - of regression. - - member int classIdx - Class index normalized to 0..class_count-1 range and assigned to the node. It is used - internally in classification trees and tree ensembles. - - member int parent - Index of the parent node - - member int left - Index of the left child node - - member int right - Index of right child node. - - member int defaultDir - Default direction where to go (-1: left or +1: right). It helps in the case of missing values. - - member int split - Index of the first split + /** @brief The class represents a decision tree node. */ class CV_EXPORTS Node { public: Node(); - double value; - int classIdx; - - int parent; - int left; - int right; - int defaultDir; - - int split; + double value; //!< Value at the node: a class label in case of classification or estimated + //!< function value in case of regression. + int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the + //!< node. It is used internally in classification trees and tree ensembles. + int parent; //!< Index of the parent node + int left; //!< Index of the left child node + int right; //!< Index of right child node + int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the + //!< case of missing values. + int split; //!< Index of the first split }; - /** @brief The class represents split in a decision tree. It has public members: - - member int varIdx - Index of variable on which the split is created. - - member bool inversed - If true, then the inverse split rule is used (i.e. left and right branches are exchanged in - the rule expressions below). - - member float quality - The split quality, a positive number. It is used to choose the best split. - - member int next - Index of the next split in the list of splits for the node - - member float c - The threshold value in case of split on an ordered variable. The rule is: : - if var_value < c - then next_node<-left - else next_node<-right - - member int subsetOfs - Offset of the bitset used by the split on a categorical variable. The rule is: : - if bitset[var_value] == 1 - then next_node <- left - else next_node <- right + /** @brief The class represents split in a decision tree. */ class CV_EXPORTS Split { public: Split(); - int varIdx; - bool inversed; - float quality; - int next; - float c; - int subsetOfs; + int varIdx; //!< Index of variable on which the split is created. 
+ bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right + //!< branches are exchanged in the rule expressions below). + float quality; //!< The split quality, a positive number. It is used to choose the best split. + int next; //!< Index of the next split in the list of splits for the node + float c; /**< The threshold value in case of split on an ordered variable. + The rule is: + @code{.none} + if var_value < c + then next_node <- left + else next_node <- right + @endcode */ + int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable. + The rule is: + @code{.none} + if bitset[var_value] == 1 + then next_node <- left + else next_node <- right + @endcode */ }; /** @brief Sets the training parameters - - @param p Training parameters of type DTrees::Params. - - The method sets the training parameters. */ virtual void setDParams(const Params& p); /** @brief Returns the training parameters - - The method returns the training parameters. */ virtual Params getDParams() const; @@ -1609,13 +1126,12 @@ public: virtual const std::vector& getRoots() const = 0; /** @brief Returns all the nodes - all the node indices, mentioned above (left, right, parent, root indices) are indices in the - returned vector + all the node indices are indices in the returned vector */ virtual const std::vector& getNodes() const = 0; /** @brief Returns all the splits - all the split indices, mentioned above (split, next etc.) are indices in the returned vector + all the split indices are indices in the returned vector */ virtual const std::vector& getSplits() const = 0; /** @brief Returns all the bitsets for categorical splits @@ -1627,22 +1143,19 @@ public: /** @brief Creates the empty model The static method creates empty decision tree with the specified parameters. It should be then - trained using train method (see StatModel::train). Alternatively, you can load the model from file - using StatModel::load\(filename). + trained using train method (see StatModel::train). Alternatively, you can load the model from + file using StatModel::load\(filename). */ static Ptr create(const Params& params=Params()); }; -//! @} ml_decsiontrees - /****************************************************************************************\ * Random Trees Classifier * \****************************************************************************************/ -//! @addtogroup ml_randomtrees -//! @{ +/** @brief The class implements the random forest predictor. -/** @brief The class implements the random forest predictor as described in the beginning of this section. +@sa @ref ml_intro_rtrees */ class CV_EXPORTS_W RTrees : public DTrees { @@ -1656,55 +1169,29 @@ public: class CV_EXPORTS_W_MAP Params : public DTrees::Params { public: + /** @brief Default constructor. */ Params(); - /** @brief The constructors - - @param maxDepth the depth of the tree. A low value will likely underfit and conversely a high - value will likely overfit. The optimal value can be obtained using cross validation or other - suitable methods. - @param minSampleCount minimum samples required at a leaf node for it to be split. A reasonable - value is a small percentage of the total data e.g. 1%. - @param regressionAccuracy - @param useSurrogates - @param maxCategories Cluster possible values of a categorical variable into K \<= maxCategories - clusters to find a suboptimal split. 
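Putting the DTrees::Params fields and DTrees::create documented above together, a single-tree training sketch; tdata is again an assumed Ptr<TrainData>, and sampleRow is a hypothetical 1 x nvars CV_32F query row:

@code
DTrees::Params dparams;
dparams.maxDepth       = 8;
dparams.minSampleCount = 2;
dparams.CVFolds        = 0;        // <= 1 disables cross-validation pruning
dparams.useSurrogates  = false;

Ptr<DTrees> dtree = DTrees::create(dparams);
dtree->train(tdata);
float prediction = dtree->predict(sampleRow);
@endcode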
If a discrete variable, on which the training procedure tries - to make a split, takes more than max_categories values, the precise best subset estimation may - take a very long time because the algorithm is exponential. Instead, many decision trees engines - (including ML) try to find sub-optimal split in this case by clustering all the samples into - maxCategories clusters that is some categories are merged together. The clustering is applied only - in n\>2-class classification problems for categorical variables with N \> max_categories possible - values. In case of regression and 2-class classification the optimal split can be found - efficiently without employing clustering, thus the parameter is not used in these cases. - @param priors - @param calcVarImportance If true then variable importance will be calculated and then it can be - retrieved by RTrees::getVarImportance. - @param nactiveVars The size of the randomly selected subset of features at each tree node and that - are used to find the best split(s). If you set it to 0 then the size will be set to the square - root of the total number of features. - @param termCrit The termination criteria that specifies when the training algorithm stops - either - when the specified number of trees is trained and added to the ensemble or when sufficient - accuracy (measured as OOB error) is achieved. Typically the more trees you have the better the - accuracy. However, the improvement in accuracy generally diminishes and asymptotes pass a certain - number of trees. Also to keep in mind, the number of tree increases the prediction time linearly. - - The default constructor sets all parameters to default values which are different from default - values of `DTrees::Params`: - @code - RTrees::Params::Params() : DTrees::Params( 5, 10, 0, false, 10, 0, false, false, Mat() ), - calcVarImportance(false), nactiveVars(0) - { - termCrit = cvTermCriteria( TermCriteria::MAX_ITERS + TermCriteria::EPS, 50, 0.1 ); - } - @endcode - */ + /** @brief Constructor with parameters. */ Params( int maxDepth, int minSampleCount, double regressionAccuracy, bool useSurrogates, int maxCategories, const Mat& priors, bool calcVarImportance, int nactiveVars, TermCriteria termCrit ); - CV_PROP_RW bool calcVarImportance; // true <=> RF processes variable importance + /** If true then variable importance will be calculated and then it can be retrieved by + RTrees::getVarImportance. Default value is false.*/ + CV_PROP_RW bool calcVarImportance; + /** The size of the randomly selected subset of features at each tree node and that are used + to find the best split(s). If you set it to 0 then the size will be set to the square root + of the total number of features. Default value is 0.*/ CV_PROP_RW int nactiveVars; + /** The termination criteria that specifies when the training algorithm stops - either when + the specified number of trees is trained and added to the ensemble or when sufficient + accuracy (measured as OOB error) is achieved. Typically the more trees you have the better + the accuracy. However, the improvement in accuracy generally diminishes and asymptotes pass + a certain number of trees. Also to keep in mind, the number of tree increases the prediction + time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS + TermCriteria::EPS, + 50, 0.1)*/ CV_PROP_RW TermCriteria termCrit; }; @@ -1714,109 +1201,82 @@ public: /** @brief Returns the variable importance array. 
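    A minimal usage sketch for the random forest parameters above. The numeric values are
    illustrative only, and `trainData` stands for a cv::ml::TrainData object prepared elsewhere.
    @code
        using namespace cv;
        using namespace cv::ml;

        RTrees::Params rtParams(10, 10, 0, false, 15, Mat(),  // tree parameters, no priors
                                true,                          // calcVarImportance
                                4,                             // nactiveVars
                                TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 0.01));
        Ptr<RTrees> forest = RTrees::create(rtParams);
        forest->train(trainData);
        Mat importance = forest->getVarImportance();  // non-empty because calcVarImportance is true
    @endcode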
The method returns the variable importance vector, computed at the training stage when - RTParams::calcVarImportance is set to true. If this flag was set to false, the empty matrix is + Params::calcVarImportance is set to true. If this flag was set to false, the empty matrix is returned. */ virtual Mat getVarImportance() const = 0; /** @brief Creates the empty model - Use StatModel::train to train the model, StatModel::train to create and - train the model, StatModel::load to load the pre-trained model. + Use StatModel::train to train the model, StatModel::train to create and train the model, + StatModel::load to load the pre-trained model. */ static Ptr create(const Params& params=Params()); }; -//! @} ml_randomtrees - /****************************************************************************************\ * Boosted tree classifier * \****************************************************************************************/ -//! @addtogroup ml_boost -//! @{ - /** @brief Boosted tree classifier derived from DTrees + +@sa @ref ml_intro_boost */ class CV_EXPORTS_W Boost : public DTrees { public: - /** @brief The structure is derived from DTrees::Params but not all of the decision tree parameters are + /** @brief Parameters of Boost trees. + + The structure is derived from DTrees::Params but not all of the decision tree parameters are supported. In particular, cross-validation is not supported. - All parameters are public. You can initialize them by a constructor and then override some of them - directly if you want. + All parameters are public. You can initialize them by a constructor and then override some of + them directly if you want. */ class CV_EXPORTS_W_MAP Params : public DTrees::Params { public: - CV_PROP_RW int boostType; - CV_PROP_RW int weakCount; + CV_PROP_RW int boostType; //!< Type of the boosting algorithm. See Boost::Types. + //!< Default value is Boost::REAL. + CV_PROP_RW int weakCount; //!< The number of weak classifiers. Default value is 100. + /** A threshold between 0 and 1 used to save computational time. Samples with summary weight + \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of training. Set + this parameter to 0 to turn off this functionality. Default value is 0.95.*/ CV_PROP_RW double weightTrimRate; + /** @brief Default constructor */ Params(); - /** @brief The constructors. - - @param boostType Type of the boosting algorithm. Possible values are: - - **Boost::DISCRETE** Discrete AdaBoost. - - **Boost::REAL** Real AdaBoost. It is a technique that utilizes confidence-rated predictions - and works well with categorical data. - - **Boost::LOGIT** LogitBoost. It can produce good regression fits. - - **Boost::GENTLE** Gentle AdaBoost. It puts less weight on outlier data points and for that - reason is often good with regression data. - Gentle AdaBoost and Real AdaBoost are often the preferable choices. - @param weakCount The number of weak classifiers. - @param weightTrimRate A threshold between 0 and 1 used to save computational time. Samples - with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of - training. Set this parameter to 0 to turn off this functionality. - @param maxDepth - @param useSurrogates - @param priors - - See DTrees::Params for description of other parameters. 
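    A short configuration sketch for the boosted tree parameters above. Gentle AdaBoost is picked
    here only as an example of a commonly preferred type; the remaining values are illustrative,
    and `trainData` stands for a cv::ml::TrainData object prepared elsewhere.
    @code
        using namespace cv;
        using namespace cv::ml;

        Boost::Params bp(Boost::GENTLE,  // boostType
                         100,            // weakCount
                         0.95,           // weightTrimRate
                         2,              // maxDepth of each weak tree
                         false,          // useSurrogates
                         Mat());         // priors
        Ptr<Boost> booster = Boost::create(bp);
        booster->train(trainData);
    @endcode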
- - Default parameters are: - @code - Boost::Params::Params() - { - boostType = Boost::REAL; - weakCount = 100; - weightTrimRate = 0.95; - CVFolds = 0; - maxDepth = 1; - } - @endcode - */ + /** @brief Constructor with parameters */ Params( int boostType, int weakCount, double weightTrimRate, int maxDepth, bool useSurrogates, const Mat& priors ); }; - // Boosting type - enum { DISCRETE=0, REAL=1, LOGIT=2, GENTLE=3 }; + /** @brief Boosting type - /** @brief Returns the boosting parameters + Gentle AdaBoost and Real AdaBoost are often the preferable choices. + */ + enum Types { + DISCRETE=0, //!< Discrete AdaBoost. + REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions + //!< and works well with categorical data. + LOGIT=2, //!< LogitBoost. It can produce good regression fits. + GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that + //!(traindata, params) to create and - train the model, StatModel::load\(filename) to load the pre-trained model. + Use StatModel::train to train the model, StatModel::train\(traindata, params) to create + and train the model, StatModel::load\(filename) to load the pre-trained model. */ static Ptr create(const Params& params=Params()); }; -//! @} ml_boost - /****************************************************************************************\ * Gradient Boosted Trees * \****************************************************************************************/ @@ -1852,170 +1312,138 @@ public: /////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// -//! @addtogroup ml_neural -//! @{ - -/** @brief MLP model. +/** @brief Artificial Neural Networks - Multi-Layer Perceptrons. Unlike many other models in ML that are constructed and trained at once, in the MLP model these steps are separated. First, a network with the specified topology is created using the non-default constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is trained using a set of input and output vectors. The training procedure can be repeated more than once, that is, the weights can be adjusted based on the new training data. + +Additional flags for StatModel::train are available: ANN_MLP::TrainFlags. + +@sa @ref ml_intro_ann */ class CV_EXPORTS_W ANN_MLP : public StatModel { public: /** @brief Parameters of the MLP and of the training algorithm. - - You can initialize the structure by a constructor or the individual parameters can be adjusted - after the structure is created. - The network structure: - - member Mat layerSizes - The number of elements in each layer of network. The very first element specifies the number - of elements in the input layer. The last element - number of elements in the output layer. - - member int activateFunc - The activation function. Currently the only fully supported activation function is - ANN_MLP::SIGMOID_SYM. - - member double fparam1 - The first parameter of activation function, 0 by default. - - member double fparam2 - The second parameter of the activation function, 0 by default. - @note - If you are using the default ANN_MLP::SIGMOID_SYM activation function with the default - parameter values fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), - so the output will range from [-1.7159, 1.7159], instead of [0,1]. - - The back-propagation algorithm parameters: - - member double bpDWScale - Strength of the weight gradient term. The recommended value is about 0.1. 
- - member double bpMomentScale - Strength of the momentum term (the difference between weights on the 2 previous iterations). - This parameter provides some inertia to smooth the random fluctuations of the weights. It - can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good - enough - The RPROP algorithm parameters (see @cite RPROP93 for details): - - member double prDW0 - Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. - - member double rpDWPlus - Increase factor \f$\eta^+\f$. It must be \>1. - - member double rpDWMinus - Decrease factor \f$\eta^-\f$. It must be \<1. - - member double rpDWMin - Update-values lower limit \f$\Delta_{min}\f$. It must be positive. - - member double rpDWMax - Update-values upper limit \f$\Delta_{max}\f$. It must be \>1. - */ + */ struct CV_EXPORTS_W_MAP Params { + /** @brief Default constructor */ Params(); - /** @brief Construct the parameter structure - - @param layerSizes Integer vector specifying the number of neurons in each layer including the - input and output layers. - @param activateFunc Parameter specifying the activation function for each neuron: one of - ANN_MLP::IDENTITY, ANN_MLP::SIGMOID_SYM, and ANN_MLP::GAUSSIAN. - @param fparam1 The first parameter of the activation function, \f$\alpha\f$. See the formulas in the - introduction section. - @param fparam2 The second parameter of the activation function, \f$\beta\f$. See the formulas in the - introduction section. - @param termCrit Termination criteria of the training algorithm. You can specify the maximum number - of iterations (maxCount) and/or how much the error could change between the iterations to make the - algorithm continue (epsilon). - @param trainMethod Training method of the MLP. Possible values are: - - **ANN_MLP_TrainParams::BACKPROP** The back-propagation algorithm. - - **ANN_MLP_TrainParams::RPROP** The RPROP algorithm. - @param param1 Parameter of the training method. It is rp_dw0 for RPROP and bp_dw_scale for - BACKPROP. - @param param2 Parameter of the training method. It is rp_dw_min for RPROP and bp_moment_scale - for BACKPROP. - - By default the RPROP algorithm is used: - @code - ANN_MLP_TrainParams::ANN_MLP_TrainParams() - { - layerSizes = Mat(); - activateFun = SIGMOID_SYM; - fparam1 = fparam2 = 0; - term_crit = TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01 ); - train_method = RPROP; - bpDWScale = bpMomentScale = 0.1; - rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5; - rpDWMin = FLT_EPSILON; rpDWMax = 50.; - } - @endcode + /** @brief Constructor with parameters + @note param1 sets Params::rp_dw0 for RPROP and Paramss::bp_dw_scale for BACKPROP. + @note param2 sets Params::rp_dw_min for RPROP and Params::bp_moment_scale for BACKPROP. */ Params( const Mat& layerSizes, int activateFunc, double fparam1, double fparam2, TermCriteria termCrit, int trainMethod, double param1, double param2=0 ); - enum { BACKPROP=0, RPROP=1 }; + /** Available training methods */ + enum TrainingMethods { + BACKPROP=0, //!< The back-propagation algorithm. + RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details. + }; + /** Integer vector specifying the number of neurons in each layer including the input and + output layers. The very first element specifies the number of elements in the input layer. + The last element - number of elements in the output layer. Default value is empty Mat.*/ CV_PROP_RW Mat layerSizes; + /** The activation function for each neuron. 
Currently the default and the only fully + supported activation function is ANN_MLP::SIGMOID_SYM. See ANN_MLP::ActivationFunctions.*/ CV_PROP_RW int activateFunc; + /** The first parameter of the activation function, \f$\alpha\f$. Default value is 0. */ CV_PROP_RW double fparam1; + /** The second parameter of the activation function, \f$\beta\f$. Default value is 0. */ CV_PROP_RW double fparam2; + /** Termination criteria of the training algorithm. You can specify the maximum number of + iterations (maxCount) and/or how much the error could change between the iterations to make + the algorithm continue (epsilon). Default value is TermCriteria(TermCriteria::MAX_ITER + + TermCriteria::EPS, 1000, 0.01).*/ CV_PROP_RW TermCriteria termCrit; + /** Training method. Default value is Params::RPROP. See ANN_MLP::Params::TrainingMethods.*/ CV_PROP_RW int trainMethod; // backpropagation parameters - CV_PROP_RW double bpDWScale, bpMomentScale; + /** BPROP: Strength of the weight gradient term. The recommended value is about 0.1. Default + value is 0.1.*/ + CV_PROP_RW double bpDWScale; + /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous + iterations). This parameter provides some inertia to smooth the random fluctuations of the + weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so + is good enough. Default value is 0.1.*/ + CV_PROP_RW double bpMomentScale; // rprop parameters - CV_PROP_RW double rpDW0, rpDWPlus, rpDWMinus, rpDWMin, rpDWMax; + /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. Default value is 0.1.*/ + CV_PROP_RW double rpDW0; + /** RPROP: Increase factor \f$\eta^+\f$. It must be \>1. Default value is 1.2.*/ + CV_PROP_RW double rpDWPlus; + /** RPROP: Decrease factor \f$\eta^-\f$. It must be \<1. Default value is 0.5.*/ + CV_PROP_RW double rpDWMinus; + /** RPROP: Update-values lower limit \f$\Delta_{min}\f$. It must be positive. Default value is FLT_EPSILON.*/ + CV_PROP_RW double rpDWMin; + /** RPROP: Update-values upper limit \f$\Delta_{max}\f$. It must be \>1. Default value is 50.*/ + CV_PROP_RW double rpDWMax; }; - // possible activation functions - enum { IDENTITY = 0, SIGMOID_SYM = 1, GAUSSIAN = 2 }; + /** possible activation functions */ + enum ActivationFunctions { + /** Identity function: \f$f(x)=x\f$ */ + IDENTITY = 0, + /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x}\f$ + @note + If you are using the default sigmoid activation function with the default parameter values + fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output + will range from [-1.7159, 1.7159], instead of [0,1].*/ + SIGMOID_SYM = 1, + /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */ + GAUSSIAN = 2 + }; - // available training flags - enum { UPDATE_WEIGHTS = 1, NO_INPUT_SCALE = 2, NO_OUTPUT_SCALE = 4 }; + /** Train options */ + enum TrainFlags { + /** Update the network weights, rather than compute them from scratch. In the latter case + the weights are initialized using the Nguyen-Widrow algorithm. */ + UPDATE_WEIGHTS = 1, + /** Do not normalize the input vectors. If this flag is not set, the training algorithm + normalizes each input feature independently, shifting its mean value to 0 and making the + standard deviation equal to 1. If the network is assumed to be updated frequently, the new + training data could be much different from original one. In this case, you should take care + of proper normalization. 
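    A construction sketch for the MLP parameters above: a 2-8-1 topology trained with RPROP and
    the symmetrical sigmoid. The layer sizes and numeric values are illustrative assumptions, and
    `trainData` stands for a cv::ml::TrainData object prepared elsewhere.
    @code
        using namespace cv;
        using namespace cv::ml;

        Mat layers = (Mat_<int>(3, 1) << 2, 8, 1);  // input, hidden and output neuron counts
        ANN_MLP::Params mp(layers,
                           ANN_MLP::SIGMOID_SYM, 0, 0,  // activation function and its two parameters
                           TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01),
                           ANN_MLP::Params::RPROP,
                           0.1);                        // param1: rp_dw0 for RPROP (param2 keeps its default)
        Ptr<ANN_MLP> nn = ANN_MLP::create(mp);
        nn->train(trainData);                           // optional flags: see ANN_MLP::TrainFlags
    @endcode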
*/ + NO_INPUT_SCALE = 2, + /** Do not normalize the output vectors. If the flag is not set, the training algorithm + normalizes each output feature independently, by transforming it to the certain range + depending on the used activation function. */ + NO_OUTPUT_SCALE = 4 + }; virtual Mat getWeights(int layerIdx) const = 0; - /** @brief Sets the new network parameters - - @param p The new parameters - - The existing network, if any, will be destroyed and new empty one will be created. It should be - re-trained after that. - */ + /** @brief Sets the new network parameters */ virtual void setParams(const Params& p) = 0; - /** @brief Retrieves the current network parameters - */ + /** @brief Retrieves the current network parameters */ virtual Params getParams() const = 0; /** @brief Creates empty model - Use StatModel::train to train the model, StatModel::train\(traindata, params) to create - and train the model, StatModel::load\(filename) to load the pre-trained model. Note that - the train method has optional flags, and the following flags are handled by \`ANN_MLP\`: - - - **UPDATE_WEIGHTS** Algorithm updates the network weights, rather than computes them from - scratch. In the latter case the weights are initialized using the Nguyen-Widrow algorithm. - - **NO_INPUT_SCALE** Algorithm does not normalize the input vectors. If this flag is not set, - the training algorithm normalizes each input feature independently, shifting its mean value to - 0 and making the standard deviation equal to 1. If the network is assumed to be updated - frequently, the new training data could be much different from original one. In this case, you - should take care of proper normalization. - - **NO_OUTPUT_SCALE** Algorithm does not normalize the output vectors. If the flag is not set, - the training algorithm normalizes each output feature independently, by transforming it to the - certain range depending on the used activation function. + Use StatModel::train to train the model, StatModel::train\(traindata, params) to + create and train the model, StatModel::load\(filename) to load the pre-trained model. + Note that the train method has optional flags: ANN_MLP::TrainFlags. */ static Ptr create(const Params& params=Params()); }; -//! @} ml_neural - /****************************************************************************************\ * Logistic Regression * \****************************************************************************************/ -//! @addtogroup ml_lr -//! @{ - /** @brief Implements Logistic Regression classifier. + +@sa @ref ml_intro_lr */ class CV_EXPORTS LogisticRegression : public StatModel { @@ -2023,71 +1451,55 @@ public: class CV_EXPORTS Params { public: - /** @brief The constructors - - @param learning_rate Specifies the learning rate. - @param iters Specifies the number of iterations. - @param method Specifies the kind of training method used. It should be set to either - LogisticRegression::BATCH or LogisticRegression::MINI_BATCH. If using - LogisticRegression::MINI_BATCH, set LogisticRegression::Params.mini_batch_size to a positive - integer. - @param normalization Specifies the kind of regularization to be applied. - LogisticRegression::REG_L1 or LogisticRegression::REG_L2 (L1 norm or L2 norm). To use this, set - LogisticRegression::Params.regularized to a integer greater than zero. - @param reg To enable or disable regularization. Set to positive integer (greater than zero) to - enable and to 0 to disable. 
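    A short usage sketch for the logistic regression parameters described above. All numeric
    values are illustrative, and `samples` / `labels` stand for CV_32F training matrices prepared
    elsewhere.
    @code
        using namespace cv;
        using namespace cv::ml;

        LogisticRegression::Params lrParams(0.05,  // learning rate
                                            500,   // iterations
                                            LogisticRegression::MINI_BATCH,
                                            LogisticRegression::REG_L2,
                                            1,     // regularization enabled
                                            10);   // mini-batch size
        Ptr<LogisticRegression> lr = LogisticRegression::create(lrParams);
        lr->train(TrainData::create(samples, ROW_SAMPLE, labels));
        Mat predictions;
        lr->predict(samples, predictions);  // predicted labels, returned as CV_32S
    @endcode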
- @param batch_size Specifies the number of training samples taken in each step of Mini-Batch - Gradient Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. - It has to take values less than the total number of training samples. - - By initializing this structure, one can set all the parameters required for Logistic Regression - classifier. - */ + /** @brief Constructor */ Params(double learning_rate = 0.001, int iters = 1000, int method = LogisticRegression::BATCH, int normalization = LogisticRegression::REG_L2, int reg = 1, int batch_size = 1); - double alpha; - int num_iters; + double alpha; //!< learning rate. + int num_iters; //!< number of iterations. + /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */ int norm; + /** Enable or disable regularization. Set to positive integer (greater than zero) to enable + and to 0 to disable. */ int regularized; + /** Kind of training method used. See LogisticRegression::Methods. */ int train_method; + /** Specifies the number of training samples taken in each step of Mini-Batch Gradient + Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It + has to take values less than the total number of training samples. */ int mini_batch_size; + /** Termination criteria of the algorithm */ TermCriteria term_crit; }; - enum { REG_L1 = 0, REG_L2 = 1}; - enum { BATCH = 0, MINI_BATCH = 1}; + //! Regularization kinds + enum RegKinds { + REG_L1 = 0, //!< %L1 norm + REG_L2 = 1 //!< %L2 norm. Set Params::regularized \> 0 when using this kind + }; - /** @brief This function writes the trained LogisticRegression clasifier to disk. - */ - virtual void write( FileStorage &fs ) const = 0; - /** @brief This function reads the trained LogisticRegression clasifier from disk. - */ - virtual void read( const FileNode &fn ) = 0; + //! Training methods + enum Methods { + BATCH = 0, + MINI_BATCH = 1 //!< Set Params::mini_batch_size to a positive integer when using this method. + }; - /** @brief Trains the Logistic Regression classifier and returns true if successful. - - @param trainData Instance of ml::TrainData class holding learning data. - @param flags Not used. - */ - virtual bool train( const Ptr& trainData, int flags=0 ) = 0; /** @brief Predicts responses for input samples and returns a float type. @param samples The input data for the prediction algorithm. Matrix [m x n], where each row - contains variables (features) of one object being classified. Should have data type CV_32F. + contains variables (features) of one object being classified. Should have data type CV_32F. @param results Predicted labels as a column matrix of type CV_32S. @param flags Not used. */ virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0; - virtual void clear() = 0; /** @brief This function returns the trained paramters arranged across rows. - For a two class classifcation problem, it returns a row matrix. - It returns learnt paramters of the Logistic Regression as a matrix of type CV_32F. + For a two class classifcation problem, it returns a row matrix. It returns learnt paramters of + the Logistic Regression as a matrix of type CV_32F. */ virtual Mat get_learnt_thetas() const = 0; @@ -2100,21 +1512,24 @@ public: static Ptr create( const Params& params = Params() ); }; -//! 
@} ml_lr - /****************************************************************************************\ * Auxilary functions declarations * \****************************************************************************************/ -/** Generates `sample` from multivariate normal distribution, where `mean` - is an - average row vector, `cov` - symmetric covariation matrix */ +/** @brief Generates _sample_ from multivariate normal distribution + +@param mean an average row vector +@param cov symmetric covariation matrix +@param nsamples returned samples count +@param samples returned samples array +*/ CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples); -/** Generates sample from gaussian mixture distribution */ +/** @brief Generates sample from gaussian mixture distribution */ CV_EXPORTS void randGaussMixture( InputArray means, InputArray covs, InputArray weights, int nsamples, OutputArray samples, OutputArray sampClasses ); -/** creates test set */ +/** @brief Creates test set */ CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses, OutputArray samples, OutputArray responses); From 482feb7266a9aab3aa7988d7a24dd388234cd9cf Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Mon, 26 Jan 2015 10:24:53 +0300 Subject: [PATCH 91/98] FindCUDA fix : do not unset CACHE variables on first call This will allow to override CUDA CMake variables from first CMake call, like cmake -DCUDA_TOOLKIT_ROOT_DIR=... -DCUDA_CUDA_LIBRARY=... --- cmake/FindCUDA.cmake | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/FindCUDA.cmake b/cmake/FindCUDA.cmake index f7502c7f2..ceaed5e3a 100644 --- a/cmake/FindCUDA.cmake +++ b/cmake/FindCUDA.cmake @@ -529,7 +529,7 @@ endmacro() # Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed, # if they have then clear the cache variables, so that will be detected again. 
-if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") +if(DEFINED CUDA_TOOLKIT_ROOT_DIR_INTERNAL AND (NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")) unset(CUDA_TARGET_TRIPLET CACHE) unset(CUDA_TOOLKIT_TARGET_DIR CACHE) unset(CUDA_NVCC_EXECUTABLE CACHE) @@ -537,8 +537,8 @@ if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") cuda_unset_include_and_libraries() endif() -if(NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}" OR - (DEFINED CUDA_TOOLKIT_TARGET_DIR AND NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")) +if(DEFINED CUDA_TARGET_TRIPLET_INTERNAL AND (NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}") OR + (DEFINED CUDA_TOOLKIT_TARGET_DIR AND DEFINED CUDA_TOOLKIT_TARGET_DIR_INTERNAL AND NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")) cuda_unset_include_and_libraries() endif() From d08589d435d7c02eeba7025c63809ecb8e51cb57 Mon Sep 17 00:00:00 2001 From: Andrew Senin Date: Sun, 25 Jan 2015 21:56:33 +0400 Subject: [PATCH 92/98] Fix for Visual Studio multiprocess builds for CMake versions > 2.8 --- cmake/OpenCVCRTLinkage.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OpenCVCRTLinkage.cmake b/cmake/OpenCVCRTLinkage.cmake index 5265e3e8a..7b7fcad7e 100644 --- a/cmake/OpenCVCRTLinkage.cmake +++ b/cmake/OpenCVCRTLinkage.cmake @@ -77,7 +77,7 @@ else() endforeach(flag_var) endif() -if(NOT ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} LESS 2.8 AND NOT ${CMAKE_MINOR_VERSION}.${CMAKE_PATCH_VERSION} LESS 8.6) +if(CMAKE_VERSION VERSION_GREATER "2.8.6") include(ProcessorCount) ProcessorCount(N) if(NOT N EQUAL 0) From 4ea2eceb7043df5f49aa2a1aec1153c0699116da Mon Sep 17 00:00:00 2001 From: tt Date: Wed, 21 Jan 2015 04:49:31 +0100 Subject: [PATCH 93/98] Image Segmentation .cpp tutorial Distance Transform tutorial fixes Distance Transform fixes v.2 Distance Transform fixes v.3 Distance Transform fixes v.4 --- .../distance_transform.markdown | 57 ++++++ .../distance_transformation/images/bin.jpeg | Bin 0 -> 33265 bytes .../images/black_bg.jpeg | Bin 0 -> 49373 bytes .../images/dist_transf.jpeg | Bin 0 -> 20578 bytes .../distance_transformation/images/final.jpeg | Bin 0 -> 41745 bytes .../images/laplace.jpeg | Bin 0 -> 48125 bytes .../images/markers.jpeg | Bin 0 -> 22522 bytes .../distance_transformation/images/peaks.jpeg | Bin 0 -> 23086 bytes .../distance_transformation/images/sharp.jpeg | Bin 0 -> 50913 bytes .../images/source.jpeg | Bin 0 -> 46641 bytes .../imgproc/table_of_content_imgproc.markdown | 8 + .../ImgTrans/imageSegmentation.cpp | 168 ++++++++++++++++++ samples/data/cards.png | Bin 0 -> 77914 bytes 13 files changed, 233 insertions(+) create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/bin.jpeg create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/black_bg.jpeg create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/dist_transf.jpeg create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/final.jpeg create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/laplace.jpeg create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/markers.jpeg create mode 100644 
doc/tutorials/imgproc/imgtrans/distance_transformation/images/peaks.jpeg
 create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/sharp.jpeg
 create mode 100644 doc/tutorials/imgproc/imgtrans/distance_transformation/images/source.jpeg
 create mode 100644 samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
 create mode 100644 samples/data/cards.png

diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown b/doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown
new file mode 100644
index 000000000..1674c404b
--- /dev/null
+++ b/doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown
@@ -0,0 +1,57 @@
+Image Segmentation with Distance Transform and Watershed Algorithm {#tutorial_distance_transform}
+=============
+
+Goal
+----
+
+In this tutorial you will learn how to:
+
+- Use the OpenCV function @ref cv::filter2D in order to perform some laplacian filtering for image sharpening
+- Use the OpenCV function @ref cv::distanceTransform in order to obtain the derived representation of a
+  binary image, where the value of each pixel is replaced by its distance to the nearest background pixel
+- Use the OpenCV function @ref cv::watershed in order to isolate objects in the image from the background
+
+Theory
+------
+
+Code
+----
+
+This tutorial code is shown in the lines below. You can also download it from
+    [here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
+@includelineno samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
+
+Explanation / Result
+--------------------
+
+-# Load the source image and check if it is loaded without any problem, then show it:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp load_image
+    ![](images/source.jpeg)
+
+-# Then if we have an image with a white background, it is good to transform it to black. This will help us
+   to discriminate the foreground objects more easily when we apply the Distance Transform:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp black_bg
+    ![](images/black_bg.jpeg)
+
+-# Afterwards we will sharpen our image in order to accentuate the edges of the foreground objects. We will
+   apply a laplacian filter with a quite strong kernel (an approximation of the second derivative):
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp sharp
+    ![](images/laplace.jpeg)
+    ![](images/sharp.jpeg)
+
+-# Now we transform our new sharpened source image to a grayscale and a binary one, respectively:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp bin
+    ![](images/bin.jpeg)
+
+-# We are now ready to apply the Distance Transform on the binary image. Moreover, we normalize the output
+   image in order to be able to visualize and threshold the result:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp dist
+    ![](images/dist_transf.jpeg)
+
+-# We threshold the *dist* image and then perform a morphology operation (i.e. dilation) in order to
+   extract the peaks from the above image:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp peaks
+    ![](images/peaks.jpeg)
+
+-# From each blob we then create a seed/marker for the watershed algorithm with the help of the
+   @ref cv::findContours function:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp seeds
+    ![](images/markers.jpeg)
+
+-# Finally, we can apply the watershed algorithm and visualize the result:
+    @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp watershed
+    ![](images/final.jpeg)
\ No newline at end of file
diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/bin.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/bin.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..c8515ec1f9f60f0857131dfbc88950a542713cf7
GIT binary patch
literal 33265
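The pipeline described in the tutorial above can also be condensed into a single self-contained
program. The sketch below follows the same steps with illustrative kernel, threshold and
structuring-element choices; it is not an exact copy of imageSegmentation.cpp, and the input file
name is simply taken from the sample data added by this patch.
@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat src = cv::imread("cards.png");
    if (src.empty()) return -1;

    // Sharpen with a strong laplacian-style kernel (an approximation of the second derivative)
    cv::Mat kernel = (cv::Mat_<float>(3, 3) << 1, 1, 1, 1, -8, 1, 1, 1, 1);
    cv::Mat lap, sharp;
    cv::filter2D(src, lap, CV_32F, kernel);
    src.convertTo(sharp, CV_32F);
    sharp -= lap;
    sharp.convertTo(sharp, CV_8UC3);

    // Grayscale and binary versions of the sharpened image
    cv::Mat gray, bin;
    cv::cvtColor(sharp, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, bin, 40, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    // Distance transform, normalized so it can be visualized and thresholded
    cv::Mat dist;
    cv::distanceTransform(bin, dist, cv::DIST_L2, 3);
    cv::normalize(dist, dist, 0, 1.0, cv::NORM_MINMAX);

    // Keep the peaks and dilate them a little before extracting the markers
    cv::threshold(dist, dist, 0.4, 1.0, cv::THRESH_BINARY);
    cv::dilate(dist, dist, cv::Mat::ones(3, 3, CV_8U));
    dist.convertTo(dist, CV_8U);

    // One seed/marker per blob, found with findContours
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(dist, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::Mat markers = cv::Mat::zeros(dist.size(), CV_32S);
    for (size_t i = 0; i < contours.size(); i++)
        cv::drawContours(markers, contours, (int)i, cv::Scalar((int)i + 1), -1);

    // Watershed on the sharpened image; boundaries come back as -1 in the marker image
    cv::watershed(sharp, markers);
    sharp.setTo(cv::Scalar(0, 0, 255), markers == -1);
    cv::imshow("segmentation", sharp);
    cv::waitKey();
    return 0;
}
@endcode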
z@~G6^`0D$>)A@%^+i(BZJN=H@x)(k#w~!)e001NyBr+@%I3&#P`^}dZ77$Qqm`c#7 z7}(6Latey}PH~A_By56?bq$R(=uGnZ34YV$EP45)WWol9{yo2LD_?Fv`N4)xwTi(( zQu5>x`=%GE1l?O<#Vy_i|j|^lsEf{Rld1sU-6u@uWY3{roRL zb0yWk03SDMv$pSEEnsx&Lb&(3L~Pos$((zlvVMILg+$_d(b)qb^HV3sY{82t(h=MS zqhFUkcmEkjC6#M8_I04`E@M!a{;PY0Eo(KgC!ssd1+)*be*k{nSw8{We*^v@A%2+8 zc<6|IiB#ljKhLZp&ObFQ=HoUrRt&4!rxGcBnc-Tre0G&H6;W(YHSFLd`MRS>z_;|} zeT0$L>6>ih!m(}qm~9sRmQN2sZ%vY@Sycv^#(8^*WN*STxGHoAaT?{)=JC(Yy7B`j zN*6`#}CW-1Pgf@~^`nnyL?w{7Z7_ z%9`G=KDBwQ<;_1(p8nSPzUEzZ?N=MHQVCV-qU^-rD>3BP)k+@yRY|!k* z*Pf?Bq0rfaJC$|*GvUV|q#D_?st%v2kflVT*~bCh>+b<3Q;G4ERqYaAo&lspA_K-5 z%hUX_;(fYILgvcWcO*}}fivZ+XTmShNLASOpxpd+pYad(zl%cUSc-k{CwhtUhSl

b(g^;Hof~+dz5B9d*M^+!6|)V{gsPOB5?t zkC&uo+%IAQ`b&9G-FpRig3qZ$szcKgbk~kCvrvLyBlin9zRtt3c;`D}q-OT*Q!#Od zjma4Njqu`IXnO|O$K_m;{44YaU(F)j4TZZu(iwuhs|FL}_IXDG;yHT^PYUH&31uXsp-1-T=+QiO3Yc$a9-*GX)+qPymBUEretXV|->CFB=fd>BoUp#&I zXHV`zAp zyTL3N5yU8d=y1H(&!(I;@!O*!9(i<_G*B%i{tRfs2`WH;Vtm!{b`-UARwA3n|-o{J#*)dw#xBrv791q%AmD8fvOy$Yt;jz&rl+Zmqh^!qOy#?A^(XvJnqcQS*o;$GWNL|xKSVc|hJVJQTLxXdt1&5yS?KBDrieGRJ!k^% zVDlNU%jhnSmDKtJR3H9ufI-(J6bbI`#(GqQ5ce2^K$GK^zH?svh`=MBDwX}8+@P@i zTLdLPBVc^B_eVg#NZM~ozXQ7XTR{Jwi?Wx1j#e(Z??3DcN|jb(cO^9^hM5?!Pqh9} zsA>1+cW0d9!CB>!$V$8;6zYWslTJHo=kT}+CG8X71< zN`eL(DkAiY5F$Yh{V=v%+V&kaVu0W5M&u8cKNx?rYY4%H<}Sw5>qXoM#xn@+|2QaB zm5N8@L*rM6=`{ahrdhfm@DNHhs?(ekKjij-%K~erFc2kBc0uqPzS|1+AzLh8KHSA( zR>^KXq3uL^kW(26#1_VNtj9q1`=ItHBa~8q&!#VC_jDl{m&Lz=SQ2^EKl-n-`ilQc zS*72yIEPKY?NCriF4@PR`{ez{>&8v7NeT`K(-IBO@aD&nxJJ??i@&9z_eb7Ud5d^Au3ZJ#;6bi=|kWjl)tY2-} z><^eqN@rU7ou!`2Klf5rSmr&xtQ1JH6f4#p)3;_9lG-CT~SP`2NFT#GHz4 z!7q5OKdxU+E(EIVi*kzTe<-&be&{AAv`>l3d&vc$4`c6}hoqD6`2DP0brLzT7PEIv z*q^oqm>};x_Gh%g|4le%@@LWgSNJd88In%0e zqwq7~=w~Odr5m^_?n{PB{fxO3|FPlKb_Ti{wxOq@!T*CI60erxqkQJFS0g`3wRLc( zGeoM?&nllR(pa!tt1R3N-g%fIG-<6lON{5BW1BoiA#gPWWvTa0RBoB7*fmtVQ=iYI zJDk9FHvOLjSWKOEhWTH*xeO`*9Q@^x84Ma88U`BjW%B=JG6oU~9RmQ3hDm~oMasmU z1Vc>5%<3H9Fgr*7^C`L>n}S71PJjBXyn=n>0**ssv4F6WfnQwm{2wzk(EQ-f02sf} zqs7qd^=v+VcAwdp0+!=RA~DnDY`)0J1|BcG923_E7QV6bD-*A@ov&lxSq7)mZcHEF z{qw=6%%7Zph(Fl>WPS$7{5KW!xn{y%KcM#rC}st^GOeY+j_WX> z_FKS8jhV{YYE4XKzH`_&V+%--CL&aTtyG14JL+Xom-UDFom@+r(>A;; z$`N%e8%PSorl~fDEcmRvjxK8{puC#4pP&-#@h5c@6$>LAP#-%0#I1t&kb`l$;^Q}X z7^&J}{sco*Zm)Ava>wIc*8jadXVdExMJXYch2Vd zLj&IF!%uk3w&Tzng{Xq5`&-!yTs80v_)6oAf);a0$l+0XZPE`K;xt0QV7*}zpx4M) zdj^NC;d5Q(!T9le=~qMdpO5Ax1lwyLAh;PEy;gwvU?L{OA!65}Qu zb|RrsEkb!0%A1Eqje<5mdt;}2d9nET)rXM9l}E+d`MkA5SSFt8wP1bufsnpFodN7* zp^t00^pCsqee%g4=W3rv?N9K#=A(YJ5Kfn@znlC?dd)`uBrDd{ev+hJzsMQd-z3c+ zWW{ghKge0y7joqMg!wo6i+qv&lcZY5{qPH&j`&eSIQuW;i|lXom!UsZy=eZ)JoC?n z{&a)1YyPK`f0FoKzsQ#me);@6qSro!l1ER^05(H@`oTuYvuhuvotHhvFPza!>%%U~ zBOcp)Y>OAbEK3&B86Z_rRpu+^^g2ACB^Qyd@fwA*EDft4Lgs(`N|0j8o zx?*@O40xDe<>qA7+60Ow25?PmLtb(TDN@71XF%W6zRh8;GPQPgo`kyQA+Wqq^;ZIKQkuquJ|yvSij zhhOO3}D(85q(R_w8O$n z7HOi(C{pRD%E|^yKBq8mOX7WVRHx>>eKZYX(e`LMZ2DG-;*kSK3+xc9bU~@ww>m~> zZg8DKb;1BAr(AdanvsXBaSdTWW!}7Jr5zlx2$5Q<%0#!iG_I%!pEydMt7u`UYIZTp zT*pGuP8`I*K{;c&x$cvwfb9^P55Kk_ZLO}^VNhZxEwqzLrU)xIzN+>>3&9Wz#zOe% z8F1=t_Y7!#ck}8Q;OH3g*fUuB4EUar$$yH>r1s7K;oXyRi?RxXNml&uCAO%GrP^Cj zsCv->@@D%!x%YdO&2IrbvI&`W8~jYf zdBus!@pXBfI(OBAA!mXQ>^%^`O zVdCl&RwT|f*lz=IP^!k&M4$IopQRL@YA}r80|UuM^wL+$!4|RAPlkl#pAe<4)u@2m zLJ`KB1b~GKSBW*gU4mRgN{A zXG-PMLB=EEimOrxopOzoNYieVKg6g@^oZc=An1)n_K2YN9i--{4(`3oss8g7>-F=B zjo>-Y6bJIbe-vZynEE_>@s`$gNq!WQG=0i)W*Gr+zyTgeP`%x)$elP*{F&OVyGaX$ zdfd8ywu{iz!;!&UD6wU+A<9!8NV$s5QOU=c4^JJ!t$QcfpG^fpSEiXzAxbisuc+{( zud2JsIX)u1(UwPkbP0Y2W-Jlt6z@z|#Hv#|^>yS19Je5p#K;2fO1c2MA-awi5zu8w z;E6EXB+YAZDrpdo2XEP8h$q{yw>iw?5J>e#v+5>%YQNg*eR+3}m5TCG=E?gH9GnF`O`!P3%@kh){PdxqJoD24-c=QJI$O0@!7 zsv$G5aCzm`M;tVh<)H{O<2pjN#ddZadP!+GzFKkMPIGIO0dj;?N>*r!&A?_tE$oAW zh7LH1uBWX|6fkQTMXDEN;f__Rk5ChcDHq5;AEopZ6SU)X#sS*q7@Tytaa3an6NVWM zWba4dL{5t8L-rB)t6jIxRr!a~H=`A=?n=0@ynA~l1`ohihigvKcbmOD<@{YuPArKH4Nk0 znC#PfPNwB{M%z-QSy)gz+H}9Mi;I$&6UD}&raP%%J24VE`3@s7@`+TfuEI3M^V|i> zC=^fSQDr$%YKU?PD%hHhpo>X8@rX86~q`^)kzU;t7Q zWn+aRiE!r!^U@L*WFeNf2D_ijBpWA$F}M$KG(>7-UJO=cl#mG9fjyA_f&zlx5yL6)yz zpDC|k<5(o+VvBt2ZVG}>8anrnxdry!Q$|^th*Rr3&suz(rnA^?x2o?gPe7rH8-yF3 z$ZAEaVWha(V+;~T>o_jbZ8OovEzbPRMmsDHX>+&-Av~gi2%X}g}sYENT zaL`t5z$%UayL)QL#Ot2P-X?6#MM|z8>p91e^;yoUtPTUCgEl&WySdGgL#1^{*ovvM 
zv%e6S%a*D#sa7kDF28`Dob!W(PqD`l%SY0fVZa!b1dl+pY_{wwU7nXD|7{1X5tauS zM0-62d5UcE+nAvSoR|%o^Y~U({HmLbbwDrLiX>a`kW`;i8)+8EMXKpPz3$rf( zHlBqv+n@BMIV*0V5wh^1^ znLx=3YY9&%XIGb6!?isnIw$jUfy0n7PC;FcI@jr!Z|A4$4AOJ*#dKMU==)di#7iOy z#;|FNLUaikL4M$PGwK>8^H|J4q-KpOxn}fjgPWtFo5P*d45=Cqd(S#Qt|ktZ=>0ye z{b4ENabk_2$Qklp245D69i|z3t;z8noBpHQlo$|oX{L?TLGw|QdZf)(zwu&!wQYI} zUhaF#jNxw(Ty8+?!u7#jTFO0c=P`{4tV&2G21RHi2}DXtN~%c49o@E2QdI20qEL)1g5E_j>eIJEti zwDqhfJLcPIu`O;;*7)?o;8i0B%FUwKBfjb>!=L2JDxw;%u=25rt*lZk8rH4N4v)}_ z;P-};@xnLz!l@#oMZ)$zYd2U>j=EHudqnvmTRj8Jdhq;6uYrRhA%h>i?8kH`rTu^3 z$a7X434bqgQysq#^t-r$Uh7pd(5Tu|gmVDu`Kz*3OjK7LimpY1>w!!jZW=f%qzAX{ z%Q2cPRAx1>DwX&tw5_cM8USb-Bc2AN?`p9+lQ6T& zW3X)DmjvEA)cMEIA8#1Ul+6?e*REYob>}9t3P}-;$=EpLWAZnl**V$C1^xwXz^(1SI&mnDO3E zRTz-Oq@<*8Ny%PA>KPUa^tpGdjf#pL-2oK{y0i=AN}%nc(^ zVHHMP**Dl=&9>Q8R!lgRSFvl_`}BtU6!El+M;=JY=o$7Evkpd{rB;nM(zG=|uw95R zv00G?RVl8S1!qWvh+8wp5J`#WIj!c!7n!e6Bxt|KVHY7aAWu_~qGKR6;W~Zu^Uk-Q z04hvj?+BXSHWtreVHw<_CyOnVHL*+&@ndEsdA<{hj481>RAiDYJB8&OoskiKJM>38 zPV(rflj3Cpn^6+&=CP_VgKj>UqiFpncdfr335eEdN27PUjRvVw#Ja~V+T8PjMJ7gE zHoNfN3WeVUn+i3L*jFuEa1Ow9`(M|4W2D+2m+9Z#0y4~2UNc}ei^jehjz@BLM#Rqj zs!bK7$7x9TZcG7pG04n0Nicf z!eL9VOwzhKBa78HaKdHnH#0@aj8R-i!EaY%Iy5#JL^PCF=vK&JjUc#OE3be+zpFP- zSKNihD0*GKTej=xF@i$Ns>x2Lvn{jgKjB{Z2G(d$_k9Em1}|@DqcSWB(hm} zsw|<@+~9bc6xKbmMj@ITR+?PB0-R8tV;=pq$2ASi$ zRZE8VGxe)E`^IvPf(T7Rdsy`=^9QT*tI@vp$+)JZAXy@$afAn7sS}~G@N_a@`JTR(zn&>o#o`A zKZ{w`sS!|R-zjxO6?xr&j^zO*d4a^+izEs~fV^5||)E>K}e`GYj- zwR*#&#R$k^4@^&Nf)}eoX^()9oP>iWTwUWva5B6{K)Z}ZbcreNJ^gfr&ItSIj?uS8 z_f_E-E(7bL{QD*?hdWKfnZAVvgQUi~>(Gi_%nYgeWXd@;2_9Sl(yd@*)wIRPkxTg{ zd-kXW|9VbD_*eMHlj#)<>bN0b|H(#h_nHC3TDxWlf^@}VN zO=dCnhGWGmh4kf-*H%b4=U2X5I~^=OP>l1si>rqo?c1uVryV>w4o(auy7azWyd}gjd(85*qufc;(>*}mS#tkR^9^luLD^fa5S8=E8qDeZcLtUVm z_xU=(UB4Gu{dQ3-CC%F5(1Tmz06o(y-D%gN5t%{-P3vG;>60aUuaQJUt9aihdrr$M zgwn6`D=QX3afD2HxuhTC(!jyd;bK$M?(P_?IYaIb?EZY=!}@6E<5Ux&ND`|nW3WFD ztxIxvBoh^jH()5{6nUu4+rgf@EvZ_$ED=X9Op$WOyyrs5+$^HrVDp&aLV?X-wCF=( zT~E=vY&D+z-mgJDcsx2(ax9uCk$TtGn08xfDZ*t7eAYUpem$#6RSlfxR@PpnTI42A z6v4WJl6j}+$E*6$tZ~j^qbQN$_h!UQDGru(?z2Ubxb^KGY_nRGY8kkGI#xiLn4BY5f=Njl^LS4>92ByJ_h!a1hnEv%u~;+V*6JN1!J zS3qp3)~JtM+vVS+lZ=%Q5jsTNs5qijj<)nNlfb?bIu_HRB!l}VTvoGb4o1yiuz&m< zSqFb=v1@PEq;-pCEbT6vhg{UC_pvb(E=DaU-4K#5KQsrK4V#x+TI#G1uVTywqbOsijDTC1Hm}iLivRE~-MTawB03wI~M?o0YCc7f1*>WOt%mJQ$I*|6nMd(9grJ11SKXET(`h}|QX znoMT(NSqVM$q(c#>mPaFmlsX)`L=CgA9kr&EK60Y)CNPuUcDm;bI@p`XacFjRrxAF zLqW1I@w!%>xw>JAvuJ$cV8wE-hDJvMXe&_jbv*`tTUQpuGc{CCRtGezrZ`>`e_HEP z-|KACH&+{GyyD61HCwKV6$|5aHclq96R$k^cqf=fCx7NWA|C6_joz6RtfR5gbn;qH zwBES>l`5mAbFf-wmQkZe|H|jqjw$_~Sz}LcnaUVh)=PR>-W@~{XfO%G$qBXU48y!c z>thN%{MZ90Rw*i{%M*p4ub1O&jUGh2k^VrW{>ty0+uHKjFVHMGf-N*sOb zlWB<}lSVHre3$`RiD;wM;s%WUiPGwbSek6cLMrg`$pyMs=P0M zuN<0Lz~;HvH_tFbz$IzhrfVHxU)qYQ_7NZg>)R&~8tH_F8v~0L8X6h{8v}bWGvrCg zk#Z4Lk*1pc@?C~r%^)RBF_6+R%uo%r%$lmnddr;3xJNk$R8(FuJTRh?Yg=yn z(uCoX0Goafg*eVVo&&1VlF3Z{14X>UDJ5<7fF>zgZ0FLDaU=frRyml{+*jaIMVLr} zDUsxU&?|p}RrY2}mz%0_jf-kk6PBcBK%r}9LnRr5Zc)CO8P{1b(&15aqhQ2q`qMX! zFjkbvOkdz&blkN)&xbMRz0gOObvnqyrIOB>CPfVAE)vQB?uGw7PR?tZ1e|VMB>1RFK=Ve5a)T z5KDA;=2d9HZL-I|x zFQd|3*H4A_v`qM+OnF~TI7jYkc}4ndS3o&Z?HmN$)W--~JLPvkj-ob2nWbjeF)dK? 
zDqN)s?K~q{=;xF`zVTlHJWstrW|*@5>Sbb6 z|DZJz+#g9RbF(sR1*S(}{+7uf9q&2}@j5K*8Q^rjJ~y<{*KSFgVd85D)YU2LO2W1r zHmpa`wNg1b8JDl2!iLrDK&XLh#oe$UTO7hN6~$R98ATMTGRq2Z6{iQr>!-UIkV=bd zGqKDDuau2(?hNHwE7rXyOG0(NC{0mGBT6b0y zUp1=rA!IGm-tg$+@^l>y`S|oUPiUUquzV&in7_Vse|b&UQakCm$XPRG{N=kx(t!FO zh83G}c|cRUaFv@{wyL1O3UdzjbpKPq4iCc_vf+`;YeWy zL%!|pOE*EE0vBr=Ea`g?EPK=0FL^$BUd{JSTNhBz&sI7?2@dt3`n^l0oO}&I?$Z!C z;J!t$G-_n6W3n}h%pDvTiZ(eB3+Frk-QClZXyN`DuwV4&o0sY90=^S(Eit?bvc7rK z%*&wJsYq)f4Ax=y+^X#G9*o86-H9|m2Hc%1MRm$qUvO$H3J2pn%P`Wf5MO-}$9_!5 zs1giJXujf2VO%egZf&ivJW8;slX<6^~L_R8tyEe-g(iO3bP!q0AObOC*7ve;r(a;SBzSwBJwA=%{iM0HPW z)T_PP>2>-qCS-9U?j%1!!5FrMP|I|P?USk-DOv)R+E6<_6i;>OSbkg_LJIEX7BgHo z={TmN+&(&yb_%TJ$fGjW8;Z_vZi1%-03qOLsKZu~vo-O6n&EY7RcsEF=>^uMjAGRL z+XaZqF5~v4wn}jk?B_+iO}@_f*HsSvS}80X=`}O^2n6#c^R&qr;a)Hcb*xyXc+$iV zg3&b1W{gA%ecf5r$yRUhu;~eY0AWT;6kLiqnPw?0`l7l471|x-Sq+TDGrv=7H z5VJf3^aCGu_v#;QLT~}2vLL%ZMK0C3RBpZ3-3;ts)wHe!OUH89$zquD}@`_ z*g}`36sg|2QN-P1Evt#0HWvYG3xda6Rid{eA%gkQHqzOtZoj`4ne|=i=mS||MP%=8 z9Y!X6>e*Ckvf%}7Xoqd)HF2FV6P>lp)Zu z;6ga`Kr>U=Y&B}BHkNZkg&~eb^lh*xhxWa@SlVS)UCIKJq0*RweGIJhXKQb9Raas3 zYkUo;LyB4>2~QrwO5ojKr2Ecb@b<+GSHIEga!%FcAnkz)oxH~Vdee#4e!v89REnM? zbeK3;(>ZF4>o3+S-b*mY^$uSp3b+F04V)DY1MRp(x2XTssMq zA)5#z)L4aiwmI5-rZm@zLZ7&S(-0P$MiZWVxyY0V7B52Fr0b?SSrwaCT^^TOFS;Lo zUygXia&9j6wXt<;h2eSU2OYhsnHhs>DgKJm+vaZ6XwekD?sX#vk;rShWAWy z#1jX3zUOLNX*0n5a;&Nw@rt&(xn0okfFKmx*Pr<-84DKYGvM`EKzP%cMYwSIxIwux zL6+OFCN+V#B&QUab-B&;v6lztuo;lJf5JtZmageo8ER3SPwgp|bc{IuW2f2!lXpb# zqO=+9Xmw}4B7k;HMescu=sJ$0eBLXOW_-usfFugw+)RO3KV4n_hC`~NEZT?xLFi++ zC)vmmJBl@|eB0OoOP(bj8AHHaNp9aAqNHr8fr-JpY{NSN6MnXFY`N01(PW!XwA=G+ z9S>K~$(y9#nTBc6n3X}Q71S=rD(6O`yM_n-Kte4tl2EK5z3SGmr4~8poFVeq%@9#- zTK96zLR-1fz?dCnbkQO#dd4lz}Dx0ZL|qYe|6BthM2VK~#ZQj4fU3N01= z*s%^wNL9wx)mvBi4afz(Zv4CfCGlg;MHy`MteKs9%g6rs*X^B^6=n5)lng?mpt@43 zX8GV@V!cD-@@kU-M$*!Xl=1}P0Fxd`7fiLyplGoGYlA9IX0}tFcnzx^^%Sokw_GOl zZ=Fu-yUDG3xM>|amkm#l5c$}-H8hrWGV$%yii)#38ccdW+NtHlE0If-s!*CmXG8XC zr8&={E3%XpvjNBM&ICbj_{64kdUBJdUjtr|lC)1%B*@iu=r!47O0y>>S*$Ab*&pxQ z297RK=(wNHk&$baK{GtAYtueF?zf5&#%nY;cZD&{x(prN(_(h@e4^3BdW6=5PvvMSt5YlSK##S3)aB9;Sl6BheY zbvgo%EObw!+B7tlyrd$7pdLjyAkB>`jTMCW40tT{SNR>nZsxU$Nau(jAL55Khi;aw zOE?AHqoh)(yLQt_3=;1eo4wenqT=W2mTKzhkyxKVdYiuxkeCuXg-V=dVPSPtG$>Wi)iMvomH$pc(GsMFHO$8 zAZS$+_nuQ(U8A%G)IqPY4Wnv>en#4~>K#YDmuUlsR^AHFwPUuCawt8R=n z&OYh-9%n+Zs*Vlh(+$4iq`#)6SPz)<9OZ* z6Wb@EV`7?k`uRnCl5!Idxu`A?D{w2J2I)}bq+8OJ5Ej|G{IUC<^>3g^M|LX=4v8dJ zj3*Yl8KT}+SHwuEeY9Vz71k+TYBM_eWIF7^W|?q&*?#_^1evsn%Y{8d-pb(uIU-B{ zO{wsFGHrQ(p&uG$hdb(d^q2bGi4R5)xd0&*&??&Y7ZLwcW{>&VxMjuZ8{sL;@>AzVQ$zKh`%0FRJKp7h-%@m5cP{9 z?`KW!;(p|4CeFdSK5=23rTT<91znaaZIJrb(qMu$eF+Mk5}JinNdB!b3kC*(Tl^>B zQ9aU~_>xTvlQ!_!z-WZbEwv}gIYPawb@}xWbAiEZ#nisB)>ittp08B&NATRFb(f?(<*% zBaF4_u1(NAbeH7e0jQ@*zT8(0K0ZjbqHygDu70Z#3XcY%JMWmWzr$$n7?fIrT`bv+1?Un{|9njjGDsWs#d@cqo$!^+q5hhoEo* zFKZ4##mJ9_(N?a`BrzgeCi$gnw^qsyiwP!YU&6A?K<>5=tX9uq6VVA)?b$T6kDEBlgA^I2aC(OV@%Boi&oxwJSB# z3kcc?>JlsPJl%$7jv-hiwDh z*g7y}t`JSGNf|7I4yCJiP-PBk^olC>rMS(IvACG7VTePCu6_6JK#{&d$^Wmsua2tY zSsENJ?iY80JHaIo;o|P@?rs5sTX2^kNpSbz?oM!myGw#w2-%zD{qnut^PRJ2&w0E5 z?97~=p6cnUn(ChFsh;lY@UtVFqu4vAUwt8DRMg#6)YlSs5>L=Q&-J@f**B}?2$u|7 z#@?0ew&y7|<7jw>kZ{)V*PtWbAj*-I3%Ik+$(03EyV}$pAuby$kFXLCQHAm+1=o!( zVNX<1e?T)N}V>(5n0l+=6wtxrUZDoNzu| z8VQzgRYes~MKzUsndTeMEh^bLi3~H9=y`97vm$Z!gP?wR2Q{1`)tM{tk(mH%q$xwQ zXq8O+mXRucn;P4cch#IH@x1ICiA}OLi|`Q$<)-s$($`NIoJ-cZJYIT{>u#s(aM$oh z>81AFOr>S;vGL{~syjBYX}ZGm;HZlU7o^;uml4>xnYz{B)eVTd-y9~` zQRt9~trrBY!U;`Mq>+)MK}cicq^=UrO8^{mZQ!~ z6@C~(=EL=z{uwRv@dcCp(-9nE*VWRwDZ%{Ch_35|5yb+G;woIUMdb;`UzPM<#=*a< zb~b?>QQNTG&+*M 
zcRSZbtq+^IBzf#1nfFUsOMUiW!$<~>{_G8mDz?5Ly>R{^9_t(eRf+D^&UfvDZLr*7ZL^7zq$Jf=qE!g2?+(OxuzI## zBU`j;8sYug3(6Cervq}A^XYrqoiy>RuqT{SGFXNth5KLPY7ichL(mS%Ih*LvU^$7H zzsMr)hA-B9iHp4x&Fu9{B{!-aRC{ToT@rpY`Gs1YY`(zj9-%=jwYRKU{+QNZI~B02 zHu6uRG~as`0@N{@(Dk#AbOL~BSYbDnz&OEAASdP9!gC{}6;%Gr5Jm9~aAEm0#6(lU zpITYX^>{cqT@r|CHz3-WP8@-6^-l!VKY2oJLg_e1KNBJ(##15#YsyJr+(zG^sp^v^ zll5eTyNbu!7-RU!nl?^maI(bLyo74ce0nlu$MYE{?UlpR?60)(XO16r_Bzr(!tazr zN40VcaQj$}w;1$Q$LvJn3MZGgJGs3{3Yl=f2G!?U&%dqHks*xuYV`&CuuGLaFHeFw zY-iaY=I94;i_`Ym$M^cH8{7D9_g*3s6nqRRt?gb{z z+*)`qXJ2e+wvu^Cn0)upcr)VN6~j98)s0NX2F}m5B;loUU-;v>ehDMt5r^$wmh=sq z+1Uk4yoFyR&oOkGq-=g{D6&Fmuzm9j)5a&{unl{W6uU-1A5M8uvUJcEo=Yt$|hP!;In!UeSpp>I1MnCA3Ry5g!bn_netmEe&nD1lS zG9OZn8A3q(k8YhW3OH>l*Ya4|c{(9ai$|YdS>uFs-x#^j*T;G;UC{MNTR=CkL6{t)Zu_m#kah$7z%d?%A$RX50$!RwnyC_j)G;%zmzd3(p{PY z`JYo?3G1Np5oDy}PqWCm&vK$hTA8|h&DRQ6#^oThoD(9F8de|VYL%W5qhhN+d0~iP zBvgeN``;AMfpA76ZJ}ZHxKPe z>b1cgn$O5KEaNk?QGAi7pga?=0&Awt`8@x_&`e9jU`0&^ml&lpfm$R7#^Fn)sy9x} z_RCd0``vze+7D^6h}^I1KgQPD?;OrdEa9^-&C4eH$kI-0mo8m?e&U{zzN?~ZD>#wC z^UO$UN1GR>Bqe6{_`?!+tlGS>n3{~xJ!f5YNlHV05Wfn(7U{0|s{FA(-4naKAvHI* zg;L#DpGxJx$~=b}@ah%hfw zO8cD))~49jtA<#!CYfbiKh*y}&#y2;G*FTlD2cC+Ob!d)JJBhq@GFZ-XkA8ITomSc zT!tNR?VN6X8K{gkAa8%+?R9%~cJzOI`9Key55)+E&VYgeA;A9jbo$HHDGY!`3Fe@H zC>h0{W>?;C3T^yrsuRpB$^hr{ssGKk!h;$;!~`qYWddN_Uc|aTd^ov#v2^~X zVf)5m=<8bIN(c9!6Th?R5cG<^#aH3?Ori(F$-ZRF>QY|YyaZbPI}QCR=Mdl&7QvWJAJZuIiaZI!%=fW-u#FmnCkvW zUS2Z$s6W(Pet1zj{+%ViO?%!ev(lNhZp^>PJ1Q)x^23j2yEmWTtbW`-!npgkRPd#B zukGM5ng;Wqgb(b2zG!V(DiZ;;H(SWe0O<|3#qIJ5p(=8%qI3vck^y)VoaV+%8xipu z$0IA2i3n!-AY4tmIM8L|QUA${@2;e-!PoVsD0FSq|5reTXK|u>%HmfeHmuIfJ#}T#6aB>0ItJbnf-z5!G#T&|)7F28lS}5XCy^#iSn`wPa zo4(LHaRxEylL~>X_~*dF;zQgY%1YpA7`%}kV>dz>QJRHmXpOhyaM3Yuw9JOKG@hN% z+#@j|B4{h?Ml)2SnuGM7yQqgUc`|a5;(mbT^REj62Qox`mYN3%J>TlEX2PA)m5T-@ zdRchJxg^;!PRT5Z|6se*7zLR}4P-~I>0+2$dhuxKK3@0$w+6RX$Z*UfLeQ#h zW=REHFT52BOSxw7d+JFpR~?c1I$E7l)EIW3dicEa%+F=-DGyQRVuC(HEA+YRGN0G5oI=OmNJ?S z=_MX{KbD}HL|p}T_a_G-P0~eBK!5CBhkb_r#fO^$i?Y|zTo8`zIOCF{qGYanEem2nH!< zQaWpGKP$n9kOZ$lz92(A3G=r|$A5ocl48{m8ANGX9L=2F=?@r2k-QX)>RgddcNQ(h za_U7%41*DUrA)yZb&8it!w--9jgX^+h{HzbJI9d>QVcSJBSAkEhP7AVa^ZOWG?F5=px(}tiPl7Rjk|_w!D&%t=A)k;oUSQT&b=q{In6Fe5 z=(Hr@rg)=68Csa)Z606Cn`TKb+q0~&h#5G(now3<_E6_t+nz;hfe~8A&P~*xKma{O z<667bIGZb6J)}U`hXj^#!gPBI!yCUm0sn-l7jzHj;qVMywk0l0uJf2l;d?F-Qu*W-98X~F&yRQ_Q=$ri!X0zzi4no`NRD38B#*=|gaG5_*>isC+uMNBr*iLHL;L8ETbBTHt8rwJ+>b5k{4QIr^^^`Nyj@W#+3p9kGEa9?NOc0R%7)Yix6L&((~a}Q@oP=cQT^H>B+BXX1HS7T>MmI{lO5Nz zsDAZjSD#<0B$xeoFy#ld`POx^$|}LSF!x2D7JJRCc%6v4E77cjhQ}$ zP7a~(qFDpurZ-Q`>^6od;_(D@lVf_(=%NBwyUP5NguIeopQfArmG=6mO!PZuH5_}Q z&ac`8d6+Y?##(@$9k87x=`I8=h1-+R&o5@zH$>Jy{} z6I&&5jMMDY3pC7~gi`xDJs}cCaI!QjWW%o}om7!~1USi#hLDcIYm{!T71ST_xRMKz zSjXUvw`%8?C3~HhycccC81@&=-{rrV+E3Q?u*?g+!O#*JB`tay$G6|S$p;<7H}1Q_ z+qU=On*t1nGSzlT$UKI!;cC1#+PXbQ$|UPsjNvAe)(bZoXV~ijmDrUSNr&!ULYf52 zw0>n)=N%#@FQh#&p@-@0QjT7EpTK7(1`M3~Rwb%Ty1Dm4qZ|nQ*h79@>MEvzHJ!&Q z^y`_s(fKPo?4FeRTQp3=`EhmcegZdv<9Md?9fcZxQe;ca4Os1&?QgCR>b^bu=HFvTeS^5f0JqKqeMaddiFB;o0HI9}0#BfQP zp>K1vkG<09`Du7HioyHf;+$ogvMJzhl|kSk6eKBM^#rcFaAiiC7SI|V#jIV%#3Nm$ z5vOgW>1KJwB)Y7Sy?d|yuTpP3*x1EJ!Ogj%FlF42zF z{2ae2rhD}RM>m1sVu8|nwh01ki1~LH`F%y#4472d-@88=E zjiG56jpn;79PwJ-+U{ULk^l^DWRXzS%7oxB^A#-eQ!McYB@NZ76Xo@<+DMQvBWk>` zQUoOBz-iYUp;1fb0r@Nu<6_M*(oC2^I6 zZ;I_0cWzqQZR$i@Wb$nZpu?2DXA&5Iy&i z`ZF8CCt(z#9a|UFz-Y-dOj@`_9P4d0YunyLbJRf)Qr^s>i5V;lhS3R4+*$BW=LCXU zaHmeFxlQ-{`ZF4;r+MEmoslvzs_7GGji+FHo#1w6#OB^}O|}F}$%2Zy@+ZaLaL6%Y z=ctx6sX14wt#UP8XPvjGakJzbRpKiJ5tinX7<<{^D&UawBz3JViPy&Pub5qbmsU=z z*${3Wce`q)Mscn 
z;ixnmGP0II(peC)F*o~J`I`~JHI{GQpW4np|A(Wf`FD+_9am4FR_X7BH~?6v;R;5q zU$XmqtYcErBoxb7OX(;}(IjY~B@cNQC>fKYvJ}X$MszQ5Yf;ulDEUs90rQkuHGkZC z{SIL?S+f4^QW}*3K>Br!Ed&kl19t&Hh$2o1@Q0ep@Dz;z8~~y5N1_2C0WkbeJ7EBS zWE!ZKcj%#v8U_Yv00`g@Q1}CQfunHH`~WfyHZ+7ctN|#%5I7J3Kv6*{`a|HbAwV1e z2hAvr$`69#m!trYC;)&Ua1;O|K}~c38_EW1R&@XrNCT3hf#!voXYoe{0O;Z(5NHTE zqf!9K6rf+rjL`T4q=Eg=2n9BjBJ>_aibj6`j7$?KNfQWlDL=>}1VaH`m;?mSKpDtE z1p~-XKA{Jc$tQum004oDLxKYd0RVzw74T~VN&VstW}gKL`@4BTDgM_+92icM0G0Kh zL)=h>_uq=62>7>)`Z53i$_9=7a*oe{1wh@hKpnFDU2o-AM+Aq`sgYCsBp9Ly?TR=H zp!m}d@ymNDSTukW!KzO@o4;s=9*;P9GgmQF)fy)vTG&acNS=uY`Ts@|2rcp=*hrtI ziY|;Rf5;H&-8mKPW#0;Z(kKjuS&k4v5sg|?HD{jeIwY2%vRL8oF55pn-=6<6A4X(x z6#h(yEn-1&5>w*NsvzRFJhjiZo!RV)@rqHU@zOx?qjD6}ebS#D6mF66)X}?>D*=NM z>g{zMY>Eui#Sxqje(f*NT0;48iyWa_=Gx6 zAd%{vsMH)!_H7Sou1iJ1?yeLTbFeXOu)ndW;x3o7b@d0D)PWQ&7W*y}cM=x+9<%6q zXk2%Mq-w#$9rbtkFR*Rq-X=dOCO5pi3OMMzZ+R1zixQ)Mw}6hWcm#a<^e0dya8VXV zgRM`KX*_{5gIGuM@44ANX&zU%pr`N|)$||Z{|{LK~Xh@#BJ-c5e&QAe;ci} zuHATWIR$GM*pOd*pdL#7N)vj~1;mfa_A3n}{mImj(5;L$S z*!XBq2H&`MWki;|TY(+*1J9>uU)8N}0&m;9Fw=Ko437fNS6=dZkoxzGkaps@WTO31Hdb#1)A z56W#$6w5Rs{Lh0Tk-VB|13_t)B{Ba?f{NuoI_ma8CLB8iJKInRfwtM}`@}oJ{=H~J zQ2{2B{&FA?00(;vf^jD77Rf?k$cgxGj;y{g9!{h%zd7-$u0hf5fSz>Ygseb1EE*ha zN+ifeFDh}1^Fx4;a2(%xzYm(yZj|TkZN)jw98w(dO!Bm3*i^iMnOpu_!=7{D)rlX?LikymnMO$a%e*%A? zpXy>qEpwT+hW|Z2$k;d44+vY0yOl*^xW{=X4JkY{S#I4N5CFMBGV+7g5^mg z5<$i$g^i2H3wI(zrtsoGmw#@Efi_5-)CCADQ%JJY!I8p*ySq6Kz%_x#;A9gy@JC_X zVqiM~4)`bl>;+M=fAD&A&~L5rQ@7=gIJ2G4sBfrL)UW?`D;gtHh(#r21ee$a3u%%| zq%aAH{BMWE(Yr(nxXjlq4$59cZpGB-u05UhY9n}x-I@epbX zFzOc#$G+$6w#}Hyfca|9Y)!R zLJIlDi&OyDJnRep7b?wxL!H&4iT2P!)0j{+3(pWZP4K^b9}_7lMQol~RY*^2FN;`oF4Eude>Hlu$dt6_8I^*fq} z`{Bs!BEHd25Y@ea3tL)gxRRL}3;!h8!4>~sz;DfQZ{Xf`ZGrxUCWqdT2s5Z3@3$aW zS;|i&0;TVVV)T6WfHyIcG9%%?gK>UV+`P(dC;oo;1%dWY*#68Dj zjsN;(@lQSiBL(QBA*{}}pFs8>#Nq63DXuX@)=w+b{%|tif|4S#e)jec`_=Ba4^XBs zNq)&1)mXC18Pjn4B_^TAZ+wa@tM1=n;!jm{J03zBo%~rKWT^Y;S?>}#$vzvde?yyG zuwtYcQf&;5kL(@OU&A{&`%eW}3>0ZW)NvH#aWO-6qut#mF`q$HB5>hIbg)Q0D{p@S z9Q<4O7EMxE{@*kf?dg5+_)LRPmeUPKM%$JaVKk9uG zdxpoP+Ip}Qk-Sa*6UfLM8TY(`rWY%>3io_a=es#KtC9;uZDq7$H?JWU(ZXFR*gMm8 zoR0aN^__~lo(8vC7?IjUO{*$lR1UFLkDkP|30{ALy0NG!47m?J-7@sK^t#hYxN=7H zSA*1nSTfr9wQoJ2XMX}FHFUC>>^6IzYke6#R;6>T%@^fVT_HRwRA-agbAB~H0ZRLh z$C9J&Pd473s=UkRHyb$ZB`xlsegX>hEel22BUzCx30G4AbH?aNHimm6ek=R2wRjT7 z+-M_$co8uHu?j!bK`FFS2k`{EG9#BOMtb{!^wUyL%{>c(*B65zJwzw*4;#bU*)};} zX!zRB6lw{ZxXi3FHVU6xIS>VWVherZII`Q28SYPRa>s20?=uF*b;xVABrch3C!N@* z^d$Rw_Y)Y<%HRF+BSPlul#HI8mHIGyLD)iBq3rOUh-m)mA6wn@ z?k(AIP zU@RwcjYGoV*bz*CcR-geKne;YkMn>PQFwp@6&6!K|qB>(8H1-U!gu zbz>xK92^@3q+nL9wUIn_)<#x5XQN)1Rr9HsStCbWET}mDmNEGdzNn!={dx0;zIflx zI(FSRzMZ$KtEbGBI5LMCEizk?OXK{Ng4!S@=PvUBfr$s0oF2k#6b@Y$cuJW;qb-+u5dG;xT@y7$ZFbKa@H$>CZ|nw_ zGZ19ih|pPg33*Y4O9i`;W8QuAIK(V0TjGK_Xd~1p0*6#ycv*>Hk>6l`;eA8l*4Zh+ z+KiwhDwhJ;t1wRp5SO=-aWsywEvJ-gyApE6L^ZMEsFTzx*ZdTn(+ z^ejuo$_=@grf71H6j9RNpv`B*+jsNkHyDzQC&Z0gIH+j)zCuDhAL#Wm-?O#t4??g$ z+q!Pi$SA8+%p7C@tIY1UI-WpfB^Nr?a$xSRKt+W>#hhtV#L}_wP}i9MDExBm+>@}( zy4`OMYB&czT`JXk9pY(Fqns=}c+;<3ec2y4EFMibqp?6dX*jILg*`1CFVrWh>KK+) zn}yGYw&)BW0@A+rCDEI_PfC{;#Ket%YyRn5L*1-V*Cu=L6SVuNiVpEoLkZy{OS?Vw z7G?J*z5^!*Fmtd`4y3RXs_LU-ua2;q$?!2PmVN?y?E*muRg1M-mmdO{XDadZQOEod z=VU7ToQ*u?F!IObc6Et78b@)~5c-lmi*9(=;NVq}-7dPW96#EaIzR6yayl znwuU=e|QY6O!R_gtl$9jR*Du`dHLa_ROtrk(zSv|K`Bt2VJtU3$3#C5tYqv*^FYaq((6`!d@v|Gx^x}JE@WnmWVA5$bMw?5~ zT@I6-MO4Pb4tP>fyR60=Pj_ZtXcmzO1?>f7pNxc)AcMTTG98j7C!REkla_6%9zRd! 
zg5A7iHL4=P_zn~qdu})8qS=_Fc^b)K(&&PPJ9-`gU)BhaGE0HmLj%#yQ;jPuyp$!g zYB$Z_nxyF!7f~t(R;OR14^x}c+xFR1o4yZG<;OIS+Qm11=b!E9$VWKPM+yv-dpz>~ zsgkpIYW1JO%;8^OfvP_TeW^aHrXxGsEPel0k5g_LY^a&;@X*&(o#i0pwJ?Et`3YYz zUj)?@zLTXJ=%!*14;Jt37UGh2OJ2ZhIgLcRxYakgdxKCLL|HI)$TY!D_ZBHTYkArk zZE>xl%Y-2hIRKUog)5NC+0Q>@d%$JXMH)J)FpjKaO7To`sv^*;Q_QAvqEoK@IBP>w z-$Fjl_rM6k{95SkBjHyyAL@z8pk5eAxII&-4rXx)#=r3ZM1-MKFt*tThc}#6Cf@^iD(D!pHItxbP>%R zf?Fe)rYvqRp-ShRp<72vuW#I)s`<34{y?0wkw>xi68&!Rn_79Ws2mH_r&8B?8fkPl z9nIZgoF#eJ{l<}c3gzgK1hG*{_)Z_nK_?>h14ae46EZpawVk&c;`EGzCF?{jv)F`n zYxFVR;02eViDKC@o0Jc=3UBSapD2Z@*8FjlgjrT#Vi#L{2x&>zInI z+8%3%HnR2EGjg2G%+z0GGTY|b@uesgpMwrxlOo>SGRB1o~k8CYQ+~+ zeBKouLM0{nGZYL^rd22f;OYL z=N4-tmhM#LZ9JmCooHn22wBZrL~I4}BANUMI2Hb z00JTA?%VIPs{5~2taDbN@0)W;9v5BVf2Nqc9Ec=L?v8*{%2@ec3en1x3`3dF1gd19 zqu)iR7%2SAV?GJl%F;6yjz=2=n(@Y~-#%UiR2Uy0ofQxL|q??xwP`S9ZC@4tH(G!7mQo zMMvGiNEqdPpjn8(GIn~^o*>+d^~6N5E5p4$9Lb!ds@jpejZhzX-Bw*Yae4YOn#e-2 zTsiC^6zW8T`x~WHv%_KKC7rV+&H#zkGImY$M}@uAdDb8$4KdJMCDU?4s$ceGN7^1M zQ&l@#m`0r9%ef!-wH+O@VsFNZw~Ah7pzl7o-%{}pxlr;Uzg(_l+BtBB!4G)$lg!g4LY*Jrj^qe6YRpn!sS zn}o};o6A+M`d1tl^SN)!xuQAwGodK?@rI42+Kv8J zW@Akn_Xg&Hhk%R);X<Us37HAMb14Xi+z*T(cbe)!JU8SMQ<$vRw>C@q&w+kESg zb@ELrfv`MDW-XiM&{{a`@k^Vwpw9ylaBt`@l;Ga_jdKIQX4kkZ4?;|hUzKcbw)R-?*2g7i}=bDV;ul!plsX1UHXzxfpIWGJe*0hU5lgQxKo}GtBzV6+)Jo52V+@Of! zvR$Pc-s3Y40GA8Dm&uE&hlFW4P8lK+CYN2T3MSz(Aov-)%yWUwsArv&*%^YNG=N+r zbt|GbdOst1W~)mo!4n}*YTXc@t7+YPxm9K9SwZ*K{#Qd?F@Zy*e6^W}?0&GaH-(&DWK+^elHEPSvgvj|bJ{Jb_z8sc7+W~3Shg{7(49ZD+Mgp%_M7jugzmleDFPy~&9+u8lS}~$u1Q(#2Bcyy zoAxlQ=OS7>UP(bKN7~uTZ@)EnUAmVjZ_E@t_tZz{c~g-1RD17OsQJ1$6O zHVT&DB?S!V{I-duI{_7M!0g_^P6TX*Ky-cAsSu3U=c{4!i+_UshH zH(^MeIY>ruQIae@jhJ#$^zXDi_HV1^D>fN_0v`1j_QGB!qF`(ml1|x3fyV4)l+2O} z6NE2>&!4)UE>*w#7K>s04ul(tCB1o-UIbHIcPV}4sHLR@k0VA^`#Lu)*ycwjW4v{H zo118onvxo6PJQ|7_Hwpw)6Wq@e}9kGOm*1az8Zf-#iyjC>9z{h(^WFQ@K_ z%=gQC0%&b47?EC!y@!YD;Fp2q&@zTkO;*&BoE)UrapT($Zz^;2m7gYkl2|){xP8LW zbtfqM{~my4UfAq-O&OSa5&^U3_R)0H?o9cpXU$+xdsJHurW~MS7Et5EAok^@VSoQ> z0uS2ZNFy0f_1~T71HMnr{OB|7-lsYZ;^cy`zJym}nfCSkI?ug^B!x89MCI`ER)PE5 z?zLyb_JzaH?poql2YrUG0`J2IySwL0=l>C~-o#W{sbEUiW&(f5qT8@xwO&4jSMTfdvTx2Y`r!fP{na&>A* z0RUPb1s`kxEg~fVof5!$=haZ+x3FfCvHsT8sy$yPK0)^~v_4OLIQ#s~qOjk-*O1($ z0K5N&A;0@dx1xd(x39FcBj+#qNQbCT$D-)JXf{6#;IGk??$H_MfbKFPDk_^CGHoV5 zbW~{N*kPs{mDazu@=7A*`Q4iq&dOt1GjN zbMs^cBp2Vc7QcKpxoGSC#~1)y#otxG9TLPwl>Ny+;{A);kJEGi)B@E-yZF8SwGrDDAb*OCP*DXqlqWg5%(Mx-nof&3y7i#k z)MvRPb5*=-laKshSZFHdw*I-EW`~PGCQRpIR6$%gDrZT^Wx!gu(e=RL)nf5>_!+C- z?fEs!a+$LoI{-kajkUk?`M~=+ZP;|lefbz@9N@X1EWP~elGh$RiPu=~ zWU<5pf`wB@4vEUMXmTh*1a2)@Ev0y?@$LR%ePk!FKPk-dPwd7&E&I4!1WlalN9xC} zMU#?qr67=Y+KkqXAI~LvaHr6V{`^R@T|KP272VEz!|T%KFqAAw_}ZMnA^jB z?g!x6X0_VSfdJE7Ya81%i+K?fGmozbzicAsI5I&gcT5zSGQZ|3!bAu-n_?Ag_IWR^ z4(Hp1CNbwGOadnlav=##_%~vP)he?qCa`W5sk_Rvj_})CPm4ewz_X1<&cpx2dCTDQ z>kKlQwls_8R)gs=5v!$JTfLUct+nNFGXzcVp4b=d2}ZSU2Ujs>zr@7Eu&my95^;!j zej++`-UZ0nhEq9!JQD@Ct)}Q20=A$on_1YD6qWN*i8=-Hp2p<5M5~(XQ zZFMCHSm}u&iSeHEnRWE*7aLdB!vm%JS&s{inddELg9hlYg*GKhu!sD7r3%$Xa{BOP z>Q~Yoor>1E3jPUdrjZ7HU6;^%k6#1dt}j4t-g57@=hj)9THam{wzY2`YAw1aQ|T!DW@ zYx_?TAJ$rXg!gS=?;x?~)03HCfyBm&lGpeh!H!`T;IC#@Gjweg!)sp5Qt(>+4YSqg zY?D2bUG4nzxnxS`;RI28&Ysr*odsSQ)lXJrjo{dg%BSA`K?V9u3#aB5@pNn_EA4JB?h0L_J$; zezLtrDCKe;R_iGL2M&=u9$%4v*?jtt@rfg2?ej&|tw&;1JIxBoT+^LLdx6-&PRRfa zG3+cs_2tg2?MGV$Xs%8Bwex$Tfb-d37DIb9X~$ar zLIg*X^NQ`Pu$6Qx2r|l5swg(>-7!|xmyV(Tq@%joBy81l9Bl+({cN#%l6^10*_!~kj=aC|=j|lDih~Te@1W(`Bi(a%r z?XNALIjwrhSnrN-RL^HxOp4y5PF47N6%3jfKHaUJw%dG3a_8d78*;W6*(o{Dtk4Rg zoHJJFk#;wlO!IkMGdAZ6f`3~xCy#4p+6Mb=WG~VyV<&1=J%#D;LF6wX^qkw7V!~O| 
zyJigaVl>7K`1>78WM@;yTp4`819`*jcMkWdEN-ImlXY5WmmEQ{BLGaJ)H;v+7xtyj ze{B9&7Nrg)Q)tW`rhjOY#%F=Fs>lcnhg@bGB1RlFUjUCNE>x@A9|alz9mR-86b*zZ z{~-9|_BfjZJepX)T$%n5%MaC4{Iz??M31xikL&kjsN3)Tjm_iE7-aBSJcEq#A59+F zG>fP{|5J^ugI)}=IoLVYuWbLL1pqB>ei)OPu*of|@Op^6LJ#Mk$N@7B!mZr0|J3?X z(>4jv&9F(n@d7dI|G0WZ5U6*Ud*qd|f8 zXlNjyAWnX^Aw&K8|LV3c5xBtzrf#ZPLFw=(!ot?$7q|ca^sC`Z<;V_}Yc~Az;IVkU z!@R;%`YCEK9aXkjW`qqzgf%|Ynj*pq-{vtv@U1QJtu64aEbt-bM8(4KW~q=`sj5KY zfWhjaS_dK`)shSO^f6`j^a$JhVnRn?7m@(%yZn{m4Ahmck**r^;IO?39zfnjs-tO z+cn|9%6B5(wdRP-Kc(Dt;>0R&V%+uQjQss@dwg(F?%Hu;$!(&I1Z0O{T*LG2pH@+E_&(q_DgYFRR@R=`BM2snfnB1RUBQG&(9%6WO zin^+u5ZPGK(Ii!9Md*5Dt|~3GzO(DP!W^q;0&WEQ8aU)#i@6==AA{QMRi~&e`BZp<$(jUHx*-~>r2ux2~+2`T=d7X*!hTtmH5-=@$oDyN6&p`f)r1q#%^Sp zte~-4MvraQ+Lfb6k$~k~j@I(Iy9mm8)jS(ZZdlCf!beuB2Y}?Q)rMx-(+x<+W|X^& z4;$3pMyQc5RQ=CUz;69!a}wxpPe-9_({jgqu&c?@uG*(nd^D-bjaiB!^+4vF6nGb8 z6Ot@iJnFngUHSUJR!=_Vg*Hlp+I9DrPv0Bs+%VZ}`$&ve>h!@ipsCtCj`glNhzOoj zrlTQ#$)E55ubvq2`|+E~eK6)6#WtIa-HC<~XWkY}Z8!Ow@&scy#22rX$Y17T7`R>( zq|2A9xWFbkf@&4D*Klf7c)e&T2BJ5X*4vo=!WVpkRl?dZa0cz7H`z`!wu>n1i^u5M z@d)8GXOu|RY4|028d`GuGuTF(v~viZoRf^qdGu{%(G4SA)Iee>E5?(0<|PBq*k46| zuBO>Hy`(kzoztK^B{oJK>>g9rs=)7WkIG{518WIOY^Dx8V#h_I*RutA*W%iH=@eqj zXZ#Q8;&F5_%s5%0b+eG;i-5OEduVpnQCsAybSOni_C?Z1&VJGMOH@blt8nih1*MW( zW)zfGUr42-v`O_wc>A?xl5}zv+a28|zY-?|1>TN**k$P(w80OTu0Xq0z|pQiQw58!g@l$L(ZJiFD%;ma;s_Hq|hP2Z^|6Lq;u=lmkXG0RZ%zL+>At$MV1Cc9moFj>RQi&hsa&E&HL@^O;!bRk7!~tG zJ1{U!70gtEou0){5g<3_O=&;#RvU8mkY9@Hvby{FRx4mF%|#1JB`Wwc zQ%vUl#Mv~Z#+`@SKwNQIfEQ11b&sr|ukH0vLZ|j76cWcEl{7>9VaG$NWIRlHF!qf@ z^M~v@wk;}iGhUNODyNCrZXEKS>ZSDzmcQ1J(%p?gIPrb-g468F>ak;lP+D(r# zU8tloSE8f)#zqk-L6JM$m*7e7UGJ^VUzn%f95&phh~wLTX_A$-s7N4INvx;TSkj!M zC!|tPH!zr8>#|cV7R@I$>rYLc*4YX?Hfac#O8No)bg20o2J{;n_h zq7C|Sm6I635SuMrP^Nsi`Oo+gxk1jl2yJBUb7tIKW8r5*SQ2sOkCf6 zuuhVQiG^3TUg(pCXQx5}aG-T&=_hJShi3-=G*MUcOD|IFtcOU<(!?NSae1RegId=SW13IxdY9FEgr(EV^&g!o^!=vB1$Ck<*{G&K${UH?v z+^IePH&bVoll7_@(VgZl{5)%UiSIJvZtdgy|MeFpm&yBrNBd3@=kes|(ON^rL`OwI z`F%u$fQW>Q13*E=rNZOD#-|onad<*V$;k~?RWovqfmPD*IDN{lt{Gb)pk?QhP&SVH zI{Ndqq;Jl#l;a<^-{Ucp7{dK9JxEIq48uh|aRYZh03y*5&USY0Sm&R4yNwAB@7T_m zTzwO1hc_EVZJ}M(&w9+eg{`hibkwhW}9gE@}(?V!jUT-aZrB{I404 zf!yB&sKQOZyLtCl7VTdDQ5{Qlv8RpZ?L!SH|j-4R=8*Aq|XEJgB1D_ob=aX(v+NQ9hC-~vsV*?;i1 z@0v6TRPA~Irs|pxn;Qo*gyHPpG6zV=L_(RR^I}(22IbDouJ;SE$KVX1-Q3*eUt2$( z*W(6lR(dT~4A{~=|4x0=Y?!bBqDYaxbZuvCfQfO39Uk24Ixu=eh!*E=>sFO-$6htp zUO_vJ+|vwC$o?pq`m-eas?^ZU`eZ&t>Qrlao$r2XRYiD;?+Wt)P}`7Z$P=+NeTBJm z;=a?|y*|BnrX>-m;h!|>qdLT^^tBBHCI%WDI6>75c*QFi{g3%!Zx1Vrvvq99ws$v| ztlvu87JTecd z0rS_qL&%5vc~-LuG%t42QdEo<2I2d>FE#2LwCeD%9OW6m<_#Slt#phZ!NCQ7zsWO_ z5ZmV)ivD1z*Z_OW6LBy3@!;Pvc>u6Bgp0n9`PggsI8e#W5Te__-(`OS<*n<$?+qdQ zx1#@wUG`PpA4mmvoN9Fx3yJOeie$~5Lh|i`=kZZHV$`Z|abs>L@?oz*?j_BmmUlmP z$x&zgzGl8)O_iXP2u9#u9X;EHa94gd>r3j+^@Ah_WIA; z4C?TidjR}&YYU$v`qXu9F1jOj_R;RR;$w!G#O^ zrn#wG6=D=Bz1S!iIKIXPZUvf$D8@wxfa@flhRIu^D6x!U<^kokkCky_K}~oqr&@Ww zHR*)DPIXq~ihPr_7>rI4Yc54Lb^Y$2L>Lr`_NH22BP!Nzi|NQ#U|TRyw@SMadc)#a{fbfcS7Nc6ay zr-Jo~92RYhxP)5XsHH*Oexl0-ZUB9EaR@ETLkH6~q)@r(tOm~e_JZ^k z%GD*5@5j(X>812KMH_%|lYR=4O^_7@(;!5Z?wJ~_rOvN7rauu$U=z)9&ekDUpk)@RbQm)9csngc;n(gVJnZ) z9jOiz9h;i>V;9U;Bdd=@YB;>U^l}+Xcip%4{&6c1EWH+Pf7X1B5fdwjy(Vu?eS%N4 z;CqU!h|fT&x`Ua*3RFQ%%0F(Zu)a9XbksXo&{Q4MV?W%~*X=cHI%0AGQBArHQI%HC zGtkK8Sb4rMslTBxCalQQAAQ9OaugvnffknLa#(go>*_`?OGK!hx-h+=Pw0C8iNE*( zkp9to@RM_~XL*rwxzFb*H%!+Ow!+ajp2Z}H3xh3wd|1y@F$fOilC(?p%(}httt-uy z9dJ!)m^pKcdF|rOoCP%IPH9W%rTq&PtTwEkRz9;H!=esjy{pky%vR~0qoT)g0psqO z^Sx5Y&3%NAOg$TQx)Yarv`r*j2LO+1YPu=?nrX@;?ROE`v zFrDj8a))x&DTv47MFatY0jxV)%NMFE@L~_Q2SENI8eja1D}RCQPE}S@U7DGaRP?@C 
zQKF2d4wIQBT|KU44PLzD`+B*HpBqOtM?uVrYq$C|D(!i?l1-^T*MTb&f~->O+QadZ zt@ssUUfbIjTDzD^R-w#U=aJ-?5l3k#-dx^#>#TBeQc01y!jSMQhC(iS{}Z(UI*I&1 z|9PU-#;!(UhvHA*Vg~6j}_Xeg^prKVTZtQNP3msZ{5usqLXY=w_l6PKv z=hJ^jt?l<#*nsp)i=GWXIk^0yGJTEypux3BEz!8DAk7K5URC8+V-&~pAfw<5swCX=8E__xX^10n4S!<7QiSH$us$z%F_{saZMJMA4 zuVhws*OdaoBF>Y_5_jLR0(|U*hyuwNSNTLlPh;U;T=F2+Dlmp2#3=rdTZ{>Vi?WE+ zvJC2a!PQBWkJ56yZ$Hyq-e?pN3Y&FR`?#SL>%a9qmL}9p0Bvy7rL3lqPuGKc#B5|j zU6Aypm(1Or;7{xCMVl`evaVT!JaGm(8|W{)l`Anj=MFEX<;xruEO^Uy>y~2}OeZW( ztO+O!MV+kS@vq_ZS?dDmBPl*y7Qm4b9+$D4pt0Xr2YcsLbpD%aqWDua2~$t2PmA8P zQ0r^znGLou2oi^pRQc4WtIJ9_6#>hGGGM!-TC))$;F7t%ZUU-~`G=AS$UM&`|LvNwGCJci3_!_9n=FsDtB( zD5bFi$TFCSBeE9tSuGEze|-{k1x|V5tBb`euNX6Zetvq_DtH#_QVZQd^|uo2d7Cq@ zDT(z4I<0H8k=>&$UyiD_YTd6&C5|CnE~oCer+A@q7H5#6S@N3seDC(@AJX+7X1w#a zF+xr`_oHpoXkhiIoo)4#;C^(nioBl?#2v6dk++jXn>JB~j@YD&V=TW#uvD?EdDf-W zVoF!Te;qw;nhjfr;6uiVM6(PNk*IwqnXwWE8YEt2u>0ts!#q2durjLK(BK zXadAm#7C~7I`glYAW|oV&fj6vtV(`7jB-!n3_O)OW-!Y;HT7@1w$;NEy@e+V)6x!O zeGGiH$@5n_q7o#XLV9-$UWX4MQ}0D#sBgH*v}`b8E}b<*$fqQP?FC^fIjc6R zfb;zNkQ}3ES3N7x-`)3r*8OVJ(Us_FPaIp4Au$jm8TlGaZX+kh=HgR!sKXPfbwWgOMdMnjjrl1d=Wx(p{f^>93y+>0{ThvURQuv z+_ZT`T#P8{6r8pp?mZ?(mjY6)`$TMI$Y7bJyF@4{0xUwQuI%Hd|JIbm`)x{IA}*>k zcq=o>ZXqRQ{+ytYTZFJvayWPNn>KnW#u6GgjF%==;`&7g-*7g{CFvQdEXhZVGw-m( zRBNYWnGy}kcNKBKYvVzo*Lo7kpjpX-h&Uf$db7NIs5D-?ij@cYE^>-f3f8BJNi#jZ zIkxusz@3(HCq{UU!Ghw%;)X3V^T!re6a+J8Sp=7f3~RM<22W6a2m>Qo;d249>X(}N zJqC_^SaLu?@(#6e$J!@e(3duaRd`H7{m`=j0o!KdavzD&5e(PM?&V8$%W2)V%= zHJ6E4rThXzxruAfARQO~Is-_z@-9+DBB=x2yW&p}!;=A!FbrlsF%q+Iml4f#T@`x6 zQ};A1N{H6G>)S6#SpF+3hxLAQPZiSeq{G%>L3=BSG_Q>hX%4X^%sNxCc^1U6hb@QA zo6b)+CR*J)ox2li6l@(`BaTqzK{{%A(?Am6Sin;Wb)5&mqgA;1+bTpHz4&Lm@mcSjl{A#Uw~Rjd+H7N+h)ywx*n*?a*!J3 zBaIqKtj45>%K~Nk=(x3IKJ*U7%A(FJ6SPNEimZjuc7!3!?wQF4VK>#eDsYonaFshw zTzm&&0Vz>n(~P7#hx=Z^N5TxACV{z}Y)?zf1A5sCtoC-JJ1`&#pV%tQ4J9Vv@ER)8 zlMvMJ!x3Tmg?#3j;_K zbXXy#`TX_)kTD?F%AgqL_BEagOrx*ZHbLHbzdINN-Yd;-o*#;d#>B=bk~dPI@ivhk zStP<&r3VpvOJ;>q#g=Q>ACr@BzTZjAqnxCHuUw zT2U>Lb9u%qHQ~iqV96B9X36Hh?T3WyqAr0)yM#QV%XAC9|7TxH%K(E!G``|6T}e98 zIGizfJr0XW!>QqzOh2bMf|M0r6Hm3v7(my_z~o!zVwgBZ5ly>r3A-t~9*1Db#}rE_ z@)i+D_Q^6t*Dy=MSyrcZC-zEoBOBBT2v@@*S*>*vYRfv7UA}RbLNjX-3^g!#GX$&TV94a$fq=a(=-3ZrQk5ZzSOO| zcs*+o#=8ckL_M{V8Mi$lWI3sW)NkA<{G@ZurzVy=p6Wsr#(`343HEqi~{cs+xtK(X<{30%+D5 zNM#ahv&+f9=1n~jF>$0Ov#Q8vOFN!7k7^=+ifBktXA1;E&Sku5c3ep2Vkc5EbOcQr zmj%=nOwSBPE*w@ta1pFyR2HiC5O;RzV0zqknMgN zP9)1Pcuxor29?{NbdIrA7zLB&?~i)J-hzJ_N6Cd(&2Ym z)lwHlxmXu^L(V?HifYLu7jr&No)|6G&{C@7HTzRv>tO`Jd%Nvy`I^sxvQeTf+x zT^Fr&vH~Yw4DHw=?1-Go!}!YX$NQ29`&zM!Y-|tp9t*Uh^u&-?qbiy7CidS zbl(VQ0aL#n%Sy2EE(CIznx=hU!c!Bfzj@3{W4(naI> z$7M2WZ==LqSv_x$f4}vI{kskN$Ln~*i#drj4Vf``L>|}-c2&Hmb4Qlc^;_kV2RbD%`D?MW|fup ztp+X)oyDavPSSMgWJjLgJ#ATxOnjx8Cz#i1CXRsdywfGUgfrn<1Chi*Wn`#No$6is zoJL#9v_a))v#)P+(T*JDc&2z?#kU#xD(H=${U%k%rK$rY_^GVR73RSxK_uH51Y*v4 zFK0UlQGhE6C2>^^`V%l^osA&DAr0f=*=5{-7cOwG7c;fTr!B9*hO7TYSUqY!HodVQ z0J%?T*#`8aNhHIX*L_-KwXJ4?i3bMZzLoCS@zxi&o8=cKn$ZgT?aNgPl+8%{#x=mdn4Vcjpl;7 zCB_iZ(+=rZOzM*uCY9BFs*q=L2yAuHbqkVKI&u9#WXucQT&`Xt>(wxKc}e#+#m6CZ zo0@|`BV%=D78_W#yxV60K7}Oh{TaFlQ+oriRX?=8;in9H zg-0)Vguig9hK5tYTvSp9(PbTQ&mSY5ZzYuv#^Sy?kK`^+!Q_oEyR#Jpio&3ZPnNC|qZEyt7w zf%+#fJXqvfE*Ll>dodPYU!S%sCpgxW?jDrWU4Ogu{@v?%k4Nn%ZrPj2 zvHm9Vn;Hx_t;a110dJ@E3)12h7qX;ghAxi(jM(wFLz`KdRqSu3>yK_orm9;$Qq~K; z)`dXJFdNpu(e>(S*nqc;{9`i?P=VKL);i<}M*XbJRFeFoI;UeWICplnLtevR2&2)Y zbVT(UE^hzSIGMVcjo|h=U#%uxmRm_sL6UM|N$#tg`QX9Th4($qU5L%5HMQL0#VBgr zg(}GN_qH_qf|1)g|ILwAjEIidB26P9vi}C1I0^(@E5zx)?SwIsF8fwAKlR|ll0yZG zxPqme-%E0hckr(zQAL4Y=gKHmxx&~Eum%}{9u36|471~kXIs@#{DI8 
z(>N1v+`HgRI`}X+L^$7RoZj^kNVy~L;w7;#xw&)p0BCI=|Cn15xbpQ^9KLi5ozYFc z^kGrSbY3IBF8M= zD@hp%9b+(Er+^MhA1BxyU3EfMwjTjrE|lU*(|ULlxi8P0?k+r`WiDA5`DuUzDa_tK zS75B*BRjqCbcONl8zgd9pAOkfFV?~#zW9l5BMAk3(EGZ2PXEdm%x>N#tc(RNS@L&L z9zDrS$nJqhiRtrgmTYgL*RV?BC>hiP_%YOm>5GtfQ^Q3jLT5+<|faL zdWHyJKS}4zTlzTga6#TxA+llI=(|}ijXBtn4H=%Tr@qb>KNz3gIpHV^3fN&|neW$Y z(DZaufMBsy<1(L6NNM!rg{APu-jB9|C*eGn$n2I%>72P->VRQl@?GpRnuIt&Z=Ww8JuTs-=Z>k zh?NSf4CEfYGTW#ZI`OGYphOA!co;AKv))8T=w)wvm{)XWD&amRtAi$1goJ9JxQgPB z8We>MaQHswmM0UTgu1S=OgXCEKp($>LSG^Su}T27C!%nCog1C(P<4FUdF`Z-lp}23 zYCH(}LZ)*D;c8g#Rgb(|EUDD5;cm!!pS+MMoJwA4w6fYoqO2jnN|px>vjR2BF)?d{ zJco3eI}9}GD3#CV>Ik>zdSRw66_-*=dQynbLz}Lcw@P*mUHU%Ec9F`t7rv@*breS}d90j=2*aK)7l;vO z0)w6CNGA{^Z5m~EF^&YB2^JjVK(>bgkB)@jXO1}k?yW%o!&`AlJ!4Y2$==OTAm4Gx z5;FRZb8T3bgv@k5F4=~=9>euyr(Z!sGO|dx*otZI?Hd<`UYnXdD^=TUbqvX1T2>5D zY&>E-A{tn{{3pMaG?p14&Hz+Z$b5|->gfAv>+KZ8q}*nqw1+FXzHFGplr$w>sp*o0bN9mob6?y$!9JR1ETO~T9UO}qax1>HDUt9yNE8bI}9EeL0BLp8=w=^D? zFCNP0L=eFwY+(B%7Ghvc_SDEmduDa(Oa0`yr;)TFf3eP@H~D;5VZYD71{wc6U+f+i zP2D#qAVkKi309V!B}-7=gyq3tzk7ydt*nfJ;p?upVZHO?n_Pzl_F@l_tT)>Wg#^7; z@{kvf%Pt9hLWlz(EPnxT0DrV&wtS9Sv1*lM1J0n#iO|aqSCP(f7N3*lggVofRi`Er zV`b1{3`LuXt{}Fa7EB!4|I!cL!5@sv}un;I-_B4sCH1dZe20 z!?{Kks{V#=jZlCD>{ZZVA79!m5jR8mUgg%*y6oFt&h+rP@2>J6va3q-YcP~dG3Y=l zBw?(H`jV1#dJc?a(o~p}R{gw>8z3F&39Jei51b2{f3jNM`^#XZg`IUaFl%|Ym0C9W z1tN>*MlZH<5vvj>4uUNoE9K`EBfgOS`Cy=|HdWPySMf#gtU$V_%!Ma;*!O-}>-!nG zaSN%yn~D51exyo~qghmxBhmd|83zgOWc;Kvm}B(Z%u7s?b@Ks%4O9eB^N%<= zm8AfBDFG_<7?Mk37fj&^7z(c}({pxsO%1L9vpA#1S(&5{8T7(V5FX4xm}(Yh_f~546$N)fEc~2zMR)bk)dZ=DVNmKJwrcR6+Z_5 z7q64oA6_SPxk7I4f=R3M8>Mu83U$_0FPHM`z2iE&D{-6U6W`gk#rqU1R-L`C7}u?~ zvuKHkC+S$KHf)@j1QVWjd0bym#7?`j!Nmff{6-l#(0>XL?wR&W8eLn|6iqMg z3d6VBE6r(-syFh>hwe0rH-xb-t;C4j8P*p#Ur^PlMXbmx=ieyCkzTb-l=l<29k;5J zVMfhjEp*}Kv03VTqiUyvvI)sa%<^y9m10pi-mq*fU{&V24zWlL5l7q-ymtZ zpqW&)OXL4DLU#o3t)NK{05qLnS4%`iua0^D-F^25kQWHnYg_LP9z6te|LRbs@Rk0< zp*n+75tY8;5&HnZ`_J3pA5L4h-%i`te>rUzAEQ;G=SJH(ExicM-1ac35;868X8Hpj z--<$CXCbG%(}>%01%GH3-A;8#liZlSl$sA;rC=LwqF=DUzH;>%@LL`dG}gTfTJ+C2O7R@&B#LYS{(g z-S3bZg~yPZ#~2!9R4gK&7-R3==>=L zmR(IF5%;xYY~`o2act!%yVEi4V@M65NzNY;HHg0>YJOyMIPZrQnCc5r@5~Y@^)|Um zk5{p!$NcXK(@SgWQebxqt^}iRb5y=Hs(qmOeDuEJQ)Gw8Y8$H1v&+z5#QR)J%s)&% z5C7L4%@+7$aE#OG8?w}s#QVvQDf|n|u=b0R+tKdL+uv1&n~?%M8?Ww4 zG9Rm^|5o*zs_r*c%5SRS@8q`D{f0N1V%!do9+i&=X^-UjEsxOh_TmTiPW z(ymK@6uMdjzr<6LWwqCy7w;kljSKV(O6R2QGvYF5@CC3V9~Y(IbyIxR^ejo0eUg4& z6lAEu(VZ`xecF$nAMa8mSzI6_SVu-$!DxbL^{xoG_DTf=vW7c7?S;PCGgkEy*J+DT zV^3~`Bc}_kmhPa93;0%4rKTN3GEQTe0VCd!q?GXw++K_?>h-w=9J;~*DbDeQCgfJ; zA;RXy`R6k6gN&=OqV-cCiB{)Gp5Ftn{@cLIy)DUX2|N1W^)aZyVat&=|+_{`ZjbTg$gNDCNey(8Vq8jF&rZT{z;c6K)V_@WnqYgS z`Z$?K#X)m4ua--0J{sZQPnh#K#f;okG#FLeZM2d$X#| zU!VAhgwsCvS2!KIM@U+t?m3LOTBX0)wHSMO6?%_w-4*v^wnFX}zjIjl=m$x}DvS5_ zat5_sTfP_7QBuVZic)P+%;dAouL}Q>!2_T&|LFVypjGvOjBAI&Hz|25{T7L%MLJ@U z>~z)oPcG$iMi(1ngc~OqW6QK^OPahfk$@3qGvm*2ICqP*nq^GQ{nyZ#boMQP`)g>P z!S3B|=&3Y@T!m1o_~-Mk?N*|kGqrEa$#@zv+@^IbUq2 z6si~Fy1ufX<0?97cZm%o%Mv4qH=l=T%)+XWnBEKTIEIYEiPiR6IP*&#_$8n7-YB*# z#qNb*RGDZR6MS)Fk`T{tE#j`dzV3rUCGk{@tNvS$Zg}geF(i_VuDd}ksl9x z;1^YY1eFqO6mNP~Yp$v)tAz1#bxq&cJ!|En?IKG5QdRJ$-4JvoI;KGGfuzwaTGm00 z^6CmM@eFcaKWj(B7=cb-a;WwD?L&Rk8ZAekjUvc&8h25=P+r?$XK^gRp~_0V?5n;M z*oT0|h`*aOVx_+B&baTV&2XMKhOtG4+s|j+$Ao+?cEcg%zp=l!YW}dJ=e9;TZMMM4 zI-4j#2zQ7l$Hz^?b(&P^TX20{1bSsYgXFjqu^dS)SS!u^wV2$jAZV;+K1=zI8uv5s zyKojnGNE^v`~lE|-XrW{a+iBSb13@F@uJ6|jSTcXv6e2L<(CG3wj3YGb!}Q4_pz6g zgZ`QVLKuenB+zOBqq6z)Gp>Al&V%;fOPXg#?4Osk&S|IO<-~BF$SJ`b-hF&t(FBOU zLMt&e!(syxRS)>O*^==Te9l!A!oReI13JZ1UrT3Xk}Z*!EaN@30*~xLbmV5rOYd^X 
z>*z>p>Glg=LFlD-eX$oqf1iS13ZggQ!G@aw%&IZFhU)JU zt{H`KX~tfkxY$bei)^Wwb}T29Wp`nkTqKCqoH z$uO1cqK>y~>Io+Y_!lLHHx<7k3sGMwRZ$Ow>)S$Ss~6+KA{o6fSYoUh=U7=_vbc&oWHYZ!8kB$XE=9E*LGBBSnuS5>547I% zU9ykHpBE&bo=c3J2G)&@WVz_hCd$wOgIj zUWy))%RF91FUzaHpMNvZ$?UA5i$))(jl~Ewc>&=eyT|08@5AEEYnTrOV)+2w1-e|L z1^WP=3iMW5gXmCB{?Q_N@lM8QE^gxM>$M6Ght2|W>9>rYCM(U)>Lo^kaFr|HlkX*O z7!HfN7N47uu_O8>VHh>IhJe*S(8qsEsSEx%HZ ziZU;ku0jtq`%wg8jILmMmV}6;yHu9F@a*-nF9&KG@xWF1r&4oEp2))LJQWMMTKf1z zZYJh2icSzRdw_~x5T;1sYAW%=<;ECOJUT?e8>HJJy&3)OTsyf1eV zarR3VdjD~&Mp7~REi2&T1b&g0wUp8@Xm&qnGtr@{hKhd_#H>E*Y%XD~u1f2xKxe6g zy{h*VG=?9`*;HED+hc0Rf6cPU^nM_?-7z<{KgU{+oKLQKs5t_NJ5Fz;DyNsT;op3T zU2AZywn(1mHzYS91^Jda0&xCBHTapeaFpohGK4bR%j z1;RDi7rq?|e@+Pu5l%N~+CW#A&{X)Mz8pb4Qf&sMm#P^9%{}@+*uk>ut4~yV&n2(} zN9G|!q-H=#%Uv05qj08yIQ=|UiKuH>y^COgmE!frdn~0n=3-(fM0XEdOn7iH;*dpY zWRUA`3B{Ki2KJ=7x_mi-Px&v5(gEqUBLa7KCJAZqPEODjbAC7)S%HVDQc}ippDS~4 z@V#QnSH3BdSax9L4xg$mp~Pi!OABo1&z+xm=X&vI0tEJDe#QAWxlp*4d;sw1XA3qv zLOw%j7m=pkrQv~qOogo}_VabLrBZ!5adA6IIx)N&3YCgvpfs@D^kkt-a-LXMEbe?g zXe=Y0$2-Ps9uKe1e?4`q#E1nkKaG)5z8Cd~78{+Xf|*rWT6)qa@3c_bvMe{iN=LbB z3~{l5k~!6&mK?8*r2tz0GKsfDp_VBTKOt5?38+RKYQBiJvByIi?kRRY;3s@;CR@Xz ztqXG0aeP-^H3@_=HN^|`HRQ-pAB-osgb$zmz!e0d6XR8orffyo8u$Km-hw}m`Mas( zSz_=((d|)-iJhu{GqOTQ0dWIxT}PZ%Q=*i+Sq2PrrtnZT&Sx!WugO(wp#MPx%fZb0ZLV>uFO8p@LrUzd=?6ipn5 zItHuQpasQ430da1Wjy`SJ{h3x*o_hVQfrkVl67@;qxu>xIt+9tnHIJZL60cf>#vcF z@NI>T0S83^xE!c_Dhq=txd6*nYl>q!>hnk25!X$Y%qL17%2)q+&h+Rp>?y~wWkftr zIt(0`8CYp&ZME~b@pEbyJB!}vz~oefhDdX!Yb@aOHdhbl`$3%skJ!xTx0l3 zjKGz#qx0fbB#JtOW%MXc0k`kgzVcXt`pWzj+manP2^^&Svx)=|bA8Hxx#uZxud(4d zOyDymooI+vqO#1>>wHh(43B5iE5E`Zp197!0HD5hv`!+U`P{EwKWoBXl8sPPUFe1T zm3ujPemA5j5Kfnt`~f#GRW3N_vm4svg$C_xOJUJl0Z1x7{zqlM;c@F3Ohg z5^;S_@e<|QU-9wj8~rfnGz`r&&1=YAA!w4;?r6-fo0!!bm|}~Uic2IDj1hY94m#>M zOy3JSjRp1oU0p9uwmtw>>6GaYpROs1(?OZ65HuCIf$#2r4DU{o3}5v4gdq*4MeLjITt=s@_j_ zSbfZ%yBEPH>lA`H0IFJXX>_LBcvk=*5V646B7vUo9qd0!jmu!vKW_ovZcEe_ZRTRj zo}0|I^bAn#ry`0uke_qu${=ZU_?rAc0SCoku?YbF8c2Vl#uQgW9#v|8FwA78mI042 zDMous`?L?_h!hvu!mG(vn6I@}y1PuOF$Rur3QT}6g5*E3y)sTADTNMTbXV)`f%a&i zo+_verck*sCGAZM-Go@XCU_Ni=d?nd;9N%Ed77Yn2z{KJE^SqDIT=Vr6z@VgjuA&F z$jaSMJ69~120nzN6El>b|3BQl1yo(j(lB^%cRRSdySrO(cXxLP!QtQz2Z9saAvh$s zyL*t}mIMhFGAHl8d%yR8|37PH&6=6DX7-WZySuBac6aaUuG(GI3VH)j4KCIXLT=S| z&&}dPf6Ju9SJRJQ{9_9qAsW!&YjFM2i-Vc9A!%|jo3N-|*D)rv4LWTm+mG-otu(oe z^Nw9Z#$1cC;+2%P1@$OeCJDw)8BAo^*{Yc3 z(nc;Vb~&k^yjZl-4nbZk77xMEEJW(r3w#59_F>k{mT6;Wz|1wiOa8?2Y$>M7sXzV^wb*^#W}b&seol@4kzqDT zyhd;1Kle~wXtEs`GiP7e;8wt-P4y%a*YWJLn{|DJL+M+&rxrROa0jRZ7yx}xOI&+~ zn1`DuN>@bXKP0<+!|H`<59z!*XXzpd*@#yFM(*O>pyDa{g25um5NtOfSCV{{-b;=M*Xg9o4)p z#SPCdxBsGs2<*qAG;hQHYZPnw&o2A`1)0F#6r_ns{KFA?*FOtiV{w1sCi^qp`3Cbp zmr(QX68=(j@To;qKrsAEfx-87%|?MH?0I<=o!hFbRiHc%S&!itr!%$(4OIRck6~xi z@L#9@e+*b$R0SFiah$56gy~t?3^HVws`OR^7WOCVw?}#ne@dVAbbSlg0wr?YG4Wj8 zmcF@G`hPE&6wLh%K!WJ3H8BCu&;VE%7aQS%J~=gZ$QPL+=aLQdqw=Oa{pJ^{{QDl@LyWS49T+@F*SS~dFLLI3>5zM zYF6=9CxU6Y`fTu=w^T#>eENNt?@v^R@xk|)MoC*yOuX4jes8jfZQswep;eZuMF-7#dKC zxW?CZF-ohUAC39!y$3err^l4pnZ#%k1MAN(XB zp;JpMkQrzNr(I*>=~exsyPVv5gjvLp*>p1=qvTb(q(|}=etDkBN3%MHxN+KJ?czjB z(JbxpJ~dsE#xSg$lk*ojEWcz&mNygPMV+#(llA;q!TuN3zzSsNCdmz|<{eZ}J4s?p z({Q|D)mjjo4G%+gYlLdL#WKT^o8giS3XMt&O*)KnVnWlHPLXyxx3zW^!*+t?$BZz$ z{*7EcSj~wbM>@`|w6-edv?f%Sdi~<)Mpk`^;wAf>+>Aio*Gr2DV)B|5ilUFidc_q_ z^pM*6Q<-_}>r1~z|EbKlf7dnpzw5e(|6g@o$}#(|rbrkJF8t4y+Twqes_5M3_zejC zwtX?m=d8UIktNZ@t!AYSyabw#7k7B3$1@BxADYDTxK(MyKKrYh3G-gSg`N#dF zO)S&=>b$5V^G0iED^HEKRpY&KF_LEY+zlvV0}DIlOsM&>gZ<{m>XZ7bh?PGXUQ+!h z;;0QG?^FRFc!+m7q^QUy5Vc6@9q7a75Ym8H?pJ?Z;s;kzE4}8%FM}TEM8WCN?}(H2 
z7ccU-zikDMG}u@aMjx!m2GOIw@RKz&uj||Gb=+l|jt`2Xjn-D*zafMG+vqjB&q9E? zdLh6B$p)E!fO`U`8m#yJ0NeZp+(N$&G~4sP%P}1r_zi$SPu$B9JeZ2wsx@$P%=4eSA^Du-McULI>wDo@@KDTq(6Z`dsqRt)5N=+)c9`)x z7ZqS<4JpGo80GzCZ{}HeMP~YR0Dk?E40qUYTm0gG^tle@O6%9)ie>~Ow@LU%A4Q9d^C139T!31f1+xQbYaNj97$LiuM`Rj%Dg zYIPR2*<}%en4HdGl;B_Ae*@&OG!rL<)2Ti|jPX{aPZ3?99KnyfUX#bW!US|))DkQ0 zt|J&B_UhY0rZ!FU0l2tRGE*4SLmM?lFr!Y$=z~Vlmh*ZR%J7YcVacU%j0|v+_HvJJ ze(ZWNxL3yDACd%qfKr$gLvQ?gyEP-b++R(ZSnfCUH7IJe;uk&pFOR8^0QvC0I*^y% zO4LurhZ@b$wH0uklm0j{&;}X+L#hBOhm{kWn$Z|`5?D4!vOnTdbJfvFSPL?Of8j>r zwvHQ_7M;W!QJP3|cJjW$s+9NKw&;HhGxsneFTujs>8xt*_UQ`)#>I_jdRXznl7`pb za(vGLVkf(HTXK+Tghr1IK1Q-UH(#Kd#=djrv z>8QTP>%M$1tsk*7%)c+}=WXY3ER>A)ppy{RddNW!z{Tr~9xA+^-^Zm>#t9AfKS8l`clpyz5UY==F>xuq3si<}_hRK8r(i7BWSZBOH80!K5ANfOd3c#m%+ zou}UKXBAXle{)qc?SGmt)WZ$hCkz1|<;lr*-3RF)`@|YrO|{!5th3#hZ0p1wX7n8r zc`;08Ok%pO*KWsIu9c13zvmY!cY%`yS~J71)W=B0Fj}W}fN7&W#hJA@X-Kf$4Taj7 zrlxIEu@w@>8PklC6iYni8?>jse^V7x+Zdq6U=G3f(D-!stc|awVQ=i}T42tJx-NK= zIBCs0qd&~_(M5>4K~`DIme*je-7&EvlxCcu=r~s5EZT;Iq##ur^?Wr07T(0iZhJ91 zDVXc~zD%TkIfmJql*a)MdArDNwAU~D!otKP^FwETOwcM|A4g{>p5F&9Xo90%Tjcu~ zoF9vhuyrN0l8v)1k{9V=8-Z*U-vLfh{r8!=S|!rpvK_4RSdhK`Pd~&pYkdNq-L(v# zxtg(-n8FxUThdyg**7N{qe)tO{i;cChS(Y8lt?3#JX`{7DkbrNehLSAk{6u?vh4`a zT5x9A#eNu#gpyQudQ0v{92!+>0D&o6WbR}+%eV9gKYjyNNk)HW`2YQE2LE|BCPhe6}Cs{HW`0l+n;OYZ<8sIY>lZq<0XrT7R}9&@==K1p?AYa*u_yq9Bd?Pz~-%w z)wS}#_VeZUDNCtLbpSM5>(_qtnDXsx%UK+xST%z}pTeCpkJpo&F|nfRJYHT;4qW;< zI(KzA90$zQ-m0*X%U<=D#vLv&=tuJY9!6P+$%#W44k{3Y4$~ektIyIId;{?&(7!a$ z21HF6f>pLP&&mwh5U1bZb6PvXNDP zO0v>z=XAI*mXu@4hXm zJ?NHM>GRo7{x`u`;Mo0oN@Esimk(scttPF`5+t_5ty>h!Stg#8NjYjUW{eAmDh>jo zRjh6+s7CdGu@p*yw454@L=CQixM_MnDZ`Z22BhMI{mW5K8kYBNn2t+zBT{W`;kb=y z>dNshS+rS3rA7t?QV5Y*ly<1TO_nT0o2>_!%bV3`>c-Lp;7hJR{Pfs_3L$?UOFziE z!4AVqOPZ2*pl|Az00*gGxGZIntzWSh7s<@V1#^0;)}84#ze^B(sGmv?4|Kk-qekI9 zPL_BHemK4KVg1_%9B>{4Wkqhd1%?uIREZ zyw{B(r8tJk(q&Un5OYd;x(KoK-6EDgFK-{WG*sVNq7pFJXH^|F6-(-%DkpCoi!hxi zWZ%hfJ#nNlems2dC9$Kiov(tRv9rO~3BqH8yo6X~VsMeoFJ*kSV5HaEGL z!3V2~7a_Z6F?xE9{Ne}sY{5-MlCJ|c%p~O}Qe9sSS6n=jG!1maW5aN!G~VSKRUt6- zF>Mf7Sl*IwvSHe1NaI?;B9K~I-P;{z#u+Yz;;<4^!ywR6=5sG<<33IOl5bNt)p_Pjy?XW`GCD=4sudvU)8n zp(S6(j^&u;2a1f=Z-BRLh+oFH1Wi;cZN9pXhxm-2#9JXAd1i#mQNsf*(yASRH<(y_#IRUAP z?LM?Ps1?dL9l%ehS~ zcX$Daqf*WoK_oW?99vdk-GyF{sv104e2gHQbawl2#VUZR0zNC| zQb&|w?cS|nOeCi~;i2d*LtX`AG7_kOV@Jtj*7dWg%9mB!y0$(ta>ab1wzGD0uDt9= zfo#1Y6!ELy0D(vMSKan7%O_XNv!N*U<%wKwgW)5|Z zRT}rIi42BAnkBa7#56K675+(b@Iq5V9&!5@s|`3qu4efI>719kz!M~@w48Yas8h9= zBN&Qma)1k~00uY!bSYZhkTM)jD{Ij>Hi1QvpZc!CfdikmScP}1LXf6ar9Omv&^y1n;K@`B%_L`C8Cj5RS2}c z$TDmL$GXjbRH@~W;5!_rZz(S7$#Ee!u}GwFC&Z)^q*;l{NdFNEDmyr0ltYchlJ8PO z)v?27li4Is@YH76Qk>ob#6yOZ-*5iT`2!tY;3&|K(b9a%41I7>(b6y8Xl+7%YZg@GPRs6WV0lV}_fENJXM_&DJ)8G*0ESx#5D zUquCRnUn=pcsB$6l0~F3Tex^DwSqTcIV+>^2>3{;#ArEMGx|mi3i2Q-7=eLHO#VfS`!O*`P|6#z`)YO;%A0 zNtgNoW{-@A?@^8en2+-DSy40SpkeB_vQMzmnPoo96=_YuT=KU}M(b36Cio(Jw_ z4xlQnkgdulumh`@=(XH&g=(7f75vXp6N2&4gXz_0Bfr(s!`0xP(16YJN7`rje0U>W zxdrJ6{JRbdBjo`m?}bL>iFdbc0UU<<_8Z};7BVs}I6SCV+zO*;QKBjtDk!^e=&eDN zsi?mJuwyFtvKgm&4^_ViAy&Z~3~OeC!O63#FFLhq)&n}Nw5%5+6r&90memfO4E;DPy3=Rbc~8#c1C?mdh{1UmQ(lJ=(6D z`1l<747zTW`NzF{*GUPlLu%@jUx>}54pM)ZXtrKMDmI%X(_$xFK2~PH%5nCKrvyz^ zbd!MKaRd#b+!sK?JtM!eA$*(c<8Ya);29x1?RF25BA+g6`z#04QH{2)tr}p%ZEwQvvJM0P z-rMGLr9qjn#89mY%(n~8I2$4>NBF|FN2k!#5eIl9f+4_`r2P(f0xwMXeOiOCcv1_e zotpW`8Vvo0&o1G3aKdkFduhg&hu;|WhUpCWHW77XW-oTFl=m+;SV*<%I#N~{aLu<_ zv|YTq_5?a+5&XORmHy4aD$co|%ihXr>r`0ww6I|}g&A67XA?yj%sJs7Y3IWTrR5mr zd)D2u+WgE{5fB_X7FAqPJ@H5nBqX)%cR}O`nQ~IDW#OXL4q~FB`05-*8+b>ak|EL*GOGMVp#xz`vXuvP` 
z?>dj}VKCs1jb5-csE?F)B*ns^&F#Vn8f+j-Onzj3caSVM`jS8E(ZAdr)7H&%u4moD zVQ7E`%10txthH3|8av=*NX?101=&iZAT7l(n75LgOvfWG!%!Jo)gqxjsJvCy%x2xd z(XealvxXXU`>8B5lYYKJ=QV*)p$3|L2;ulP91*VHLJPLb=#S2qCTd=;P(Pc`L!Cmh zHW3(pi~$qJnTk0C8`clz_++*7+0t~w3A*@era`yyDYmpRN@+TV&zbu>(zqKNmWJYb z8^uVjexxZ^(R0k?Kn-UBQz$$8t;pfv2;9S2(ve15J|vL@j2PT2yQq+ateHo0Aah4R z;}C+uIS&s6t0aRjw;lhMA$We-hj=%{8_rPW_jtWW=d9V6ENgXWl>(ZQX;IN1oxN($ z2~6wOuW74*P``tI*dmW;s%|fs#A5_~CAa-*)Oe(V-x9%SmqUuea{nBzsUe+xzSR_< zZPBUU%mhOfq<9%>7!BvJZ84F}H2{?tTU+oG=tln@?7u_HT?Q!Gtp7Cj9B;K1sCOLR z2_2<5kPeOojWss}v}y@Nse0(%vItvJhB0#*-JZ`$Xwo#N8OR(`LY8rky|JAx#kgcX z2TLm#4H4_5PN7QpT;2Y-X5(f^0Dpd3e!X6syUo5Kan!J>)$gFp8!8nj+{76v$?G~K z7fDqM{WC?H@B}62zO&Ekx%M#QvJ4u1-qE;uH>@ zq0h^vAz4>#-VcLs1x^7@VP>cd4IPuvXT+Vo68Yft;@u=f%ETz4wwyXtr#F5K^fK{W zpFj$HOlq>w;~4M%PBobF9D!LYBkN$k)sXQsoh#&Xce&Rx4b(3)OnR}_H_KMW=#dFy z$Z>vjXl3SL8+3J`WQ6$!Yb?637^PC#tqmvQ5OY-PkXq6@_v~uUgz&gCs()I}USDl$ zBMJ~|BT=UhngIDU5!h?WLRPr&($aDh?`6Xo#+yigSUAN=Qb=8U6>jcQpP;F2tEhT*R!qdu#TH4naSSJXYtWXo_spXkT&>YzmJK`u!qGAT;c5Y%VE`Zq1bXa{7sAtjFyKYQA(WeB@x#xP8T<{% zleM?8untt_OuJ^WOR&hU<2-G8XCO&uJ~OQ~{nOIIin=cCoul=hC1u7jE%&tQJ6g?S zF7Z9PUGyrM5L&5aao9Nu%JD;*N{ghNu;B6`oR9rVSku?>z(H2yHakAL=`fvD)l{7} zhp79^raD4c#bV`6eF9%gM!lgXDOs?xwTQGiXy#n$V7SH9)Y^J3$jg|d#sOc)- ztTHFkz+ z*&lYCTGv+)B^1e5Xo5lKg7OBZE4EbPA!>BIQ)eSY8+oJOvhKuYkuS15P^EhJ^tp(v zQcoSjel+I&Fg2Rd9x_+Ps-7#a@mWq`eWUn(#mcNVr-_(oN6-$?Cy;i1dL*Vh)aYO2 z&+^45s36#+x$Lb9qaNF|Ij`J)ZrfFas@-Xllog2rxF5+sRt9tSap<;yH^ML{1utu7 zDi%>u!Ou8rNWn?u1J7}_&)FCc3DZq~uAOtNH^}@qKu_COTwa`fdr&`m%_bDyq1H>* zU=1oS#pOaCC0vxVJ2fes0F~&gbyNCr(r~mzX0C>!gDQ4oV5uLEfdr9(T$g98SLOzA z+bTn82A=<7{rYEYpd&muZ55dnPo_kU3uZxsOWgoBz@5Yo+a@dz%{ffga-G@v61hby zaBSv;sRnMit&VQp*Qkh^!~ZjfC2G`S|F>to3*0Gr&o0m|wFatUW3#?Yh9ec|0C7S7%*nYO#EE{RSxJra7_AP5tmdR{H z6&Sau0pN654 zwS~M@u-uqCwV>s4m2K1Zf(?aMkrm6!dqpt*=jr~Lch2IiZf`onZ#F@bd=ocdQdz{o zZ?_jZ4LAV7aop65_e6s%<4I14;1vQbuAF_Cegr}0dxqnF?q|U9jW`cM?$uFoo*8uZ zy;>AwbFJaVmy1JgSoFv%dgIz5C2DVaf!OfaBr&+5?+U|f@7r?YoA|heW>R^>qS7s@ z^@Jkq4TnKP4s5+>C?NJFC;A)5UroOOE={k9;CWG%(GSX2*>x5VAw1MA_r{ucI^CXN z&NOAgi!0WOGxJ7DPrxJNBqO{Mcpj;lF)E#Bq;k0^7Z6~FLI;Zv9q@5gkpvE>r33Yg}&>drUlm8 z4G3h+4eFhI=tZ?m&wkv{#lMK$x1s~gv=SsZm?zv8JV1r7~E9taV$! 
zcH;Y&Ho3sO28>~Mn%?|a0nDEorlDkCVK^QO)=EMNn=GA=+xdag^1uNXrYquDU7ar9 zbfK)TJU&P=uV271LrOGujIqhA?|&L0R%ysRvw^9iXgzZB`XuqCN$nU4^8Kr(wFRpq z9$9$_wQ$NzINjeI*fF(byY>jIsE^XR)=PEFbJA#W!w6i_MrvOi&Io`GR`cJB+#Qrh zk$d!hbw%#h{i3pUZV>)e4he7V5a!a=*E${6@K;e5zk`=hP8$|Ypr!YaonU}<{0CH( z*+&^W5$>FTUWHN4#OkWDh^-DA&0NgH<<&fX{qiBXw$^yZE%cWg;j-o)363YHnlLLV zfdXt^r}9>sTfCj3OdYrtMXe^Hqvn+=O_pUrW7Y}2x6U}{Z$lod+)WL?D3OmQ*+0z} z`hEu0>Sc2VJ@8TFTp5&_FK7`CjqzCe?xauBQSJ_X3`&4O0`FJQ-teKu>z6v}dwNU> z1u=Ydp!aD3X{6HBjvx8lF`1 zc<<{RiNgbON+pmOcYq=}qAwcqm&e;qv}%fGS`Mu-bk27ZxyMBX$XoMcCRNgN*+~%` zn-160>{j_s@UiIOuIGLYa{?ID*!%`CfUd0#^`REH3UW*Nigzoq+6|ObM2&WpR3mZ> zP+d}{z$f9Z=Dph*1=1y#M1C+Y_%(EW{t8d5(Bw$qZ{Ko~3zH{1F|t721wKGWhc3QS z3ZRRE;w5T`Rl(A0Bf%Sjh#%X2n*C69b`bTfpHvMCm;T7AaAHtKiN%{sf7`y1bOmNf zY5!s~(xLgNEc(xDn{JxtgnAoWFfAEnaK{7kZ&zqnsb;wLj|e?ctIcXR+9>Uq1(gRR zZaD%Y$7ZFH(b>&22rYL=eGjgbt}i*m@R>`x)t&VjjC;}-Y$OHo)0VS5yL@<8s|#kB z11HQQ9l_WGAG1n0$JLuH9!rfg0`{nO15TQJnd%%TOq_*yJHU&T6Im;o7g(}pvnwf z(%=R1`F4E=W0>HWj?;kJ4@uzF)XUrd?8_qpN6B@BKb;=hujG!j18iUGD7k9u49Xh> zW&wEz)1&8K&Grg#6SzqsJTg%k$~J!@(VE^^;f&b907@{B%G-gGQoK+SIPGIHbZLdZ z5T?v-WTL~_vTxU}!vwX((uMo+HvFzNQs{Wx4!3FY>PVtQ+TOkj@!h7zdjgM?m-|fP zQZ2Hw)(MiY+9v)DQ-_nz{hahO<*K=x)+^X;<K141Z`6R1DZ?YBI%!)R-Tgm{m zC1K?(m*W--WI@7Z=hk*i%1{H>=u{;*51FXa-^V?tF6tMAF0$NCOLs6a&aqm{RZRpA zJ&ax^7YM8s%CnqLLEMR_wLFfS)!a;Ye$ULww6Rid=~NE1U#~Hr+Zg)QJAieTb^P(u z`O@6gg9lNCV8O|V!Fv>TDB$VnalyyL+cn6#l9^EEF=bL?N zpF>vI?P-+#8&H2`%#BkKxp(J*z>>C^Sxt*!IW-(j=LEG}Ii%2X&P{5UV1&>dwb2l} zcV8vc@|o{$O_3!-_E$kiKGj9M^$*u*`#JsJ0G9PMKJ$*UYK}#;=0NT7ixt{nZ{N6u z3Wr#5Vlc8*!$`ka`{-{#ZZ_Ou9pojx)xVi-{4BwZt>eJAi5#1w=HT-7Dx1ft`R#;X zq*~c{Ojh77#*L2Q@YEeE8A3tXY>>=-V5(SV)~Wh$KzyjDKAM({s*8bSU8n{a3_PNt zCAGBlEMzmVE1W(kB$QV;c(Nb;&>#7BILz~83I%pgA0Ni#y=;MEsbh+G=TeTEvds|596GInuCn51-p)k%@`UxK|NU^%8dhyKt|zlk`a;}IU0tVf^RO1 z!-z*9cnu5BT_L9tYDD4u?dAaZvE5d9LW)@zcH}Ar`!+It?N2 z5PEUKaTp*W5*}c6k2oSt{w;|U<7@!Sio{Y;n`%00MM;0=+9BOOZ7P-Yj^SfGF%-Nd zQAL=Te{i7WU8SnwS&re)eDyLuGpjd`Uj;AJW~(U(<9q$bt_NFZq>(=hhcfzaeU`A9+q_l^WDX zpQ%5rqOaTRh)*i}a_4ilF7WzvhU7lVT{a$4=+e+lNIJ*q`$}koYcDQ}qI>k+g>}i8 zY5BGyBW>iiY1(>~@iHb(g5~BNb*PDOeMnGv6y*Kn@&#p-zJ|+Gj2noQPKm<+*Yup_ zObT4sJw=WOm~fiwXCDWuu~0rzNsJeSYR|_T>r%$DQsG}weYP>l8odusbe`v4)Rl#n z%|T>CaSmuFwnARo-}*Tt5Zj9Egu1MUWdm2`qZ?|wk0)(q!^q_Yv4|PDlvm5=WV7rJ zhbOhbbLRKm!6Is5Lrl!854YDqCtRhtan3 zyk5yC&|v1BF;^00^nRNdrsZz{qwL$N`kt(8qCBlfoeb~v50%@C)ZCnv+z;vo)M`zc zCtlcxDXWo_Kasof`QXi|wJX!nf0^_`^nFcyuYOoV*E?=n?1HX~&g20(joniP6+*{D zUEi(@yePdpJ1GPYck<+sRYw+$XU%w)*1PC&GF+vM3Ud?Ijv<i-8CAuXXA8<_gY~Oa5n8UoJGCwF4sx=N~h~ldaWz2-ZXbSr9+o zs=Rex^Bj~|&|^gfVF7#W0$86=c8Y9m3d_Hm&*Gac&fG_pWR;;fq3=fl*YG4+ zENqJ#)ehK?pBd{b)02?^X$-$|e0DcTp2wr8O6-&G-ouBpMCgy?bi!4?<*}&|%duMo z^^;F@`0_`ucE)jV!kWAz47dSV-gClqC|%6dEjf4p(ARl8qj#J}NlB{(?xe?)Qr)hb zxOgz7SIxqo%jARcpwmTR9pGc)nx|y9^{6Bu8Q5Z=<_$F)a9sxA)!I@~GP)x_T{f_k zs*erXkRa~mT~F!)19_-C3@e)r2R(UK8XY}U5VSx@nKm5La5$K4dV$WYHTYE^$1PNi z1R$?%GbJxr1rssAa6i zbBCS6sGG=e!$m3HPw^zx3SQ28Hjqllu8?px;d}G}kMFZ7L163Cx7{ttwi?-iZsaLH zr}j6S%iNetsr7-l-=hwq6JLah`l?M@WeYFy`_?Ply2`?2NY>pIl{TnwSOnOvf&~_T zk$9G&im`a1!ZY=l6UJ0C4J@H{5oPb=@THcii zOJijS`h?ZTIsq!E)Ezl)?RpO^E9fmcvP8W-BlpqqEFQEoa425Y+1kcp@g6k8JPnQ9 zq~*|wzh#L`@ETx~M!*?cWDau^D*54YdsOF*fy^8_$3r4@DZhVHtLOX;IQsLVHkcs< zc#tdh(Qdl|vOF=`Lcf4InhV%2&GFpkKq5$6#ky=zlRuOlpmLinDre~rs+a}8M54mS zx0PaJlLkJ2L5!+Oo9*r{xc~BgYkvCx_t4o1Yw&V!t<`nXAvIc0bzHmm4a1JH#Lp8M$M=oIsS!JsoVtl64`iI;>^S;%= z8qX4}iI1o)1T!Qn?lXR<5r-ihiSvYTuCG!CqrFzqY7 z>bNt?=na$K_Bd;241!MXI#_nqyoV$A8Li$byFeB@PuA&FUc(JuC9CnwZe)0_|Nk&} z&?h`&CiS97r_H*UWmTblmAt7vMJ!Ug3@33V&!kQn8}7r?(PT0a|L+I?R|4^EambQ% 
zMoh@Q)KD-mZ~(~0lYhLZKsLF?qQvBs)UfzQky_l;&!O%SCNaPF_l}d|lwqDf7XA;8 zZC;ebaV8kyek%a_!ws3sqsZoufcCF9ZsSK^(x*SuzklcOR}koFJ^t638sAHS%j3y- z8YJW%?+sQDwJ_41@I@0U;@dLQOK2nOV#- z$VfRbCwlLC&t&#*0dNe21qMb<>3M6cKm6upGPp&4xsy?$?dR$o&xTdyDTmyi=+k0s z5Cl4Ghuc|TYHdE@ckFpLNtWr2;1!{`rxJ&e%UENTc38+^6g)3F&DiCgf{?MoB=s*z z27YD?x?TtMlo1DRkHpW@>qMNJbZ%B$Qx4WUW8VA*uyeYQLEK}=lTLPrRMa@OhGaLp z)&%yP-MmgRy6Lu&RSiJN%Mq0NrA)h;*0S`?Ed=G^%@0b^*Qs~ppNY{NA zYA;PSt|yUUR;X;(u`G$XOziH$@xgzJt}L!lL(MD;m4mZU3?oZo%~z2P>>(Woc{5<&&P=BtIKST7zAlDJsv(NR-qZF&FUT zwoKxi`ULn5@WAm=EBRUx3l;lpPB))r22-W({8r&9NkOG*o)93>)&e9Wk2M&u0qm{H zD73AyqA0od8TjJDjny@9t()?FGK==QD~KWM4elpT6h;`(KG z+6w>fX2H0y=7ItWrSR&|P2cBbdIqY=!tq^aA5W<~W=cg9?b$e)^c$JwYJ-_^gzy!m zzO|-LTQ1y6edOWQ5o&~W*%akOZ5y9djX)4VkYG}+hQ`&KC~3HHB`5&4IX^500};%)v) zP9bM{CKG+aSZOr@s9QU!IY9w0>qw19p+;KLF#Py7AGIRSC z`8VL7%7fBNnSlee*>*wr?i+gkrHv;S1-dGBo26DKD>EE_gUqGl7B2Z}R}ysu;!OqI za#S#37Z`JLBLZI`&i|TymWd>7p+=V9qxyRdCj#<&N%!@kiP!={WfTjSlq`Z|?KOH= z^uO@mOVBIj86T~aC2A$8=N@pVWW#Z*qs$jIXgLWis~4O?QD~oj!cUe6 zz;e3R845q*RlP8y=e_*H+O(l_l1Nz8xG44#C73@Zz(vq9SwsWkgh(%0+J6C@WC=x&epsS#tqME3|V-@T0)2zpd zxl$Cp&)ZY~IC1ExB4>2u|MP`iJ=2a*v(vhY?Ax zMmPcTw=$Bu=8*B%@p+0xoF4J+>1(T|wkni)&g2^gnox+TSSVzCh6iwv09STcy@eew zG3{||2cvQEp#Ague7l!Vjf9eEGSBoFFP%c+!cCEh65QwdG8psJ%k4L)cv;v6+K~V& zZ-*!ao?)G#n%%OiALeu{8mOFP8YGqWwp${((0Mt@w&W{JmqdE0yfv-v(=DT=;`SS6R*hrCbM>zPzX!Fu7 zw2Fh40xF4-Y|wRCDyfIh-+q+v(oc1)3-b6YN^akCjqfMRh8uUPBfY6oSV`G6%4n7{ z*yi-73F|=jY#Mc*NC0r414ZSY$p2v$Cv-~PDI=!gUS%pOjd$ zrG9URrIV zrldkO;juxhzBRrWeHVtyR0UobihY4ON3|)#RC$Z$3G_Pe7Y`3&4op-49Ytvk#Go)B zkFLhFUsY9x?$f!2)Ol(Gz|!`8s_qW?CkpGUv1Npy5u&|F zs;<0FD5+qrh8RWog8dR)Sv;Dm62J=}NOS}*DL7HVbxlj;z{AId-x|L2@d0s83??RF z0#Inn5RhU}yw!!M;{(gT`>R!e3ZCmZimm&WA{@E6w{CrJFZ?)rgd^9 z@u|~aQ=D_SshH`EVmu=}4ef1>t49@GGKz|p7}BQgON+T)TE5i^b$VL{rEs|ts|M6m zNrqq0eCZ()k@R%}&%Z2rlpAx1=`E0!(M;863|Pd|5JO?5h8PhL!SYDaJlhc0?i;mN z>}?-PF{w#+Wmowm>YC8z(8C;wYgft_knm}dW65=2`|Scuo^L$(S#TWHKp0Og#h5m1 z_$XnSDK9-DoDw(Q(?{>?kvr5KN1^d_!PXPnrAd8+3M92cnann(7%T%_4HYP!o3Qx( zE?$Qib8a6R{?+GZYY8~uN3@(?Wzo|eGW9F@I`O)rKgYR%$cT1qVf)?rc!zo;LB3D#j zw`Ficns~5IeV?@w%CUj9zZfLRC~?uokHI77Sl`G-IYvyyRpXCvU6gi9ds>Vhz?@%kw9 zcBS0SktNuP@j)^OYLi|5d7>F| z`J4Br{QssEcBZS-sBy@Qhq|J+Mjs#5hyXgLDM!9>a7(R;EBX|VO$p=Ov3p=(_~c2p zYv5{g8k?Kqg{^WbL69MET9Fo!AQYPTk@XWi_I^D`dtx|6fOWa(icBjyC7zNbS_;eB zyFy>%S;WbF4@9^wM%C3En}C;~egoexC((w@pV|S{!;A&rN6S!IeO0e>)wFf^X=lQ$ zs##pLBpTR}c%J@wwIlMH(%OS8w7s?24LYcD(?WI(|C^P-o zjlnvp8SzLaDXvy1d5vFN@M`-wX&r(A|HSeaWBv`0L5}?tFBWuv+IlQL4?W;ktoE;d zI+FpeXoh$LPu2JCe*GhL#N|Zc#-G*m@&LPRsy?nTx`Y%##6~)m9IpRWz)5Kx&FGMb z>7b2B-DzUfs@40`F6e6J=XU;ex9}HVzfTv}!RtHEE&n$~TX=sCitGLj(11wDs1O+% zV&4ZD#4!=tAbXSl{oTc&q)Dq%c2qRdRHRAML(Lq2Tdt9);#6C%%o;ba&SOCN5ocOE z#RjlOS=0=H{`-K^XDRyo91<#_FaQ9seq9ger+|WD0oVcnnkXQM-8q0eg%LPd1i%ZQg4sn->e|rf?G6RNnKwgK z_?_4^V*+ma01%X+=xxzJ2}~#y^FJsB0E%(uLS%&uZvX&-@F{>hJV1~IAvy&JKq+bp z;{X^`dkO+!tpQMLETQgzG;tyc+&|QLqEL_~(uAG#sTUzj07$||0MQa&(t>w;=l%c$ zijGit0eH5Bq&x~E0MrRv=h8)rem=Fea!XojntH1sNQ7Hc7iYPpmCJj>Ae|_c5wfO#S3`na~0{-Ko zZ94Qn-~a#^h*pFN3ktGfGc+75WS`|fyEQ|QVF4&PHNIg=T6m;}75C3mG=1OWP?z{; zGiJ;()Z(2FOFy)83zX8e#x)%Sm{iqh6|25h@rq>9Riyntk))nPc=G2v6G*Nlkl|MVlL{vYd)sJHn=9L0x6< ziH}M{5z5#20ZKM894sOxZulvZrjq%0t1^jKEu?d8=yb$h-rQ@ttpZ^FD-jN7SmR(?G&QhjD_y32 zs=jsQeT`^<@1=>GW!KK=vQKYv^?4B9%008Up3&M#p|tF$uO-{UqWnA#!*tZKM|O*Lp0e-^aVcm333WoX+a(2u=S zH_?prw1YP{Gf-UWiQ~$7Uv$f|0d*<}L2?H{|M{cy{a>el5Dx!E3AxV7y;x1%MKFY# zTD<`QK`ys2gNNO4i)iAcFq}D{VoZ3oF<6{GqqjNa-=!= zkn*4b_AN6~rHzj^QkF2+{Np#^$+bUEbMuGZLrS=GoFaqu_!B!QneiZvAtgIvO6PuQ zYp$h;%!Nev-a|db%xtgqXW|cKBZHlx7)}+XZnHeAsoStLS_^=!7|;ypzUZc^>*M5` 
zv`0DkUK@lcOQ_164C-(gr8X?$R}YRpsrw=EdMCmN0RiVzgmE{u2z@Cp<-PneBL?>_ z;?yNatet`dkJL*@L}}Zhb&=aXEQpzDe{d;R@b<(ZTuzHB*{GL+FY3C5Klk;NQjnXt zwm=G}F)_2sNF^t2{<2oU>Xtw|r_!r0K00Jzk^(M`tHF>t$&U(@gXGG{Hx1hRPv7|X ze)sZosoplFyg{rF;MMUoL62JhYPnr9+Q6ei1KaPXdQqY1OTLC4P4pj4`gYr8i%p3V z5?l}naRD+i8Y4j@s6rJfieV-_9pBD|-eCHQp@f2M!Pvd!T@O7FloQz+re!ARYI%PA ztx4`%b|aqaWkzpbLH;xT)RSugFKS`&-sz)(KkggAL{f9OG(3jHxOg-fQ<5|&H8t^I zEDBkfC+!FTuQ8z90UwnPRBz*dP?q6)Gd2;piUHuGv=4=@ML2bs=OUE z8I5P3zLC2-OSo_HFXD9jqT0T2L|LlsK@A?z(Sy9q<0<9aHL+bCE_a`qrfp!P9~=W zA4?CnkWa=v9!+|U(({xpizL3{=mKtA=4?xKxYP|jpCIEzDs+q?YO*J($5pQ4+V; zG-UWFQ6;IA4sXE1=pakB#jy0NU|0f1vw++LK)#J6=_dR-V57aw3yU(>9(`4RCnPmAI6N)j0$odFkKl=6Yr#aqsm; ze!3Rjlo5^5wdrzBp}JnfTLo3ogefr;4{BRlkqe@Pe}8oByvyPLA1rhFG@p01;Hg}N z5mLH-##P@}X%}3o;)Kdi(A2b8d$F0N1=;r~&Zi86C@JXKc}Y9a8NLdEP@6eT0!3M% zc6bEHKgB?*9{`AaPHW^0xr#={MWfRPjY2p@vjo$!ntL-1D!m;&jW0;0$E&{3lic_g zbp{nJqo^SB5lgWG&!iOYlgoLBmW(XFN5Yk^U;o835Ri8`^*4dKgRV^6#vQg3j9TLn z*6gYZuc{H(C>@7_!y>XknmmfL1}8qVNw8(^*P2YTiv!ZWN+>34FlI2t&%E2>4L?Mh z#RQ~pn4oH)#L{n|t+Q^QgSQu%X9M1(U?cd6e9*7DW`WxBF}$G>f0LP8KKL52s%tj>M5iGDmb@fks^+(Zd;@ zR4Si5;m3orQqI_9UJgemNN6QOXlu*qY-5!z z<0{h(6Gb}ao&m=$nmHHf*#LL~&=&_(C~4ZUZ2RLnlYJbOlwiO&yn*rVjNY1pbyZ2F z?@Z`?360=d<3~~GQ*+;Is$pB!pQGa%RIT$Lh%KH+HR)&NQ*2D6gCNSTdWGZFH+3+C zgi5=r<=9u8FGa{DUpRW3y(J}yksw3>(h(ZIloTS4{G8hTUDpNUw+~vaB>uIV8Gg37 zx5xY9{46s_;-`Gh)h`9)W{WxD-zJ*885A_KAyB(~TK;+RWKByjLUWJ)wv*phQo|>b zlNFC6ei9i@+*OC1HmI_rf)Y?YsUo@}RddVaBSm@%8q^uCK()gSQdylXRo$_>qNJ)J zq5>XDtQ+kyMMg&_nceX-}B@Y^m^y@%tAiolN@c9A3zi6&LI4hcaB@EkmLDx>C?6D zWn%+%<^c5}eqjdshia>oiWB)Vg(V_R%>Me|u=eDXfgx@+y3?j6J&1*Mj#d4Ymk$6C z)_0&-ax;ct3RrO3d4NrduB8dvV|M;w6BSR<8RqtnsU1vg`O(1uQb6!Ej!>VG`A09L zFB)W?cC7rH%r!};pmQJPeHAU>2cT6Q1d_rA`@)b3HcVlk8Fe1t4R{@8a!y&R}t67Hctlga-^y*{eHjaMkI7YeqxjmlB*A z2+k>QD}6JzuEuu-@Y~}RgHs+$(M>e_OC@DAa+J=oWZ(rG(uiKgV^WXh|9w2VLx!+C z9}cTSy)SZ-0AvlRnV)Bz}mN7H|w5yjjxw#*Rk` zFn#~0cSFpy*}dU93mCt0E9;jVfQcj}K||DT{p6Er5Qv{uv3M0$2JA)EWm}?$O4Nx} zvqry#n4;KKN{3AO?X(Q(8wfp7mq`G&Q@&tP|Aw`!n!XtAUUgL^DA)+JtG>x4{~}Gz z@7v9qtiL(x97CQ<`(VcEZlj|+Y?#wbR>s^AU$-d+dxbK9hbS(xIJtbjp_Qjamr`AE zEk>gMF<&@su&$92sR8MiU2oHyWs~-s5=m+~3R|}$WxZPWo9Vd3rbbHuXfWtVHd%$c z%u`)aJUsTcM$GPIxkKLqu=efD-vAkXK~Y+q5>Qm9Hwq77?xpi?6dphmt8e}gGmL8H zhTuRiYaAl(kZW}K=n3EYuy>xK-qxg`p{IC>_+AKe;-%RlLJ80PyHRJU^X}C;VHbrhfl@M}b zp1WsbU77pT6d5 z|4}`!<8`eKfA`EiXy&4N>2;jW5<(Lg-BHHl3j-;ZusT)S9; zcXEqVHMr-irj)6LB8AfTjTdgg;NJk>J#YqHk7nBB_A4|{JZW>4AD~Ddme7wJ$O2WH zqeJRM0AYd(i=?jfb$4EG5NG&E|8HgarEpC>iC=){DK91o)q*qCkI-@!M7*4_L zr`(TM_qu|x->wP-!w(99onJlX!w6#oXqM`sIt-`^iVQeM6o7ON7!9&D=-)U=Ll99R`%dH14Z){{#VB+=PWdx zcaFab>x(#Q&`{M8Q5wnbi1BZ(c5Q{*&Dp1yWKl3?gC92D{QT{|^-$)y!>E;IVXYPo zK>h{{{_PGS0g(|yME=bkLW~h9ID#X6LK5qHC-v4oWB+o8{6W%zms!%O0_rnrjD0Ip zEi1N6G3~~~|CblVTByK_pmX-gv+1=@RKOB43m)LHyey}W?qOV5m zCf$w*r;lX))|FxYR)RZC+mOvb&)#3@&rYCvZn&ZbDPFfbwBOFmMVV zItI+Ab+9wX%$G%`-(9KzNIpLRkJN?0MOy!w(oTtf|VX-^7L|*3rEGU?#1bqcqq- zwXm1NE3J7(zEkGa*4?r3Bf-%hL6@Yia+rR)O;bx8vemKQ1%;UWd)02YhvXfP=N|_w z*PORoRQ(1VrO`=i>k!MG{rV8p9e2C!;N4%{1@f6^UDnypvLy+SV*ola>5d1chldSl z^)gB%d3l~bZv5%y3?(QtO-~i6j=G7@tbV=~7(8`%q8%49qY=bf?aPJR%$S9YDvL&j zkZ#o)%v}7MX_3Ew)>^HFFDbh_{=HQ4*aB6V;Sfj?&n z&mi=Lc=)P^4Lc%Al}r@IBHga$q5T9EX}g&Sq?sx!$`2f457RD@caUg3{S z1DN2Q`~mO|C`^u0Rj%_DaiK&n=lq!b=O~49<+j=8Pv4E}3f7nxBUqEL>)P_on8YyB zU^MuRIJt;>4LLX?KYUAlIF|gTwyVhyXSrZ&F)glKzpZ+fCDTtl7|IDphvBJE&{&o7 zPMr<9XC;a z0Fhu)Tj^Xm3=iWKwfYlm=$%)Xy&}Q=W0H-aYe*A4Pxg>_j1a;)fj2=Rhe2tpj+XIt z#(3*wO}&lY_oGAB)a54Cg!U&~N#^r;k3O3n1Tr1CdYX^}F&*L?svb=_W(A=A{rORA zMZ^#z>qmn8IxiJ+m|9RZla&r(^tA+B6~2fiKBUK^chemmS?;_xmF$ 
zn98!CfV*+SJE>1veLk(PY7IY}B4x;d3g_$PPK=m@0DEG`4r%mz&cf;~SiDKAv~8lp zz`D!^%iM=C8~num;)&PNfKw-|u+Jo91So!JXWTguTEU$6n1;ogWXVVsCkA8yq2o&aAJ_#HA6 z_(+Mr6lo`dw`@0Bfbm!ZC}5HnhPMGiHBrkvie^jfqT2s&bpb5c7n_ zJ0}^Wdc=^CT{i1)JahfkG3hM7Wtr{iNdo_XRv=BZ4O7YD z*_)=t!|6cpIPZN(RZm;<6GWK_Djwe1N2$^a0Ycs=7=zK0hRgt{DpMP1Y>UThlXUGk z9o&-)r$bwEjo`n9I@Ec1wOZsL#QQ1`D&mk{h1*vMcoqt%M9^=*s`Jv$ynfNY_~%gA ziwHi&FIv{g2_>RVTo%6Z=_nm@ECg2ymkbSFRv@N(enU6Oz^fSQw1>fzB_bC6$_@l9 z;q4}7!%pLSY4pnc!w2-DFB?X^Sp6)h<1e%8ywQbF7VNvx<7}Y`X_W0e3~^3662U-P zp`<<>VB%UH2wAu3!0FiCOfma~h@EFwL(yx!6%)+Q8rbt#)A(s!IUX5JOAmJ-2j!{) zQG;bTTnui#%6F3v3dE}d=tu=P)ZY9VgyG+Vu={dxF2O08Msek5m+Z}`F7-li=BSty z{kqc2T_-uRXU=h5mj<-mv+eI2nu*22S2^4G0cIt(i!YfGu^VnB+iuAyuD2^Rl;RJ* zzY(v~L~%-?<6aq@BxJ%!KiIM1zqblX(Tff!x`d;7sGpNiROdvgvmooNQv*P#prf1c z(PvJ39tL#`Fvh6jRuEc92(hBI-gCv%I2Bb(HBO$O6HOk8<(?~w^QP^O(khO^7;~Zs zDx|bgVX@b&g~W?F#0P4|r*$VU->nCj4?2Gq+sD*#h<1EonBpMerD8lP5l;UWiqvk&k8|2Dt$5jT{yBNhJW6NXA+DexOTCtZ z5%vm}?`4rxVx?swM*%=K#|$)#Z1UXZ82`5Yha$|sC^BP5p*6Xe^htauQ6F${I+&>+ zAbsRoNfZo8%CDUduOV6@chReMmB40cTW*auTvU2OlVB!*@hxRwVx9{Q?g3ollyf1c zxl=+f_+jTFHltIjU$d%S78v6~_9FhaGjclsRLefdPzMR)?%awnVCkmN+9H5L8rgIz ze1(y-j-A0b7G15|=R`iO@kMo55x7ryK4D97E#$p;bNr$vol4)v*humdNt&XstEPW# zHIq->t!p2T@EX2bvHrCO_O>P0zzVLgiUjTS^MeXcK`?h|W#*gl4(V&D^P{ z1RWxY;MJ$vQGt3Eqr-9s-1a%ZMER3_JB+1K|^*9YGXX`+O zd2I|0({;&PUZ{0IVb!V9L~nV)>a?o9GreB!k9|3T>t1sq9g(wkb2k@XuSlmv3`Msk zuxfAS7w+`odJ-}j6PrG&qXBCEwh3smx08HYIfH&^gl6AnzgyUQQm|4hDbo^r`Y|ff zQJHDQdXVD<~;H|=xkV>eXiyM(A~X$$I|AYS$c*neb%3XEd#ET@Nv~J`Gs3s5=l9 zv5lfE1FM6bz{l+7QFk~>>s#c0*aZGlFqkss9SH!!I=a|1nd#I`VyPvTam^MwNVt5j z3k4vDYiT3s*6~L;J;(NjiTXl2sqU-6h{9K31*q7~>==awJ}2lZn-Tp2b59mCLWL)) zIzlj*?p`H0jv%XD)CW`mr}A^(f?YB(!bdXmsDf$D+v(xJ0uFk0E8W8%rOY=}4^lf` z9`sFC_bMc0Dn`E*OhR&ttl_tf2XF99jQZ(iGYVPSb}q@bt;pN9*@7;x)+i4; zj_OMIh`!m+UJPW$8ksc@Ms@PYb9}nlxy7%EPy~&lC!xTb#DEcMa-7{!1U)G`0nE}i zFt($9hYkj!vDAN=zP}v#hBETdXRj8v1Lp_7-MG6yP0a4)P}}c+{g<#$yy$s$6vRVu})3UrPy2|1mqunW5!KBjgBU)_$CXj9bINnO_e#`5mohTZDgc{c(-_BYy@+ z2Kf)y2r+*~1pdc2GI@mbA-YCFuteX;U#<~iRLOOCRQ`{P$l0vjTGx@C)12-n|6?bY zD7UkR{?oUjH*4!_Phky!5>>v&IEt{DPD{~zK%KNhabbBE!cSOk`=UfM6=PJkUZQp* zcl5!xC!ZNGMxDD$(W#s4zX91fBjfkJ68p;yI+gnKOr*YjajerJP;~O&!a6lvQZ}bw zDc$~T={+6)CjSRYyai2PuSCXWZ){$dG^#_^WW~$s&ilC5^ejk$`b>2s@<o{{A8mv3uy-4)O8G!i3Cm#p>(@o76UA0G(q7S?S#<|Z`+>zV1m zJ9ZsoQ_Xa~7!6B3^OK|;5p~)(?7LE(e?0U#tp)g0eV$D!pOwVqVi^9(=K71}zPQWu zS4?K+P;Q)CJk0H4j;`5j&tzS_SV9_wBRITAKD6dU1+&|qi@QUD#^Cf`#hosfDki(x zCU!XQT|OY{jg6|QVK5S_qG5Jqy%R4@Bpx;05U)!Sus0cV)_>dXypH{burlWppa`qC*?Yz3e}7aYh4cMc3 zR<$HnQ7yt?b0s0-UFOWiV>G|V`cn?8gYIsKU##@2$`M5sZ5xDDo-8?BoaxdBZ~9U8 zWs97)eQC>wij~`lX`9UIV_~&Nme%1JF0CXrJPyacO$$L1HXcxRh-$27drakeMju;{ zO37F=jnOzb*H!LUr?%cB93QP}rtr`(!aW&cEq5;3cUb0xJLQL8;m5<(4p9 z&k^k`AD4ZSKdw~LCO7<}g1>YGz%sDBQ76!Ohx6MlT+^?;oLTYnWR+w9>sWtWWUDNn zu{o`!?mUGFjq}p4=MQd^P1`lQzTTCis>d9hWh8&5#&V-P;QRZQza)JFeym2kcy!vx z!B#as?Pbziq|rns>0L+|7K-KxRgjBbx!G>N#A>7Gl6Gwn!4nVnZ2h@Jt&soecEF2y zfuFvg--5#_1Ft45@YO#rJqssdR#5feY|)q|9b7uug9V%2HI-E0;0tnodO~PFG=P`_t(C@SUbA)hb1+dNEx-I5hGoSRLvQ> zN*Qz$JbU|3b&FTJgpbayv|$NVRTQ!btENj@WCPc;xb5F!HAD&Qw=?QpnU7*yKk0m} zp?Im68xc_{*Eh8;CP@&1=~XcRXwJzao~``I^ZD^Qdn3}8jKN(8ga6`r(ZlU#G( z;?8Ndr5=`SZpKHgEB8_C?UldQ`c7l6j{j+VJxP72iaftV(Hl}1-IO!}g#T7_LL5RJ z!>Tryt)iLD8|u>X{Wi3DT+kBe_eKV20SRESzXu54=g!Vr7|?|#jnr%}wgYmCWec30 zYf2lqZ#zGptvohWVG~>!vWb;SP?iO2-$HAX!^HS2Y}9uN;? 
zrpsh-!e!rZPQ>0si*i#LhbDXAx_w6UUY4z~nV1Lu1}qaH?|N05yrnqCEk0k|TG}X+ z&!(#UQt2U=HL@LROf)Hmzr9)02%5_cF=Nqap6@CcFmRLyqa*ukqY^ySdTD#8Z8JJ# zZC?idO!)plR<5Iw7$Xqj12irlWw5ghMY3%srms{v8a#n8uPXqfbD#`^)gro?U?+q^ z&Z}6J84#8bmJiK{?x;LuJulunY?6#oB<(4kfPWX7+|h{g#h+qgOdY#pKySfd2(orW zRDHjp$Bj*mYPyIbNna(vZi8K4yjS$Vcw_&JgKO~*{dvjuq}ehjK6QS)el|HW;u>S~ z4vuWoKcLgYXq`u-N$vq7L8N-hHjy^=w9e#l+k@|SuJ>4mDk}9m@(7eX282D^x+^7dJm&6f`}8 z>wUty%oL<=3!kb*@7gL&0?E_m$~~WjJ$4~Na)i7bdz4b@DX1s0*g&+R%2{o%V zxkTEP$g&u8|LQW<+JR6Jl3;#girR(d-TF7S4AQa5d-wWw(5o!hjb42m7Q;RBNFo=N zEa!esCFaw+bA=;Z-Bqv;00+^VfJ4nJy$7=58LGavwppnOB8}eDDP}`}nGE={=$;uu zBsix4`0ft3SYkUO#b=i>WX3R82FqRR(K;bVTc6~{OsQ{ClEnEa6LRXap&KP(=~UuC z-&QdfLSw7m;?`iOdvrfQUTXf2&i9b4mZQ00kwe~F9GuyEDxf5na1YLE^l|Ej6 zj8doCtP*Z&amN{q|(fVf=oCavs4(pkf2t+#;%@>$Bl{1IWiJcjNvd}*sU zqO!uG07dpaisH5jsNGM%cEf987|^3j$K& z6v!FsR_jTkQ=iFazfc%J{7}k5v__Cyty3;zQ--mUvz1+5%0XBCBz5F!NXc83SJXO{ zvv~(f-(OZdImwQ_Qd#ZyQ8+v97*uvQ%_Q-O($|O7x8_eH;%}=~v&3!dg_DphPN|}x zkpwP0cVq@`Fhl!gNWo5dk>C?RFdSD;OP6GpH>aA5XZwk9b?akb>1MwpIsL(PM|@fv zFGjJpi#x-L=JiD|`$YL#a#m$bL@sNmU3TT?j)O(!t8302{stfkwq3Fs1~HAxxsu1G z^NaeMUOo65p5K0rzj#tqJ67Lo(+PX%CPEcA_#IO(dD0}Z^O21OUNI%2Ym)wL_X!!< z`KLeHPbcC+er?J9=75RVr#?N@A_*OmGUm>~Vg!jXN+Ft^Pon7-r-Vy%vQ%%UqeX-# zU&$k|NbWeKRy=>moDs?!i)E$mHQ`C5a1B-Teh7{T+ud^}e|3a@tC`Mh+>w8P|DnH? z$q+c<@Kj@bXqdn-`ybxP+R}f?K@`pp|k@Yc0 zh~z?v{8E>N&HGYR*N2&Z5PJMj=0P9@UybyPihR=t%&C6JtZPrDz)G}vfaVp8=kf2> zCvj!bnZEDowfpEkjBKDBK|AeoWcaAnCs zZpw7QrpRc^{0}Ts;&<}PKF`}nB?k8>!aTV!uUu_D>T9+&42Xd4WKF2s8*4c!7@CCi z#wzEAgbOM($M2h|MsWA(?uT%(8O9%mQO{mN<_|P_4*^0bB-w9+LxV}bf3Pb2_PVZc z{|<51Ph9`(H=0M1S^NN(8eJEi`|JTMGm`RlWtSEJ|F8iA1s(TN`uLpWsdmz$g<{g(-V+cP{z6$#Jo z)XldP3lt4%TUQGoPVY?rQJ>Eh+-Zy6&Y9KKh_{)AwwGO6LD|Xe3ivHDJa+#uH~aN- zlXfk$wamzwir}kuVSju&-SvXzEII=QbFEfJ#iZB2hmhvgmM^YxT z6cygAPqPt2Q2c`Nk$lhW?ww%FR%KBOSGgID}di z2xJe8#}Y^YaE+K^i7k*}OQtDiic-Zpzdi1~@wSi*1Avm=xDLLkp@;o+Gb|#x@seST z+kI(y5|&9o^(8B+2al4|{H%(43k>h@nJ_={Up4JbK?m!A*VHb;C zXKBGGfFsZ)mFAO)&GO|coNgLpf-?>-`aqQ5+zC9kSp06K4BP58-#PN0QVby*$y8& zHCF(N!V(#OF5h-Ieh8ij2Lw_G8oSYPUa2B3wZ$ zpd%`75lNzbw4(|-!~`k-TJp5$$gTo_8u*$q^dvA$Z!&#(F~ymj++}r!HzzM%XrDd3 z`wx4^VoVN&r!uCt)SV8Tc&A+M+0k8iSDQ&V!`e{6sVdIM04bCOPw>YLV5Tn*3B?PMXzR!Ov!19+q?z z1B+2Sb_QVcXM}7lR(6-=YU(^hnJ;dtr8L*2WA@^WFzr^3!_^AGaXOM8v(g>tYdfpL zRt=WP=?+JfXdG8*Y8^LbWipc02M>6riYFUBO5SQ$k9BD3XfU`^)^t_P*uCqk>!XL6 ztFoe>{wOM9?E~lDUyqKg(C2e!Uz`~LY7;(U!I^3F&~_2A=0SRt;@t40+hpm@*H>Ee zAlj@X3P@GaNiFkHMBzX?1L24f;6`&H_eZE~h(h&rr&?|5*H8KIU)Mr+8>U(}euenI zc%hsBn7GD@j$1~I2I9W3xfPx7C{K4bj^w7-WSgHvde3FFy&v9~{-1BanU^2r$VJ+g z{J;m_U2$HJ&VFFjarABe3u7Z=s)D-$Ec#8VPJhJHxF||q-4lpD3KxYyt0(>!CymvI zC%3Qrm}5JjoygJH2`$|aV=eLbdG}kWr1}s_;x+2oR1|4DN$VfWbYu6Wm=xkl^kFf+WD;65L5}hadrhgb3~gCundB?hd(= zJpbps&sq0;JL|n)ZcVTL_1?Q`S50;G?yBm&f6x402Jn>SmE-{s2n3iRe!%Z}K;{qi zze<0U{I3-JDg3(wz()g4LF`B%d;p9OLc$0A?gFR)5CDQ8A)5PN2^ARy4IKoAAYmZn zWdGFrUs3@0BSQW?2VfzA05BdB9)ffC8-V{uj{mq67WzxzPB9Jb=ZakKEGAWYKj~dK zqqZFN!CiXJ<=2)YrUaX>2PGL38e%oKfmtf&H^#mE zbEuZJbD!dR9rj+vIVRxH8~qTOMpaz?W&B8;E+l#vqn3cs4#bVO{U5cXM@C3Ox@7YH zgT)A?0tnD?0np42f(zR*)iI;ZmcCwq4_Z=C|QIjx^%(PJQ+dpnU+buQgkO+0D$fjDG6zoHT#U7 z_et?8BSwWsH?)7d$77pEED;0tCVUa=m_|plhmgpkz(@fO01O>;0CWW({t6wqN!Kz< z|25Mbd3Q?@%5-YlgGyn~D;tchj2}f!B8>A$5&#>~N`g4>nSl)DL`Gu!J<&`vPj0mS z0=Q+=WNJTGkYd-i?#MH|97K1gMWz5M^s1me0VkVJQ1V@76pMHAT=YNeos(9~b1lv} zPiQ%if|c2QA-1~9T75l1(SeLd=9C}tUablK@IMpg8GwNiH;%G6k`8Hhw0I8M(z)#bd zKY)xGJDQy*Z^<3m3t~*I-dYKVhjqN}qyPLSqZ&Wo56Z2|&A0Qf?D}aeA3)v&V<{+G zk)D%E)*0(o+)0}+-ez!LH4phQbY!T7x>(y|CSDV!Ed$V6LO8miK}j3(nKzH!Y@!u; 
z5$n32=Fo&dhei2&=n;29MP{&(ylW~QF_sT4I0=AM%F%%TR;pR%zMa|Co4|Ju%80F(iT1$Ra>_yjuWq+W>$v*K)_F-Hl2+1u^yjQ^Wut2P`PbIP5rq46~2NSkS_wGN)|Mz7e!X$Cx zAuJXcKt_Uq!3YzCNDwd*G711fB_X8|fD#bm;S&*aX~MYC=*;L5b_xw)+(5|S-$3tc zMTc_t1!S<#@uuUBq49ATmupej{fb*#oA06pZEugvbj^mXf3bfkgQm@=*r$OweVd=Z~dc5a;)h;#qBR^PY zZ@NwYYUh7_MTFg=>m@7Lx{;!5?EJ2-w>N-s+WID8b<>Nkdtpc6&8Nw9lKS!|k8QQH zOoxko+Ot*MLqyWoaJ;yC*mrDWd?%+#Ps_jY*y?0i4VU^1W~(2ki)62HdWrTJ@4Se~ zoLnN~EwAR@(h;*5E)nWPl#UQdT|@C=>Au_1cr*J~IVb0qwwTFqko{|?1@`x~Gt(SlvS0n@kQYb)I{ zT#6I3ycfv`$TwdINj%cRLA8;6*;A*W%(6K{r!m78DiR$VAo+A9A4nh`)J5Tbr{i~R znKw46M|5^Ge~RWE0e(tx z6%M^06VdY_kS*_(<^If9fD)P1*!Ozs+4^)9vBJpD0wY1^6nVFsj@#X40X}%zKtakW zS9J&EEM%wGH-0gB#6&5FrjSo)0W1hpL5K1`)*tJ6N!{1*5oyI>M%AX zmshVAiavQz<5a4~f2IEov;szP{Ir?2vS6$>9IVWHTJ63@lh~D!tHHf;W~rA^*qP#7 zh55IeVj6Xq8-bExS{lq3yP(Ov9!e*UW15m~52dlD^0P1BsD1;J;X)Bb6ukak37f9Y zDTF`nZPHeM%p5efbN*CRaq=s{peoYBpPEZbq_XC|A6N*vGVeClqndx(*ssuJ%JtbY zrs|?%^U?!s*xN%ro1YWd?|VG=_7pe<8q6YZKT(4 zU^(MheQ+t&-_XBDw>)AP&x7dX?Hj1AhTLbWJ^rwaS{;7MB&8-kOO15mUHo0=YZvPL z;)3VuR2+h$ZtINvUE<<;88)Os4|KnQ6Acs8K+l+_SxBkLN@>#{A?!)`b!K+g1 z-GR?+M{$(>4N0Nk8bVbakH)|Rt1RX~Xc28ONj2rJpD9k#924zQxIn7w8SRAMTw`An z?})rWVrdm|I*M`ixl?zjuvA1C=i?c_Ne2-(y#Z?X=L6MQ zOOym-q1qtG9W;<8zt74l(gLZ3$4F!NyWFb{efif4S|tky>Br9pM^f$`xG~u)vSN%y z^4NhKjyeH`i|UgCg<>Aog;eq%xeWy;%hpP2g4SeLO>lOP4D3GpPq-Q9;-3<+i?e1? z_18llm6-Uv{AP2)1I(H)+s6@6ol6{RX%iI;re^7{tO>S$yDh#msyp70kSEs}uDjVC zJ-@|g7;xCXYSZ%{OT2t0*GGb_HCJfh`F1@v#@iO9isww{6soSCRFY%(o*>17bcfKhdaRF%GAMmM(;h6o9JuT$r%jW2&{qLz%liVtuu zAER-U=1wEyeQ>b(uEZV3B^%?_hhp@BmxOCUw*IJw+!y*jK$ZqciZg$Qqi~`nwJI$l z`>TV@Q%7N15h+8rvSKc>gP#xFgtsqy@t(tA7e2A#mpv$U5q++6-YrJmpC40cohzqg z_o`8e=8DNQ<}Xo5jeRqy*1~Yawmh!(Ptl1NyQx2qyoBg6EvmrNjToD3=EssqEJ6|! zt@Yc+u*=r%F(lNa$?sB%#xx~ST^NEGHmEY%bJ3G3k$yeCRPkxDUKxnm#Ul8{f*QhaevSrS(cCcy{*wpi|$YI(@L~DKb!R*B*z%PqQiKfJ)4P&Zx4ad zD8t5o#chQ`Rd;^&$0S5J&<`X)OA5n zUJ6_YIU8C}h;*m%s6jrAb#%#yCD#51!cP~(CFhJy;`q!C=P~V`yjV?!@n55fq3_qp zOd#>%W4~l^4eip+i~HUsP3tfMZHjKV;Y#|+Vp6yGc-H$Qt|!+$>n@0NXr=PkXRYm? 
z>Jp~FmRFxxtSuDr*Qv-Pgc}&2jF+9&b}@NHts=?Nq%9Za)a}Vjf44+NJ5xLmTTFKL zq_JYRp=fAXdv}<295C3L5-736w;R>{WM|?GZ3Dd&+kfcxFtD3r^*-H|=^@yt`<8Q2 z;~vuV=ECH4?hU#t=^jP&5AL4L{Se{LX2UKsyzd&Z{;v^fShaKXRgl27R>VYw1F_Bm zfsv8GC}5<2rZOOWs5uy>m3V~2t?7OY!Q+uK3!O&{0+$Z>+ic)PwQ-JOaGJi<7SWH3rh~++O({h!JHoB3khQcE)A68bA3Ex zNb0w$d&Zuo@?RHB8~;fm*nykgu2{EMP9{s?{)ppFfb+1^;^0o;aru?)xx`Rz`1=Wq z_q6|;-)D~SEsf6K0HO#QYDbC40Q$Iw<0(G;-*}-Be$l|`0%7c_JNFiR-@DZLULPa!T{-Q z=4Hrik$ql*6)C~iz`BjP8a(QW7Yf`Xj&q!}FLi{$`NK z3F(n>TmCY-AaZ2>IRv}226;1lWjgp&#%;IVH^15IUm~i2e%F!jI^I@#f-mL%KY}OL z3L$^{T^ljS1|va`QP9y)QIOCPeUBgkd?*AN#*IQiE2TvvVD6qeNl3@T?-7*tae_-) zJF&K*{RoeU*CKyxg_vG5xZoI7$Bcx5Nm54FvZ(H#c{qv$Vjf=jQGd*P+qvHd<;-xu z(Qrnt_vIYbk=4(af*dc;^=vp>*x!3UznJwpIjH*7T*FxcLQclyU#hAc4t}V ztj;Kh-FTd`=#`B=mbU0H-Z(xuC;sw@GZsuvq-Wb@&gN2c73cUU#0k(CV-~P(}!avl1FZcvp}6@X^lXy6EM{c~{!*d&}wKaHqBQy~`v_#iTT$#HpLh{R=X^}WU?eLeu!(pArhil^))uKV+ zzQNsV6U?)Z$%mK{jF)GXf!?#iQkO<-Y#GiJsp0;VBIZqdzlv4llx@n(yj*E9^pnuu z@MOEUeU@#M7djqcKJuLVVxKnDH_Ttdo?ja)XKG8W%hp~+K42?ez_S~VT#w9QuR8Sb zQnc_w)&j-YOpV^Qg&X7gbMqF4?_@=&p2P;GJu$wb6gt}o_UWLoIWp($=wgGSvOW}A ztfo!36F%mAUqy$-NH8Jvu$gb%sBMKl($CFB(s~>N)hO$kP`)X&r}z6NBYn?7f7>3X zwm)ro`>nrU#*((u{L)0>(oskcU3Z;pHepc6*~O7C3(7S+!lDq$p!7pa+|ZgYQ+4FT z;xMPKoQe7!jfch>g+v2A*+HRqHT$w{1@A}gVpOhbzCaWGfK?V@_OeS@t=SZm(JKFA;rZYc&ReTdVaT|;gq1<%NR^77 z;dt^Xiev!S;J$7_;}|TKC#5$0M+0Y696ZChG}~^D0hT7@D(<$LH5CT8;^Hd!`H?nx zeb_)F{Mu8RGKoXqONq%ReVo>Sifh{$#)8R%#4hYd_RF4n5zE8XRk`=lKRE%)%QYWH zpp|mtN5jHlwvC}be8fP9@v(U!mBHb{`}kZYR-j4Qos~2w=T*TF(XD)3ySQk6lb5I* zGjHi@o>^O9k?~pQML{&qoHj$|L&7Wp#jBPsUPr@(G2`f+p)zF~@~5G+MT3MfHz(m* zHaRtV!W+CjkL-(DRuslo#00z2t-UORlaeno1lFv4GBI@64uS6C}@ys&n7J}@n!bOG)Cy>8(FhR{Tk|+>Xtn@;~gKD z_`-hoZXPkNm#o$W3Ze>Lb1@xlhj-`qbn|p>;el?wk1FPnKMVR85EVQlYUvita^k8n#rKZONuV-^|nj1DnF)g8*kv4 zf)D-hZe*QL@Xuc(U0+;GO+L6~`Xyse@HRg$e0^sWBb8l8EaE&Wse(UOMaW^8Dp8K# z*%iLo=zz{LvJ+v%qov!c_>d5xx1P_R<&9;=7hy#}&%nEE&1~)HjV##9OuiA8HyJhZ z*tD7hQ|hgX)QkJMakmG&;p&AHU=C`?S|YDWCD7driDkyfq@(fnThoUw+bPkS4M=)+ zzt>Mn6`6#iZmH&${RR}J2}2AiN&AM7It!?k-xV8Xxeb}4yK|5`oY;?0t~5&9V>f2T zq4J{Hf6yf8nYqc?iacuHx}j4{pYH~Iwu7qb*i(y1U-Zix_R(}j@oVoEq5DVVGDhW;+MFxX zDAK}E&e2s(;)DvFiLTC>*kgKu!ku7ackIw>9p03yBGukdUb`Rt+I*@y9d7*5tfe5{ z6Y@vD>`7W{`@j0LG5x$yms8Ywr1>+lZ1QPz=_$iUKL9UrAFCq887XB}T)5yXiix`I^nt|=(iMiqord?VPa+VPmga=RJ_tfmi^nztA_9tq?P z=w423^~LG)a$d`+mJbA_e&kHTy^h11KVU}Agdknq`ZN2NW>ZPgjF1{8NC)X#vMd?2or(!8gk-dRf8%T<_> zBlhdlH^!rSr#9A{U*apsJ`aLMuH&)@316^?{db?k$Q?8qZ|pb(XrG@w@!!Cj7gCnNkNdP9VYi zZK+noz5|3q8_jgMbUQA}EqcMR`@K`@p}IpBaabxrb&}v#7N%FWRAXNzDmvxH!)Otb z9;432cHr}b+gr+a)8loo!qOAwqveOXxx#;FH{q)uw^8?i8X6b#L43+m&yK^4FvdT! zTC*T=hTs+qhvqYSsSNP^is0y%Q_G7=j!72#4P@~*qC8V((J9HcLGkA*nb*`SVybT4 zz39-g)3WDGO8Ox7HqL}#4cB1aiNUay_`*TESVC8z-EHDoS0?3JC*BO3;cGL})j%Xv zZDS{G{Q7obm#{C-11xFO13QR!UFV2TxQvLDB=4U|f6w1Cl#x746ock)QQ~}&jvHqZ z&s=H2&O|)6$VwSEE8XEm#YI8k29_v_EKCF0%sm{*A z>S*T=*2pOnE0xroKFy)S+Nx&IY0#-nkmhcL3n=Ck5C@PPW&5tBZ-8>1ql zB&cnL{AgFj>w_L^i5|yj37a0jDrEIVpImlmxl~C~QnAGg)uAibVald?N)VCH$E*RV z1reW)y*?Gg3b5)1?+oQcM9-ojPyoo`Dr~qZV>;-K2-IMFz?s4yWSTYF0Thh}0N#qsVNHaQQ@CY^}w zBG>5Gk2J~%i&-mPuyj;Ms`SfOaptj_Sa@t~#}^k--Ssk_pH+TavtF-b5YG=4bhLP7 zucWM#u$IDedM>I6s9M3WS-udW>>sTu)u*x}UaJh}(!YUEdJF{5vLcGd2s{wqxH3O! 
zyE%;ux2q(mZdo+)7c4e zu^vdO)M-eU%DE!u<9x5&1`&+ER#-zdF5o_~O(AiLXV%W_Q-5)1k)B*x=*O*3sgs4W z=&k)AU&%DxcGkh(_;RyT+bCLd#`G|GD$VUzX?L|#6MHaE2j1zf3`}U>b6PcN{m+GTF~h-_*VC#IVDIcUW^OGylVQ84Inijp|jW60@~IbS%pQ zUIYvJY=usTN5p%{QU{MA=`hbp)#3iEkqYAdxO8+V6RT`1DJvaNr!I@-7tm}m)R?ed z?ozd>IiS6e9+4zJ#J5Jn0c%kw^OT8$@l|24Dk0IrNM5i!uca^UjHUBj8T7Al-k_f_ zC9`2_mX3#-JHtHQ5-HxV;#~cxVpDU`;Ttw{58p?}_yO98A@uWGaRQf8JnxxK5qGbe z+83DBE&hV$UX*V|L@9M*{?lPv(%C-WM4mY8Si!(05 zeo&@x;71?Y)GFm<1|l|}PTNqSHPv0Hjnq&e8cY3b2t@c}#p^trRd;xSVu4+cWM*9M z=v;fLoh4SQildKJ1HB7%SEo}+D_c0=pAb>yDR5%X+UD%nM5?22mZX@ktXqp($gHQz zqdqngIE^D{WzSJ;;1s1HVN4|AAm)AYQK3dr{60tR*I#Si|IPvN+LzMPIVl0X&pVSu z)1!Sc+c_25N=l(HkmJUvw=p6<%D*2Wu+^AQ$l0vJBdwb`n#*Vzj;Fa$&S<6bq&a_( z*Gf+xoaMdI8w%kXwF=lWoldc6czqydR(BITTK=7N=WY${l|kBi`M5ioI)y`*%~&Em zD7tAR;upspbnR_17D0|U=}IWeAd*};yM8fc@@HW@!3-7HzGa1|mAZlFh^MauQqAf0 z5)p@N4#GS?L73;$zvlTiHvKn17d$EME}f?A)^-+B9B_5WAox?g^S zL_08M^WEk%JF#P9&j4ZFS{U&wR=ilt_xvKO6^2EL!t}T|5{O-MHavu9)}I7I`p2~k zgbxF_rL@f5gA(&4Yme|~xZ1~7jy28x?H(Zk`wjf+tvT*ZKErywk*2It4n0jleS9jP zNy}x!xMQ_QRLmX3vnTsKt96>tR?l z{bk_(&Rmu0{E9%KyI@qSPmy4`Hd~#;HMdgH3~oew0s-!CfK#6nZ|0Zm_#J8DgOHoE zm;Fwl6O9oJ`lt^}#q!2evij5AEP4%k(8vBLRvA<&3Z+Q}{uF&H#ZDYEHst4>0bIHT zGUG~YHDbQ~(#6?11h*Mt9Cjh;DK6DQ{XbYeTiDiK)g7aqAQ=}^yTW_%rB(IAX1gr|1$eJr$Xc2ijq@kdmW>kl0%aiD zsLH2r$rnRcSv|7sGXCQisgYr!tbG-YrImhsON)qKO|4HO-w`ZrYb~;ol8aFP9E-^{ zA46}$P_(5w`v5V|C%>~ybZU#k-pCEoU2VjMAfrnz44=xYDGA6AHKarR#FvvD#&|tC{dWO1(&JsrXN2mHDQ?Z^A8beVP-i<5E1Y2q51NRYZRL5tGtOek|1(cq8spDD48U#&op5NmmpH)+VHG<;!Xzp)T4pR z#5c<(F}v7q`!O1(2H=4_XY>1`LG(v*Vx-nJ1X*O%P4=hMo%k$3jaR(M%9=F%|M1 zT?*AoX;OYIVZ}OmRR3@rCbtK8VIEN;NmOkAlo7V`;s>ILI7Vaco&bk2TiNo0+eW&&@HX)MDZK&YPiOQ z_i)auT`xMt^zmxO%5R#OU65KRrX6ab#Bm3~m2?4t`jlTiEZW&G-PSV5o~aqt6>hQ- z@>VPd!@?wD*BlS;vRsy?$h+ut`d8x2eK+fUJK{+z4$}#;-L%4Qm)d(Rc(>9;rbN-)&K>Jn`LFGCd9QAl;ind>-0` zm9trg{{(usqn zyhVf3-_jSw6l!ov`nX%bv94+QOW+gyytVooYE9B0{a^;XCE0MbXRmxhe*;p7*bz*b zRT3Y#2R4{waa#&*R%%YiA|YmzL0c*pE=Pe2h*uO=wAm+(N7imQSys{!x3X62?xGk; zA&Kl3WFAHdnTEm#dbZiswWZhi>Ee|dWr9L|fFv{ec%#;7n1u&FdSU;Adf;$k($#qc zQe}lX75RaD0e2Ynw#~a(k0UcfW)}*Eg=0!<)J@UzxI!olMSyQN`v~#1x)}DLc++>+ zm#DC@kYBTc^ zms!{teFCN3J?6{MD2uHy=~xK_EdYE=}gIKQ=O;Pl;LCnE~bKX*ij6#c|Qr&Kc!Bg}|qMHMxE*u)>t z)!5E&q6)4iB_$R`4_^as7IIZlEyWmr&$mk=YF)@GrNd!8<#ON3(qVWmDTMvyn^6Dr z8*my1iD-oFqI!Z-pVA|*P$CGtP>0m)khH4l*AtO`Ee0KwxV=a0*pHKDTeu>ykfE8WcSk2y0H4M(*qJqGhDKDV}TZ~j8il#;@ z-}%kzrK+1x!|m*(l}qEU`cGL>>slA%WAq*v%u8gQl2;yepwzHBMOFO-p%}kY37d6$ z-azXJ>jk4%w@pPERW&aL!1(D~%kBzs>n5a-WqWfT+zrKkf)vx(-gPT>Wmd=ati>64 zTt;VA#aS%BTG$Ani8hb?(h^#e9Y=@&7_P!2WTlWlrN$m6EU!W2KMF5SQYV_{snAA|Ge2Ti_@;EEtforIIg%8#Cn z_{eB}j_Zk-yfa4+p*M)`G_k+w=TJ;u?y*=x`+|oQR6xHye`{e=DD=oQ#?~AjNq3>~ zP+vfbo0J_!VN8-dA9PFkir7=M9mawefqlA&nywgwo?LUopO6_wx`xU0Mb*5y*88Kp zoh__y38~M+DzmNRHF3-n{cC?L9_5(E8`(%WjvW6A#v(=qO9;SE!|)1 zBR;fB^zsQSgDYKf4kK4H-uH+d?GoR>HNqE-deX-#76)u!>X3;8S;@uKA?9fA3n2Tv zZ|TDHqT&PJoE~(~>WRu{kIzMteffEc_pj9aIXt*}QeSj%pM?2B=+=nvGGI!3V3C%t z6;ZIsfbBrx#cp|#Qsx6?U=mZ{Y}cWiAh!D~(IL#t>=RRB=OS@83tQ8CavV41%p_g# z4(k)%=Us5SO5wV1T*q7V-jL~}LY4%lASx77^eCd!u%-##pT8KoeQA6QDZ!=Dc->0Y zry^*YJA%thG>PyDI%zfajf8_rbWq*p*6|OZ%`PLu*(g4Xy+Kkvuj??p7Dsb_Y;S$- z$8!~0czHn651R2r<3{ND+3JdRZfmE+^=y*5C+d1>y89|mZDRl#RaJjx^qG7{4)x|~ zhI5E&Oki18REh&OJvGBQ*)qnp!l}{qZ{X4JZ=j)!_A9MK%v1N`r*6E^v;BTm>t`%c zkuM|rZEc?3D$YM@lttcQBY0!>mSoQR^)Q+eHM{-8?WgoLeNutZ1z%P5iGqpbDT_CI9AXRpv5%Qtn@5lxaHkU*ALOdMSn}zaWc~ z^SJj`vWL?#+nHRWb%C#nxva21=GIBK$}4PmmxTa{<(EyfC?|3}jq; zUR?EsxF;3lEi^Xt`UKmoFsx8~Tp#V@j^C~CNw6&;76qd*3-R9WVCowRClf=#Yn9Is$z%@V4g!)l&7yJsY#ZZzqr*~!m; z1B(hguv!upShx?rr{m#~W 
z#SblHAsEX$K%-!|7Va-!G)3%XAerchp<(YLlvl<8iDZh8{ak717R;Xb<3yHKz|&+% z@lnO9O|SzQQ`I;{|MMxHk*$o$)0DQJavF|7eB;|*rUOAyB8iOis4$|OlqlGV?-rVK z9~LGUylNxdFC}77Uv-(53}~=c`yC3<7Amt_E$ps7#$I-e?MO-$eIg;XS!C+Mo+@Q- zbl$^zx|}Ezx_%!{TBQr`Oko@9#a^v#)y4m$5+V(X(p9~dNUN8X;7V3w^LX34zDR^# zt|a3=8x)CiWq@S0lttc!%mdYR(V{2Owphrq6-ap&F?brIJX8Tg%aCmxy6XMN<8+B5 zlEi&=5cHw>E6#O~WS%IHfv4W9VA>r{jew^~cr&slhGVI;iL$g4i8((DJ&O z3x~$7m@-yvPj7I=p^f3% z_DL4>@51X6Iul7xqOX3;x2q!uALHlAI$v3z)4p;jYrJnC7Vt3e(lC7*zTpIVYT?~= zXHC+ir-I5dezOJz2}9P@n7Gd}G~+!O>zg!ig$)RvXl>p;x$esdScvgIoF>DUD9NQr z5<)-Vn)x7U!bfL)dKsE*hhdjq^3nUAknRVUc?dJEcq0C9;F-F+qe5h%&DJw6v2;3o zWVGa!RZI5In}j3Z8tDB<_a7&_9|l&*8pvmiaAR{6X-xSIx(vw0qGdl_l9&!ci?b_f zj6L!uYT|zbd~Xu&I&XVet|hK`-q(!WH|G)Rm)LOhp#&b0a2kK| z*ceRzA7DPmL+@O}Xf}uq5h4(XSP}mXYy$qc?BV_6boOufLreaf0I?I>nUSzJ_;&JX z8{q=De9vGXqgjL|t$`U>+rKmYMaln0{ZaDAqY423i;ac=2viA3kot!ij7AStqKE(C zBN9D;5Ft2!T0?^;2?2xt)hq~r27o0YNg$d8DH=pRgf>JTuo69<tF3VS(}{G)ahT0N4x>m4QYBSA-x4JP`9A%>WHt z2?9qH+}1Uu^qx{wLx6i}w!&0bJ>m5RLigXhWNKZv3xf z^xtLvCqzPQgRmf+2f+~JKU*Mw8i+_B03T+aKZe)|abM9)Y(EOBJvO6(|J?-n<9mts zAov#inwgalG>Y%`0ZQ?imn#=ghtME_mw9Gaav+^W(<{& zKvhOK8gym-wM}7Rla;)~- zlL+~C`@^_cvIfWN?a_q!-9kN2hw51PyUV-no^^cWjLQeE@DYK$Cw>CFtwE!TaFN!f zufKslG19)SjKXq=-4gm8HKQ^VyMSzl*)(<(jLqdV z2wV{^wRH`Nf{LVUWgeb*&?9CbfD%3`n0}M9y-iv!Mt6Av;Dz$re~C#nLoOMOqkE_8 zsSen>olevBt1yb2ykmOW9z*JSmI)KW8y8~aeuKvoGiI5A_v-`p8ahoz8)YYw+)kP9@N0zj_^n)_;0@cLi@TFM=KnL%Ib&40RRgQcyTV;x~j0f*mq=^IOHHpVfXS`+%?$o=NVY2GWTuyaZuO9Mu*bWD_q%$ zwSQ9p0g=+8Sd_=es9iF;2Jb}W5DeW)k&427Vm0CeQu^+FKVru#tuMnz=YZo7BNqS_CLALwAHK z3JFcmDMsTFjhfGV|LGluVss9Z#4{5-BCJE0fwj=TI=1(9RVw)9VSip!-G(-T}UmNq! zHY<&}d*T>ZQ2SASEuQ9ziGSZ(dXKvs)(X>@U)m20pjuRu z{hll~8MTadYizsoZI#$xTs1M2-Tdk+-9axd1H)wXQeeRS`7~QA%Wv^SbCh{}j*4mJ z`JSy0h5<_i49p7UFlZ>bu!bOQSWEz}6b|Y%D%=whf&^cNQo4HI8Y9&dp1NL64kM5G z`j6?L$v=qkS_@Lg| z<$d|I@mMAH{626=?3LR2^3zt^r#Bm~v~LH0-QQi+3?EQmq4(BaN?cqYZany&yIyu6 zESkziz(j85^kFsgRPbl**OBTr!PSgeHhUIpC!$C!rIr6Gl|@+r=$g2SNa3R@81L`6x`nU6BI6 zQr=;}!2?%PWM_rD1q9SkwsHkyeXt7-$H9v8d*_5tuGt`j^xbMKKQyEGvD9h<6k9V` z5e37gDM?q65l4L_ZDl-RqfEh$o)v?G#+MDFC7gj-0chvaNPHd$2M2z*SKO)TbQdCI zK!$WQzS1{1IyPq-h^8rWc6EmCsrn%U+O(Y|CxRgBwjOXKY!p*OR=z*)7hO~YIa#|4 zGbF??=qpT=)R#reaD?_$uHXp$=&_fbocyKt2TDw7><`SeXBR`^7b0F;G_)M@6c&%U z%EID?&|*cfbz%~CGlH1af<-VcDqFpn*&$%Pg;#5VFb*1042X14h|uBw2IiP!v*PO@ zuTYv$FDiXW4oc$yOnZ11hG?_6BH<%{QA+J6dt50Y+I=gVm~Gr6JItfC!YRtb7!Ah_V63PIGhORJd)@bt;oZz_;8WG^>&C)0^cF^ zW$A|vK3IsMEZSl>+^yu68C6!;nU(P|7tkVN5QN2~!Y`LC-?3r_X~e*WwUeYn0A;?J z83W67T`_!GbG(ekh{&o{d%%uB``ze%bcf3ZDLs;fmZm73&5js!jfl7kz{wjziAGl& zMbc@eFES*-UvOOb6f}>`tw8hU1NYIOuWRD%fS3-H?1vi&Drnt|Ij+e{q3mj8Q9gip z-H>{4Ako3ESo<*EAPfi-u*c@E3uHE^7%&aLmkwiyJp z1#!u=EF9X5kQt=vF}z4|W2k{u0@a%o=mJPc#f(Pf}~>1RiWNCmY54VN%u3tQ(33`$2t56&k%$ICIC|D#+cw+jo zCbickA@3``1^8cIt!?h!KbT@(Cm#9)-XhjKCFHR6GGf_~Q9C3+J5;^J9ZLtLqRF!h znu;`l?2OA@+NE39rd}|pvuV}BpN9G!rVlStmnnB(%76WfA&3J&DqBVa#v72!(@qu( zV0K7yU{QwW<<`77G=1*SUADsbRb=B;ax>`yPv!{bH!>^0Xv5yqUX^Xv<0F{$YmS^

&`77;s z?p{bAXe6;j;Ch|yPYu5;wyo$(PmOVQVR>l+B)szEips$#YVL$FFiCDY7rBA-G@EVa zj3^=%kY;jU-+~n59Js$oxw}^GQZdY47A5al=WTy}a4;|)UG!+B6m$9k_4TaSR$-{B zz*&byxOnK{iv_PWJmfrLTQg<%bDlP#1Vu%-tX+YgB{xF*W8H6|azF-bF7FEH{m83H-A=<7ihqK9;g;};M%3153b|l&vD@dpAA|hVm z4m^joD3)h979{(8inS3b2q-Ej1aY*n{h}<#<#-!q=v-=C|iZAzd64X=C4A^xwqPVRy&1SR}IT|QJan#Rw z=*3v6^gR767z@BOd|{Y9{wFquV2W902zT3(H8%~el?4oii$6q#TMkK64y80yNApdU zEWA(QLt;r~5PC7~ZOoTlwzTg=Pz9&4v~J7^7uuwp*Mt{hfi)bzWFX)M9Uq=z zW{B4Dqw>O%K2lX|&wQH5_IR{eV)bI+R^{boYyQO&PDSf_EAQo%0w7yAoYtG_HXU37 z`zQ-{#udg{)S%Udl@v9_$`>;N`K$_?pmwc7&D#|`VdSv46xi`rf|^;P3}G?_IzcC3 zLQ*U(nca&;g4%65zJfaNAxw!+%4j7_RDd5NiOu1#j1=ef+3M*gmzkdS6Xm^S-e$R2 z&TTJu#ikI?0%;y=Add9o%OgQO16d%99d+{68Y$95s-WD=N(DJd*)aYc53BcH<-A& zmv^RUxubT7;P9YMM)QC?TscEUCn^{*R|IPmIQpU&8P4TaJem<6O1D^wzFNiR{(=wv zn*y#nG+0cgv#pKE z`+0Gvwi$+QsJJSjC3J6z^1hPc-W2jGsoc8#0pCC1`_uV+&Tr?p^Eu~yPJ>v~Sfk9& zE_Tz_oZ#mxvYv+A6xTO0-%PgPF(j;_o0W3LdRo#5o#c2ulu&sJrY^Zt1|L9>AHz#zVkFcJ2k*$)PTNObvD+=>8XSy!4pKOo zJ8l{zlm0BtM~!9)mMgmVvZsNoZx1g-eEJG-p4;)*?Lq8rwx537bMKw!_Kr?R`HPul zX$t&S>fs*N{h5dwwuoc|7`)q{pZ+*Vx6<;*;wh7=Gzt5A&DjV-NaVHtl(c=vVNXzU zJS|nBufKs9r{0(Giv}Y`5L*0dL^%HYhpv&|U37H}b*(wtI?wu5gJK&Hyk?cmyQL}*eEgyS;6C=Ai)nl&Hz{C!rFDw_BXg5DZNgxt2u!mR4lm_5C70O8<@x z#6QVvk6RBIMT?d*;?5i`Vw26FguE{I8AL*_eRblY;kkpF{B^Bih+KceCk8}uJxB5s z1uce$G7Mp~oat2E!X>BZ+RwYn-^2?hcK^mqp0D)@kdYF7W%Y*V>|Ie#%D1E#YV{{j zr_@hDe{c1pJdA|R%C-Dux_32D$P03S-SKOnNu!MTzSK4JQ)NLCo*SRqBun@A*y+rZ@vPqOW6Pw zDs}EKmlyybV()o#^Ag5O#jcZR5Gu0OI|-TS-A3HA7bSvFw#T zQ;Zgs^V@T)ku1*xh%FZNTi2`w15N&|7Wj}=1ZWIp^Hh8-S zdfHTjJ++b*(Y*DleNvKG+pB(qFcB`JVLw-@xX?*Q~3;tE;oBcD=3v1*4weTwc%0uYws$qT=dyw1ioF~?SX~`j-&|{ zOFGq`!bkKM`PAQLE-()HRrP2RS*!4kXapfEv?x`@?WucrLoU4Qs?_C??TsHcIAwY1)7+OT1)!c9kG&%S?M z*EC&ZV{eShn+jtj)NrNk*@&0L+r4i`f)Z^11lQ)5488f*_C?likQKG@87QgPY=3>%bCAkKQN3Tdmo zjRe|l*~fi0?@vqyA?rYJ5?FnQm$KP~(B1~Rx42oT)e-95MlcXtmSJa`CpC$H^y z@3YT2-}9Y&?~mK`Lw8rLwQ5+><{p`1Q7rA^glX& z5&0hx_=Wt~33!1DIEQ9pdfD8Z!fI~!pvHRD70*8c*3Xgz@hW@J_ zR`W0Q$Z&{ADDViVu=+*7Qv^5wJSGAr004LQ2#5n`XZ}S80075jGUls#ngDm+$CUv9 zNFexH01|)^4g`P$e}IcvTIqR1vYQg|8l+@r-o6)4OB;W?=X992d?=uHqB-tNx|@3< z#OX{HJW0Xt3O%$N+LLdOVCCE8XFN|g%G~3}{ia%a@Je;KV0e$lV4cQG$+W&%h(aATSUl0ctw|k+Tb#+D5ewy=5%ffPVxmm|3hl?${LvtHFt7`&$ggjWlR&*58aFwH8P}D~8cP za5J6|V(pN9IXg#E((4%`#Y8@K@Cc9`M@YIVFmy?wIPSYk_~>?W^avQyOG*052Upyd z_DQPk<81U%8!j#QqOvdwHdEG07$Sm#ud)Z%L~JV z?Ij+<=7+=18VB&vw&{(?=LmobgAO!|c)~tbO(w~37XI+gF93|~7>SE9B%04Ux^@+W zS4sp25i%5tAyF3^s=F2WpOt`ajh7;xuLDz>9q3t-PoD42?awNG*g?iW9#;3v3T$t@ zjKl-|Yy|QWJOY^BIKOk{7v3AToMnWZBa;f%Xm7$=*n+h%^|u!GVJ+x~M1h_AJ03z5 zx$ufjA2QZ{uH&z=c~`gNLfMbox{&k@OfGiX%4*>;2TdUhPv;3TZ1`Wzd#Pnv9~|FZ z?B8#j9~=SzJkbi+Q}Q^`BM|6di`jypQNB=yPZP_BGGc+s`qVJ$R3G=Qjn_?gho@xT zgk8_2y2i%HVivq?4$N6Rx8yl0?96y=<=}#@j5aZXw)qh@>YA4pCql*m0OB5)5$pak zVmjLdS)|B4K84A`%j3l(a;e#t57C#0*oXLI!uaQ#Vjl~pNaHhE>RV}ls!zcw4ufshb+%BEB4F>~&w$9%X-W(q& zk-de}=J8U(nF{?I=@GI>Q6YS4ruI=t@AQSIl`{LwIvuDd{eEM`uJBmWZC4wwI+mM~ zr%=KA^2w8(@5tDYx~fID=bz&M!1H}3p_`Bw^Ed$b8k>uBU;!_X^p6PR(w8hm;p@gC zk&u#j`100KkL8RKWZiqX2JY=y^k6=664;6uY*Dg4{2{Qmy5F!@K?p`h?~5ruwgJ^{zJN za35~)l_5N&<-oqs1Dqf0ue4n{$Fz6`0?leS7Cz-VBczuvGU=(iYkRZ}xfMShI9yHf z3VbfL_ncKp2Bxb+n6BRZsjFz0zH|YYu53hzNxwm68udpn`9uI6cRKlXADQb#wx{q? 
[base85-encoded binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/laplace.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/laplace.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..1ce3be3e571244f24ea25f616c19f66ab48e7aac
GIT binary patch
literal 48125
[base85-encoded binary patch data omitted]
z8J+esP7-nW`kkT@Z_l>6GmMs!hcYrv;|zP`=g~IAMBcAEv%K$;m&DComCDSs7QOdT^`bTXH0+^1q7?3%Sd`0X>RhW!f}y z*2Hph%Bkc|`7XLM(w@ox?HEI@vC2!~iw;Yh#mf(Kk4x0$7e!S}&b*>tX?NU{rSH(f zO-`Gm8OT0e{m3eF`i3)@EKW4ETP*e-noictqMyHg+Y2oVZQYIT?Y1&{or&G6$g}bO zSg6{*5A?`TS}(fJ=~Lr3B+y`5{rgz*v7h8wY5S*!*p+mDZwP(2o_F5%awEuS-DXt+ z4f~d!g7oWhsof_+N!F5y?AT*c;?Eu*Ua$q1WJokr6)j`6X#1m>+D7Oybi&j$idH7C z1uSUi%}Lt}eyNyzQP@O$p|(^1;Un-RowL@iE$s8876#`=$R|Vp-vE)vM#W)ciJAMn zxu!4QbVrkKY?wu{l5&qjj`>*H%Qlf3ly}#By!hPsy!l|<_NCL(>;L=0O)==pBO2ht zZZ8N3sNZ|KKzqSRSb%~@#D;Oqin-OD3J!h(lk30s5b%@aq-RMLDwV2~snRC>7Z=h? z3hB0z0@$MnH~^>Uv+dQS(f4#2uhcoeSK3_ZES)qrZl`fKesZ!K%S>Z6`9r)j7*F+A zBT$2(KZHN9xocvSDV=Ss4B1gEii42r70`p1`J)VmWj2R)mXH&SPXXt<>Pq8psmBy z1GQX*1|GU`!_rJ?pW|sQw2%YQQ7y-b^e?2a?sq}aoU-%^ma4}=2H*n{MF!%NxR4fG z^#1+h-n$VQbKD0xL!wehVQ*w6Jz6wV*wQk}5MACyVdu$}!>BH05#?On0|pdwgiRKo zJKqwxF@CvyuB}>r)0RNXhVCVM4`_+Ua-oAbw0W7>F#82cczz%Z_Uh|W5u=^X*Tzep z?)H}#4_2NX+#!*pyuNKQ+CCq<@d<$I{`}rHwYklyef3i6DYAsHvLDpD>Nc&o%9C6d z*kFx=9T+Rtl8ziXcz86Nk%k=+JWhfV9h74A-!Sz zPZ|1@OO>X97C56BAG$s_6ux+&MOz@_8p^@z-LcblpjiWay&m5oviAwUWY5|fmoM-Uy3eI?pqImq(GK-VvXYV!@2DAWCmP{f-|z(%sQpvH!f6=0x5{F{;olQ}jO(5|7!dObiZh3SST@Q!_L4nmH; zp7`2=#)^l}pOmyB5+k4K8wO>7GH09fCYLQ4yAgmqQJ@shF6|nDh}mjF%EwK?VW;`I z!n5rHJ%PiG$*w$XMFz3c3$Vz>UNI&k@h49NgHj~96|NU#;dpl0x#36gc{$2HF<+Qs zKKqVY<=#Q7e(iGiYjEttGjI5`bOS>f-H=ETG?cBZzR*;|!jKaJQstoBiAD;%!KqP< zMI)UdENL^E1RNF2;1auU)o3D>n$)i7@t_W;Cz_L`((nZ}x7m(lFY-;Et7iZ5_WEw> zmebANd$l}b3M_0iR`FRElBs$>)LvxP%=h=plKb(EqBP+`gerg>e|?`qyb zNEBrOlZ9)!UBY*mNL{l=0t!CaJrNFv9B1Kf@JsatEz!Gf~oG*$x)9;>_ zOrShTRwCbLqzp{T;18ZaoZ96ME#3<-bWq6ETrD7~pb*b!kjmm{B!LWPdupky06m7$ z-LFarAFn#Mtf_yldUy}$vrPF-HgkzxNjAL;ZEyhUjmWTN32C2@rw=W*e9p-E6vnZN zAw7&u0nA%8ML3&CE7J3(L)mBkQ}3l#MOFIpsTVk!vA$|9RV^-6ZOPz_l)FVrGV#2( z1%?8qfdEyZWVO1S48PIlHYDfR6{D(b!Eg>NqKBBYFMNLw=cak>z+OUlH%-z-FOK9(F#bKwk) zt(fW8G~>-kY)+w&W2BcjPIEi$j=;zqep*zo#pO(`jkheSa#H;bpqVR>)ZC2P(vz_c z2Opb|WUqkLlaMv8-K<0W#kU3{r6Q;WT3MzysF0iXyP$&jTQo_jYT>AkvMxE4Hn_~O zaF~_x;lT~41c-?3sG983poFgtgizB+<5ML1Cp>oMFU3D2p6!;xxCCs++#RF9X-a%_$T7<@5n9 z>F=?83@=&Vjcjkc|2XW+JSJ6a0(25{%rT-YmE(5G}MiYz7Z z@L0%eNFxZX<#2=$$3xalJykOV6W~Y+kFQ}A%-ezOoCc1IP$~vQq(CO*w)OOm=!luS ze-13M0$(5%xd#M*woLA`75td+??M@!d{K-dzgSn_T8$q=+PsU!*==LCGRlpkcY;kUiu~9sG^;jnkaX9JF!KauIV^Jv#sL~ZqXb6dBcs@w9qBBv2)Ax} z;1jqir(JU6OMtSpbyE|1OPFWm?$gI{|Goe&v;F)zaiTvww1{-5KSdAPIT|DoAi~uc z$9cmJuHkC?k%XC2hGx%~z9&qL&IFzL+> z!Mn_be#|Ln1--mXN4QeLi*^?=AThPo5DEHWQF=OpF6;D=F-Fb%4px)F1WFmueZoIy zsj)S}7>RwM%w;NN1lK@?jgQ}GfLIHy0;Gk<*VgLZse*^%81^B`e`+{Up1`ifNK#7NzAd3%`0CF_R zhfE5A3m^${klj10Fr{ugMh^Tpr0BEKIm~oAO@wr z06?rDC@1jm1GNBvgZO|B7~&>h05~iF)EIPt4bTe!ARqOUgSNHiA%Hfrg7^@FC;^Q9=%iC33VJaQ_=3aKwn?Ktml#16iQCbbJRY%bEzxZbSoD8C80%P5tHwhk>eKvl7fqryxZ*ng!y%VjTG(p z0THNE1(1;;nVIda$OR_+m7PB2uLbvmZbQlWxC^*C0>8|VG&b(pN+@35t@+Bpetka# z2@8b75d{+D5riSpJInAVb`sSKz#;dAv+9Mdb^CQGX6N~>U?El;GY3EhHpLS9k%D!P zzNXk#*W8eN)yg{P@(ybD3#YDax9RK`Z0)z|toyF`jEx2@%fZ7H^VWZ4B_fdsBF}Pr zeR-ioB!Y?20Kf@ycZc{V&=ixTI%%~NZ<{ExF%;y)?x8g>^4K^NX`S8m~v`~X#>WM$cOYmPcL2o#Vw$Ks# zlm4O#4|D`iHRCO>p#9~`7Htu34DG(uZ?*8He^)Eq2U1&E_nSlPBuK$p-fuj+8Ey<0 z^>6qitD1>l9HNEAK=3zpbU$6lg)qNPiIrh{q8cCcD_nFwb_X7S)DaKSD-Q~faxh0i zhL)0fa3GFf8cQs_B@ix0Kmg$D?JK}2j3eO)CW@i>uTGq5=psQ>lP&Pn3qAM@VBZVsS~oZ!R&xHhMQ*qvq8Om7=g!I1c3$wmK!4yP0nEt;0*4(Z z5_lC7gE%G<_Q(OGRA?JKXaZ+OcslMK1bDGeY5W1fO2iV(l#m2qVXiLr{gBPTQAn8t z7OW94muQF%01H$k0O%JN6uFD7v z-KXic^WWCuUazU0MR%zpGq+k{o+`AwxdB7GwWn>EvwCbp?1OWFiOWG{(D$kdQ%hGk z66BSav0p8n(hFLSL><3s4cFMf;JUO!bx4h z2E3jym;J;OKF{M|umog9 zg65U4Py+g%&^`p77ELrD=Os++C9mPeV_QG7Q~9?9G5+bF?g3l0AvoYYs1xmjWpH8K zv4<`&Fz!~-!;+GeQiVO_k$O}dFTK9FSDjiNxo>91c3Cb 
z*&;_pTuZ8JJI}5wRVtK9B2X#Wooa|_uRh15RcYN_3QwBHzp-sdX|Fw{RROhdf*-w5 zo13dHHAwG4=Ty?rXw_lP*Tk3!F5YMuMm%$ZQHm?9CN2mEKE=feZ4FE|vUu*l%e6=` zK>H1juvc_MK#rQeTZCnoj*K6tBzEM5?Xkz_*O=rk*Iy2jd=_1Lnw{uzHZ%4|D-7TE zZW3m?_F=r$lu>DFUW+uk2h{T$q!8VT^cR?^IV}hb+tt2O$#NDMuJ*ZhGp*xW~r#d#OiretrAK%3;$;fP*a z7>U80=*F-zSmx(Dw9vA7%8*106^(!ooL#kWQ|N8>Ad?=`u*;-hJi}!?scmK4#2XEK zi#&P2Y}DiO1_*o+m|=hu+L_sq+aYAjJp;P6BzS>PA9HhlP~jmb`6Z^m9`h~pxT|3X z-^j2*aDE53V?7jtY=(ki`fBqh$^JYw@)1{BNlx;XJ3Z9xbkH6yx67kMV2`jGG@`a0 z4DsX$As$K?#VxqdPI7cHSjaF}>TAY5TDW0{AHX@B%lGiMOSe(aL%M$k|?`ZYQ`({%qNt`?6P zW9{nPa{yKE9xLUcplY#Z*nd0bs`+QZ6PaC!9 zVhS(6ho3%mUzXxvR-=^-gq1NWlpz;44pZx;2*z(98G%-7%4`Po?XGYKb;>L{SvBOSbm z?USe<&o`a7=6*R8w9l@QSQ?^*^2*AD%Hn`ysAa6zs!?NK&!;w1dD!ul($3@~L(hts zrCARURbav=HebKV#va8&vif2ZB#DmtbgHm0EaotQ5RzEj>|?4o-ArY8zn0@^>8neL z^$UDXd8x zGPWuSpHK$S7W1Fmr7eCfo{@2b8M_A{KZf&^)3=bop+(IzC}0P=0Mhp$;sbYHBC9K= zaGKLAk?$nIM$M7HRWrufSjIViYl9y64&QCuqzL99{fMS_EAO!+g>dVbkt~qcTI#ej z>Bn{Gcq!GkKJS{S&Wb9PpVi|Fwzu;mIL-Ws?I*p<)b;P-bVozg9);(`+LTgnh#gAB zt}xCQ=685W*yR}$WC9%9i8ptR0#FpS$d1dV_)6M@<+W^{eTV#VP3d&0b|*@2=zFUh zPO6DZ5T8kh(?+8o>ELXi7|6a@qcCSdr7nujU@s6%t9=jP!u?=DbB$+th=5~&KM|hR zQ8ig%sAZ5ROywXz`+`_8Tj~7P^hcK{ABYe68X^hFPr+QsCOzR6Y}2C!{i}mQBqAAN zH(qLRY$@K@0X(=^7L$j(B&$FE$;MR0Z5_9C^xk6ajW0uvXo-vj&nzr?H0bX?ItxbY zm{$U^1(raMVK3AL**^*Q^bv2im>L67s(d&282CT~8nj=bI$+Qg!VGDCUJx}$pD~yn zzh}G7B$Qn4NH6rOKbQ+xy~h8kKKkAU@?RvsW@yU5QXz)0_G9IF_-j=2QBEUI7*+3&$(lL zL88(qd2$7RHRPx{MyEBb|8D6C_9TjiVbrV$#yTJFmK6J}cFN{S*+1&2PEX>(RvLgZ zyCT{Lu?7y$6W6x*DkEOwajz+5K>6d%T&o`ugiTO>_!)o&LQ_%?AA&6iEGc8TNjj`d z@P7nNQ-+d_m{&Z7U^CeL>y1l}J;lfH?b@PA1oxFCN1+Qw$a= zO+5fZ{b^+FEDZrmu=1?d(b(tIBmZ&m3_lrg?E#CJwooi-R$O59}2;(-5pQ71F+GCvmgNN}^ zF5XUBwMUaY+BQU|u{Y_w_!ro&Uj>F*_(o8dazi`Jk9^#ZcYTB0Bf~5N{CUNdpn`EQ zcJ#aJ(5kx3EX|pv65_)i9rx|nAA$(ggw>N+ZH!Z92h(cUQQ4D;n{Y0R(YFA{h#(OD z0dVJ{F|yx!FG_yXVfjm;Q_RvB`%#&Y?mQJsOJty-wX)Y^NB7EU(YEM$OedmG6dfXs zW@j_gb3(g?U=n~6czCSSW^q=WNi7mReBCPcT#i)!lv5^L3U=D$zV)z~ViW%--679r zh?PFW;3u-y3S77>5;p3jR|r0Zv$^p`uf2kD-=j5f76ue7U5ELI*jD&4lo21OtFLQV z0PlC|m3_7abhNUDi^TK!S)rLD`-0CkkEZk^ZL`FXUQih%(nZZ2s;D$a@$<+3nlffEIR`^poF2(cL?~!nQ3bc~~sSUJVgFivfVsa>X8qjR71l=|{4* zY_gP%c@O(k1td^!qJ=%>ur2z>PA=J9xTXxL%p563qbn150 zRMM5Nsu_5(%R(h)iSJ4U9r78=t9R)C%srIfPD z`hF1bmc&>YpW(oVPwjoGVsZLa!NCHjR~*B`6i@?h;XyRE^+ElU+Z$Im=6dGa?sP=%Vp#9l hGYf=c;FO*0SW>gfJBFYLWj8P2A%*A000XGzWg~LAmQK<5TT%9V8MI>a3D}% z?LS-qfPjXDfrPr71CYULkZ4e7VAAw;C>h0HGVo#@=#mpl1!_O|r9c3HK!QmE4Ils^ z0HhRvnpI(!;NX>EFSsS{HEwopHemH)c}c)N-*cOEFw*55#Uy<{ecPO<&)6h~PH{`H zbZ7hP^^dhv0q6G)vhQ6Zp#oPFA$dmFR~4#RGxCR4;)IHhOJ`EOrx&^Ok51?4WpRF&PfqjC zZrD8+{#OVPKF@73k{oI?Iju1jQ-2hKB>k5guFn1b-%0R+Z5;h&Luk3-4hYJ=Hv$#^ zT`>dL5*XhGXCQnCYzmScYG3_E;~_wV(C-IxlH(!2MD!zRaBKnZwoYMEQr*Br+_A!e z**8ei>l#%YmlYpuy4kM~p^mD7CMY48;AAU0(o|;(3G#5(*Nlap5u8ymz}w2%)MwI8 zaee^M=EJ1`a(}K!Q1`ZI*k|0FwBze1f^$DYH+(dHOnK)PI|n3Ej#W6)YTEKj0N;lB zXv$_C?4b9YO-9O};wS=1XMR!=3W|pD;JT*w&%fMFdCwfu$r`ePDs~z<8~>VEav~kT zJb#?r;;ETKL;?p0J_!AimaGGfSQdoaoY#k$hur^B-g)ur(NQfJ{j66<0SXOPIx3v? 
z7PXlfzKVSGUIDiUT!IaMO3=Fx)=Na>7DpIAp50A{w-jL=zn)8i73Ia1%)Rt|6whr6 z(*9IX5y-)!`HXng62-)R?G_#njq{Y<@MX9e&OU@sPaZLW6p9H$&jP_AxNpFBem}dE z3*b58Dz#v>`gFCFOESexRpnu%xxWFmE8jDoM<;cV_&EHJ}01~*(MT6SY zNRq!Ahj*WjKz5E05(xk!%g??6Kue|guy(7CZ+8fs_VK(GzQgX3<*cG7?8T?;hW3vpRk3eHajU zZQ}=^Lj!QM8leHtF94`d{6_$Q9(1^KlIYqwb}?BdFKE4$#X2==Nu$|$-;;hFU~^k( z^cLO%xPQ#q90mk=gU$Upz-Fwf>tA+_x&JmZ2{vQ!A7&20W^{j>**Sg8e;yF*&G2t# z4#3`cJa{7)nzOLBjD4tfEBKcLke_9rk!sHQ>Sf<9uSK8r-YUi)C&0rdO3}~`^xxtD zKnxy~nZV5o1`-nSeqjDJG=oQLI0#56cyvr0Ts#t1b`EYd7A|Zh3@l;rKn)KTf`Ea% z_{8xDO8EcL#R;94k=!XQ&qTAl^#r$zEhPTObqZTj`0}=W2^@wrUyeg2t!#M16^cdq6I^%uu8^= z!goU;LM>iIEnZnI-bgLq@&2s!SF4Rt>(03bpA+D->U!ei^~T3%2%J2*M*RLQLvghJ z&wM^Al%w*VJ{>5Zw!pwxS(DHsk{Zf;OY81P&5S}xdE8yr!IPK_hj9wOW^7C(2QqnE z9PY@?hC)px(!D>(laV5cSwpktW=5nS_xf#9v?C{LB$YX7cXuaPOA@07SW__|k{;^1 zH%Q48NntkD9oWbdodgxJ(DFTM_&W?=E&q<{jPDB0H<7lLB8@j0bY4i;kW25l7qE6e zofaq>`TFThQvC?Xc>cttc%Qj#;ptc0yiy~K%dW2Oo|av{54Nvzmx_N$3mM)#=OryL z?Jg#IZC0PmjwM!7CVuisf`8RzA`&9NiGmTMq~TR;+rQ*b#H&&_3U^#hPoApDx|#`Y zaX1mk2jQm0U{XlZc*^>vIw2_!#Sf)0Fo&0L=Q)o^XPjysa_i_ncOt$lu$I|`Rnyn! z+qRE(ZOA-;fhEuSLcg2$P^5e+=crUVCQdP@XmnKZS_)|$2quYCu z^vuSeC2qT^H#wOcgs=1K+HUns>-oG2Xe;>@4Y8qV$`gu9hC3qbhmdC*%`JB#qGK;hlSPtjvlZCMLysm>wmv?H+|< z!s-n~*AroPRX1!=V}P`a_Gou&euk!Wa|g^PTt7X^UU0^Pac~wAXjm%lgi$6*f}8hg z`1B*9Ey<-eS43a#YS6Q6*+I4R*+|?S;Cq&vpJAZ6k|~bYL>IH)#b$t&6W%tO9HX7` zhQ)3`uCKE>#q}j52Z;VXw=Dse>2XwLQR$a+r%|f(6uS+sdGcPX(T4#V5so znLHHjbsR|P9iH%vpQBF2vs|;@obot15I5yZr@5|La9fMhWpzfYwd-S%N~ubbnyg%p zXk#-WHqw$^XOUKJMo&4z{ATsDrBmI0{}OsiRe|5|7?Bon?>d^I^g~jUD&Y%(X2q}dO6!7QMNYZw~upLXAgDxSA^={e*HHi8R&_}PAfFr)3H_qG8%aWM(Kn4 z-NppbS=1Wp>T0-HZ4Q%Im2s8Fjt)WAI;DS9v%vT%=xy~IN}+gS>%rvjgLY>s3=)~x zs$Lda8}Sw3U%|(46*>-1Ij|>q3|!SdROD!!uh($=z>5i(PwqQnpPdC6^dgTg2OpEg z6{%ywGZe{JN%!Zh52kjzm*q?))Hg{Rn(#b{Qa7vXGvF1{NPB2`C-z@Ga;Y)M6{KY| zR=a8{d%pcOz$~ z+CCSWh5gL=4t4C(b`p007k-TvkB~I;zmR-8D0kN%nysMR&DRU)R$co>+R2>lfvlE0 z07xp8e`sKr%-^$CFupmqtz?TT=P-E61XUEC#r6K5JiDq+U#yzo-T{+$09IQq!H?dy z@0;mqOqPQUo0Updp}9VPT-4V17=h5RlMNFz8rnuxOY#Y#f|QDyohy zG4WNT&Na24S20*c;>MJX{K;~T*hOP=KR(Qv;Mr9O0(7Wb2q`b4OcAzaagt2bwh-Q* zS5));9Jh0M?=fZVP0&$88F^G?8_u^tM=)`zGE%VfLT3uSZ^i)0vyL zbQ0U&h$CdES1==erjgr?@Nn0U5w%dW9_l;`#pKJ=*C)$hK3Gww8wNke|C%uJ1D9~=0d2<0hkbkP2X507- zY?dwkOGLsAEgO9xEoh=fvg`HfK5e@?O{_DnWlpV3teSbrMTB!kC%&BYrEf#MQ}Q{! zFBVmT`TuDBN*tj;{oXnjm8(Qc3$s)XdkN>}>XkT5)?}VR6)HHQ+X#Z2kXL^~BIL`n zf1v+Eq_}?~4Hzkzspo!u2fSljlc3~L${u?3o`%K@y(G~zRz~(lt~0V1Jp{3L&mDf^r`W^_SP3SVlmkw zlyfoL-3r9krft#03_`Q_(?Kf8!?0t{!#LOlCG*2I_HTB`B}; zx!sHssm9&>SE*SBFO4!cDp>vl&+Y-wfPsA>VGC*DZ$PuRwO~N${|R8_J>a7wBqJAa z`|MeNX^y{U`_l3WhBNZd_#oOKY?uMqm3i&Bxdyq4E3Fn4fj3xl2>CmwN5DV9 zK_L8$0qSp?T0d=eh-wY)H&9;s{aq}X;9{{~C1*iPIO)1~=I_ugz@h70NP(a!z5gM; z2yXLR4EBEnbkCEYZ4V6Z5WV-5(&OI&X{I0Q;hzEP9?BY;GW`9Jvq3V+zkB*iF_EpA zaro7L7MtFEvHcD)`oB~hcxC!;AwD!GX>f>OL??rke=pOBpH*0uFl#6KyNTY4s+!x8 zmJfMz5a=&+j98AOL5uACogOwN9A8@o>`UH#bmi85K7Dc=Qut~gb{ z>yd|8sC@L^%E?=-G}7shn;5iQ&njLIQmELxF@TGg%AZAPL#2uN|G1j=BV&kHXJR3? z2D~mw%((=)q;QX*l|l&+U~@H1m)pnfCqxxHv)-Jg&P7WwNngdUZA73uNrW_!i|NMX zGD(f$Qdow?a)=KQko`9@wqh@_W{be{s|0v{g@S}Zhk=2I_&vWuK|+J)R~1Jz3`}fR zHg*mrWmPqoxcHjSq?}GUT_Q%YA6Lo9o&95~#*RcuD4rPyJWR5X_mgZfdU82`XYDEC zIEY@6<-aq@eWjOCHYWa!)k>YFZ6|L(a>Hzck#CNf94EyVHzrK3KCHi|P)B^jc$_i! zgw-l>%mc4>$g(B;xmm5nQS$Gkz62xN9Q%dbAL{7py=+$JzpF)^{juRkwX>1K$TuX)YB{P})Bq{4sSESiYxi;9aDY&}vQp zRc-f=>bJ6rHr01BnRN-@2VD_Vr>fg76>{i3rf`>O!K$n^-}kz0(eY>F_vyA|o;Ibj zU$?GgE{!zYboLOsHmVB_alxurG<(W(*wm#Y%9pmA%4CwU$1~63UYB_Jo>AKKxqct? 
zB~$xnl)oID}$)ug#Zb zN3VD7n@jK+KLD3uJIRrO%?@9XOED?bD&}#tr)h!W?+_^j~POB|=Qj38?gA2H0u3LVAVevfv(7ucWk15pDNdqutyc+BB!aYhT~ zA{qkrQPBK{}tQ^|kfJ_=IoeNt7Fk@L!08)}y%^T2b?2j|)F<+#%O z1PT%xI(;@&_wkWW4XkDU-_R7@S`GxmNVl$c-f7CjqA_7J<9YUGx&>X9Z=k*l@M_w# zJkv(y-j)~rpcd`#Q0(3{bsz4q{m*d!hD8dJy@xH&v9tDgfc2OY_!q1$oj%LA2>x5X zh2Cno2@!$0(^^l4H)*GZIeK(b!i{PzM!!qS5bQ9+sNh?;znOdBV#M95i$)fCyl`2E zOY*xEVLA)eQMJI)v~!mE86zBS55H@I_=jLjq0l5>3fhYI{}s!{LoCe0)+BfVPn%9( zFbV&zxko_?St|l2%uhuZrmB%K7e_O{$8P-P&$R&?D$h{gdO5h-KME{-KlHd6V_x~= zQy2BxWwb}HzR+Vk6K$Xyte+&@d{m1s!XWf9mV{S8j;+T|?19lAdzO?e2?yJ5O zt~HjC#oW|j*`kFe9HrEj=Ccf@yieYbb-D1-T2RRQ-j`yDFv-hUGqD{tgllOSS~w=& zs>ZUAAp1}oX+!OZWBIoXNUUwNRh0hb+&x?rl?=TXh28%jREg%Uq9nKjiUk_*gA<(H zUUKN+m75io&c9ciu$e0_KZsg9$Yz>!cAO;V@{-a{JLh279l>$6K;o+j%Fyl{DO;Tj7L%20pK}X9u0tm1TPUo!6L!HLct(_A22{cK*IpA=opw-qzPzb ztX!_+*d*jE9GuRvRW%cEY$DZTI27zkMvlUw@sz5@CjMf|Do$=OwRMxf)|6p|AnyQb z|B&@QWr2(g@X}bb(71tw#bSmJewLf-nYqvG^jOMwi+>ay67fL zcKfERC31_W)3`l6eV!3GIc#;~DlbZ@hQZM8{kW2&@VqRqziw@wC4E(#OkTc1%-lB6F!2^F zYMKUTQHu{!{R!Cr%e_e$m-y01rRLD6MbgVtcdY}+m@B(+hwK8m;FHz7qbU!U6vV369 zvz2NpWWTy9`Q~WVs)u}dM~(6DDhqpc;-2>c^q7I^)g8d2fE6bu{dNj5ErusJx+YPF9kzXOs(Wp7-U8{>q&hW6g3=~tC@R<##S zmsuU9TkO>B5qMMMU)g&0Rv`AiO4F~V?{v%pGeeKc?wNEg56r;|ga>BnPv+pgrt2Qt zPbTOmv*wrPz)`rtJ=2>U_e#$blm*{&o1|Yu-O1`(FkJ@Ld|>{inWSG0Hha%3`RULv z%>F+PRsH1dyQsc@r0Y!&QIl>^M)@#I%@RG>t{o z;^ui+CWDd+Kn*8mWeoMYe;;)V8!|X}c09Frp7Tn^x8Lo1JvZ@|@|cJ9&AV58i?1e_ z2NW++md<<8GY+e`*bSif-Y(A0Mhx}nFZezN%75!^PG!4S1+Y@N};1ChWK~D zNqADxP2EwPqX+i-hgOs&m$D$HhMUnBl%0I-m)_UQJI4?*P$pW&b>${hHtfl00K}S@ zsVXEnIy*Zn`tm!XNP2SNu(UwEJoB)KBnrd-)P%fqvyFuLBy*M~)l=-TVzb5eA{px-|94^Oy?g@#->ur%F@y4QPo{q;UAum0ePw1yS!SC=%vhg!4l81M&KXLpHtFs%Apsk-@PAj4xv*HUC5W{QW=OV6P)VCHtgRz zgrO^PEldq#3!pU%X0$VW2S^PSCRQ8sFq%6n~ zA$*0n`Ja4UOndDbv3J3T!?Z*8Muyxjv>O}YO*RnH#v z6J0ljr?~Mk3zF0CsF*x;&Z#DwROf*HmYPFl#to}Uo;tuqqHw6NemIj5^+fy`1Aa86 zt~r+9rp%jRE%@)=3aspykB?aw1?EO<^!gG9wq)x0*|nbH6C$_F)maOuonURvZ|u8x zlz-+MG|@r~F{Hvm?{t1@PsOI@thL9pN7UX{RoMjLc?XKUP58Jk>3GX9xvq9@qEa=j#(wv?uh%GXYsPp7YIG~AM z1uxy3EB9%-z2~|qi=Z4Y@Ih&;3knKa<%)2Qzl?}S7M9J{oXn*Bj4IJ?v9w5h2jGk* zdB~GLf#`42^->PJ4XR=~0lj%p%M9aNJ{)m8lL`w5D06+2~IrAd< z>)Wa21;qx9gN2gw*VQP+TU1VdOQQ+8%zkIH1{7(oOF5CLnTh4jB$6(@=Xbyh;>ZTgI@FVdV@G_D{9cK;ZpKr5A}7XFy3JxJ2`1(G zrY(_2snv5wQ#6t7swKrZK)eJdeyiKJmfDsV?_*EY^Y^t`x3E<0h0NOq)`uPkO^J;T zBaj_VwnS{hyil_iTr7P=3X|Jv7MDJS4}uS2kSdfhep;8d=oXCC9Q9@08ufPQZE+9J z?uKsOgpDRTcfFWr;-jMe#$>Gny{@!+jw&sk_x%eVP+EF?XXRLw^h)^EJ+C)Kp#l*& zdf_|V&|+Lr zg7t+71b_89z=kc7&gj&MmyD7JuR%0U)W^cwYfi4+GGg=f0G5`h*}P2_&zhliB&DX* zN&|tb)Wp3r4hI5Ses4JNnFxR6;|5>bt-bAJ^IBtBe1{614RRDuDd?CbX_*m?tLTkd z$qe&n%cWZBJ%o4*8RS9p1-4G5*dRYGvdK|*P+7KPSI?9ab{>K*2&xoGThOG1wZ^PO;s%PCyP5C~*vsO?)Rt4#S^v!#7?peZ+prg#71cgat_WPN+WaU-e5WCHCt(UOJiGC>b z_|ejQ6)4EBEAuP+jZh7pS?De7=KCyIBHY!gd7ib_wZ||*# zGW+u7SD9s5Wo^=L*YmKLA|*L+2i^gT?XDJ2a_;~WRp7p+B+`tBH1$8=*f}qBsHhZ# z#}Q0bi{aFTOXH_^gmlM)#4O!VM%mk=ij(y~_1=iE5mW1HgX*$`L{xSvd)15z&q8SX z^e`36bwBi#zqX)uE97f22ATKl(Y_0zHhW+GjH%LE7gI|VDbkM0lnmSn!qG<2A4VT= zzlhu!pN=0fIxc#iVrmm~ok3@2@T6v^`bTirG+jROC3~&+I~_z?vS@Omy$0h2=J&%uj1)X6IBD($>UKH<#ms72lC!9Vym)=2gwv@-K za4+^cKfqdCh*iDrb=?yj1V1qv@x2yc>2lof!}E`(U^Ew-6J=+qN;L9!kQB6$P7aj2 z>_J~Hnsh(;P?P)yl_iN@#i<^ZMH<2>8RQHhm6UI8qH#3;y1`(!VKwn7rxvbO&&H)m z;v{8V*prC&kDpp%qLW2eK~cet?0fbfJrzYEtA{Nul$r6D3ksi%S)I2@a&&=&jCiFU z`q>t$_70%ptJk4+I?$D;o7uP3Xl&->OMuh}%58Cwk_wEMCE}7M#3|fvytDtF) zx52t(dYL%*3NYd(w$Hs;FCRa6c3tPKTlj70vtGL3qVoE?l1T?29qsf0 zUcGK2ib6LcYpHN|EdRx_SN17v*+U0J&r>7AsSYX0-+Vw45ewzJ_)@`GG0>xj+oeIG 
z`Z=f?=)7H?(ZVtzY^cV5sm`EJR_0ngg<@HN1ceWb8V&~*{URu#&Q(Hy>edPn)APN4pkP}lO%O<|-Nv{3Grx390Q=9L-Ps`*NdJp!BLGOmA>>hge=+} zdqR_L$~12rCGRm9c(^h!Heo(1ls7D4^7pHTx?9K~%G8rSO#8_?1S) zcc*Z1+5Hvi+LO&e#Q^UOj7ivEG4cdnr!<;Xj~W7U#)WRrsiK` zyFJg{CKdLnDLmDvnk@&lST6Ujjt4>YYS;=_>2V$Ez0uNPn0@kDzGa42QiLd=kzh?p z?3KYm{S<@Qm$2_U-ta@#@oYY3IJM>pG*t~cDNQCTD<(n5T)o?ATI>=x!9p7|j9QM`drp(1kc^f$ zrtjH;pHWzs&9hS!IG;H4dwCy5_AiAgt_(H)*l9;r@ytN*d!{_nd5MkJ5^r-x%>1fm zzJ^k0spjRZbzBslxTC)~!Rx;8vmIr5&(N=#NK$?HQ_c=XM@bb%^>d_`;Q`oep?UoL zOM;12)|sC+3z1{gWQKCAR#cBo>}(UJ>GPClksdW3j%s9Ju_D+ORl~gLXR6avqEp3_ za}bVtRjDKY7*;D!r#PYBp9bkNjD4zqe$DX_3pJ>uw%I?3Q$vvkF%bHxp3Kw-??u5+ zK~LkdlCt97Q4S3c@-Of>ZaxtVnKEFp$_p8?rK@MV0L6Z$iB#S>Z*dKI-)2Ehf`q1Z zGQhMlZTbBfM5f%=158S7^Le>&oaMxdRF%*&z4Y?QQ%4kwxlr}kF@GnoM^ynERhuJU zh4v#CBWB~@4>t$Z9hnK{m{xauq4&vDQIGgW*E7^-8Z(_4#lWNWF)O9h%S7mU;?M1$ zWF7vc^<~^#RUHd{x>&*+RCET{`nHKaOLLJuxM10L711e`nPntsAr>NJ<)$a@L>vbT z>eJ_;jr#Mv=zY#*lwZ`V#f|xHippI55}-r3K5W<}$VRQvlRHSK`Lg8n*uj*Mq?QDK zbzx&{-YDEd7zYK>Kis_kV-RO12YEnxoE&%CYy#Tw_X*MmR zhcg%{Q~E;){5}HSJmd&T_Btk0@{SV-izvK%H+Ve9!Qkzj%SDS)KL~$ z-^yD8smIw~Z;&z(QE|c~bebK%rVzo5?p-LtF#55IX+*y!atE+nNnTv*qZ3)&a4r*H zDKwx=jtm`W^{$r-89gmv23p2UQJ(CdioK6LNIldvy(~4b76bXZJ@U1*IXvhFZD%{h zB)2q&!kcoa1s8wh{Dd4rne&*VG~URNg&h4Qb3*M<6GKGs^Jd$HRvjEdevs8kNEEB0 z_Yq#y*+o=0)6J)z=hdZ&jyYCc5$+un_bAPVvk>v@s{CWHu92(s`e)9*>=tfvon!${FFUvk@s zrw~#pqf?_TVV63eViaqZ8l>m2s5bu4=WX(AhcIhapnUvwAg@wu_mh|$mg{>sg56V# zO+5BaS0T(7PodObo<3`IxC3B5`7&Fyx>sb=k5!G5Jx-mC*Q^>5Wl1E8?{>^kJYcu^ zjMl4{K<~0{RSN1|PL4tDG%{qJVRFD)SgSktibhQ#X(iR%qFet$a-i32#Q8$g03~mq zf7MFKF*(wN%|DVIeR-MB9Mjo^IApL`+KYB30TV_8E`z#FHht z@p7uH4ykJTmUIV*&<}2(-@!4)D4A!84GnT;1y>2x5g;}B;4{RCmmudoA_-m*HiwLu zo7At|`FE4ao!(C>sdbQIe*`hJnu%i(TEncka z*nrjqCT7=QJ}DB>Z06E5%CvGsSlb|nzoc$_9hM?Y(86-(O;25uBcU^M+{e$Au8bS+ zKD(xVsjQ6%X1HRJJdrjYE*k28pS#j;=8s9v$_ie1z$Bug5@2P$rVF8{;rlo*Mhr-_focg!jcy$m zwrGSqz%Nz2x0)Uqvb0z2HfLQ6T^mdz{`XYc2O_bPo|I0gVz?UaE+=w zT-&zU=9Y$_>kD{kRG=4njcr(YIT;PjZQhZQIP+HrtKMYW9v^Tu6KFXHUo;B77F6Rfn%5gFQ)f37 z`B|k_)$1v~s^{z&V^e+UScBPP_6sC)^>S?SaINdQ{bVPlG}s)JX3A+cSUuj#Coa`Y zhc5^Xi0hh7W3WZSh(^E{HFj+pF##FIW8YPq#m9m3&yqYpcqrjcO>#`X9i|VgV zz|Wu^I|EGx3qxjT!rv*X+$k2AOCduYF6S5_X2Ko3nNP-_Tf0?_mV?h~+EHjg7fHpU z+98C${?f+cyZ$rg;C=#>Me8p0-huKLtrNZ;@ov#UGbM@0P{vO~pLT;R*0svgxUls% z=oqJUk`d|<1WSwu1G9+{?FZ=yDEnVI95I0##Gg5J`2}&fwTqW>R$}`J5p~_^^02J< zW;u98xVPb9q6IG$cX}BJJj}oUCd*vU{_nEaVfV5>e#)vhT=?jJE;iC>Do%PPPTE?B|{Xg5DWS^ zE>+hf2>sF%N!}&I{&Rv?EkXoJ0#pxwX{?05GarSNA?<#rEMneexJ=mK?CpMI3ykG!7Y2+s2T0`{yC2yr zLuhwykaV5&7?iBkkAo~BCnzLVB**ZfEqC#0`oAx`TiK^C^BF@18Y*e%<)t%w6v{WZ z+MUX9tiL2+7R%5;p>N=8_df-{=aVBGJXGnqcfoV0H-F(-U#fZ88mInza=Uc&N21D7 zxut-DW)sfbR)?CPgqVzG+fC!WWl!JNWlxu1EpRl9*tZIO%=iD=ZbzWA&ApVh(&_Mh zx#1DxY1&uCYf8c*2HMh3%FZ4{TWS$$gTXJ*A8DN&LDk`_87fi3Kx5#6>c5B+@S>U7Xv<~w;nIYOF(UTMpw%6diWn>m@@g7iR-izrIu)xc*D)(q|Wo=33cVzDpXIQ z2{Hv&T=dVitI;yI3MvvPjj|IbMQK;$;`u1ugqmXw<)^D)W!qbGGrQ5dX=?G6F%Flc z471+pQyL^r(j9j|XQPQ|keQRgr&9Kw&02|9PEy}UK8A7*h0?vvK|oP(NX#WQLhY{o z;|n%&u99V;5Gt3if3hp4>M%!io~+9b=BLOJXPSbPaz|=0;Ur|lfb#;N9$s^cb%3<9 z1KQ?IFe)1ZXHRwc(Z=B&Q2+7{Fg}QKE4LbW5wQ@R;A$M2`C@)_P4agQq+|tiBn=_; z8fZhu2I=)iGgGDGcZ`QIN(rVS&gv11sH1E2B0gjO4t~U}>elKw<**=O*$qgOisoC5 zvY+7yT25xi-yUYKe{}QA_Sh=3An|CdTvbMTmzbA+<7dKnOpaSc9)(KMIh3DX;=NHA znBkK8SacwG^wEoEIX4w-0cfey`pC1mNmwDUHj)MqX+7B(e$&fO-qyl=; z>p0!8W&)uSQ2;q=$QINsF(1fxs3u5wUtp)yxgD}MvllSQC9hId@d_NuPqbK&4<|k~ zrBKRC($EW-2tjYx>5!h3{VCJY1EF#2mXF_Gy{`#}rREY5`wGvlZa{=v!uC>e_YQF6 z7_-O++`@VSOm$eEKhHpsMS{;g9Nd3I#_=Vv`ib0@wfZN6n9<+u+I?5GgW}0j=rLPx zSdLQ{%Mk%4>JX}UGXMMBoSh2P90IAdZTJhaZ=jb{86T~d 
z3+w|*UH~gxNM7Jq*2BhDT%VvCBvYuFU+LNJcA@dWu0rhM_%S-Mo0QOWr@ZkChc|1* zD%`a-un$KUDWEm@+F|W~2mGm6OvBF9Fd$9QAJ$ZUDzjs}H@JYRPcK>HeU79n!Ps+( z%sMe~T+$ctl1L4yplx`m)T3?NkEI(0L2eUMrU+zIyEIi9^JZLxOO}J4;Oz|Bm38_G ze2u2@DzFV+wKizo?JdnR-ScH( z9m1&#rbpnd-@fgiMBB(V7B;VK#`FTeJ$H%$&j5#&gReuj&S<)s(+x?9{$-gsq1(2{ zg)idSm!MCK=@{051ag}1I=sEnCG&I{y}r%|CteP(7p-pJw)yIyj)w3KuM7E-hq39a ztqO~pLa6WKcfSzfg!Ua9K^p9z5@rjcsIard48>fTp$uP{alCBhW~(KFDpz(XX(k3e zr&%M_m=eX`4dr}7UK~9stG-o!4cqwqXFL}o&NeE3%TKW}#PzWmw0ozcp>INAs@Z4= zqRE~+L1qf)E_|>ky8Jl(#)XCjgv&F@2X79EtRWLxzgrj zA|AX=HMFG?y5!TQg`He7?77)bNIMp=Ch?$~tZ)mrlT@(sC>Wf~Q2aE)lA~T?rcAfm zh!{J*mx7*#fAY|}ec_qxx3wx|-iFIz68?z)$hhz(Kmk?VO$N4h1rrr{*DClG`~aQW z(eua8*avXc-l926XlJLp>xpp4n4McA_KrAghR~ZvuS?CvMc!!ZsK;rNv)*hGdRtyh zBmC??zGzf#yF;tID!c?2RpikkNp7-nSQsnxZet&*B}!MNFDZ)%`Ce&_pU6Tbef0j| z4ciFuvvhCa(J*y!epcum0FC(eJ(9pf>D&PUFoDyMnGXTDs~BhEULc2d6)f+A*&kKe zl%?8A)>TCk#u?=ct7NUOp{V3i*uXaj|Mc}1iGo#D&hoWPw#egQN4rUd??X?Q8NPbg zw4RLoHz$z_E*ncLj)}o{KM((s06>9vA)&$5&)fbvS5QeQVvA*8Nb9IbQ^aZ_Ck%KO zDw}^UWhj(o4DFod&?c=7W0d%02F#FVKWu#exsM5e&|=8`mAwCwL8AF9hZC^%kdp== zg~5A-Nx*xq0eAodyvZB9g`J0~1A+uY3Y@zCs|FRM5I}?i5}bx70RZd(5P${`_`#F- zfq5{7_lRIcse4H<;yn?dz>vQGl0o7Dq`|@z4_lRS!HNJta?b!CNZD0>6lmag%K!k( zg_r>=f(=3g!hjzDIDz+|-ybB=U>^XYfZlruuw!5@07ug32cSs-(cmmtv7Z4fZv{jgZaJV8tf&2&Z>NbMr!03lQSlwRQ2)L zf0g9ne)oRs7@C+r2CP+gZw^oL1T`i>;F^h2y4(xQw-KU_tnU=4I1&C&upp6$OG$?9Pk#xD!Pa`NMs=SAkdY zMw;Nv{U;lHGqbE|;nL3nnn(o&faoCU$AAj3|A5x=_z+GruxIv`wQ@$3+gH_4D}BR| z40dgU6<1g9kS1o88gA0xIvZ8zi#_~`qzU92DNd)EI@Zz*8t0ojVSf@>`^!4fV8qXO zpFbpAv~gAJ+lb0T(8&fgb&GFsi$QG%kO={XA%@&xx-~qnpF0vn<@j_twE!WxJ3gNi zH0P1fKHUL5BB+s8ZZl{Zp$YR|uL@TJv)a$QAq^#d+$xa>`$=`42|I;2k#JN67+n{_ z-;@ASZk%5RJWt1(bE?s^a{ZTZkgH74UZW#ZKzX2(r>$)an+R+q^%ZI}*aq~@I8@aW zJtXK-Ol)|x=3yc#S0556I9y#p;0%UXw(#o-^ecb>Ad@)qGloyZXQg1-@ruBGx-c{U zTk5Co`3ph@+c!vrMxDJge-ghCg1q5b9wP;dD6fS^^~$ei!UYnoj2TPYhFl}SM!evrhZAlr^g8yy{Ik$O^LFfsk@m}iFoPp z*!JN6u@J)WwX|sC_P2o1E4;JS8J$R94fN`=VN-G;aZU55*pIEw(0nXQ*|p(E$JA$o z$^;Ad`KFKTEdC(R`TCJp?r^)y{Lw*MbQ=5(S~(VC&R|fjrc;)y=I*@vrEbS>C^{#CpcMsc!Zgq|ZqG-y~RP)Wz7JsB5Ter}j&{3Vjc9 zd%}v`m!O{TC$`}5XRKR2*uRRW&nOS9UnTwvfOW>Q8?25a{zskQUR^xqw>mtb&_rth zY4XXtSh`=47^$A$j#k2l_r)6|#e)69B@N=D`vHhTx#PuoLzvKaP>FzlD?^+^08Pkt ze*2K2G`atDvKk|U0+>;+`ls-C;nQ)6cynnHk&=@2rKnDrKI~=g30I{xC=P`}1~GPU zhm;%)zF+unykwiS`7<$NG>wYn0zg--@1Fq={^3t1evXFV0naE0Jm9e^JH}Lf6jAaY zBRT&01OI*-51O0;#HS8O9UNsDT5*~JtlW5&(En*t4BZjhx42Xzrib(qI$SguOSlRuLerx@dFjd?fB9b%go3BDz78} zKjN?*9hO*xCb*Qz#dj@~Cb9&P?;XS1(ifWDY%u>J`zLt;7%_DCFZvggnAeb#82OWS z@FK%UY<9a-_#Z715Lp@Wu#?!SLPH=^u1K*^t)T!*dYQVcADP71MLc57nQc07wO056 zC^W4F;Pu+(qtfHk%T~`5kS`MhN2}Kyh5Wec_E6Gr=`A{W>`tvzG~}U$FxNH(*~}B2 z4mca!opd|v5&c)%5CML>4*G-?Aq?J9tsUOzf{=Nq&K6O}WMbh$C^+)TZE{+~^X8+( z%=)D@q7cx*B$W#I?L+RZJc+apev1>DVT;byXx2A`Cxg94w!_nws(?#A#wicK){=>@adNfeWCYN#wk^K*SofGdBp+7EMtqRCQ*%em=imaczcZ_9x$` zdNGJKIJL;@dhrAiFyW4IK)-ArOm;LgVJS_C5(Sx@`aknPTRV{B0l!$R4(iJ>XT9tw z-+3HZNTjU=qT4d^(7J-eyIxT{C>ef`5xz31+r4C`&h8cB!RFKL3ZuslBKM|%qI7uW z2PR<|rAOT68ucWEcgEPS@2z70un%tq`nDm9kRCuw^xR8bhp%&}sOXS0q4?iH)sUT&+iJt*ggJ^F$M_E;6`!4(zpa|4Up zHotoc$1jS5U9zXs`V&ED9}NxKHkX zxSBZDY&Q@cUsQD~zssdV|dW8c8fWzVA|qdzHhhh5v(TH@>4 z^>FRX%8%%NUy~Awwoa ze~o(R0C>bhBM|{FYgT<6J7RJ4S6V&(^S?SmX!#wk2U6Y1t5KngC*B%g0b^U>$EBUpJIw7IJmC{r=T|(Hkn1R4Y~~kcp&IbJ>~iTeke9p?h5s-!b>&4v=>`EwZE2bnrP}v}g7y(!G~U z0MTe$jAy}R#_1vB=4bTWz<8b}K7JEGKmWGxKgQSa*dQX>+J`e}3l|0cV#pfBgBz5BG^ zd^N-P06wdxxsAKvDOOR>E*sUMh&FeQfjhRG(xA?S= z#l;uaP7xJ!WOWRBO#CseF`0|N3rw}-cDA^t7Q81OrI{nr(2!(zG$N(d@XgyStH?RM z#ma#QwZnUj4NHmFK`D`B`R+%~bmOS_7!qr*1%+;zoNyvMj?VLWJ4g15*6H#LLEhJS 
zBDL?Y-BzDNv*$^hsmMNq$<`Rl*fQ2($cXTIG>zun(x@Uwso%^CJkdtNU!`GxS+-Drs zAfmfAuuU5AoM>cEI^<}He=Jc~vyKZdT#*M}IJ@*@)uSQYNpG~E#A>%9Q`V~qZ^(3y z$c#NzKe%GeU}+c|)aD=1PZ{~+W3!#_zMid*59B9I(^!l@Q3CYeR1odK@sWNLGvV{<7Qgs35Hp$loR|+X9dW2z%f}wNBNDy-Hr<~ z2a};`?_LtJiCC6&j2hZ&O%)ld!;=XK>+K|GDH-l_TK7K$_> zHrx`sD1BL4N&e8;`dpo9!SNB~Q+HAM^!#6d=ltPCLb2qc>xpa#|Bgb{Rn?s!XQx8t z?19&4p(2iiKZ_aX627-Aa)W$(}liUMaP1=X9Wl2-#?jIriWuk1(FGlvdk%2%t z-0+V~;W=qr@R(L{+vL!m5dgKc%KYj5<>!swfVc%p(4>UQooGwY`+VY5Q) zG=)*OF=J-9=qD^N{d%$lsjZ-VU4Q3a4Xf%HF)ZoN0T~wxOmj9tnf_XU^QO?ls%hSd z*{-*X$88Q?g0v>d6xex8i?do|8&M3W`6?65EEnwg(tNjlrs#CPgBz zeek{Fpv0eNFac}5wi5&;P^g)}^N(*Z4!eDRj&8Me%zgwR2yI7;>Y&^F9Gh5it|e~s zW!z8s+-nmgLsUK$A!%=M2R@fs^&8l2IFRRYi=4H&gT%=qBL?EBLVUT7UA~};Coqo= zY-Zp9iY$OOa@Oohb>L@AS9z7m9#)jiaa2hLlho^lw{HQb$vV>7fy_pFjbL?vS2coo z0%gjSAHcM)K-aqXB{wbXFL5c}3+CZ}G(|m8Mqk>5?rxpJ^;^*_hTNeo!|53iM^H+` zf*&ZV>G7Caq*$*)OFEFCA)f2Irw0_fA%!gLy#N!xYg7FF@av!p9)p+x0tgaxqS*eh!fHVRKS z%)HL3qKGw#A<~Wvx#uo^vG8aWs-xW6s3Iq*boQoiWoIFWO^uP2r1wGi2hJWC_%KcB zV&g)%CSTU0`RGS6M?d_ghU64Kz!Oo-CTz^;l)j;KVT?unR<9DZ%kT{79I`}xQ~gLD zyINU)^)y%*aqfYcY%ynXq^v!$g0;~RGHdT2DK~DZICR6+O266fk;}*{v&!J&W3Js9 z9M0oP{4*4Ne)=ny2X~m;=py&AcV+6Zyl#QtS!usKsWz3fwq0XARB`zr=m2-0+6_>z zh2_&~S_SNfuQ+v$nRt4*Qsp)vjeiN(Uk0fxH@byQsP@@DDxj8=t1G}P>ZG-$6y2&7 zzU39W6Eqxs`QIDtzuY) zJtaHicw%UQI8mS&(t1g}&kdHg&~S^j?KH3uRp+~{@Ax3P8Fijp}zGOplubb81Z@C}&!Q;a;bEf6` p!@#e^@sG20vmMxu$p1;WrX7OJ#d-I%ZuxZP<-t3IoSgvj}RZe{#6i=A0a(Pd~!bzz(9O>h=Yg&03e***+xqF{S7Ez#czsO++yd&)FV-rsT_dC3#y+2e-KE-U={PL|mt(cXf~Tn5PO+;l zg_#yNK8J@1eD7(|P*LQ}H)}Fgjc7P|f$IE)`34afnlbo= zpbGC{T4f4kpFnxFri?#n2eW704IZ@aR!;`yPb!33$yqxHq><6&R_l@})8yvYb+8Bw zYwU*yo9EftJ7!od@SAx&>WH4dq5c;efG?%|s^|?`3IcUKyFETNTKp>>C-(p&>%W1J zPFa?$dZ6{oOwTR}jnvupv|!!6*gpgyeLxojtvtvE*u@Y}uNiI~BL0BITR5!w`HnCy zvb?IvIop0J((d-B@3(n?&xm?cYiQor_^kytA0)GVHx-d8CcKa~Q%H{sAbZX0dYXOi zZ0jCi=)>0AQq}R@q~#;jxqD7s96&l|2{Q>s>;1F;&iBTC{1@UB9cIt(tmmN)=NSUrCrC;B&Xub+Lx zY8$=IDsdjHK5#)au}Sts79BE7BZ|U$hW|t&m!dacec&2d@|;<%Qe>YI!mOR462$oW zt}4c7B>u|d`&s=b0O|0Vq5?Qn)B) z`?{TP`cJ<$2-sCk+RMLjw+VMCu=0dl9G;xik;fuzspI03k!mHjC_aS%QN@O9+YPCIe(0f<6nQ%VA>uXg?v!IgINw--U9ljn*-2B|XQ z)+TRwUp~WcqC*qW9Y#V1$;kUtLZKi9v? zM@IUbY5Y|k{crhStB50%@w$(IIx7gH{;&<_QT%T@kgn|C`(|EAUHk}gE{ayTx!Cg7 z(G`1cXus?I9O5*aLTlP&E@GCc_;~7gD5BOL&JiiWxpphzyZ7^gVZ3gs`FpWz^!G*W zqo3JBGM;`9gI{>b@hPO%KKi5%68 z${hKHskgmfW6g+y1@@W0mlzOOMBBvjnhSaz&CV|^e0@`XF|&Vrdv(Xy$`vL4A6-lf zoi8BauU7+(W=M~TGo2GJVZL5nHtaO-i@%7v1BE@&XxSfasz!_cyd_#sDC)d!lXt%Z zJA4tWwJ^8!D96T{ku}hzQalAuv=ER)5E{_kJOw+V^KCOC7X$5|Gc2NMfV;X zD~QghLZu+y4A#a2*kLTyOmXf1XCD~VL8i2}R{W+Z6I@W4(4A;ep5WfxM9MTTGF;)J z*yviS?BFo(Zw;z8Vq{}8Ogs25thQQpETYv&&4Rc5{()|!;m4PTWYiBU0^o6%>gKl$ zM)4yn)GA&`Mz|c{my!+gYhzux1AkL8@0tfCTR0j~)f1vs9 zcbdP0{hEshG7W!_@p&Kv*+(LMnC^cSJB)<~md}1?St@sNZJX$l^&O?(zaRji&s)BY zSJ@SLUPqU$4xD%3{p}fMF#h0#)Zd{20ICPaffEOS@aQoB1^K~w_}zPWjD-Bqg!lvn z2N$1+m_(3}nwytT0FOY8i=XGgnLv2}LwNjXs#-v%Z;p)4+&(h)|HZ8rPHu^^hIK_o z!%Mf<{yVFW$3COy1rW=ka75u4kW)7a{eRI+T%SM(-J}Xs=hTWQ;P0NCcS4#+8<46) zV$184kqrtD#U!SD3Z&-($r-1_RN>N?tVw+63JCJIR%ej3j@XtcuT#`e8QLw7I+mUS z=@!#YB7=tHtW$DMxL>ZBc1d-IhDH!lk^n{dK%UkO-^OBBJ82b=hD`_&M_I=Wo+{#L zA4#?9QVE}Bp4vg?CWG{&@?8!fbjfBVY^@aXn>%}&oV^;2{vt_3xRY%rQH8t&5Z;va zlCoY3M-3Hi&R4T}ewng|em4o)x#OCTY+ECii58aZ~}% zPY6?T-fv_rX7^+{dNoL0L}CVG);%cSuAjf#P@Kv=t1fzd5AaKequur;mr8D1sI2-T z2-*_f#Byo{e;LU9d|Lms;)%r)=TaH&~Nu9@~>L0j;@ zz;)p+=R-V$FM~-^Um(@E*xJViQ_S|8QjmW%3U=%NkXNWgMZvwY)BlE55Pg{{SNR%7 zkz0c|ggF+B9r?Z?n+zrF1@{+)%B;5K3isVy2_$qIXOkhfot%qcd+?~Mq_+R7=l(9? 
z?utP%q`pJxaNr-q7F+qjr_&YhLRyOgcg%2CzSb|{{xF!=l1tItr_sJU5Y}#Y?kwQ} zHHNTcVl#b_L+XqfCsN3rit&G@ibTK%0~#GPQ32*xJk2?Z!U*jiQt`wSDW?obELV{KGEhliKv2T8R>LL% z*5`lMEs|xQGxAoIM}uC6fQr3cwF%j$$3QTY^iEWHTNPTy{=DeV(hH`Bpno-tbk%Ydl9L zF*e1kX#*1Ryo$q&RS7$EHg1wnDKh-c79wfotrVx7Nl!(1n%>jDj-Vbk**N&=$|T^u z`%J5HG4xHXn7i7|T?;rPGo@dVle7LbB>){&|HbvosCi4{tt>84)Q$OE>}Z;W^_^VZ zZ>8f=dd8ux){eSu>(eXufLQB3I66iDY*}bXy!XD&DDBP@s!S%^soyoW{~q9bYP}H2 zV){{TJ)az9#D31Ie!9&s7s7$^{EZ(v0Vi3>o45U-SM4_7z;Jx(n(IL&@%M5v!um%s zhX{3*y6U#;9~tT;@?a5IRo;BlNmh9@rv6_GWD>;7A_pklGRyvu7t`&HA&RknA1PWG zOT9_~+t&3AO)JcPLYZ$+Gt}lLgbg2lv(r5=^)u`a*sBp$Ids;VL$kvI1odeenD{lF zf)e7~1cF+|>Z@{QxC*PZam4F0uc|_>YSL4cGv56;qdzolcVXBY{W16Kc>cR_FCi~U zoA`0ufQ;69|2|Sf9MxdP0x-qn(16#9qLo z5RR86!%>~hRJ9hx(jR_85nNM%;}xN8+-*CId;95|@y6%kgv;!fCNh1p=brED0+! znRE|1ZBwN}f=`!#Oex|5qEPukFsFkb^1Lua!%h%x)NaEbepM2)=V62Oxvs0v(A=l| zk2zqxz0`wDuG!#p%*!`sGpZ$vx1BS~h09Co)H`aWea7!7$XgKDOEjNi(=U)N5J%vM z)n^TlkZ&6&j8yOFwex~|r)+cw*UmkfN7$BT)YOhnH0lo;%5@5pjsq}TG_q?saMkP4 z4(=-ofS;sN{?+e`hkpB^cvQ3f$4PO?#w)p;m`O$bth8hY(w~=#k=bb#k5OnL!jNe5h@pU+@VrHtXxe{0`h-gDEQXzH zZstjYckzwx@2}-M<(uPpiH@6(QPPn$L|tr3;)Z2Wk9XWWo24cqx`=jED0;}qMZy*e z)7baCH%=nJi7~2fF9YH6oKU;*r2Xr|1$TD7?BSV;CRkyr7;n}y6W!sb(o^wBKh!Va zzuU5(>6c#_UkqdG+lIVL`GGb`Jz;pO_CcvfA?0O(JA)ny-}^2p)3So-N@^<`P-|SE zKvKi^CiSOkCYuQugRf@0UQxeSGE_)Z)=`TNG%L9dVa(^|;=nl;;2O}vqFnK)ODoQ5 zGOl}~yqW1DIlrArZ1?+0Hqzhja}RK17>Ic9Rm2kf;aYqn`I(g#>Nq)(sM+)HwLX;j z{S#OW?Q7xif!{6p#z@;e0D;&p@i)6N(x2^p_lV;UJwQ4`Sw4f|^gES5tA7&x z9shqb0!DS+k4R#%2_tHCwT+kC@9NgnekO4?!+$t*<^G^I z@~}Hy2$ejQYdf0Rtj7!9i1k@&A9Q5E?x($JHcDDlpo){ebZ=*Gh~T6P{~2@-U~fox z!4Q6Q_)gE6`JE#1()`c5HFeP>!Bet(0MXy>xX$kZ%B0Kz?FWKGVYN#mO@mmB*}G=8 zD4tbi3gQ>+Ug5j{N#u$($Bp-p$*?vAz1#eAu4Ijj!$ zZ$EXnzHhw_@9b>vTFF`y;ac#e+j67b=HY*WeIQWjTWE3}`;La4szFu4TZm}p9`LsH z+|Jr(n|LbaCifojzD+DOa8LFsq4wSCbJTAudPcWLms91rm-$9oW=Fku$d}iaR3%hv zcw;vK-h_;Wt~3|cQ=|AO~4OXcjR@FXA6aeD+$$ZCTRusl7XI%Dih^{ z46KX8q5I<*nAPjWwwS5t5{0`^kRAa@sjHQcM&UcmZGqM{wHuWt^`tsZ;zL)$&zx;XR#wv+9cWV+Wx-avHba%WLG z>4dyi(BD;%9!~OJt|%cRXSo~xgcsfPMAJNTqQtJh6khb?!oI#B$ZCPdU67bi#*o@! zy4i+Q;2SNTgRXJ6Z=j|g8Vj=@s?b_F?ngpe4)dZ+Z7GVqCWZu?bagQ04z8DVXuC?n zwu;3VWM$C4v=tYO?<3qcd~xdty1HAtyf&pOAzu5>slop>H3u5K-Uf6*xdv(#HKuo@ zl?pA#V{yvqC-My3l*f)dEOceX^7dJ7Q6jk25v3~BrGZn(hkI`!`m?$MxEVFa+H|6K z^v?Yp@&feFh!D$qiOGy=#s!XfWt|R%_x{T#z+W zO9(p`HqE|ZjZ(4|I*}=;@Y+FU@XHwWMPCfPN1@fHdNrsD{+p<74}?B#^4*<31bzgx!VP|QpZI`BGs zDOr19~M!b3Ux>-UyEI@V;ZN0IC0>)TSt52o9$4LWa1(u0^n zc+jItvW9pqRzJtB$+S!|n8b8@+ddmxH?Y+Ryc1Yq?tMesCFyKr(~Z|(4N-1|3iut# zN?7Yh8uXHyEvr@GYf|X|ZkVfJDgxu5J-=AaC59$`9#R`Scjd)ukgpkNo=LyH@MxdF zxs_|j$$ln#gg=^%^%#B+SU&dAohSC{I=2+tdCPf}>U%xi=tDT2DNw1bm3YBXaJap@ zU9Yje2f=kJ9i%FpGyXWf_;HNNH&THi0m|{&1eJ*uFnl~s2QmejZnxbjJ%IZb9$Sb#eIrDx zn(C3-ilrYCP}VdUOmX>&*JMQv0@VtUOMeYk{{$Y8Z)I1o(XA9q_zGjH(#Ue8gc!cD zZTWuoqkQ|;=Cq(VRoNBFj>|KV&~$3u=rxnhW7kw_dv-P+KCuH{?RYhR^C@{ydwAeN zLzzM9wbI%b3#2Z+IGS=mhK7B&)B=NseW{u#IJ6Q&&qul4i6&%mt_$fQI~~Tg0CMu$=seBTho{}5*PZ^A@UT{7NjFm-rv9y1KYz+at9 z^Lm!TaSBKuak3Qg5RE@m31hXqacD6P?032fChP4u;U>Ro5K!7pmc&D$d(Pe?j}3$o z6TZggEB+uwa*RV4^+-D@f#)8;y0v|)HH2d|Mr~e`+x<{u0oe4x zyV>Ol?YHlfWF5{LnqYThhdm_(J}a(;FHP>_7;aBoqwJxz=`11B+;K-2)DiIx`i_ zO_j*JO&hdt2cnjSlN8I$$-PwM?K=z{70>XV;UPIIm5@!>2+U8|S)oQ3Bg}6a4 z>GN6%!5+5K(%`Lv7tU{Qxv?k1Pl%!ynR3O*(e3U5t;^T&TCub6y&<2`Zv_?26+Y); z{4PS*Jf%7&Yp_;5P%5+F+2}2bnc`H}>&f%bUE$%TRX~aRwn)O<*juKMR z{OjJB3Obd#LO&^5#drY|dX>j;%}kqi@EOzPV%A%AyQBmWROTt5KNxm=_tb90WOIV! 
ze9J&s*wt$0%BxiXX5HSYkapCU$+v8DYp2ZGHeAOcT*Iq18rUVNyi>bDoC{VfX$KNN zXNM>^rz|9{x<6j2;G69k`k55#4^rA%SI>%2X;jETXz`niX%fHCurBktKCP8@5-oAO>uzC^&!*PE zlIe)ek_vw6H(|PO+E2?GMyn$F%ms<37?zfnH5v-;f`E2x3#5nj=MoYX0)PdDO*-k< zmyxc@od+s0QzM>zksRjKudAOk0)cgFTg71|$@XdpvVfiWjhbTT47n8^0*6DOt@!bD&eNY%qAtDChz*obDV zRR%lsi}SP1?S;&vgoSv-+vGVzEewM;l^n6(x- zuDsk(t~{2KB9O05bzgvW#3xI((9yh+m@1P-AgfD34oiZZ`6*q6y+**PpC#CW5T&}f z!0QCE6!0+_R2*rdmA6(}RUES^4CyN!3(0yf85V~WV1XVP&#T@+G)f*@_40LEO&4rG zJl;HC)kUMVTp(QhDGbIc*9e4_f~3o1aRr%1CbqvGu5O?C`ku^k?5n?F!z6?G&&CIo zO5sYwDtTzj@m=i38X;A+>pJKx^WdY2l!Cw&$0@2+Ns z>VvBp{87=iiLtaTV=v1w@r zW9blHDb`qIe7Q(9R6gKGS30GX4W>C=ICq})JU)@WQ_MLM<&sI@cNE`>wR<8l^&jK=MEu z{g%Y>gn?T12d6;`R@w#!8;O*IeVVVdLSYpk<-50dLMYS@o5kA|&C>V-@Ti6mIzIup%R`bA{bZ%0z}|FupD7xazgYuyS3<*zq~BL}+%a{REs(%z7!0Z{!eJq$tY))H>o&i)_ntq($!%Tn#II#wU%+>a zuM@Xn@b#**Tv?P<-||aGA(nJUJUZJwOJ|3O6QvA`kYhGkMm9MY&L=v~7bb_`SmdY3 zWRweVURTQniBNCl~TK_$j@Bls_*-Gn$RU}krWHwMy-ws zT21+2JevfEvO4vd)7K7G)^CNvr9!9##Ii7y-WZ(C=8-)}%jx97KZkry^)1ImiJQ9`yOG`+riYz(R zLjn(0{d$C1aX)hCG#nbhLIwM6UXn>LE~<;*;e>{=FwA%xUed>;x-Rop%+W2jqsjL? z$5&nq#^uwGqQTl+4mEwf$l&~$mIoF~%92V-N}5V)Yi_QfPM` zBJ$oYL|zSjY_7swI2uaGtF51EPjZG+^MTPgAW!S%FR4g4|;&^R1n5yewKg7I!^{Ss(=pbTNJ|2WwIPxlN{fYbCq>#zptg`-03C zu88y-jM^d8lh>1=Cf#)b?nJZ zkEMlX(4AL%qR`1xAG^?7=Eln6xkZw?kv>5%qNs{Xr34#-=Yo&Jw?ub69KHhe_3C)z zwiRsPuf|?kyON4tU*za&_!VR%N+W^g*$o99>WsD&f~XxKh2Z7 zHpt$Byr6vx|9K+x-y!# zT696RTzYC?)tMRAh=9K}y6lm?-jPiRG+!YnWisL0t|zcCc;p4ETE` zBJEhyXxmYDA{Vh?f34QMAONEq4k^z?iHpN>E+^z=sA{1s)b{!6!uX_x^Je_*Sm^%f zF=o7)X$%-173-PKDabI>-8oR5Y5i2DPkE_0G43Ja7*l`u)F)%1{+km>S6$P^e{&Ij z^fr5j|6@JK?hM6b|8AuTk2#2jXg=;|60$b21zj`69b0E+E>*%DALB`#&f@|p#D`pi z_M2-l)W#QqB9Hl`mX!PF7%HO+R^PuvQ8;agg-{@iD8609)!kwdeMK&6<7_iL?G51 z*oDPmn*~)SbT;_!v!K~3gywSTTZZ9A}TZ}D2F~hO~k9Mf5;G0Ju^uk*bVj+5Q<}4 z7+i1xw15z;Sb;f}Si}-KCCdfHW2~JL)V=NUSZ0jl)x(|VWUe8q+w=JF`iQQH+vm{NNO>tRcm*3Ll4c7koho(`F64OIQm+MNmkUxT zt3{Gk86PD&b{?_)+mujWYXxFbCi0X=wbBtI)R>z3+vPlVoe$1g z;my0Y;mPk_^*U~F?NqSn8Z7)!b$lMyRJlKz9)GYJh$g+x62djVFp#iPLy@tDcGi8j zASYd*^q?L}UB%3BQ1udrZ1d)7crUt2WCYqnrdB@gx(m_}JI|Mjv?_=EzWepOpsrW7 zHld!tg)OsG5Yj}@TmfbcH4uknQY$0wF^GQ0&}SC}0;8N#*h(Jqom3H9^%xSNEJVY( zY|3j&`t592$I6z*Y$~yz6GI3R-#mn==@`Pc9g1s}R_Jcn^?ijnn<_vSqvYQ+cJkY5bc~~9e zF|#CT2y=0?jxvG!Ei}ux1QPnBZ2iW=NP$JEe9kr0V)E2@s1`sPM}v!=S9=#dmqYq5 zy)ZmyN^$bK*SP|xJ8#LVF-uSe19k=J>XV#_BddX|a)Skd9)L0M{iM!d-Rp@Vy&1wRjRLo9OoPX2)+Nd8 zV{TvCj$aW!UtDk;X<4=j68M71tSah7CM}2}j@Po$eoo#a!Qgc+)aBtQu#cl-sr*w^ zxk7Tdh9o_fgo*UyMj*mm-9)|$@M8Ssk-9ortuTv+YBW$vJcDZkK^QVr!p*RSNvM;8 z6sncMrv`~Fw47kED0lEOZn2xD*Ei$8;^rDWqsJ$H>%{NgB+V$FlTQ4Wdye>`LNHB0 zQt+v@u5;ad@M0X$Roxb@yng`$?oU*jC$-+C{0dIO|1C>H^+xLa;kb(%MVKCn(WOC$ zXT#xTP{n~JL_TW6%ftReil30M&zZr)*%04Q9g4@ym?QN;endK!`*Nj&td1^P#NJ?) 
z5tJW+WLqBmH02-^4+lZbRj354;Kv8_6D0v2xc}(lI;wNVCowOYCN)&;izR(r!e>7( z8C8;zEj{-^C7oReBW&6CKQ4=B4hISLel3@er;*x_w3T1%@J|O+C+mtY__E&?*?An2~;BQ zSvSqN0LK%+V5oK4v99a0d2f5 zWAY_*Oe0lIbRa!YgxKZ#g}mL@-H>ioeza|Ly6f5P|4MveC#8nGae<*~+(khf% zsp?>L)F&>LNGKPV8dvvKx`f?pSYB}q zOrTn$%W2%Qh-%LtdY!%Wisz=O(w@zuhbbxOJL?z6Vd`F5%y>-nv9G3f7_v$uDQL;0 zo-=j{FPpt4RNRXAtD%}w8$nWrKIcYG$ix_8+PAZwnU`jZ_5%)sL+jN}qRz2i_I|tz z%`WaWT$=*(u>xURdJu6oyoS(=rtBJet}_R|h(_uS=C<|X!fU?C!Ze?tS&m7+QnJm>S0!l2USw%% zPZ(tcROGx?XSu9z3I|D*oM>K?Bc~s(*S4(n1&Z10MDsFS9j83!pieqR87HR&FCg`8 zt*f9-7s{D?C91F5ba7E)70w)?HdJbFtm|`m2JuM6cB81;bW=Jf+9e}RzVfvVgD!l* zOR(Z;8MSm0GITN+XBv33ANG^AwKoJR6-&eLoFeU1SMWk+#&WuT zAikBHyqL(4uhEt;G9G2snc>c&G<-!VOa3&RSCGRimq-*G1-wD%3J%f4)8|I=DNu8 z6k%s)3UW%9HiMzQ*!-^B8V0Ny2E^bAMUK`m-K%Ru;*#ut-)VgCYJcrCQuZ_fUGdbt zcSq|ke!vZ$Q&3sFfa>u~!$G=08>lVZ057ysnbm%J_X zJ>3&Rs;HaD=iM20#`X^4F&7FmbPU#^3hL@?6)Y^cz*xKzTq2m10xVylDHd1(Ynq3z z*h&TAZdhEVlCqt6J(?(-ZR=L6<3zzjkF+FU3ku_x(zUTzZC*^kH%%b+Z0UB>5KZtT zkOAQL2VOO<4F4O0QI8i$MvprFa=YK{!= zZr4aAxO~?Pcu%xIv#qdTPKjCfa^?Trk0nbZ%fl|!!=WAdheJD$5Rs6OG0~A8{s{JP z^829*K*D{DN5hT(Qr-FC*ba||2_ZEvAHRgAi`)Bz+!{KNX>5E|N9XwJH!iifF?v^V z|341wJo^op4MwPW^Ku z=xyG|V=qqUsLtmbtv$~$F5@mq4tSRp7tKD7{AaV$Js|YoApdL{xk`D!`?K~NsrVOb z|4SJFp~bIXjBdbp#q04#!oG+4x9WdDSDwp~rTrMZ2iR{v zK=c2C9+@I{wA<#qEzN#Fn|VO9dI54Ssmy4<`0}n8v@xy$qVkc#sQrR=4>(75G-g4J zXdiehrII*=XNXbYeVm_+)rB9*qK2B0%6|`txfGUc7A}Tm2{eC9ZH#xRY~iFm`C-K+ zBJR&Si4{MLHseMq$UpGIV=p!-b!dR>75Sk@!V?Xw8aaGd2I`aG4jRTZE7XS|d)qeW zNxA%cULK9S9;45-KeUBGkm=sYb%xp|u@A*j{QSW`lTQeb9)`HC`HRJjzgYZDAl|azhv~crU(9Q|Kxw}zwrO3*j#-dXlnl@TG#WJ>1i_$A?pI?ZFj`MaN zm&8&!1Vr|IR6>Y*Ce&}CR#_h3FnUgiUbs1p;0k`#6pOj+(!Ri6X3It%extfk$mq50 z3m9_3HM^Agd4lhA&by24E$wT3yVn2BekA`+!So*qI2i3tcsyimJgdFQcz3DdQc|}w zfBs@F@`1TEyS73fn&Xg)c}o+XeuZhGj|D-W^$5Y|##s=2$+pLPE`AE-x){ z&rS#$;rYuY{agc?uS!D{y~hEsbO_0FVCEW-m*?2|c~V6qBJZbW03UwdG}TG(v=Z(O zJYQ-*CQ|)i>BDrYi7$HA?q?|&Qq^4g&NH;_I`w;S8*Q_OnZ!MS%xjx+T#;x3n#iI& zSw?SY@J1>Eh=r6E0UdlZ`Nq`y3txg_-5M@^1|Bq$wU`4>Smsm$o4MO2LRlz4d4R8O zdKEGS*t6C6kn8OE-fzp;qf-vvEO4SRZa+1^xM}TN$qKerb{!fzlv&AQQBb|(HPtLtfFjG-SiDTK zl2O%blFze8<$zV6->@d(2O%a(+lotj#4;28q`87!N2kBhg5RpO+we32nix`o<(f{W z0I?_Sue;$XgY$A(7UIPgxcMQde>ohJ#xD9I@?}tKh0uZuF*O+Tb{lbu^0V;}$+8j9b?@SKm_fI&F}RA#%fG5JAVMK}IqhS}GJcPHjo0#@TM7`S;yo?RiEd4`iU z)o*vr_$XaJcoKvL?cd&1U&E#A=OET()kjDrk}9hm@^YJa@%3mXQH(LN%oH}ttQF_m ztg5%K`KF$hlmczT&@{FB_Gc*B*y|RLB6B}wcv&laDwIvYWGUXu_svzFFPZAhB8_OD zjn4aMEP#~@9BsLR@VKxoVQQ}s-WITM3!vb zkk6N1%u%8UUXmy*Oh=Z~cDm_~Uzw|v|C!J=B6_?+8o9>ixHpqU>-2}(k7n=eg-g3e z4m6Y?tNrXdve5|1-DMCKl6?t!Vy`oht)H$k&tARQ4Xmy5TzWDilk_kVsF!&XHS0#xGfn zMV$p93@Nu3FC4?tt&W(Fn>I`SKEd3=J2sk#^kG^7;1>(E|^%Mg%p$qfiCM@R9->H;erbtQ;jlTM6T!8+m4l zFDB^Qem=YV6}mlMn#Ok!s~KVsX_WI754*5Owi;;>k4gp$`v`~9j(v zH~%G;w`-L_vAi6qNfgO0O-%951I7wiHWjc4frC&Tc`rsuymE%5mq1Kty=~bS`ZY*5 z*PZW^_9?*Rgr&3Gj31D{&sRXwRV(apW)sx6CaPPkTN+wYG8G~kgA72QnqNx_GSFn< zU}wimz(Cb~jrt5>VFnL9Gpkx(|L})kZ4|c;iO@V>xAsoUW8%{{-D9;Oa!m;?AF>%^ z3NRx1ODiGI6fXRMG>RzYa>azy#Q=dvE=ad(M8rzf73c~NhagoMSyaAWhBI~4h{gV3 z3C;5Q`iYmU>s9Mir5I!Lp4~X*Rq3I%ASBDF0&i{lv@{QImtu-Op*Z4GFW)+7wRm}$ zE11j~RZkJ!=Cg#pG7IQ{E(0jmP_l52mL?M$$q^o@h#DAvs2G|Qekoo8v|v&|p2B@e z5<84qO#1;pOSJ6dD*<02ojc`81Pg+EQ~s&DIX3WuZ-o3}|MLqBuFj)F!KWAgJhwIB z()&F9Qe6jo*}9g4HthcJWzZn-V-5FavhfYhc^?kFe#JF_b#KI=bg8WucXcO?m+hvD=uCY~UeK+DtFve< zwSDBSFh6z4*EyAsiMN&KAm*blm8&Ev<*nh>wH+jJpGdIu3Fo(Cv^8dzSLQ*LNcP-( zDYfF>%`LB?2Fg%d1vJFi`56fz8pu*kuwA))P!`9ky6W0EX!$_%Bl!`IRs#J)cD%rV zpu%k!)K4An7m(hs@cB5E~MV#{D(Ru>U zX{O#T&t9@3RzQJm~rCL<#FZQ*tr8PIub{XU<=Tu!gjzT0?rk8+%m_h2<@5U 
zaNUV_L+&aY&@Ejb4+GuDD__L8QNdsJ8D2fsR0BW%9=b1PIBFI?gua7@ESU1?u?(LcG^WfD(eRFlBEg$f2NQ3<`5+vTzvnpr=&W(W&pI-{;AS|3b*Znf2^ zl+xLkJA{;F+U}!ruM_@Xj@3P&Tig1r?KR_(JgHTSbgnn|li$YC$V@XTfzhYGy!Wt<3639%PtRg2wpBbjbcMTnKpY@u{ zv*Bjzs@8A%r0i52t2a~K0O$a10>6Deuphy}5?*8Nh_2M?z^{`k&lP4D=JioXYSEZi zo-C0$L`x7Br!TA}ogtYOFc$IagB%N8Y_UZ@QF)_wh5P4f{hMC=xmxQpqWN{p_<`ap z5sE!~i>-(trL4BjA6VZV)HE>=7VZLjyM-d6d}IjYgD5$#xJG z04EFo$SXo7wDtB{>KF+^=guLmoHsp224><4sd^Q}L<5s6{7_H9TnAcpkM^M)E`?9zqv+}HhS4>u<(C-ZWYL2VrY6uva9AjXs6tiEhIRm-@^ z$hO5`QTy5ETC~b2-XgKq;KdEiC@U=8Or?r1)c`R7XyE7^yNH+b@e$~aTLneoa%;ow z$V^!k5y+;pVR2ov1e}2j*$C?VAO}Um{Jxff$)fu- zGFB}Ig`eQlB6IPg^Bv@+u0))F?c>sv_;ueaA?IR%)l;o~Hx?QL=bxQ*+~%GluH(U< z{${e|#kw7KG15pcTC^@KgjYSZjx`i%~4EW&AM! zDm}ShQo?jAIiYcyLz=8>!>Xph7p}m`sirK~;$e~6NitDg#OD#C{l19+H5X8Xb|6aj zre@C%vM0)mA6Fe_!K+%Ruk=`-L7YZWMV?hO;kxfqvYcjXdN75iivZ@|l8EN>YMaO> zmD5TgRRC>@GoOQ*6<>3&m$`t8{40Sy8C?|+T+0qtVn%{P9V!XwWO7bK8pX$iOp8-| zO0?B)E?VBqiDy&m?HLr97qACloC(<{H@#jN}EP z=q`-&yaQ8ARl^KfNX9+^np^;xP?FlR8e3t@Tr~Z0wE*@>v|7A?>HH|$u+SFlC@`Yv zTQefr4@J+Jy;1%Z&FFo-gW9!OK7=!26_Z^*OKX%Fc+On$oM=qsAy5u9w{(K>XlS)a zLTtK87hgg{>$CSPSR(laBGP`G!VrX7=nq)My21qcmBW*(+Ul=sEw0joio~2?GX}I( z_49)=op@TGfMJ+=zjTAJ4TP^?5ugx497G;pqc*@bqw7FIv0iDe`$e}nvDMOg`urOK zrSf(-B&Q*_|AibCQ6WRp_2wLC9&d4KKq%p9^rmmH7@5joBAiUZGDb_MBA`Hdb&f`BBN)O|X0Ol|`*Q zWA5}0P+FL2Qe3RK~u7G6>)+@mvt*`U3Dc#!=> z1u_)<{;$yL?f=8wJ4aX6Esvr*cE|1*9oy>Iwma62&FF1y60ohILZd+bs-kdt3*6PXE4Cy*ANqJcYi9wrE)0BP{GbfPjmg`inxECbBiBrt~?gfW;YA zr6_5Woaq8-w)|$$IK>1~3LLZ;ogBgDdvE6mZAcgxSgIaM%p~o~i}&5B1|uu}BNPvD zEHb*Z0^)~k>L=8`$ZNFk-Y?wQu+D{_BCxEx_|w0&H_SHnqo2g&9*2gXQex;uNtx;| zWsAb%HcW%$CD`|0o{DT%L0DGv>voNcX-miXeq$9j#u;|yI*+hla(eS~Tx`gwnk`bj ztz*h9d2N{=ZCm5xTfB(GY0=`-gNMKn*h-vd)99vzXj{01hn=`UaR9oS0FBOUM$rxP zN+gipW-PE5$WNRmUj8ReZHK0xIF)|R%p@-+pLHfb@do`ryweA;&-H;LUQ$nx|6t;} ze=#xd=W<5X`Cm`@Z#}5~i!1&iBQ0D0C(2kpSC2+ZfIXL0el;0#vaplkK&w=EHdrRz z6$j%=-wR#tfOu8>|Kmx0HP!BiuuNqk+If0?i-`iJV!;ocBdIW|HW!?G;09vp%~>yu zEHTU2eDPe{Yy@`)#s7OJnnU1w(dR<90ulfe^mA<+3=$R+5()z5GddLv1RMex1ptYP zMnsH+&d8Q9_g8ePgHvox-8?BXtAIi+6b6}~{0xJTqP|_?BEOPPe7&%;p|78ROwYo< zQkp=12mJ$p@(EtjCHCOve*O4dNKPVPGhNB#vz)5q@m$F=aec1i8^69a@k-e{9DijV znn-ytjY?xapL(|YuQ%VC|El?~_E|UoQTv|-|E-JvW9@(R{eKe$+|*per@?gdgQ{}i zwcq1)(9iElCtK+H2^I@ZhG(pK3d&~_X>IOaLOAxehy9oe8{&$2ZZ1l#jx~+u9dLT9 zp{yX*ffO>3!j!@C%h+2GJHZqIFR~opiZCOON5-ik@@2K`J?Jv8+`Uf`)YfE968SFq8E9DK1KMn#Uoz)BGp z8rybPxq~VdkH%7yFoAKyOg98)8V-akEqD`NQo1W|z7j7wl(4Qy6W>;)avihcr&Sb` zWOnvp&!XaQbqL(t>WN7no*>csL1r?0kvr-pr_=*4#}bgrVnEtqg>&7nDC2!!{yRhe z?XJ9+e%`X|SQ$;L&Eggf2>lwYPYVS~m4Rg;tI^k`>cSdg;@guca-||gjOVoRBH_7J zB&Y7usAK&OZ!3FkLB*UJh3&ZN=Cgm&;pbk|3P+X6+$Rx7241u$YZ0Wu#f?pADu2`WLA3PtaKH zpOyawn*0SO>@6Dqv-BU}XWPF)xmw0w|18CP*5x(%7ijcK@(=K{?Y~O@dhXv{efIoU z>3@6B-)M+Q@{(~F-RH~g_J)smz7Ni-Gr>2s&tyF}k4s;Np5k5U zX@35j_;lqvnI1&22m-$%L+~{ zGbn6N$h%L7OjZW(4+>pK5;)%+xKQht(1Hl6q-uBK3l>cA2k>|j`TNem=|syF*%C#@ zltugfBp)<6t3K`tC)4PcJc$u zIgu-&uQL8y;gA5^q;4luP7DS3fRp74uQu#lB^#Qrs9567uo-fA@cd_FmfHLlBkN8L zAuq=1@Rb<^ch~HvRqMZu(c&Ltgs_mu#hQbt0AiD{Ve@+fhTM?R`k{wB$4|b>kD5@# zwG<^!P#aiKKpu>->t#gt$I)~+0m={+&^MRVK))>qp!>GycHw!jLLrQUHa>W6fOxy2 z8fY{w*iw{vIhY#NmGjD`XvA~Mx~PLYpqnhx*7Cv+Fpw0HDk{L%Ft*h;H;&N37g$9I zW3!=JcUUr8+q**!idJBKDPnf8MNhLSq-T&vWb{W)j4&Ahw*p~W1E3Za@B9HUIV8PH z3>EkMb(Qtk)!+99sR`b(6^869TDxCU3b#gXzX-TkXt3sb6Vq>wT3VSbHcPLqY!8gh zuQUlnaXEC8ilj-J8??TwT)bPnEXK{H@Lk5SoAJI&WL`p~eBENuvIO}VNmtrxzu}1| zp`xs1;r!N6asRbN+~LO!tvSg zM2k;eF2oo$|C)Nt{=!*wGkG+^hjoGBIDW5xB(%R$;@tAN#4&|_d9!dk68_3fx)LE4 z98OKqMp?mPR%_fEJxmpQS5p(rx>g)>9-uKEf~{Bg)Lt$Ht(Zo_f(?c=f}!q!Jp)3U 
z%|%j=6~`yr6Sph-n`+aZg*URG;r(YkHXF5aW~RtZjYf*&2}WsTwY7B5$ij$xTU2e! z>gZ!>XI`(7UM_+hMDloek2wiGcF3Jv-=_t#k~8X>;!0QZ4?vRO*Vl#noS(2i;cq*w zn;!U(r?KYMC4Q#FEH_3Jj9g&L*fDyW42Hq4z>x7G-RX>OjFt}3J9FJ%^R%%(R#xQP z0(D~9q-ibOvqWa(MVyC4*R(K@H?$%R+a~JQ#ISS{)X^UJ^E5>;YW<8y$_Lqr@Kn&T zPdAv800N|~cAKXfRKXo8ImDw6Y6uEL{_Xs_*wfx$SnrVFUko@tLX`{(bqCs<=Fu*c z!=OZ)guu(J!&V7QPrqkZ^LwFu zTO(Ijx^dS>zIn>WCaN=8m_>n2042+lg%4PQUZWOWu;dJo(vMj7AQ7MpU4cET&tFw< z0Cp(cSwSb|Rj3Zrj?zD34Y_y%QBe=Lc*@eMf*qgAK`2KXJoG^-pvOvw9`r287S;rg zOm~;qN{PR+aVnvwjdyX!7OmZ4($}MKwP5{bfgtNu4rD5KyZ9|q9%Rw}Fc$}O!5Pfi zecLg`Hh}%oQGgrT9MlZI1^4eD1E5S}SU(<%Tflb)i=9-2!G z0e8Mo)~zC&1|*Bq4^dr^N$iGy)=%IpdCOV0q}mA2Mo_F+;UB6jn*r+G6=)qeV3fgs zFIu`le@U2dZqvmj8GSfeLTjz?gZowNoao8dIveTWLW!G_yDOIY}fB`#!M8%OH!&#Y3oZGdp z+-(daLC)Zf@aO%8Fb*U*{*$}{#lTHeY9^TES!ZO#ToXPhl07GeuZ8RxQYyjH)z@bS zO%X05Z=wE;d_A#w6{{9=(8&_ZzTo?mi>(RERaU_Igmz4mJdyiM&>jkmf*eSwPdTM+ ze?<71Jj2uJEpADNO_wDMr@1nXrnt zC~>vgJ1C!$sxHhGrMa1WpMGo^9L_XcBcQddsAFdQ)-}xo{Rg1paEn@5gyE)SW?mSr zc`J-xu+^mZ)kXIC%Y}QQEq3a#FSKzWds2jj9|tru?8++!v|cEye@@XGsAs3u7`2ds zl-Un;|MkI6XKN=#j+m!mb+~w2Nn8Hk*n}FO49kb>##9}GKdkH6V7vqq8!?N63xs>w z1knhiNoaj=i*${vQALH#yP)lD2{zGK= zoKF7$Aovv2VX!}Q!GcR*$H@8;GnU7oJqim0nK!;tGbfhA9nu*_@iqPc{I7jqlFY9*xXVMSDc|FJ5 z8JQwRbOFu3qxEL5Y z&KdSs|FNRRjXD!*5`T)vp4BmOw9^YbF;kOr%{S`;W-}r^E0eS3PHuU65Wf{(C$yR(q) z)2m#aUlG=^Tl)8MOkUyJwe5*^y@b3W>-qFy=v!lK?Bam;kgwI) z5`-%8p#Vm1m8@Y^jX801#(mS`Iy+XiU^mMwQ%Hv?%o$8j9YKP)}1tYxmhErjmlEft}QhZYc_cdp4qAJU2`H!qJ(QFcgR}< zM=;BcE2wp`6WeW|KPI`}$z{AO|C3MGzBn=Ms~&ejX%`VptyrPOOQB*qReu ziFd&aA{`w9^sgfbUy#YF1PM$qi*!==*Bu(1EHZ{LUs+Ggt$WPzZi(Yl_=nu2ts?I| z1#`7jcz^+?R?W~V^$~jS=dqe;^zseSlPf_bXVCMB?SB$tK2X}5CnXPg?A_d)M4ZZu z&uom@T_mLFfq0U$a;FV-q&wd<*+LNA3m5R`Fc>vbV*~l(RCP3~g12Uz!=X1(^O=Z^ zTm<~j8eQR4G4d62;SF1L)LM-vW;JJx16lS{7*Y8spg*?5UFL{eSK^U2}NFKD^% zc7DuE(Y9wv76c6HQp#yfss&Fw+5G`vR(?!go#6-~?h)rmgzYSp?G%3h`3;xx4`>S7eO83S|Me!q*|($LN|3&Cz=$5$(= z$mtJlCt!hT&IxXwOW3E{+y~SY=WFX#U0`Ad1N~(u_JPd!MoyR~7@W!3MlhDJo5)Oi zDE&4Y!UVfyC2J88BY+)0-~eys0AYS@nIw%C5?nPl-pyE2msBUIJ~YbKgllY}KJ=L) z&mUme9-K?k>8e4Js1+_m{9R&_#jg7sW4r47IMb=o!gaP)DNJ<2=`c0APp{kW6Rt~k zd|xd&)QCiGv?l64ZJPOE*5xxn>&DIW34XFlLp`Zlh~+xYO|?V#LL0K6L5t0Iatp4^8UY#55>^K0s%Ldpa8#p*_fflO+>0CQp63RzoW5I z&^d)Qg}{E$86Ug0IQShbSTqWuf`=!u3v^$kU|H|Rxcuw{pajP}VSM_T5WM7H8-!Mt zHL4k+jgD^yB9rUsP+*3(8N@okI+WZ`7g;1SXfh!uP2FM}*9J{EQ>!@BCXM#gD$0v- zoK~+Qfd+t*6~02i=vWk2u2!2MqVG^5ctXmCspys){s>ggIZK!v4X@C2hd|x2R~>DH z>JQ{qDnGKd2?^Y_Ux9bC;AVH$l98{4>SSXP#cIKhAtWM9!4CNY5b%B3Oa7Ia1aVMx zm-Bc%@m-T(hkARVBU(C$(Scu7iLBX?$_bz3@Z9#4ShF;rZXh#z1Y(U%5nNIodEf0D zXm5U5>*%?zSD4n<*FNjHwWfYAnJK#Q8b#@-f@^9@qP(&DOO4J_9gU#X01)fy+3Fq1 zSVowNRjfHl>2p?Fcc!tiA&yR2@`jf2lypbck`rb1y$U1MHAItvH(S*J3RF7FhO-dp zh*jbfa@4r8a-~sKB=`WUKI#~)eK_}FN&d5T9gS?^!h%p@P`D`825YB~p;f?1!nt~* znh^0emVX};G*&a2I=W87dQfEpU-`UP+!WUR7%nK)@GKtGB+GW*o}j*oL+lWgO7qhq zx~0U^8Xn(*<_9AiFiQag?TZvKIeSGOBt(udnguiJ=eIjKvFq8YsWf}2FH0OSeeyu9 zGCIWYoXaCV0fym7heSLjVg=e5-sYuUomonaMEq&IZh?!|fvfl0NnL z*_J@aNHZ=X_VS-T^CG*8gJ{Q(7P!ST_WbP8QfJkZmkd}T*NAZnOH47Yy11X8nl%`2 z?7bL?LWyXwQ9C)m)pc(-w%f-M^P#hRp2B5D2EZ5D)O}O*U|iB*B5g3bt{Cu!r^D6c z{z+-T%}?Y^RbVCJ8o5L5ND-=B%jSy?#_T{V9>4l^N-$Bk?JOD30}2Kw5nEG@6g)x^ z89@VQdD*c?K%krtAMPwt@55aiGqGznXlsb&-RxIjl}~W)f`kEMg{~TdyX@wK@=It% zTeFL%E&nhonyET13h8>a5;#DQpBP-&fEe6x1{pbDSnMh|_@*fJavIJpw7zsUe~kbu z)lha~O|m-l_S&l!*Y#nAk?mVyNfi}G9GU(SGgLB3i;H>$9{lkGcKrJ(%dyReBDGlw0du8Nt%=?>BRNIjELEhEm&b7{A>{WdCh}cy|UimL) z-mh+DG%e9^Fb%rB?Q9k;#<(E}&jL$wigmf)1`FkWdlQLpnW<Dj*7!h_ zLy03JM+yHLnj84(L{)a1Yu*6K4XL~qH>0DSAO4oMU;%g9C(6!%lS}DW&ZIr)a-Is<>IdnAe)*=OW?7Yw7er%pMv~eT^Iwf9q6+dAxeq7L()*b9YRySuA3%i~3a; 
z^#Y7WT9f6RNTxiX_{+2=GI0|db6KMCdcZ76-Rgy4Et}-W+PP1FNKc)?I8Ip{Fh>WS zHm*^aVsT_2s8SPGn;H+qJ~J5092R$+#!aK%;$ycwp&|?(*E7*u8t3T{9fKapsG-7X z$|>SXBi=m-K>8xbhHGiXUKG{d|#jXqPm;Vcv{I(;nj56TaBG)9^UE#`;y+kM#7 ziZvSH85eKr;QjpHvZ#0XrT|0v6dNTc2?{}EMTDhIcH9Z+!&FOxd`GEit5qmi2=`+6rKFUOw<^rTen3w{x_0$WzW!4mUho zOd&$jfJBJ`B*OGDCjL_cM@bI;o%wxZP$(JfyRrI*WlgGxuZ~LovDTC+^+DM*obX|U zLG!W+TcM;H_!x~0T7`zxQy=n37W(cR_|oJq z6l2I(%19y>MXZr~kjq*{ON|wcpdCsVWOD+3ST-S0OFowB;|>tBJ{2L^uMxy4YugcE zZepR3n*5}8<^8gk&wKQl&BChoD{yKltxm~l2qfN1e*l5ZAb$X3S69EZj{X2r|2-=c zXfj_d9rW$$mY9&|kx-^yUdi&Mz$7?iu)H&-QJbGEn{t^kGl(%Zu+EV&fZZrZFY$v! zI3nQzAbF9&kdGso$%1r(D?(~i@U%!~jVtF1(>Z`s5mbIs%6N*W)$zS#6l_vFKH-B% zfl_o{339yCXr(T)VuV_xSR$(VS;mUvSZC+Oow+Eo4Z5Ao7(P{s#mC4<+~ZyCi_yWq z9lrJV&9c(Rfg%x7mvGU;n3u`&c>=;!hU!t$)ru4O&FmPyC;H6 zjwI`JX=Q#cLDOY3;I6dZySK?0GBD~hzwhXc3tt~fX9_bF>0acOxx-FA%-IQ{BfydO zN)!h#!9e09)5nD`cxc=Th0`j-v!1L#H+~Gu`kqA1ePK= z|6smi^o)^tQJ>TZTVa6ted`2@qNtJoC{!t_QRu4#9sFA(u?rFAe(=|G$-mXinftu& z2tTz0cpebeZXTR$80YEAe9P(DiQ`T&Us5Y{5=%ld`d!ixmm7L))mmJr)hMNVK?>CU zM|kyVL9!KinUek1;PJ|RQt)=RhKza*%Vxo*GpC``R96VjtTkzW7`-0Z$nUG+qr>(I zL@M{|T|2llHx@Le>9*PILq^hl&`*|>2(uszDG}!2A`oLGqVfAjsZ!l+>t8aJG0Ki; zO!P}A2@z#b%@^s7!~Li^Xc>o@N2Agr${)}ezsL+TFx%rU>cS6vv+5F!!>0C@tuz>< zY;Juj1A*}h1Kx181-a|yNeO90oaqfGpW#RYQBOibqNJte;0F%~P^YD6W=C3$)EvGR zQ!>^cCi=w<(fq163M(*ZAt8^&uQ+w>W#yd~hy-;yKQee%xO1`{I zj>%w+FmN6pjN_By#2r7LE6nvua$Mew6*{ODAhe8uzQ&l&$U{-uKppsLQ!?@Nc*6x0 z{Z21|{%YT5cw|ML)2Grzt-n4ZHT69(;^B zzM6*Hxg>_Q`3YHr1vWEn%`PqEw_Zk4-QbXv$nWLtw`g_RBgU}j%TlU22{mQMBKgXp z48v7k@fcRxW3l&}f@)SL8AhO|#mYF%Jj0?=NR%p0s>;CW78$9M7cP^wzB2U2iqbNR zlm?+iZ2AEIm#yz_^Ud_MHhEwHTR8oro0WGf0vEwn z9@G|J`5?=CO*pDXp3m{PS4@;WWTkx|v|NicYCIW*_kIuWNo;5t_?nOJpVmyu7ZsVLE5llebW)J;#>rAo zdgawFH$k>)E>c}}du?>2=AY zf6RQ(PIvqU0@a>D5f4sJ@stWl248Wcf=Uu~He+neN=3w|=Pf|Di^npA!f7w`-rDQxgGMk~LhAgkZMmuYB zdN{UagD0C~6A0Mk2_>o#@EeHK$P*%RND;O$TuWMZQ=n&`^|oauGTAFvfp<4rigpYK zvx4gx_gV=y;EaF^9=;hfzS+Fc63B#FqR4#m-e416+9J<}5}?;#dQz9G_}tZPLdy4M z@zdJ0_pcv}?^$#~qumpA0I%$2u!e!6s>&8lvs7U+9yyunS4~&e-O^%{O1)awGX)z0 z=Qdmz$i!|NGs$3yEK~?sve)L)4YS4LB_o_EN6qD9ITL(piCW>B)A}<9n=jQ#$($E!rVk zA6TA3dulaZ-NBr)`~E#jHP|byXjRpoxjX~7lHrp4S49I1|Aa52w4z%2&C}I({QHt( zRjU|t;J;bGvm#jJl24Til23%SMR1wTafkXA{1Sqsvx=0aev@LAh;q6hEoG(s zbJj997ite*ubI3~eAORhxmr?ixu@#=>umGaq5uafYWB3;y_Oi9CCdyc14X8 za+iq90)AMtRUb1>zo#?#+=?*-iUJtA^?_bL)?savee=k!M4w5+s$5QNNlSxuVWR*- zrJ+H~os9xp{-nH&CYC~8T=6*{0|yNoCR(Lh)hexV8CNWMWT~-rKQ(@KicX-@g$?OS zh=l^1V<4O#NkUYNexG1rOuh&W=W-=COI8f~T8zb!r3i3F7^PX;_2Q^KtT;l+G@ z=mcqsPwJr}2r48{1Jk*HkeQiF44X!zpKe1tf_=eDHL<(4&_5(Qt-gR)KSfO!aKPP~ z!De-m=#WG(AJ?~EKIdouLTjKpa(-#hK%vpvUXqU_Of14cZil3FWt;W45M1%OG&S_~ zAEM7MsI(jTYR;Jjf1*nyYgWUce4AvJ``KcpUpWJjn0Qny_r836xnR9q77)NUkn2UO|A@J z6_-haY$`3J{}z6z-*>ZNE(1iXdDvt-tn2`)YDq3|W2H6WPt7b2K9ww?kLJY6qDZWY zJ}xcQt!dOYoL>DZ4}C06rCq`l3pkVVu|ZJXW*rYg5dF>of9gZ36H{K>A_>oBXHj=X zL)~$oUpTDRBs^!dSbF$rp=$)rl{U+02FqxU;2A0DwhnxH$xFxg&VwS8F)1N$G4mLa zB$O2nI+eyNoGr^ZEwc58Z!GLem@m9rl+{^mUij{z-G2)<q~IS7GF8m?O=( z_>ya^IuXX@?925+2uOt6;2ytf-;5tJ8q2;;OmEJ}vG06E&$$K=(pRT6%n_~IsBjvF z6*FH|XK9=MQx*F!T$p}i0MN<%CK-oaHCR=vdRC(M{n7d&l3e-)fhzdp5%bdF7m2ZM zryR&|Uz&pIu0(ik3BL%C*x8~+#djmJnH^&KM5A^4yjMZ=g6g4ExkkfOw_=oyF%ekF zL!3BW+rZX2(F$*QT+Psea2sfMY{pJD={e)} zx#vXvjv`%)gJAuF63qcOdV;n%gmSIqk-3h*&nGsOqcZKO=spqxV<%c}XEDSh+)@=W zLqDwZ1S4Krv}9bkFL7S+vFWh_R?Ymx!h*yMq?YX1!eVx)$)bbtL!Q;+!ygK0<7a%* zRy=|1)P_yfSzM{4x<6`g!XW!sTCQ5Yl zIcd9AS@e%2bUskwKwm-TGRSq%#}VzfWg;CwB*alQ7iAYpfFUIM@sH4e{ut>OVvYtZ zwhcDLHMS%mo^_YFtfOwDyXDXbvtW$)1~sm5a4ZA!!F(YxNvDacO-3D<$H#nEE~!| zzo=L0#=K)tRXg7o)HBb=TcuCSOlLgb(_SiHz3EQXb{F25TrNxbW_VoLTG66k(~OSS 
z-~d}&dS$G?2Xpf140pH+JTWEK=+3hTF`@mh+=8b$Pq_lNLJc>g2tGMO70k1na|TbO z=FuCj1mclzb$D^#9s*n)1>?-c&!wN0*d_;_GI!4HHa$P%UJhOGcr`9?CV=|rcM-5P zgiWoJZ4FXOGDwW&nz2`cj~k=%u|{DAAFN@=^p9*3!cW$}U+blCaeySRPd`kLcx*@N z^bwk!u*DxefgT3444>J>#Yd8g`tt2eCckI6aqWS~9jn@~=oe1}(c1GzSgk3Pe$`T2 zj9%q#ayj7HjRYVB1!^1yPkRP3kQqal>nJU_lW z1ItE>UR9iaCy)^^5=LQQ6iUCM(+^KQd$@}z^#2NuavFa>DW}e%UR43R5Uvi*Y_QvK z>P*%ln`By>bnnjXzQGrh`4M?_da+dW>8abwDxhAabEMK|Qo|}`u3F6?NH3f`UeHT! zORGBtp24pg5?;S~65Mg3jRol687W6tl@A+j0Bmnv+meSHE{r zxGGXZnusEcn1A(|iO@w@vYg)a9>t`?04|jCh_HP3rdjflUcn;t*10iv!yqq4TU&Jv zl?l8)%)MX-i}-RE>V>Qi!~wFvFSEoH|yr zIt|#)4qx-s9{?*I+?dv~xg$Neb7S7o%=TF5wL-_nKyG-hu zoLhlhb}xwBN5rheMjF-ZZ2~ASOwN+WJm-z*pxGY5o6!)x>YcF<`>lCuoX%}c&rGg#rWcLnh8JVi=;k?*Uh%8gabgOw=m2Vs zYc%yHDJzG}sa4!-Pqr$W5}pc~Y~VLK!9z*Cwg#=_&I~zlS1!H9Dz}f=-jif}G4_~- zxK;z|vo&)j)#%uSMgE_a4a%1kBAVN+$Jnb0fX83Cydd6eWkB$;;#JdUt*D{}_Dx_8AmIfduloNgF66 z7~t=1d_e%nL`aN;0t)&uS2;C34Dxn9{4;-V-}{}&$Npya|KhX33Kua#1;*WD2>9}J z8|D0#dGXAD6tD|#y$Vt*W4C}sVZb!Z~Q=t$GDQA>8hLY- z>i{#*byCr7u^DV|vr8-mzoX8()yMS53`P(^1`2AtymWduX|~J9wo!#9+~yf~($i`u zotA|wzF#-^?`Jm?4cS6ljf0%5nXMmRxRabe@GD9eK6FP~D^73Are0|ZIy9Hv)2r-h z8z#JqJ;Q_Jt1{nPtvn_@HnWb;zC8b4D_m+n>Nxp`AVvJUAd?|Ve(s9SK#(m$-Z6gl z{6H~K1ny00O6WL6IG}J?r8?`7vL@j^2PQ;60insi6PuYeS&!51$k{+S=hs-@QI~a^ z?rr8{QE)ap@Jm8YuqcE9bD~{KKrBP@VsN`NBMeeR2xPjsd1oLTyD=pQ+%9D4%y1C( z`@JTN!8CO?=+1sHGLJN+AXk|;VJ(IAs5h-Btv4VMJRVCV?b9Hj$8?JRs9Z*bnA0hW zi%jvV9#4tZZ#{S@f5#LM_N!e*$A;rGVJ3&gvVHDcW?P6LvbTk4ys(4l_19cOG$%h8 zl@@&}75zJ(BoZlz6E3N-Xg{djMxKXJjDfUd4aJXM6tW_T!TNdxD<6k`7!Gi`Xn**(x z%O?-(PhOSqnFg1<2~xU)T`Z{a(<`>_fD$^E7X92+iBuTOXWr#oEeN!hLXJSX08D@_ z>6te;nO?lY{5Bs78sbgQ8BJ(lhK!J*@o(}Kb66WqNIs#*g7@}`s5@(vNf3?Y?s?V` z>JSTUOZBfa>Zq1MIN$$%ktvxqDGEr8xIh_(=NAdmRb*>}bMF=N1~%blsNy4e`?u*; zt=K1MR1HI(M4*CvfECLYpljbS4{bR+@M0)Pg-PSXZLqz9BDJ#sT(wnY*-O{S1a zaIe?5uS$kAOu~MTZq;e%-Qtkal1O)JL8BhRDI1njUR(VG2zLS6Y**cGHOW-DO?k|9 zqa!PfG}Y(cdgZ8JSh}OKrZZ3x`$HBx7ep9vHQGL;noydp9y{UWbLYj?5{h-`sz92U zqJDW}&N)&5lQWa;n;t?{+qSJl)ye(?0E}rnIOHG)rSsE#HCL3_-)Z`InX&de^L+?b z)|k$rP>B@$asQ)Pfx(MNtHn&;3tDb4mn{dlj)boiU@>5v7703xEU7De`KTPJUi@R! 
[GIT binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/source.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/source.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..e78681b10b1a6063bda803ff1f60033dfcaaa55f
GIT binary patch
literal 46641

[GIT binary patch data omitted]
> OpenCV 2.0
+
+    *Author:* Theodore Tsesmelis
+
+    Where we learn to segment objects using Laplacian filtering, the Distance Transformation and the Watershed algorithm.
\ No newline at end of file
diff --git a/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp b/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
new file mode 100644
index 000000000..424a4901e
--- /dev/null
+++ b/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
@@ -0,0 +1,168 @@
+/**
+ * @function Watershed_and_Distance_Transform.cpp
+ * @brief Sample code showing how to segment overlapping objects using Laplacian filtering, the Distance Transformation and the Watershed algorithm
+ * @author OpenCV Team
+ */
+
+#include <opencv2/opencv.hpp>
+#include <iostream>
+
+using namespace std;
+using namespace cv;
+
+int main(int, char** argv)
+{
+//! [load_image]
+    // Load the image
+    Mat src = imread(argv[1]);
+
+    // Check if everything was fine
+    if (!src.data)
+        return -1;
+
+    // Show source image
+    imshow("Source Image", src);
+//! [load_image]
+
+//! [black_bg]
+    // Change the background from white to black, since that will help later to extract
+    // better results during the use of Distance Transform
+    for( int x = 0; x < src.rows; x++ ) {
+        for( int y = 0; y < src.cols; y++ ) {
+            if ( src.at<Vec3b>(x, y) == Vec3b(255,255,255) ) {
+                src.at<Vec3b>(x, y)[0] = 0;
+                src.at<Vec3b>(x, y)[1] = 0;
+                src.at<Vec3b>(x, y)[2] = 0;
+            }
+        }
+    }
+
+    // Show output image
+    imshow("Black Background Image", src);
+//! [black_bg]
+
+//! [sharp]
+    // Create a kernel that we will use to sharpen our image
+    Mat kernel = (Mat_<float>(3,3) <<
+                  1,  1, 1,
+                  1, -8, 1,
+                  1,  1, 1); // an approximation of second derivative, a quite strong kernel
+
+    // do the Laplacian filtering as it is
+    // well, we need to convert everything into something deeper than CV_8U
+    // because the kernel has some negative values,
+    // and we can expect in general to have a Laplacian image with negative values
+    // BUT an 8-bit unsigned int (the one we are working with) can contain values from 0 to 255
+    // so the possible negative numbers will be truncated
+    Mat imgLaplacian;
+    Mat sharp = src; // copy source image to another temporary one
+    filter2D(sharp, imgLaplacian, CV_32F, kernel);
+    src.convertTo(sharp, CV_32F);
+    Mat imgResult = sharp - imgLaplacian;
+
+    // convert back to 8-bit
+    imgResult.convertTo(imgResult, CV_8UC3);
+    imgLaplacian.convertTo(imgLaplacian, CV_8UC3);
+
+    // imshow( "Laplace Filtered Image", imgLaplacian );
+    imshow( "New Sharpened Image", imgResult );
+//! [sharp]
+
+    src = imgResult; // copy back
+
+//! [bin]
+    // Create binary image from source image
+    Mat bw;
+    cvtColor(src, bw, CV_BGR2GRAY);
+    threshold(bw, bw, 40, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
+    imshow("Binary Image", bw);
+//! [bin]
+
+//! [dist]
+    // Perform the distance transform algorithm
+    Mat dist;
+    distanceTransform(bw, dist, CV_DIST_L2, 3);
+
+    // Normalize the distance image for range = {0.0, 1.0}
+    // so we can visualize and threshold it
+    normalize(dist, dist, 0, 1., NORM_MINMAX);
+    imshow("Distance Transform Image", dist);
+//! [dist]
+
+//! [peaks]
+    // Threshold to obtain the peaks
+    // This will be the markers for the foreground objects
+    threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
+
+    // Dilate the dist image a bit
+    Mat kernel1 = Mat::ones(3, 3, CV_8UC1);
+    dilate(dist, dist, kernel1);
+    imshow("Peaks", dist);
+//! [peaks]
+
+//! [seeds]
+    // Create the CV_8U version of the distance image
+    // It is needed for findContours()
+    Mat dist_8u;
+    dist.convertTo(dist_8u, CV_8U);
+
+    // Find total markers
+    vector<vector<Point> > contours;
+    findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
+
+    // Create the marker image for the watershed algorithm
+    Mat markers = Mat::zeros(dist.size(), CV_32SC1);
+
+    // Draw the foreground markers
+    for (size_t i = 0; i < contours.size(); i++)
+        drawContours(markers, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i)+1), -1);
+
+    // Draw the background marker
+    circle(markers, Point(5,5), 3, CV_RGB(255,255,255), -1);
+    imshow("Markers", markers*10000);
+//! [seeds]
+
+//! [watershed]
+    // Perform the watershed algorithm
+    watershed(src, markers);
+
+    Mat mark = Mat::zeros(markers.size(), CV_8UC1);
+    markers.convertTo(mark, CV_8UC1);
+    bitwise_not(mark, mark);
+//    imshow("Markers_v2", mark); // uncomment this if you want to see how the mark
+                                  // image looks at that point
+
+    // Generate random colors
+    vector<Vec3b> colors;
+    for (size_t i = 0; i < contours.size(); i++)
+    {
+        int b = theRNG().uniform(0, 255);
+        int g = theRNG().uniform(0, 255);
+        int r = theRNG().uniform(0, 255);
+
+        colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
+    }
+
+    // Create the result image
+    Mat dst = Mat::zeros(markers.size(), CV_8UC3);
+
+    // Fill labeled objects with random colors
+    for (int i = 0; i < markers.rows; i++)
+    {
+        for (int j = 0; j < markers.cols; j++)
+        {
+            int index = markers.at<int>(i,j);
+            if (index > 0 && index <= static_cast<int>(contours.size()))
+                dst.at<Vec3b>(i,j) = colors[index-1];
+            else
+                dst.at<Vec3b>(i,j) = Vec3b(0,0,0);
+        }
+    }
+
+    // Visualize the final image
+    imshow("Final Result", dst);
+//! [watershed]
+
+    waitKey(0);
+    return 0;
+}
\ No newline at end of file
diff --git a/samples/data/cards.png b/samples/data/cards.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e61d874f6b874e6d87420e0d203e6a21b0a97d4
GIT binary patch
literal 77914

[GIT binary patch data omitted]
zP^Lzqe=U!61|)hleiiD>gI%5_8GKUL^ULCkHRv}$*C%-V;uL@{iHUyWAv-=C+t_xNRj2$JVM|yel(e=%x$F-XX3R-8&eJGsD9C2KtgTZa)k}62JxAj3wGUEFI~i zy~gz3W2n4zf-S{6Xu856MC&a7xDkV;1uTKIz8WQ$%O~~d8aHvGz5?JOs3Zu`bkF*N zpBaMfffxNB#k#t3a&pHUwovbuJQFvos#MUzXrShEMVs&$JdUCLp~(K~9QGWt_G5?* zEmcPo&PuCMZs%C)~EO5W_YN|wA8gw8b zY8Jv*M$MQ>CtTv*f*FF>ezT=oIv$*N%GrDzXE=G-_7>qgK+J3~LiAl56BdwO=Xx0W z7O1}pkXe|}g%z0DUwi7ku=gO?tey{kSbhq-gGhX-wraY*l1popaZLZb_44xa{ELo) zzAheU36@Yn?KxGXEe(v_Uq?j1j*Za(@P(b>H9JvHlK!jR6TN5~3K4Vj4sW&7%HVbIUQA?kl=EkGA?(Zl7pVQg{>`anqh%n!~ zD(cB9t?W?+4|#re)A)ffB;lCggE?dLPTvL_7;#f-psOIc?I4C$)sWvlB)E(_!YYWm zuDrYln%&5YJr?$%e~o9o^!lGDk)FG09>WU&Xt=Ph+Hlc) z_(00a8k@^%`mDseadnMTR)JDuv8GIDdH=(~#l;PdF>+OuWp~ReSaH&Aig893!T4EO z880s^ChxhgIK zwDTO}!3zxJ07Q5{i{)m|Z=-_h;x9yif~wL8sXu`sn46pRRGpeODl9Av#LnF8Y`^aD zJahAJ23-Zp^xF03p_%IkoXbXiF_cBx74X{AA@khdryrxw+#uU?)h1;ONrkE%?nkq) z%6`T*gha>qZl7iLT#r1x&2CEXbU(tpJFp&I@+Q6_k0O!BU+{LY%&1K)S@U2{AF7yN z7goR#$@<&47iMLMB9!_T`nxviJRB7;6swC@OWEa6-?TIW9I?8R_CTeJi#T0|ThkPk zJ0@i@ey_9{X(miCGS+pmlg)KhzZV*|x#8>lTIA24Khf<_GGIQ>?RZK-9Z6_-(;vUF zy{&tGk5#;<^SvM zX7g2<)P)ZtO7rPZF%NHCJ+-7dP$wEe&6e=7_?aS0n;t>IZdo2lsrn9D6=+peJlH9* zm&C>U9uQ_uY|L08%p4ruQvjbJevq#I#wsBZ()7p^0_d|(<6nTd?5gPsfWWFtqdGyq z2K_MxM+V4Se*VZfv;yj6ssKHGebS3OxzV|?_Ud9y0DqT7d04&~=C?%Ihiei3n5|&L z6-`GQ{rx+G(vD>39ADpQ+F;f~8o}IfM%90L!+J?JkNJil{F8uh8cyTxI9c!M)2xpx zFdkrj-SO=^o0wcWkC?johnNoA-L!V2;m-z7URVvzhW~g@Z!eO{!$)Ub1t6|(I+w*n znBT5yfo%cc3*Ff}_|=@%RvvB3^UVKP)y%)~=0K4XnpuFAv1r5&Ia84e{m8nvtnAez zO0*Ir5j8j6wiQS=_len=D_u^dNUg13xp3!jcNJtZ*iWP&BH1FVFan}-6@p1iM<=D# ze>Jk&8ZH|Abm%M!968INFw30HK3`P=M@N0i&0{xJ1cTB)V}-z;sD*>;;k3WdcM0Z< zZ#^18A(19v-<^TO!NGB6+~d_2tH5`3M2*N=Z<3N5@{DCW008E|wgxm*2EaJKyu6~$ zvIi}V5w8b~MCIorEwb6tgyOO{$~crXH)o8+U>S+kIW(u~|Kwo9a04K8!ke5%J^=Ba zZ<)vl|ItWQw>B6SbS8t45CNS*a-U_*@-T2?-8tGb1p7xStE*@2Kon)nkj?#Jo+263Ip?^irlQmVn(8BBfx*9j zG0^g`XIHP2TElwQQBlzpm6ZeB>J|h$gwz|}9C_baCjs0ZjBc|&PAL5jLd*K9N}iK9 zVt#(4_`BdU-#bcj{&Y?2hKOR#nvZO3oh6G6fZi^n2I*+%wHIwD_+!z>hvm0-v4W)M zK(*!pJ=IJY!3)*u(fDTeuCA``{PKKe{W&>zQNd96_6=<&08dROnw8_#4j?lT8h6yP zx)LM%)12iK6N5z{x4nrX5fd@E7$xvoMH&TEG$M;FnVlK@W(NmJZ7Yo-dNPP@LL{x1 zn%0{@2>6zeaI;Fgfy;g?TpQEE(9qtHS$563S?V6IrFCbI{Qk;&?L11OOifsHI+8{^^=W8>ok zatk^QSisO{-4T$X6F)TAJcNp3@Dp|3d!zxEx)mD|7d}MB>2n^R!RV{WNq>@p&!0ad zYt~pAS{V5Qa6xCjDh%8Ma4)IP$a{Kv4*EW#%&dPijMX|#lfwc5-}TY;d|1w=0rN#u z2^xspSk_GehG!-llZBAeGsr-{4ZSA3&QT?@Kh^1rMMgZp3g+bG%=lS9ztiyPz0DH$ z!_|$x*a=Ry=-Fj?buqZSk+iIwTKulf6phIuiY#>WY-b0(0@@G2$QM+bO5WsX2xjlP zTVJdZ++>)i1m>DwdkAn?&$eIB-~(~X{#;Y)8kDS{pkU`c{!g&5#Mc+by?hWOJw5&W zHR{YOeO7B0*;q+FIcHPq_=-sZSquyuxTeCeW8ma|UtELHv~=d3nG&jpk8fz$3Csd# ztiAkj5cR0At}|TWN3x@I#s}m!Ue3Y%1sR9qMG+XBwBH_nPC0kWo{1pW%L60DIH-5; zM-%$M6`Pgc#bP?us4N(*IAVpk3-M%beUho%d=`X0jhF8}0K0dfze*z{lt&aU?df<% zVFi`*8N~_{79{)_OmfJXTRI^*lsT^-`w9C6!AHuL_4Q z&Kjn5C1Q->5#WKyJg=ZUro8-PTnO!AvJlSb-CaoIF-#e7!<3P&I(?ujsWI0$6G|u^ z{(QpK`w=MAFt7a3b;ivBOiK@Lw}TS)KtbWbb2%#XJ}f*ORZ&s#c|Vib?rtq#2A#wc z0UERH`FP%FLxb&t9ZG+9L{dShIlijkC9W8(u#lxW>E(bf7LcR0v9WabKopg>7oIFc z5bL!V0Uy|VI;Q~)#c7xrevt@f0xP5tFb$)T$xBeZ)Y75v$wVVm@>}Tq?u73I7GSt| z4}SBz4$mKLGr)VWYnDQ<+IIqnx^Cqa{%UAoXfaA7BErxh1BjG6UN&hc{NqQy!3T6c zArcHdaC5h|OqOOE5-Tg&Xz6G{7gcL@VtRgseRokB0-C!7WtB1ci*jtNQ&BNDL*)#@&In2%u&ibVZ}$0&iM79r2ExPFZf3auWt`-(BKt@x6)hPJw2STF z{DR-UmF()85^?$kYpjrlBj+ZMu3<o0m;!n^Q%BO@u@}>h=)d3Y`62u4Sc#BW!;><6}0P_-?oIV89 zW!yH$B)nEFo}ZUu!!V1pd?GXN{TqR|}Ea={1WsGJ+bP8y7_S1>!51sS0NpHJN=U@!#|}%))ra@%1S2!I_h)83k@})!Q&uVo zEFJoLh!#Z5q_D>7CI|J76JJ%w)fC(N(s`w3D}vgmbv_}%k2Kr_iTvCP2CQ}2qCa9d 
zkd`o+T6m=S1)-H0pP9ISVYIdOVCLw)1K?R@H5R|E@4-<^6w60nlF)=}_f~D=Y_t$zN~U6Ov|!h*8l?s5S9i%WSX5W$++LAvA6ya80t*LtLZy-9*nYIX+p+*}3Mw657mO=eUxM)BZ}^ zS)1pUIuw6>-T!GfGr0MhOx&51D?QdSA{>I*`AZrgGA%Zv_FP}yzmwcwlwMmA#zH_h zNaO#0f#}3j9KeKC25Z1&;fgmLYs-S_=fV~WS_GYnZYJ$HwlrH473=6&q}IGZ6i1QuCHN|epl$Esv~xYm5QYh4(1@s5 z+uQSV+8xW1)X`A|SOp3IB411J5GUty&pPvgbXioa86rkjSy^qjbF5Dx1%)=I-QVBS zH3ypG*FLSFM<+0RvbGXG`X|EgfOydQGW-joa6;R5giVE?7BrIs&{Edjhf70wYIwo~=#5Vs( z+7j#jm0MsC3CP}U@9v$Wnn<5lFZW(@mdz{UgkL&=`hzaWCWD0(R!qSN?DP7{q}jbY zruYwd5T0Gm<2>|uuUJ^uAREYF^TNA4w&m~fq;guW0pZ6%!9R*(OG`Emj&g%8nRwC%eLDkT4)FSVsGWCl%GVF!yH~_ww*8x&6 zUmXGdv@?6O&o%xX;!h+POEd)aKBI>vdG8csW#vRpqnhMLKgk#_fUn_7rQ(6VR9Efk ze*OM$o(nJ*9|Bv%02I77rU*y~d{EeQ_5msL#j0HcllF^v90vfUCZ&XyJ24FN9=3v^ z8v~U$-MxqeVX`mvb7xi4|#;wezUl_dX;9cd0dda09)@c^XgGnCIPoA1eYVb0LX|K^?nk zDObCcTbw`UHw}j7s=y!lMn@gc?NArC&%z3rKmoHa2(%0}*1Bw&58(Vz0c~#E0dJSc zNQoJy{c+4Hg$gW;)bD`5ydNcoIBkXV+iv}+FdZqk+?y&a@ljoGheALZ{JFm$X#b)4 zb;--X58K3qN;y70k+D@$oTy9a1q+4{7;#aj2~F*N#W(p^s5m|L`*SHcu-h#T(D9SJ zTshZmx?jJtz)qd*mPggNd8<#L@hfmC{iB8y>ibo}2k_xSSCo&g+CO=kB*f>`-4_4X`K zgcOZXKt@C3o%5nAHCoVm!BuiWfy;xJO0ia7Uk_0QUcR~sAmF0rWl6}_Q=s07+UPN2 zVu9 zoy9eBmfMAUdiY$#9vCd-uRw>5(WFE7&pDm|S4r6x2anvu1nTUdMR6}Vmk%&fu4*f> z`p=Y$BAC7)Q2}}E=+snq*?JGeD0!2eecGC$;Rm3SWtC_D=)bZCX$1U~UrLgVy20St z$6RjD#R7;R_`s&H=DxIhZ6w2ElsmUvB(PLfA0=PI=2oAv%Wkxb zbe=m5w2NWnTFH`xt~imG>O@SM({|h9MLYez)EP@CF z7%oK(jeby!aI33IP+(=%*Qdm+t%-3TfkmYXIMo(QgG-#t6zr6nxmaB(xhIbh@}2r& zFe@XKEbj0CU5=IfW3|ZRrsFlMQ{Th@V@^nXFm*~LxjmSV^ zKK}*gf)(Iygk$s^aLoI#o+}}9;D_8?>iK#*k}!C7mNPv{2S}!gqLj3>1{a-|hrs#) zE{~1iNhu>pj3mYJ%|lVk<{Pw@r^h&Q!?W~9pO=@wjKW9gM>~0t3;-MYaNt=EQ1tbihlrDceScSr*TA9e zsirvqxC|B^nbUJzwOd11Z`p|6O;f@evJ`v=`yU~>Vi_yMQC(^=i4qs(#*H=JPrgrl z)N*MnphL5mtA!{Rs>VuE(2vyarLo%g6D%N9oMcR{_aJ$1^MZoUQA2QUa#Bpp_2$>m z5mSv0fb8@HU&+)WfmU2W_$9B?;Rfh>6_n#Smt6cnsAb{f<2wUGPSj^FWE<=u&zu+@ zXyL$F7!E3rXvls#b^!xVS|EKs&nitnTxHg{CmTkw3MVk!r8Em*-Cb#}*`q}pC$H?# z0PF1Ja}v!7GUj)`3hGlp>$0T#Q}*AdEDZ2%gcX=CIButJY;DQ8-7&B+_K!6A_*!Ya zAAn#&!2&B(bnNFYc5CayQ{?%%L!+1AEe=Np=q$ht-qcL6*X91;G75lO%y0nIb^H4L zv+AJ?I2_2O@%8P2V(h!-e7LJHKv;@sKyN!3+FJQ6_vianTosSgk{Mo9v={kBM%uxZ zFXwUODdfre5gt^nbf>um_SV<*QY$tX%N0SRg4eI0;DVXZoS*b>%(|Ed`GM{m)a=Ic zofO~rhHo{kNFkCoZa!jSnwpxfrpp2av~;uOj&!~x^n>dbQX)r(RM$h+&~yRMPh>v6 zPz<+Ab*#Hf*@p|qbTDbx>2<3lDJymdx)#AlmaX32I4=oT_1Q_!OW2nMZOMSF>OUg_=ut$JILJf($P}QS z0CrsNnB~uq(&|LM(!|8Xp!WIEeSaXJIW}+1=N7~tL~T!hs~+uScvOwMpm6)Fxy zQ^{}NTFh2QxC`D$-FpclpcAeg)GX&VG~CTGxsIXRkS?fEpT6+PUAr$=JZdToKh=#$fHyI!p;7Yqw#0#$hX4$Rih^Yt*Vf)) z0G&Rq0}7|6xqmKfL08A3qrntX&zs1eAMx9_Z##e*eMY1l|4meqj{trOk|D>1Q-1c$ z5Y*yxDXU7zI$ji2;-xrq5$NIXSXw^+!)0J>zPsho5&Cp#4aBa9UV_h!AkVkW7oioL z{FG8;1jwV~)PNx{%agFT)lp0~gR55Q$R=Ocj{1SyhBq@3a2G*!~Y%Ip4hBe=rm(t^EcVm)haJ3F3{d)qLzCUkglDG(fxt>2*dNpW+ zzsx#9vbFKEriv8z4H<(r54e~EOx7x+!Se^tZMMh1<~tgCT4+|54?oywVsC#ose}a( zy3%!;*=HF(%8z<%YPD)LsP~IdUrbc!)wvL#6~7jh|FS5wn#Mq!3`llWjF0L0{(|$x zLLQx+G?sYEJ=F{A0y;u=D2?5}iTwAEUQ{Ujw6rsg(J1_kq|Fnt)Ng^FFTAAWtmk(O zY>epfNl(ZT*gdEqB-KtpOnts!->K_?>Cqe}YIZFD6nCv@t$hO^a~C2Hj3eMe6dpl3 zIkg51cQ;~UzRR(*_zGhkHGbO1YZ0>kZiY_0{Qp{jPu2j2>Zil^^yIQ`uHJ;CU{dgYxaRq`Eaa%618@O*R z_)&e{Juc!sBrL30>`t-4NvsT3&;725@YO)5$olR$&KvM{MKi0oyV+zNv`x=)WzW%3 z|2p~=;8@p-L`4NN0cGC`>xf=ubq1a;-aWqn@w&VKcT>T_?gu=m;1l<#X=cF0gmlZ} zbW5b$`Oc=U?*FM7CBILv0ilL%kq-YsBnci_t(FV1cUmR!17dL*1hV|(2^GA4X<}Up zgaVkua7vBVRE^?bu4$&ymGx@Toe78=32s(iT{hoP5vHdTr>^hr?uwE@-c}_2cd_{l zKi<|PS=k8-eZkq-94Ibmmqrp&fb9_C_ab0@mu~wK_zeMN^5vyd!t7{^c?eSweTC~d z)pNqTCg3bNIy!2pL*^GKNCi8hJg*ukbi4@}r;%k8n1T0<$5q2`iO9jCol!Oeu)Ok` 
z_-Pa?GZD)r)K^DUVuyEGxUjSZ8XZSRcjC24S4-@+b9<2HKnVL15MrO=~Vvs7^$EE6*LKiZy$DULkDd8ourmiK^!TyZs!_aNM+l|4+2IcY4(cYsCqg z{4)O$nPS2i|D{R|lv$#{YU1IT?{6D>74ix&<{w*^$62MBx@kN)K z)5D#Y+8%Tt!zf?mk-<2=T%_Cit6bb>RX+X_wa2vY{V8*n7KM^*mu9u z283|$aDTl%#OWDH86=~YPkT00K0BQ;n1UGR6wul<2sWckI~KBQvxUyihq+xJha(Gt zK~9P6G)wEe-1@nU;At`#potC(rTM_H3Cx%(ZZ(MM&U^dnK7g|fEXlT*^*zeZ!#F!D zrI+jOw(&mr?n3O2%9ruh-7~)yf7%aS1GiBX%OV8!bZZEViX*liwd{*q(`I;&D|zQq ze!aWHOp}YlrHc*?V?bBxsI_ZTD<6nf(Um_=Hbn%+L5*te*iv+Ff%vt6{d+B-BR()O zoj>%_JCU+A)m>`TW5kiF+YMq8=lqE_wRr&fkdi zJuo^b@2i0Ag{RW*2&9a~fMJcZee0~rNL%ezW+7ee*7Okfur6u$wlBeQcaA9i=Ie*b$2wh;3qWHa&0P>(2O? zW6s1^_f-{TUJYh}$ggU_d;CYj?l;D9?B6l4vCB}`vqF2Umk~svtIXQK(sHq3Iyo2} zH@xg`Hkb!r8ual)*!_2yOCsp4prbkM_zhG_%&3<~^Ff@XtAxsX&R3Ia-Di-fpUw^# zw{z20rEuY*(No18cM{(Iaj_L_YX{{r3t)$g?ZMpqGO^w_ko>itzAbwN!HGC|`8E$$H5qx3om#Qv?p-Em;6+4pY}2j>k>`vn>S%yo{w zjSU9a9y`?M+9Scb#XlHECe9q36#ZjqAv`HLSMa+>?Qg9q-=1 z1I&X^gJ#6|#{iN6wR*3LgnvX3+zw_i@c-}MH#8zXN#7BP{!ijGw|95k-m5ThH#c#K ziSrkV0|EE4*c5Hs>2)f>$|X-4^?2-mIi9i4)Q zAeQJpC@(OzG@fTJGe?JoS$li$kD)HBHrriIR8U|FIE9~p&7~9}As~EbUaacl6H{NT ziGc|1?(HSpk&+@}{f*3R4K{09r((~Cj`r}I_6HhdaqpG`1G^oJRpKZ7>|b4vgRDUU z1MTmKx>J%|$Z+sY?t@-FOPIm$`?)MHVM-l@PT0Qbqy#XIf_)29O#2m#)*f6y$%2_N zPzWYio*yMBzrHGc;!*T!PM3=@);u*1`B|^UrrjtB$XBbxf#h)e#TcO<`GQaO7_bus zv=iyP5S3yWAsLy<_h?&^JwcyHRg8?w+zfnUspZSTg7`bP<^22)a+%zp6cw>X_#W6o zq4Zu8@)*nr2rLrf7)nYIMMVp{n_4X44xtZp9T&}4t55ga2f1Oox0Lfz^>gq-r(~3I@errqD~ZRN?lLYB;B4Sh5(m6QH{EoW3V1`X(`esuX@#k zS@yYYYy%FJO--JcL>g@*;=06kwq;E^GAKREpA!bNNOPOdDbK_3%4EigSQQ$R851Z5 zmvq{x<$fM6)a5wEhDDgMA&=6O(<`ay4BtBL)jA!#JFv7&<8@EC_k~eOOb|FG9c(-G z8;2txe~wF|f_YA_K9KueFW(Sq0p0GE!x(h`0Y;qdGkEoTQ^odFYkaUsV#15{@x4H_ zrvzl@;ZNPx%(od=%#XYB*j{I{wbC7mT=+Q-L+@f@V)KiOwU6hWhPm8|bYvZ6hsL5Zi(2huo|p?JzC1VDPT)UJ(n)NH<(LW5Ck4EFaEUT2wBNABamz zwwhUpmKJs(!|s1~e7IDE1G8o_mD%AwKhjUrLkva&`HbktMN5mj2;RB5M6^r%NFEE8 zq-ZZtE}W@wW^#8rqIwr{96PPM-K%NM!o{^y^d0RGi5dp~Rd{4179uQQDs&UlTdc#F z2X}fHSZ-c@VUGaQXIR+SfXEv*N;?Zw3%0clrc)96jJ(6sUG-gUeckEvA1I9FlNav; zm%IkYJ2WL_^*%5WYyRCjFQ?XJsb<_UeQr(zbv#@)JBWnj+5`Lv?Bo3i5-0>|8k|-i zyL3hS`-A~zep#D-;m!q%ehEG4$M%>mu#pT_a1zujlMf)q>FU7 z&f9S*RHto#?~|0NYzV~T!E4e1+(~C6-{5;r62@UKYgzL9Y~ugn=`5hCPQ$IQh_tkH zgM@TQhcrkDh|=944N7;HfTVW89f)kW~g4qK8w6j`~v7D=X$y@pEYUH+sB8 zslYbiS<;l*CnokQOCo-L&q_-bn_R%bjUt{x5P4rc7jE&yB~#>K^tvwouMdnjgrQ=# z|5WUZvf`jl-2V7Se^n#D`z)hMWTbXd5*kP9&ZJMO$_SqdTVGybLxunj1q~e3^z_Rw z$Hk)La3m9PeZ>`&+Q~>IVU^?P$aHygO>OGH@nbN{qpIZnEn3>!)p@4F4MWsK&N-I_ zvmf`-Mt(nMkh*ldIl1$j5+frF%E^7|n}m>?;}+bcL?Y3`=L|t`!GMH1EjZX?Q<5uU z;h1i1%``5S*2=u8S?*WG;}n4V)Mz#iZ%nW6xP^axB(8dNWK;D;Zyv89kyis2p--P_ zE2yjE-MPAI^4!4XNR2+;8W-8hyc2$_iwq6=Cw17yXvCO^u`mif3+Wyh!>nhEk^n>k)na)X#Yr7w5KS3+92Xl=p0)TZHO zju50&OfV@NRa)UCCcPXXM>FOVFmrRq!c_L zlUla<-dyw9l^+Pnq?a`Ky=H=+(t!C&(L;WtYM!q)1zDTOk~u_(JMca89*NX`&zh!S z%p{PYq^TQj(DFK%Wb}xtuyo)=J0-9B$c?)?3JuSE+9-W0`J8vwzpTYtE=l{aj8e1KX+J(4X|34 z4_@yJ>*`>d1l06L52W#hYsrE#|hByhiHq`X_vYW8$@ikzRk6-SR`f51%6-Rh`PjYe{ zFOSnz8z&9pgGu%2P#OFO=oWoM9V;t3t=4uv6waL(RqHw3SLN}!qQ`_%`ZLqyiYaHL z1kZk}ZgL)YV*~||#*WO-4~-6qE-NcQ`4IEyPf@;qB3nME@v__32)i#fBXeVN7Fx-? 
zB$$Dv!Q;^vVteJXxkdE;!nu#CNI+31B+Q0nV7&I1QhkdVPUWSf;uL~HDbDK z2Ob&@hcS^c+CZFJ?T>YhIl2R3V^${_h6PsCc5F2D=!9UT(b9iYmW%+F7bqZge7hL+lX$OUPM|x>r zx{7v+Rar|Co0vx&RbIq>j7hqcL(j+2bkAvw1)4j$Ig2?~MJT28DgL(q7}F+Fzz-T1 z@o%0t=%J57qNj%kt_~QR`g`8gPh>j>+EtZN+IFATFfttgy;K- zCIqWP!RMx#^fyq$lGyPih>r+V{=jlbXLNO(K;OdT=eTv2|OV$LZsXK7m*Z#-=E6fZGB-Bst^KS5U8&sOh`lz zk4DYS&2h0&sBt3iDOQO1H8$?(e(zQMJUl$z`Q-t6Uj>)2umip^sv28CCI$Y@^Zf2+;uQpN8 zc;5Ej(#r1^eZ?V4z6sZ$*gc#1vW%0x?)%~dX-v=+J zriKF^VV)O7>n_1GG+mu0fV?<>w7+LsxnhSgD#+ty@`Hn28IijEXCDS7y}%UU-i`fh zv?%?oU83Xf#liIfwOQS4CN-H98FhSYes{3cINs9o@X1blwSUX(qR%o4Xr_i6g!53b z-vh-~mX4&kxpLDURZY-TXUi%>Mm3XWzvV^%91ixxlzmbP%1xv@nBm-zV!_CSM?kRG zQsaCZiW;DOfi)n(!pi#FU=K06p0@253gT^t^h;(BG!59tA;LLYQ zRW)O~8~0@kK&Oa_negPv8+aqSILz9dGrh9=-{_ zNHYpz%Qvc*+Q_+?jN7f(Kgsu;kYr0|VjEf## z?{WN)Q}m{Z;3}G%Pi6Vf?@s{r)wM0*rKy!+>NtB1Jd}`f?#AgyWU?T}DJ!ehd=VLm zmKSldh_ieJ(T4l4=j4zE6p$g!D-xpG3a@8`3Q`wD-Xm%LV};)6g)?Hn4lx=cso0g9jQ#wH&$GdICu;;$vRssJ2*g zfycO+=T@kgL8HhO5#U5*kl@4_*y(c1L^f4*)790}S(mwS2)ChQV4C)Lp{eQ7@I>on zdQ6?5Q{&6=uv}W?_B1`@$qHOejfmZ;PG=oC*uQ1_&5(&vnfiP-{#i=U51C(^54@K> z5U(gxexyW;`S%ujN+WnHyxd*$*NzJ|d^eQ5~_4HqZ>FL5qKp z-%a;&3Df2>EiKJy?Kl2HB-M*i82}l8%6w#W-9Z!&895d6YDF9za4|o3dhmvL8usW~fhXN=l z*+)stE;{M67Sw1nrMxyImEoA&tXqXGCv>tIFpm20A#^*FB1D!h`M>ArHWya#?yf^N zz(U>RW%#@7N`FArm!6ikpzFw~GdYQUyXAh0MUnfJ-!wWshl9gk0pX}eeCFQCszZz9 zS6BKUKUGqSQ56g?8{)+={c_k@roIs@`*!{?eRK0H(=s8vtD^%MUewuHx5RZwj?CSH zCDWBCjF`UE9gS$4J|R|^bk^aGV{*QSsDaC0nl-CSbc*+M!X$^1S+^%^hti)SE+r-X zhSv$(kftay+%i#CpD5$%%KNU?KDWL&a!n}}Xck$n)D91H63y*qIy(oH8jy9mNT|9f z$fYCVi+0=YhJ}ZHoU2%Eg@@XGhn;*m28BAIhd>gIpxMxGz1Gi8abDWB(dKiRmAQRP z>^pwvr3r4t=g}6;%*OfgA)Gu(up`Wc9CechS_S=bq;bw0o~gksg~`c?$ViXMH*C}D zOWrbRAgA0{Qz`$@;N>Y69v1jEGbSxF#)1TGg5N1`I`(`-sl#`T_anK$RCR4wxit@0 zNr}tx)FYtCIJ;8rQwTbtEog`xqJX+d^rD{^ar?pq2*xRBq3xf43^KBdW#s@;>01bL zMMVXXd!L0`1kgBDe~g%G5dP8kBV}$_I`JzVo?gViBb${BGW|V20bBP+Ft%tr zvTCUIu=R9C#~kIG)Ys~Khl-86cg!OwsPOzb(Qx>K=RpCt5L2&(`5Q6osezuac@yJx z`Z7b=vW~V=U)c~9<<%1B60ovle+*Qw4^WU$khSf2BxNR6Tw)uigj$e992^*vSaUW7 ztn=nbPB%~Q-oi#sS>R42p{4}>T4J(}&Y?m+OesS}$%vtmnxsi? zB?~;|#%u`riKU!eKff|+?Hg2`!s6mc_tD`NJ#}sE9zb4zw%=sB=4D#pKXbd^Vlylj zjL!nnvg7JZ?@H;A+A^5xX{xB=kGLmt8m>%QWw%y*zR2mrndpbvivsI&Ty8An92{4B ztmPl~Lo|!fd~Yt$sBko#xu@ouUyEPabi93QkxGV*8VqAw9xkr$$)8>nPc{8(t&e02 zW+MRWXK|TdGMx7w9gc-;3#!UROTS+=N7sQ^^Gk7gSyp&JSi1=$BNMD&biO6L>T5eJ zk2g_Orm34%^0Efcp&0%mB#t$mjVKKkE-YN1s#~E^9SPL}e9vqaMw(?~t9a7tgkh+t zaKfg&IJ8~viiM1u;fbFCe_Vuu`3-VHN~b&5Q|?mXn0g^b3?vP4G<~NzhHn`fv+a!+ zuK5PA?r?Jq#wmD;p6or0qTEzcxTV#J5&Cl|OL}iJU9ZI{{c*g^$Il;u-!$s_@9&*H zAMI9}z&)Sv16UUIv|L6Tpn4T;^?iwud1`AL0or=GFC`Vdrv_{C-twqIlamTj6+Srn zDWUgcr(C@vb8X2Goy>zr_bn0(WMLBWv(@0|)QFUvm+)K&@{x9WoXA(EFKnZ~X8OUB zv(~(}-x^FvOg!qkiXk>TYwb|W>*Q!gBus)bUGfPXC0M`NhjhoUx5CEsU1P(-SlTcXnm(T7`}Z?joEBTr7ODo# zj@s_(d39`6?~R_FP4A7nh1a}i?zkGH!CZ6LnU1(8xT!Mnor4$Ixts|~Ok*eBi5Z6DE(nv(S9R}Il z;(AZR-r&RaL6Z$(U?a%T!?1%B6pop~z8MM(F8(W)q_jdp=`fOq-HL2O4tJCsRC1u@ zQt%aP^}k*Z-Ta4+1SbmO#aGnlc_)V4M>AGYokDg>zc?IJXeR-3nbrE8rpL(D75MIZ zhN9HhT#@sajNe-Rkv9PY;ZNnzl=|3aspDz5c*s`s9^~+RzDwaU4F-dU4|VxW%ur7! 
z>MYV>RUD=CB(v4&e-HfL$ZIjAo{Ijn4d(Xtc2^D`_JIK`oO|k;20&)2OmX^DQ31=5 zg_UAWg~X5z?FR#qk3M_=ZX5*Q5@qweApC4i=((c{{E%UINPRYbHfy#91>DCrQ&FBM zFe$ZDA-Ij}Bi`tsu0PlJ`^yN|%bgwAyv@yHq6MjtehlXNKP|u-TVD-|Tm8?I-3Pk@ z{z)Ma-7NC(tPQgV=wy%;kH^bPEOT90SoqUDE1C6$aD`I&M3rp9QyPF$wv1kFAa@Ij`D<^wX^Ts?Z$&FoUhyNzU}_N zvck=fhe(bA%-Qgw$*)kt6mk?e-$$mZ6%-YFJUv6ODZX#~Y5RL&m3E%xv1ouWrlkj>m|_iIv<=VXPSV;~m*)grMHc^V^J5*HWdFE@x=Y9uw{CSO9z zuz4zOh2s`{1|Y7djTrR6$7C*f0-!FPPtR$E&2|y*qFwoD$gRgnnYNE7Y56}RU4_^2 zO9^rclJ`(X)%^ao;8DM}V?GN2oAENEqWmvA2i6$%$=m&C`{JgcoF~TJ)%kY3{^ykFcKBsjCKMNlgPywrE8}W#=IqfnS;huW`^?krc z_zs}6jTWr`wHOl|pvI>1FmewwUIaQ}8_2B^Q`RpmV&ZfI4HFY1!nyog zujWnKTeladaLd7rY;7NW{(yQ^Q!^4SiVkI4S>(#~zYu_}>B@{gjDMW!9iqAh?fbdo z2rbKWWr0UZ%%T52ujJ3J>ntBFzNBtF{_@l#;1xV3M8DaP*NzIhxZ2ARXlA`nv}9P0 zbc!kw0Tg@~+rsgc_aDf?Sic23MYifwU*z>{AgN7S&ei2QfW4Cv63PWReagth6)SF( zlbW-$sX$EYBqYAU4MI=2Epk6TG%On@#YM%r%Y^iry3TDA-)_7VnwfG&|HO`8P;Z}K zIEoz0(cuB$rz7mI3(t#E{Wr&NyO(oKM;Q@=NuY0dh9n3`CUQ+plU^*KlHLIA<1*() zdZhy$w$36iOPEjQOUPXDwaaY?e87Rc*zcNuvrTr64ibda(*;g zgHt~6ZO^#HqeJm0AKd<)S6(wvYt4b_sN?3s|KJM7w%5r<^a_jR73kx&Dd%`BVt%~dvxKZzu{yIMV z*OA+L0r{3aGT#6)1RD;mp{cY$jJHv?7-? ztxd$F>5KUHj|NQ!M5P;%1ZbO^%=hmrh*OVbJnH%I0Y=Fhx?~#9pZ7Pg-IaU~?zpK+ zD@7F*S@pN|ieVOkHgsh=?a;x*Ex06{z48y&e;#%tGEaLnnnRcRmZqb7=&Dg*Qgn>3 z7nhe;9R>_aO3o0@w3u^E$jv_BiM#tfhEEv)a^Mho_TmNSr2hS9$vyuLWBk{6Vc2^S zOwI7s3ISb8NvXN?@uwb^eC^_sSssaAQCXRF+Zl0IE}ELrCnqPz^|#V%A6;r{!Ugen z*XS&C^5$kNR~q;*31{7rfSU_pZO(Y0zYEuwoT0<=!I0?@#A?$^hj%2JfP3d=M#7&R#Avc%Zad<39s$0+<1k5)Br(Ii{H>4V{J1xvoHVOzrYK0-i(G%k zzIaFn4!!bb`dT{8uxF?~o^Dq-+KIg;EDy&H+@RoW;(7GQ{icjtJWhzu+>U*4CMVsK z5e*umkn3Y3bl*L1G(%3QCm8Bloj~06^z@XWGNIgY38X7AVV36I)taNu$9 z^L@1(AVbvmfZQ;)jrufRIxzL^_xSI{oD9>g6!wkb4QcPVG4QmFjmgbd+nqWFpM3r4 zUa(^RZl-^Ho3@W1FR?(L|7Mrv9`9j2)^edkW~nrTsenhOkQpYH+Qrr27{4$+QFM?s zU)1>eGRyX;KZ82Tp!=e`WABP8;cz`NTSR{4%=zNFm*+W`!an%<_N&V# zTgsS?b@b<(-$B+T?WaF~kFR2Bftj}c<34NvorUJsu!&f|FJZOL4o^=0TLurn zFso*nR<`O~^!IqVxE@PO-+{X36~(dayR*%PFIU7^rmCu@C&zf=P60o0`~sYhA3o=c z+WL_Cl;8gA)^!}!gQN4Uw5zKcD_0-U9-iRFSCvH{(b{_>I#7Zh1C3)8wL6tUvM zhy{2;atN0$3gV4ZS$M9KVN@8Lg4BCV|{}4h5m26@` z+F818L)?2C1r`>PMXrv{fk(L74RNcd^a>+EnWRLo=JB2ya45GMyScrE^ksgn=Nz9c zHE>~#zVCnhNd_XAE-Q0rA28CbuIGaXu$0)SDl+oWo*m;zAp~f&e&>}r-0-%&yw2IN zxMP47>j|?^J@R4K^@y?Y>FoaEV%W>RcbJd6NcBT!;T?sK4++ucYT$dl5;*US4#A-? zeyXL&{E&{8rPn4Rx|5N;F9x*i7~zUWZ1vQrP*1*oHB;wDsh9{9Y?stjz9IDBbgYu7_$JEr_bsqPc z>q+A%0_z0f=G;Dij2ey94*9RK@YYqcMSDjtPN(O_^CC7+dH-Fss8!{{69I$PrkmB4 ziF+a`oe&GgGPc5?VMt}Fkw}^TNCh!jp637c$F~2#8MYre2KbuV8*e*0&=T_|zO{6A z4jKz;&peLiMLx`j;V;UO>k2~X9h+t0j44)P6DVISF4{*$ z;^XIg-<3Bgm$-dYmiL(1yvWo2%7fZK9l5WKf}~F6mUl6%d zQlnJf34&ZF*Z3zR@hMHMsXoQnL|T>~A#%vsYvk0vlu+mTlq-EBd(eCE@bI!QGuPSN zXNrmtr_LVoL2)1FfIY)kwxIb7Dp*&4E@px45M7a)nTpD!`dYeln8jrFz%X;rxKdR? 
zh_xKb!F7nBE**;Njz^_+as+Mjw=Ot7Z$!f+po%-w#Xwl+4T~E}Jx)jppXGz+3%pT=P;6 zcWb9NcMXtaq-cK>eJ~MjYyg_}Ek!BNMpPG~u#zz7>O~Ka%kQH~d|ju-{qm{3$vUht=soDgt)5w6|4k zcKI^u3}o1&zh`5+muanjKZK^k^tl~F9}l9zn>bho7(gOahJzfciMXR9`+ooWfbjsn zQj*zOnRgjnQ&TYRXs>g=BPY)-EW&+pk)WxO@)_HyaBk054QG8Y#|nCRAVxOrG(bPn z1bS3J+k*Mla@U(4%{T+34iV_+Bv)6^d4oU1LI+Je=W3oKRr5|=Pg-(V@#htgRRZ~o zfSOvuTMhg}FXzS69L~5(^=U!t)Zoqv`YV2x9C{u*9U@;}U&L-ohH^7idsZ1xl*N!Q zM2d-+V z_^GZx4pPpLJgjqzRI^NP+$eu1YQH2!31S2A-^6KS0kX+7e}Au{$bA8E)ibad0M`wK zI)(Es=I$>N8E9ECh{?$T5?bK3^&IYo^=7}mwE+rYpENMUz^~`Cv8&jSRiwSwNDJ<> z+Q-jh5_6rOb7j2ao}S7KwM55@l{;W$02f?W`jqa;?xgE7av13HuaUS7HTllajbE&T+>+m#>ng-~nnNUIx&_bhNaA1JDz(I9HhGs@9|RJLv|<= zX;$K^sr(cjOU^I&t%Vz~*)aBdTJhF9A$V1+JCnAK@_&_IF0QGJ=K}OWpSD8csVx3{ zGP3v!VSYzbb{_H{nWxUJ`@VkHb+1Q+IdEZl*w4r1Zk3;IG~eBV{JRP^4*fdZcZF~C z6ixVqx3K97KP>v~dBf(4xY<(GwH}DW6ryfu$bkkTujZ022WS9{Bp4qKw-TYsy9+vl z+8YJ@x>>M7;GTm0!)v_p&5O@YM41n|V)**MFVI}wx$`Q=^_AyXS64S)07zQ%xKrqK z|pK9)A1{G{}RX#Rt-ZpiOCpY)#Ld}W_&$jaIeKD+6@`EG5K z{(Dvm7lMcE^Xg)_%-nwsPxkH_G;UijnEj;r>JDM!MlGuH-WU1h(Y!G_InP;fwe|n5 zd$?YmyJJ%c%Z&X<2BX=*H>3Q#ydb;g<|D&qFGr?i8%#mTQ7FMFUk;=xQ{T}^0VnH9 zTPP;ZXUEC8IR^r~B0CYrH;-aZ;;<{$KeoCrz>8H10E0WgsR36VJ^zmt4fG+(SCcv1 z*VH%kuq5szqe9WDZWMoj&NuLU87KUu;v0*qiINOCJ#P_^gTf)u&G-XWqyV)BjX2N5 zd_2EVf!(G=&yv_tUc{QmC}4?-HH#*yYzP2Yh$AWvwPg*TGP9eS4axU7excc2?sjMn z{QShNEDKC_?=+P1!WddoGI05VoIb5#5atcc&DH#)|CN%IWPi3;SUBXgG2%7)q>ldm zoSx!pY_MNSO%wcd$luI6olwSWWv@}E%&#bk3r7Puivh`!`t4v%9C*VYKYm;|O6Luc z%Dr2pKrPRV2yFS4EhG&i0PoXnTJ}fBxrV$#ERnEh`S|6FY+_c$Qa2v&2v?!W&)>h@ zhqfLlN{#aI)xHChVPdt*gQ!9wc#t9G53ztcI|CkiJTDDY6Lr5}vT1DV;|tn5~_Udhj?5n-H++FxXM&8|V~NA{uRk8sqa*qZjg^bIN02A7FWb`&cWXudj74 zr*;8o=J)QBpA6I1~^QEnOMfv9z|1K*+it z2&J>X6X1MUPLbu1ZENcQN(B-vtt{D9?)Iz^F5VD;WlQDa?od)vhD z&W5Zb2co(D0nqtXY+bK4U?VoN<6peHhX6F8DV&4$*G%rK zR%&!<95#?uChAP?uo9JDJXa@#$DT3qn!&*s^x`YQ#rCif8&5)RGhqe{g{{- zc(RLaY*yCRD?PWybddbde$qnNO?^CfAgCH%xjo$vcvO!~F@uwdM&q$#1fOXl8>dZd z^oOmU`g#Q&9S^H@Win?u|2{4n!nW-l*}bjyzZdfdu{0^5-kBRbXNuScGUMpf)Q{u2 zIqz5B#FyY0F3}hC&sI`V*@nzE_1+vv6)NlnM>9l*(1|dGH(mNH`HKJfvz1fFyy+51 z75P+0U6PriEA`Fy${~p%B_R5b{jr74*{auwnb6eK2UaQi-KPR_;g7`d0Vq8_eEr|L z>HFR(uPC34qO-HI@C*owRqvn%3urbCGlPMst2@8>wD}C++4_*NNJ4xXr+UYZgBYOT z))`?#>)tNnwT0@ScZ6FEen8ZHZbc;}@FElP3bmSV{{|@t<3K?(G@#u zyDrLb3#fJgvwG3td-C=zA;6jhJ^wNk+K4}w+sT+7fEwPBN-&^%$5KyaAdmkGeCP5? 
zGK;^+suS{SJ!^`kaQIS;n1hE9BOVG92eiCyGiss-Hj-p{i%w}V_gN9nxQ>HmS3_7a z@W{%_ZoszE60Ta)9bZf7OFxO&Sj1L?d-YVrd=^IU#6iYfJ&*k*V8l><@b11Wn}NKt za>$|yJkGx7HckE~R%h3LIft`-iMYjPRWIc7%ZnS+C;3cOH27lsQaNEnq^S0B&lx5t z=;G9{9g=)b7$UDcaaZ!Ig@~6If=vOh0a`b9zk;^aylsOD-4ALq}PWZl#Y&$oR-d<*ZtryS&;e(X;zYww<}7z)_$31 zpp`^JXk@mexEEkqu<$y|n zFCu(o<<-%RcObjRmW-I3?Bl0T`5+w4+}`eF+8sq2Y4-Kqo~v9h9ZyM4#={P$v`N!u zTR}%hzFu=Ax%pL;b~$vD{&TC$Our#ya$drY<;-KwTB+T>v}8%cj6NwTsZzqu#y0Qc zN3=d#g7SL*QD}x9JUDHt0sbJO-+>8rSZPv5V%y=6Sofb14USi@2;Avvu!$lpy!miY zf{zRuyOcPaW%yFs+EYTS94P0aFrdqt#>XU@T|UlUQA;WB>PGju+5)?02jm=S`1vXK z6cyUQXGIgS6r|P|&s$RKu=+jl4g&+jo1}N-hl9pi>Rjy61Wc-2tEZK(hO!?N(~itI zl%5>@92sfj9TKa3Uv*luGySiZ+z~PEZCmE*uTD|Cl4jJKEL4Wg;G6@nD<0fWFQ3WJc%& ztm)o?cw)5x|5ABJVUzYD#DwPD2Q4w=0YU={x?krj;k93cT?;fCijg~=qNonZVM6}9 z6p&)k)FcXL-}6g@QR|o1G|+ud`YMdOE03DnyS*(eWQ0i~R4`9r-SWpJmjxq;*>poV z*CyU+l8c%7;Zjcmy=b*bJ3Xf&Vg}ibUuY8qVXB$-dR398{TNnXNaKxkYI=>fwWFGi zXX+$ZzSN>Yh7y727Q09%0rqrs=N%s^$x-{grBM7xn|Ji1eAH7x+s;8l>-peyN-9z< z_n#MsnTpGz{=m+fmm_Dj)sV)sXXabg?6;q3S-6-6K~#BpbzxY?#plWXzM=K=x6 z$198TmdeLfeUPLTv{OPKaR+h{Bxi9PfX!$pW7*qSj%;lm=y)`^3kICcD+Tyw$bDQJ z^)8_X-Y2+d$Y|m1uwNRU91f)G16O8ZaWLJ^Kyn2E7$z6a?L-!LybrkXwlta9MV zonV+l)LnD+hEeb_?Jo<=(cs5c6jl)`sz-!KmD2{5{~5_RA*zw0eRoDFB()JM5b^C_ z;TP@=4|-;d2)eZuhQvI!8007TBw_59FG&CccG^mES65eejvMjCxBdOFtGgR9NfLRr z{grt0LEU&B^OSn^kN+SWfF2m1zML%;gL(pLS>?T}4>%!(NI)H{(TB%q1}*rK4{rC?&7;Fu#fBzEX; z?a01mfbNlmlGFrFHqc8Vh?0s*?L1iTV&1LoZ1qy$iMLiDh!J0!XK4elcbik$#yJf^ zB+@N$fDU1DudiYStQ%erpR~I*<|yQh|BEQ}`^R(b=>P7(=;9IM&8R=!<<*|q-Qb1e zDVf35wvFw~$-6`{*U*zW+L$c4=Ft0G)=>`VKW>oJ(gN-vw_8@s(b21wCf!;G%0e5L z+)@X{3@Jich{_tw6jyoiN&rj@hCF`-G?!mpo;gD{1@r3&w0!2MU-r{$3hitIu~N!U zkjjX;C8&{a+nIO7VZidTc>Zp|Z2KY?d|{sgek0PXJJUJ#HnL(~W&Rx%t<6S{Sm=HL zwDGjs0<%|9G33&ZZt*MSVsp32NCg`k4NR65Q%JM2?_g8(H@aOOP=gSLbDVPzCe#Cc z^me}=dU<(447Yr|i~?*5-0Q#YiaLwgL`Rq4H@l(Vq<#zWTf_SJFsR#%ICjW~Q!(iA zEtp*Wx<|fU)i$$zFHgOH82alQq3bf=*nPXQ+@0Czitj;YI_f}r#Dcko;WPE6?t8ES z1zw+g3d0Bw4;K;3{m(XJklhGu8k2NJSScFu&^?Hvi^_9aTIrn>{uUkz^A7yTpT8#+ z4gIe3V*4t>Eoi-(tuIdVcjn{&lQh~TeX}M6zh5J|`E9#&8;Tea##eD*uItN)gi1tB?j9fUFNN5n^zs-vKeS5Dva~P-b zkW4b5nB{W*oG2HJL1u1*cJ^43DbT@N3gkAzbAjjq8SsHMe3<21LW~UDcF(OI0M1dv zcmQWsrMMn4TCflSI#BJg7)X6$mc)I^7h73fy%E-0!>&iPQ{DHY&-2hqBEPyi`S*tp zs$vVKVq2{=AOX_w5{8Ja{WeTQCieBt{2LygZRQ1}o(|X&LuvFF^bO9=&V#o&JJ&vy zR0;5F=s%NzASV#Q_PB|i1oJ!f7yM01N+cmCr>DTq7fOl2UK806k#_P9F|~7lG$ei_ znyU~pKL0wn0h4$;{Kt-**N8@YUPM}A5TIGvu|L$Rg;%<{ifg=FIHjaRn9=jG6jOIe;=%ZUYAEEO8ZPLN8-KXvdSLXZ_C=!q|hyW3dvEXCrP)@ob5D?%yhJzk3H(5B4{Eh%(1FdCa|bevBVGa~1I zxupD(eawycTd)vQz?bpO(`nSwP}B4DE-u`{yK`TRfBH3V{>5P>sVYk^3ZbFE${MwJ z^M6_Z4ee(#(3AqCd$3u1TRNr0rM@T}6h#pEqE#UqFBcgf&&0tISL}%mmQQd78NSOc zRM*k!jNbDiMnjVLx2Yzir>7){?`azG-hzbL!UCLACtXhKQf4GHeI%(IcrR@;8ZosK z!ELAhYNz6XQK6B%Q~gg)!>V@*yBb3e6z~Ti=9D98WB!Y_fd+I#zo7Ybp?+6aQbO`S zEe1KY)%;wuQg+m+KF4{fLH^>B?8)|YZ9&)(B6zad5PG9MJN=YVM9Z zw?1SRD#ahzNZkg}ednqEv@Q#`LZoU?tX~ilv|Mk60N-|J-6PxGUXIMt*f^lIwYBpm zF0sm|8s%X%<#>IvM{OH_IE|51fapNaBxPaW}0Y}_y%??eS9*(QMv#l zjp9zC{)^Zu3TO1Nu&mPJPGPz#o&_LljC?ne@Y`2Qu4lQ`jS16g2pLU2aWH%QkY zl>Kjco+HsBNL4v{9-^J1Vx67} znzrLDVaQuT1%ANTjmc;8&?%Kw5H-SSFp#GVfsA{;#&r+jVQ#oCCFFk*Zl`(mv zU;3;IFTht0fk=>r=pb(V4I<<~upX;s3rgDB+A4Qc<}g6m`bdMj0_zKWfIs9{CmU-d z&X%`;_EYkvbtSmN?iv+}y2WvfB*4__;P@@MJ>YoTaip_a~ZF z5x*ku=fqFMR&mfR@8aEZS{ zTiC%OZC#aur0D&NayQE6BGjwtdB!ufn~*n|F$h)0K18b95+5yLLbnX?i z>rj5Dd1;8NM>97c#zx%z;Blf$*HGSaC z^?c;b60v%XLv`;KPdO@~Y=RS44=D9tmo#0zME<$4Y4WGfs0$p<9IU~QZ#wqrtABX(*f-E#6SPiw|IFh_C^WnN3A=;-CIt?YJ3pq+MN|?CcKXb`0PD;vO87 z_kL$OT*KgH&J(MidC_4B*-=4=;Dg}^1IPEcKPCcw-INeAsGf)rz5E+BbIe%#tlSO* 
zRuWQDlkrjmhzdmYijOR2!KM~!%WV%Qg$mc)?QL%{3qa#=Wmjxy_tFOx5*Z-;QO@NF z##Y}Cs@^(tbITF%G6J2v+%z;F=k-=5HgW`oc+BvyNs^>i^{ki-USS~@hmo9wFB!NX zo{p9JdW+>So5sb#5th3PVBGJ4IB`&p?%k%f-gNTAVhf2P87BD1TRWGhoUCQQv4KD+umv^6$uV?O{itMwXmzt%n3`g zoSXp)b`;j_%|A`#s$H0WYi3%sXpIT*G|C8@~oQffw?*R~vLn-ze1y z9hDA`|CQd#Ff&V4gyf)e86;ZVv`em(D+2q3%DO;5GCg0cwb8+oFcXWVrD zFtFv?&fB#oNbb#niE)oVp3#5bk6|JN zXXLuAtD3%i8DU{Tg9hm^Gy?pl*!L2=w%Ct$9YUH!j|Ao(UwlX1_gNzFsLJU!8&a|q zrXDpUcFPJo_ku7XmKLQf5=4QY^1h}tZmL1pOD$_@_UZzN2nZA&5Nz2-^+o9flZce) zB$q;oN#I(=gCGNm<6joJ;dv8ATPQ_pit5%cUw-&j>hPrcQ{QUa@&UwS8s{jKF*v4# zCKRVI;-j0EPjuugzRrBjEaTFSlKAPliQf`|LS96T9kGK-dsZ={SF_`_$#Wj=GU=&fbJqF%Ga?u zXSj4ASo{nF_G15hUT9(aiqFR+iO;#t`-+}!SHhX6@!U{+^U!si@RrtZMoV#O<@*ml z+JJ(=gD*dCV(g)2E7#-EH{@i9&j}XNS=QWYufq7rAJsVQ=ps%zxJXM+q^sGcUsTnm z{)+dvFG9H+oZ5&oY5Q$D*6)be!JnI>&u*^f3GRh{UzIBP z+;LbxG?|s&DtVMm1;}oFXaP278=ZZR@_UYtPnyA5D?Io!PJY}SBRX2hZwVRj!<9s- zFnE6-Ec8sE#LgBmEeXe_M5M~BLgSQ@9B45h#OmQU85!m^@7wcEz${y&`z*C31}@QM zuRkmW8D!p6n<&3qK(@khd|JuKYcEcEyVo*q4lR#)>Cg+-hY@+NwW=R#{*@Ychcoa1 zemJ74eA0ySa`H?)iYex-Q9*?^Aa-st+>_aw-0?7)^)T$f81ktxgD;(Hjn`K;OyKx4 z`Lb=N_$oRdShP9lY;5wuZ?k_UxLD48&S|h@u7(4@b`ySScqhDo>}K|+Vb*A|IAt4%jLJeh84-*%<(0)@S>v%U(Bi%K?(Z{mAc~w z)jy!ZG*k>n^o~Y?G9wEIYd&0}In8Hu5~pX&<6`4~x?+1+UnGh<8UIMY0|*6x46P+@ z4r>Rn_C5>fYOS6@$7ASykMOSRYZ5TF{#!tOc@URJLqxpnwSrd=g@N3X^J}gd9Q`kb z&U1nWHC@VG zsn_;cE%PnLfS$-mBOUQ&uPA8vTy^xf(j=q6JxiOgH0>hkf22 z6Tt0_9XNynZ$XyJv+-YoV_XyB84&uk5sqzga%vVEV@w+MrELg9dd304atyGU^=yBP zg@Q1F4Q9Olu;dJ4f5F!)Xp0IV-yazUX~^FF;XNn)XFz~Sf(rwwV3dSO)ff`eMn+tg z|E#{my4`rs8>*6&y{Tle*}UC}zzZ{KX$O0_Nb$g)b@-RDgYu8>sY-UH(!m_e>d1_B{JWLzSTE4Q+Y!=glp_B%g05l;TDHWRSe*{gNS%B4`KqQrLx z&+pZ)Q5i~8nCtX_b3^K z+S(*>r(0JW>M==7+~?;sWYq#^&wdcODvS)E8?1?2xCU80;gV68ajxGGbwQrx+BdLc zc&P(+E*B+CQ`NXDQB_lo*uK!=(O|gcH7q`e&3m9q{pRxmAM#u+&gzOHTc1=(Y1eTF z0rS8B|3Oyx!};W(oZf{IK9^ugfII7W5O=u(3rqFnKd$l$02Xz1uig=&j0HKV+balmT7N zA;Dxd+R9VhkVrdr!vxHC_Z9EmyH{?tH`h2$#+ZM7|L_p+$B#GUl;GH`~{%S@ilSH@tL+pqdT#tTr~1Z0wzau3gV>EY~kJ zSQ_uME26Egl1NLxs^QTh+}zwPoZFF`{YIJp3DbS+xyaoIm1~22m&^CQ$?=bILM9dj z*ZdC}B^5s^D%umMAc5C@aTG#QAb{y3BZpC}B(AJR?}|*%Y0&eVHj~6537kZkOL+;Y zy+3VN3GoYFN9v8M<~m7<5{iita&(yz{@Lx?G&En_gAATxaJm5#9*@*r;9E?T>A(nFOo>2>qmbeH&QiJfn(n8uj7@A04C zagVSjhK(lFZG-0H-{i!XmE$cJ7ezq3q)=U5^C4=w+1L~VW=7UdZ?1rV2ED@HNTxzY zhm=rl{g?`qJ92u{GH*$X2|G+d__T1oA3jXwOITmnMf8=etM$QFbFIQ*n2y9RYc{z~ z$4iv1{P?`n6T0MNS|Xw;BnpEZ1$$iuv^)hcR3tKR7&OSJt0z<*2!l;dLrT(wR)oqc z0)`Waj|<#&2!i<77~PYRHoVsUUVTS9`hS1>((c@epgzY9RP$+jo*TJzFTCGEYZas$ zkX~u{1*1ti;Q^h1ort$6m5=!RTe=kcN8Aa@`0YsrOn{CC3PaoKdXi{V~S|8Q?b z3`1X4mY^iOK*)TntItxf9DHhP9q_cL#&g&`1V$m!)NL34hNcx|N^CE@IQae_O(>^PxN5;rCzfH;-$b zCG5TLxo57q;_LyccxCe6X1~s!cs=Xib&>9`mDjqLt}(&R%7Jref7xl&)~Vr4`yWqS zT>s;YAH%4Zp3^owP`tsn;FfVklyT`^VLkV!bC+G}^W%)HUop^fZES9m1Y&3ttMi6~ z5tgb}@QFiO%R!f-@xws*OPlw+akRlMT-_56t2Ub(bE1)7*2_|$=Sj~)b+D%+DnSBD z^TFgJ+xL)0q7LCojs{PNLl>N?ZQda}@EOn4IM&rU(ElGvU}j{LeJq-}hka`!ih&j5 z3{X@salV-KOjc!VgNU?j+kD!Ppnw3pU~k$CtEs@-o_?wVC%5Xf{cQ1|$FRis*}r~4 zxvjM8dvgFLLb?%_0uq`CRmv;->q}P(&$W!;@mC9&XaZvtn4E&a55#vvxDEeoMp@9* z>Z1CI+`Wqqqs~~rE!3TKzwdt2b^Nonj!d_)3LdK|_h`lk2E4y9d?N^Ued6nzd20dm z-s=>yFDeC83paVJjc=Y=IgXglikr?0;QGFy|34{#zP7VE`G9Fz&Q z8g0nYfm|*`SOfEDwjtSp{xbpze1KCN&JV>+8!d<(Onhmnt7gm<1%q!z%39U-G*90s z7lvjU;@l^=nLP=D_>nIYr_~Q>(_gv2R4W%UOKMdf;etv|w-rX$IURSh(Ou4cFbBf& zZ^P22rWn=L3DzGAxzAJA1xQd2RYQQW0&e^rot;5qNaQGM^G?DF3ir9o%dwA7IA3sy z1MTd{(Gf^!z$;@SDItp#O?UBAKgg5$;6VRL`VS0K`_>LXXemWhX|4*8fbKGFuK=TUS^dVZS|s z$UpAylpC}p6qaxNmpJ<)UCvCGAV#D9ME`HOWmI?1_V|mQQfo&JaeF#{!YyU|%KlGt z2zrA1F$zCO=5GGNOiteWI7-?dm@oUwJr5};R>Uot`UP(I9m~rhtr)v+JM|I%SJ8Pp 
zF_lbI6l!k5>b)QLUXOtT z%HrwXB7odkFS@AiBY!A91p03m*4^!1jiLmg!NRCYA7p4Pj|1__4)ngmg65&Li*|5# zzrCoic0O+F*N(-;F~Y{79vhv^Tl;X{PJ}FxNL13`Ir3a4Tjlj1gSJJxOP?n4qUf12 zyC+4E7zyWl2^FPMayN5R{N}PBpBZaAmTO0w zqu@hsZf&`EMxviylNSP<4P6S{kBsGp4evv;?GAU1LcqN;cyBM0YEv=c6(rB&Uu`zh z9Zfr&g4v!OJcTD!x(RBDV^*HuR5S;3>XB=7$Fx#<%?A~JJcIPyhjLo+!5xd0acI3( z3r@AwKfVVzbG|GKwZVz0zBV?Z-UBHt_*rN0pHg+N^yQq)hU)5B>y+reJScBPbbW;< zN#zr&@{|Yrr?~H$!S#P#Khkb(H+Iihbu>&}Q|1&m0!Zeu48HJY{#h0ka*b|6jrc_y`jNJ&-6yUrL*MenxH zr&d#=*3fvevOE+0A%4BC1zu}*WdcOY^=hw`s8TUts#q83EnuM*((pZCmj`twocq`~ zrXPe^;@77UGVTbs2l&=s=pdb_wk9$;+16Yq%baX`dm9XE6e7Z82%yA4wIgGC9BBC7 zUWFb5BhUxD&1b5>4Hk=g=Z!V)%j zpIzH4;&q-mag2a1jQl?enKzqW_*ci3gM55NbdJ8>rb(orX9)|>0IJ!=A7^Ru7lv%5 zoV^6Z5>YPvpPl66)bLQ>NEn^)A;y3C{F^!8ErMtX2aptjO;Op;uH8(2(h4eJMz%@^ zXEh>SR#5(MKBgAz z%q_PRYOd*@SIKHmKee=`d8M`<^iTEwCG`J8Mk&S2)N&=)=>HK>|Lc!kg==x}&pe3P zvM>G7LG5*00%MIx?`BfH;^Td#OsHO{+1!FF;1AIPhxePpLc3|+`W?WT>@14U+0$>RZLHNI(-WN06SP2ZJ0OZ_y z40F70*5t}uu%EyjV;X+Mk^C|*1P{Ydk_NPFs>&tbf~u)7%x!pJlDX}q0xkB(vm*Y; zF4r!-Fysgx8V^!cv0r>%U=i)U$pUFwn^R4R7fh99ROMy5pDj4CQ6oMr4J(?2e^!_f zR9B0GnF4lav+H#+>s+}&s@m(n3FGDuo8NooflwvdF6r1R^y}a>V7~`Bu!U%re32)Z*=c*mm%5^z{*J7GUSYRA z*_K04@UsOe?6iTvtw?dI{OzYetoDYgP5`G$E1%* z$l55ZBdPhUs}``?LRkT}H6qH^q;~}y)1X5+b6EW5_M;K)YAQXv< z#Q=OC=!+VT$s_^-fOHWPlN=hQpxC1$kB1ueA!#R-f}-l?n(rbrnNHM(b%#DpJg<=M zvZwJ?O+Dy(#-xNoRv)AZYn6A~JTT4NS1GEOyqvqn;UsA0_o7(5bZz&#Il-5c#S^2J z{!V)FkHoJ2n5^OEcy0@Guy3UW)kl;0xw7)Ww2eXh}rkh z@(ZwE$Vghq=wfDpzXwMYBj$`ln;3j#QT?1YT4>rn&tPa^3VhI8-8-qrRa1j;_wC&E zRcExgwKWU($Vg{iP5QqQG{0LuCHR2>Rt!b@xmJ(rE;}~pm|S;7bQJ{xR1Ab(zN}G{JL2gG`(orXU{oAUEII^`}Z%oVVE?~LuuCqB&bcY z-;2*Lp548>WhOb+yj>xa!H~&+T_sTYLXlY2_tslqV=w+ZsU#ZIWGo z#bkU6-!Eoyy&om%-g%?ylQ)Q)<+zKTteldlmF1o;oB(#mtH5m}wPBC~J>em3b?|d) zBllCql#5mZFVvF+udY+j{1zIVqJfN?O2zSLIe_<9lsUvCoj2WCqgUiO-;2nynGJ)j z0t6TVgfj1x1=+v3V+Qo~CZDV+?Kyp^Fkr+4E9wDVR@S2-?8Q%kj~TOaT<1?o(z%@Nqm-Ch8-yM;7K* zZ(waPI;wulHUJC@lAM;7R*WCEZ^y8(fOhV-os5lbI<2~LHZjo*^r>;>oRkvug{0Fc zMy4O0h*!hvCy(xSA8%I)?jlda;B3tBrcw{bLp{-b)g=}sMlye#o>4b-{6 z%<%P<2~GzAc#VRyGY5E zL=QxL(D&~TC@sGaAK%as1Zm}evcLo6Oa#UyldP;QNH}#v)J6!e9K_`Q{jJ^fW-4Ef z%d-#seSjxji)iTiygfxSN+wCjCPn*}k#nq^jI(;uH`}PqxUv@)p2w@;F=gpsFz2Sx0 z#CXh{MHFlZUT!dsRgI0U2-ttDd%tCF!ug%XI33 zDq%9JcKp>`<_n|Cl<@7Os`yZCj2_yL*x0)}*H@?x4z8CTh~!cCw7k6OU&d%B5aWJ! 
zuxeom7J`FWs_JUW1{$9_R%hCqn{PFbQreBZ7m{De z+53}_4KB=TBuDwUD&3|`7Zi1TDZ)&loro(OR3-rKY}m~# z!f2hhdt>A{{hXixB`!Ah{VDJ{_1UUjT@RcE=&0L`*@FO*Q_)MAVGWZk0d?S>4;2*l z&XJKS3Xenk@6OK@4okx_^B}y?9;iJKd?wK&aMCL~4IO{Z&%EF4@&He)t*;xoyvOTX zruh=%_me@+en%CFPp-1?{v1hTP=&{Z>9arKd1jeGcE`+nHzzld`FU1P)_ZH5hc^ou zZlf(>jpRTX-c<;fB^40t(*&HWglX863Gx36hF=ZVX#iiD?KQ~Ft0LJxXoZo^=A>5# z`g~a6BA5(BfHXTkcSU{<7Gi4eI7nPY(#_`tLymX8d$nO6yU*XLB6|dpCaVB*(!PFr z>DlNYmR%d!FUC!uqF_RL7aj#yWtr{~0#5#^d&Aiip3*RGURhYyPgSfWniU5I20@G- zlr9&~Weq&lBl{8Kd(@UV4)gOO`(o5@WJJ?!eWOe6Df3v1YSoTD-y^4>^VlMlbmIfh zfLzFvdExok;SK8Imex{swrlcK4SpffNB-l}MNakg~8mJ9l6OAeu1{tt(}NsyDXxu^u47-zFx1YPLb8E{C)J3bWJd z?R&7W@9k>7=u*tInSHfoa2;@JaKo+6Z`1KeuLO@E)Q6TD1I5%=IEB_uB-&;BXw3Ef zrn>r1&4uYO8Aj0up@;lzqaaX_rY&X;J$m$9_XLrZm4*72lZ^e|-jg^Qk5l$8fbXF% z4jP=bsqAwX?3GKn1-UR-Ngy7?s%vFB5cKIBqwFVhUN@Hygxxw?Ym6O;#i+~BDg9Y= ziZhjm&~c;PcV0%b)xXiWL!v2e$C=Ub0k{hf$sW@ZhuQ&HLa_JZ`}gnsmjhzvFs5?^ zTaW^dtD>dc47PSDDJih3QP$R$1rH0?47@LNxvKAm$}%O~C?T{95~|~O-l{eB)+9_; zMx26hv3ZKpkQEQ z@@k?CIFhTWnR#?qM9VM>L#R9gW-wm{0vNltyWwqZ^;9$QvXxM4q| z|MYSA+qDlmp17Tir1nfrrPSSFam3Eu7U9Bz@*5ip9--UmVevKD(dY1Rs9!v_3+^($ zAIQLBuf9IzZ=v2du+%xMMNA$OYGB@d^AL{`JBc%W#o#{#)qbRAK5lk z!(vUN;s6a9^xhu3BC(E}N|iPedAW(Cy9OA&;%m>=4>)T3v{Wx@knb->Syv4KvbsVFf0xUPEj;1d=f?knyjw;`BG0{)M!_zN>9M-n`in<2yUevc1$WP44x;!p|+>i3t+)gGx zd58HFMkY8rswqd~U3vnWj|OybUlxizgc+FYc+p*h4FSu%#rvRQ<7=8*eql<(D!y8o zwki8WSuv7@B`jP-^l@orSzc!i`Tf7vd*`hBu=Y>y=I}9$oZJBYt%Eg~B0THkjC=*2 zj<<@ulat|zk+u>*kALQ*H#ATfP!%5*yip|rmY?aO*L0Jk1=8QYJA8gvV?rGg#7Y9Q3*;7=5y14iesv+Dj0d~)w2Q|o z9#`^HlEbrW5>c95yx%u?Q)EH4{8j>~;0?j|jONrBG73qd_ekyyUGXRF(_jQ;URptMu(w~U{SXWhp52U$#F9jHozVURG*s)1076|>9wQ42ONG=L7n1<| zO*;qn4$$ZLZFo>3Ykn`<7RE7a#Xn_)#J~m$EOUqg0mp+6j1utlv{&3%@6qHiXbv(2 zLp&W5S=-h1NtUx?l*_q=t|GtB1&P<7ll+$@0e}avNZ2^IxIP@LRZbygTV%TOhlPKR zR`>z_kX)4 z>GO9}(_tHH)_Ts)5z&aGjdvhn==vpe^|aw(&OVrmL5SiVL_;Wp3~i<|s)%Osv8jYB zOi9aN{{jhMF4qAh5dWPGO5e*~>h<^#NoFhu^Ps;vwi(t_l1n;sC^DjMOZd7%Ck{^g z?Su&1=O>kg7036rHnw7uYbLd4VM<7voLqM!``=zEK!lc{Ip#leti6AhlhN|7nkam1 zT@a1Te}KJSIoDqIq3im>GFam+s}e@Q0Gy&rFnl>V6gG2HP?=9+I^Fj`{C$U`HU4*pGz@$?BE& zTiDmOx0h1{+`s_{(qr=S^K9g=i)oGii&^W4$T~R$7#KDTr=E*E(6tqF_?z!5#*yLz z%a^=Ek8HAFnfW0emtJhMhj%^BdX~0l!!I;E(WpsV!F92k ziazeQx#g2<+Z#!k*8H{FnH~l{%Rp%)2EG!CPtzqn*>KsmRZ{XE{I3kSfgn#k*i92?gN3Pt-?)F(d=cgzI_VD1(^Fe~ka4<4poz4Dt<0U6rJ(L#x- zk~_<(Zjp%URYty)#INnIa|uiN&n(G8CHMNd$lmkPD&Z-OLHbC1flbc0sh1|VXe;%`vl$gglJ8#p(Dg5%7s@P=o$fO0WNLQk1uWl29 z9uM0dk8IMrHMSX6V5vJcnI!QP?<_7)E_I)GZ8 zP0O~hXD4emk?G*Z4{l4KNE?Wep_S1l7|SdxiyazL0Zw*`p5EnF8_82GDxWRK_V}-9 z?Jsn`9eyyvKzBCSc!~v`R5Fq*!Tw~qi{}B4^@7yf_1BmlV|)fy*K~; zv*VH%i(U@sGUUd-IO(n^L`1eo{ok?so>3i?|>OP!Ck#@Hc9{og<1nwnZz>EFHx zlnL30UxRES1gEVJG!q{yk7Y&BtSnDBu^p5JV@5v@^C;Fc20c`Y3 zLH9Vc&IS2OATyr@A)>N|hLov9o9`+vmai~x;;qdO|Nd@^^K~c<#wiLi#9;u7JvKI; zH}n&%;+g3(OUAzuQvs~U(eh7J;r>p9=ZD|oQ%ML$e=6w8y08KYFMfdXz<#9%WrbDT zlR@0VBG(${py>{2=rm@7%34&a=$hRV6h1)f-6%!_@;2W6NqyOJdV2c1fb%v@^Ze8f zEazgg=~^>sro8^LTV<#nzpVI~EI1sJS!^}I5znG?yCf*=p*%jn(yUQ7@LrK9s=kNbG9F1~eU+S|VH>QP>u&JnM^KO0-{@>Q)5y}^NWII$UIo!lVVQiD; z9YubMv}x3jR$(Fg`%{1Zx;)K*o|k9VJ*E&4arn1lU7IWeC`Oi-xn>;t!%`{1-|kb( zfiVhe4+lX089vCssK9y>hA#mrjG1$%ch7TQ1Dz+sX41dT>Y=?yJxi<#VXZ7f2!gX+^x>NSMm*Y5Fv$eH!3d=Z11->Ir<#?x1E03FU#IGXNkYe zfSVp9Uu^jPix*$4R;F&E+N@`YTdf(i%@j}BhR$h4zmm*&0G_Y`cLi=e#~6)J11 za1>dFgNG^!Yvko%1mrYQ5BIUR7=?l0ZF?Kz<;x)GUbGJ{_T0~htAq}zYik6tRMl0% z5~q25oD}M7S^56~4juv-mzP@0xS_ySaOMH?Ke?3&B2^-wijF%s%598jC?_RD603+_ z2}(%?@iRQ(_qE$*lU&vszOQ;b&R2EFf!@M~M@10)%0l*k@HjEjp3AfhaCU3T> z9*=D-`M7iJ!hv62x9s`(IdLo5ifo1yxQd)W)Y0DT$)xf}9+|II6V2~giP^_8EzazD 
zrzSzDC&eD-`lMN%L*{b*hfKp^bU|2uG~NnI4K+$V6kmxQA$kJjL>;GTt2k7 zw;wO|6bLkWzyJ5CQ{%yX2&seaHxJ$~p{aHzK0p-`G}!lXz*Hshzu(uW?DDYGjr_Pw z#MghiYxEDgaald`{qDFlCFxJAS;| zVyEr;2khbB*16KM|Tps@t zis@Fy06gHp{wqb%e6ZPQ^W)DOp|=!NG6XPsLShsr`wM9eGy0Ctls~}r3{K$X%a=U8 zlR+ ze??ZwC3HPI>nwc*4VWQ&RWZKDePY`*JJ#l*A{2|n32Jm_T>mf>m(rZvl0juR;*OVu zy-W-GX(fR&bOa76t|7PXW)5)UCjj0To7Nw{Jv9ikZ(Z+#9$ZXs7z5Cx5BQ5TGz=8Y zoO$+{9rBAU(H5b{G44wU}vu+5} zO}9C$_%^1ZhVIRx`gXvCwl9qmHhJw$Hh9ex(y}u$j4b$nMx$m*zw4(MDr)?kC@UW+ z@aEyA=Y`_i?P5tOP&i6x)lbs-aq97KIQTD{ntl~54s>o_UfCu@qgBd+p*~)>cFW@z?3M%&nVrc{YKzeSOd;6ow$qpffqD^*)51Hh&=?s>rQreH;=AJBF zwhwjjfxR~j(EP`1unkaspIIKa3MpEUj0n(g(o}dR1K4#Wa+)b|_c@tVjTAFOkkGHj zwANq!#$N?O=T8h23n8nDCTH52vVqMV*|aZdLeD^iDo*;AlkZ{`cFMw~aGPvZdpB4d zMNJL=>*My?cqE0<6|7G!pr87|Schb8VME6usGuQ*K8gL@9i*JoHZW~dVs}xT*1y2`D8dtt3Y2eJSo%z2#S-J<2PV|GS>2a z(Jq2$63t}f|6NDiNN>P?%%+S78`tFF-JP4mG^NpkUnb;cXUy910XYn@adCrg8?l_x zx3;EO;knx2NrHLT{6UjU-7K&-1v^mQHW%MoBY-hU)S2JTrCDWAC1VM4?MSK@(+C)J;-+eXMX(BsTY=&(5DZevVV!;6rx|WQdMfF zqXHZu6c*^fqHTuAphPq~$d*SXN&k;;-Nc9#G?#V1xMUh0(#rhaJOUqkQQF7ti+jC=L+&ipT@0#i zm+q|)5gEr+68LGmr67;@Q=-LBbZLT(tCJ^eI)QNtCOG$joo)|K;miLDek44YQc^&>c8NO3~JGsrC7e(7jj1BT|x?53x`L?{RE9+9Bh00J*{9X zHlX;`Tio}H5k!)jPgTh0v_GEy_$5$;F{Pm}fw^398tOH!E^wFg8=%jP1ynh$_{uTuq!85qUZwQp5S2XNwv?{1tUF+W3ac~vc zuFDdfUgbbYYgt*k!0{KB8oTP1`qFn=#L-ld32FTSp(M~uj7g~RR)ycM2Ark_=D#Z4 zLM@gC-#kdjiE%$t%k;Xa4D(pPYHc`^cN=ZrEa!2PK$fAYH^UvgITMl*Jb66Y*(nYW zui%OIEoRr-2P!ikVM9-%rp%yuCrC>ChWohbh3ioDJq^RYjghd3h=V^39*+f_!hl_^ z`C;D-ae!rp?7-5^#2D0W941BMz|rPZjKPjA{k@pOArCenq6(k&s`b2EzM5*e2v9%= z_G0-bQ6=|R%=u`~A{^jiRWIpBOY@&R-UAn(tKNrUzs#s9K3Sr7a31ggDQMlKBj~a# z%}}>4XtRqRSR8C#VSa4_altg8$v&Zd+o*;0#WlaPlf#!G+QycCFN^XPv5h^cL!Yy~ zd9ET6plAI)6*={M9vwpFbzGSV#G`gkPkl9;bVHRG{0le3;A(TQx5@E$D)x_5Mq)c` zSaQ0xy2Zx9bG|0G4kkTlDEI zX;Cl?E(TC6teq^L^_tyon(PMj8qPxHGH6muyr1fS+b}V8~y;6l~?43_0 z+Qb+rpf$t3f8T!ZA2?^re#LU&E3|}90MdF0j;-35pCrbPkka8VE1)7S_2QF59HK@C_OaiBSi#PwT&%gkA6v^eU0+|&nKYFyP6ovDm=0WinQ-{ z8vJ@z@sI`LV2MQ{p$slzcC6jjn4tR?#}92=iASLcf&SS=PBt=ujsBR&OYGySLZ{WP zhB}cnoyb^JQqt{OZ>Frv32|}(x|95f1`Hb>C`2En(4iJ#2+my^JQ1NH4pmlC{V-w> z5gy(*iz10vOmJ@q^mbo)B{l}ARoEJW1}E-?_(mr4?eS&i;bGw2^fZcpsI`OaU8n!p zW5nm@N1OgS5(Kw8#w`ShYq`EO`{<@s@{TCmg4fy*-KoBbOZiQr5<~0!03KY3rzA$t zM}U)$_#ER&%Tc*&H(Yj4PM>0@gF+1Z?!A)}lg9#>)zwa>%3~$%?RdT-AWv+87W{Lm z{ePnDE-!0`n@#U8J7uNu%ltFAiECB#^gVI*BC+nck1C(+Y#bZZQS`(Ow#^kthR>6W znN9BBC%8&(q~0sKc_;M92g!xM0)466;B|b-kcIKh>WyJ|_yT;-Q99a>L%--~I>O#v zI-09Cs6xvaY!%oVc2S^5{3HMV{m4NMeM~IyBsTs&r2%p{3$@2GlfhYt!^3zAfFJzw z@<0~dE5Dcc%qVViQyH2UA?N$xLqVN#w?`D>6Z(G$1|T3-N~`2w>C^mQU1l0J_u4*u zMCkuEI48)3fO^DM>)N&i^HUMn!oJoy<}J)-xb>yh(QuAy6NO;|?d%Mk?|OF<*|FKN zf4&*|gWsoqkzE6h{M>^7Bc*N-0!!RpKK#n@Yy9rN)W4>QgC)9o z9|khwLRK)G__lBF|0pUq(dsx_+|rT>yef*cl@!Y=R5V`0W8<`K(<1im|1>n@JN?%m zeiyr~FV;jE7BM%|zJ^nBVPTLmn?)(~8JM5bf0tB0*&-v?jgNgY)dD>O+s4W9xjRH~ zDNhY}r^>RtLmO%)@M0xGlUE)G>jemTF^_pc@Bje0rFq=AL|Z^BHbaX&;p=-SK8nL=kmD;4?j<;6?q zgQJG2hyDMwWIErFidLZDVEW$^zbAt!Q6}sysuNghY5Ik4l&5KKM3!yZ&-baIPpN_ z=N$!hiJCmAgHK~`I8Ac@b(*hTzku>h0hU$6#)Y8!oKO~SIyoCw(W$_Kt2eV){jqkZ z*VPyUBj!&wk9!QnKEwCFQeor&=@Z%7@I8;O6mD1{V{ki>d{U>0R&!|ERfRtPo$Mvg zO%~NH?FyA$;F(W+d^{GPkHV%7DGe%+MZ{%%&8CX=NJ{fWUs}Jze}*)KsTGgb!v4qW zm%&bqOWlVbGanv^iuYwMT7SjDDjMP?B}W10 zQ+j5G{pKtUKVEil$-8Plrs}}4HNXyfQ9EAyu;(HhfLpzu4UC!b(1|}w4+s9>%k-o*v z{?H?-DKz-{+PU9cxV_eR&vMSk>WPV&xu{#8mR}7+K5=X$WHDW~6*T-Z2VF zT({^|P|VU~mC}hpMdq1ySu{-y6T|k__@+Igs2m$wIsH3tZlWldO1S!1`x}Kj-stsJ zal^IaX;B>k%T?-j*-t9*1!*;Sg12ub10gnB{h*tB>~9Ar%WBIgS0Cp}J@Nh-=Q)Z# z``oAT@t{_8LGNNs|Eq9YVYhy_$kT#3-#TlZzc*|cRdcrM;Em>dNTVw4(eGzHMdERP 
zx0*IuhO4sT-``G*3dA^mU*21y-4MDijyGx&^EEGAw@B%JWY1p+0>Z(8gR{21S^JG8 zI0*T@SdKh0zIy(oL&GS!T`lSE;ix+Xp#+1UU!>siGX}Tg^nKhLq?2I!Ow01!bSZaf zlnR`Zdh%{^XJNNZos7C2$H?>Z%<3K{m&xby4vsoab))DWwrm7ILS4!)3bX@5wfOk8 z3*%j#$$s_6{=Y@3-w~wt6~4px8Gbi*x#z?V=++#G*mU%C^DpniDy06Zt4wmXTU6jw zrZh}8*WV8v*Vx{k0EEA=IsX(Toeq`;KRg~HtsCd6=PO@CEDIW?ZEsrA2lNv?e&>c{ zIZW}+=lv5b6#sZM>L_o|&4Nf?KIO~99}AajQp$^~E=9VUj3NVE43*Qqtyd>vwR`P3 zm8rB6WAFb%X0$=^kVIl8e6ME9<~t(jjo&D=l)^`idtXwGne~WuyJkcoM2Gk%@DPrA z3<8f=c%JNK;7Yu9RDUWQni-^09o8zF>|GRX^*nSv_vtG|#jIby9;lahGtLvG6R*JO zqGUUufV}lHcZw?wy#SAk6Ue;$rtXI3ONzAq>gY;e?w)%CWw;fV@0!x5%w9GLSBknU zDSTo~rDH_Vr{rPJ^DKk%ffkd?gMW8!znk<0ECXQ>2|WID(tsW-3w37)@##4Aq{(IO zf(ml30({5Fsw-oWGz&7T%N+;3SQBKW%{WMYY^YrDIT)-(`=zDlVT4Gy6xE?@MoAf7 zt$NXrQcAtwr~P*AxmG#zywSe_lV{DiBnXg@p7R18ZXPjZwccg>HFMif3?U?D5&OP* z@2<>3)c05(gDGieX&5{9Li>Za**~)1+nZyEkQ!fnCU;p(p7m$P?q8D>#`ga3?2cje zGWT*SnR8?UU#!D;g=)QF^<=&mLq zC+s*jPkdv#J4b9nYo-VoDn+JL%0mk^>jqy2u{rZxe>}Nz;wS!|T z48qo{zid#(|5GnijntJz!WIj{!NGg;@K~b}v!wRXBq*S;#ZaXpXgZhY1+Tb7MTGa7 z(W#$vH@l40rteI_d@Bkf+-{~~N;m8gurh>SX7@g&-EJFR`%2W8fnaGRX5y^=oDsSZ3_X%j5Jdu0o%)-Kl5G`o_lSlV#pADY#>m1!Os_Mn z4;(hQ8S*fdsB_(XerFQy&dr@9mT zVe>>j$^C)X`$tro&PeE-Vcu2xx|vcuPFQU@wHWPhy&E-R5-m4(J(H{dz8_NIChIvn z>)47Y+p{vXn3&zq|+7P3QW0}Cvde*OpYpSo7FuG0JZLR(U3MFt#S0K=<$or~aP4)ED7KLgJvucE!k5eh?_BJ4EP!RYu=EdVM@Qrl#=|Kav41>X4~*)5 zu#i6VIZ8n^ollOwv#=5r!YhR>W{cHn#lh z94!wgf2bVvn3qp3)W#4Jw`-p4x_mLGpvLsxaJHPsAz9jh${Qa=goyvS? zf_Y1dGMq60E}dwgihYYG_GhXJOx^r#_jt|njn_^^v&iLT7yYeG>>3t@D(k5klm1G1 zxmnyf9C`Hj9eLvqqyI}VZ0t+hwFf0lb|$(d9DfsR(9d25Tg zrnV+?z63K3*O$4)bx%Swf*-}p#pPN!i9wPH9|>g`6*Iuxz{Y?wcC4QDBl+{S78%mO zNZNb8RN{^QPI^EFYWL2C6dah6@7|4k^&RZET+;nWk;bmC+CODmDE~NG#z4VH#6EOx ze6+*nb#L?0oHt~}e7nAGi$fRjF7x0cNJ#-WTJ)?LeSus^|K^}wN)QDWC_x?`c(0$b z7U=$_KYdsJQS$o_B?d!46Xo0>vm>aWpws>M*MLnmTHGfJU#}$$0uTYEGca7W8Bnbu zr1np|D0lgT!ovr%FJ#fKJ2C3SJ{bZPy14OxJY8GDe2N|DHPU>}qK#L_=D4GhkI_g1 z@sg7>=3MFh1MjK|p1gEnYkV9K6CihxiShUNn=1ue3JmD?h3nVpgw?2n{*}IcDwi2# zoclpiC%wn|{e{zG$fbiMz5HLOKl3nHu#OiaT0HiPHyf{oi60{&|08B!KOCCFEeus6%$RlOY_%dDk)jYeTKJTq?@aDn z@+@b}0M`*QyY_aY#+#~%@}bBwmVx(?!3_}_X$w-c3`Q5NuQC2|bYy>3%=Q^)u>r?ep&M5hwmPMvIvR{jVi6;Zk^-EKsk!@$g|(UAkFAJ(vq$oG zAThmh3s<1taTU`#j|H&NzR9{zJpfrTPGmI1y| zhqXJt<3puJ6w>dubqMAC@J_H;5_-P$EVIAlM(ZhMFLvcDoN8>;C!#e*(v`BB`n=Ml z-}$u9WkP*xCvbGk`M7U&&r>MpU0DZHj8B~-6ZC{xxLS7`dx((~DbH43+$ZRkHl z#WbdSC&>73&MB1WgW)vM#?{m_b>V1#b_}YcWB6vAz@}?(#jQV-k=++h4I_e5>dXgy z{(3**_M?ofv<_7;u;FTm7^ujge1SwACwgXkeHZis*yd!S!~XE~-yZq>7RtanvwnB> zpVnELHt#>{Gy!?Op!#H^sR;>8#{xHx5eBYEgs8&N^FV6sg;r)qj!hzbwBIh}w>w35 zp&mQ11gVY=t%AA@tP?eY%|iQo{}{6-h?OXLTWCcRq47EG=sFH7N2kYLU{;0?j;_XLGz`HzC8!PPre8Rj^uib}EBT9QehI|H-F&tb4+B6DzV!$9!| zLD>1G4`hV2l{|A{Yrr`A%x5_5fO|M)`{_#;RoTz8D^C)oXYlsMORcApYO(9Du3P}> z@PA!Uc*#}FAX)$5=GfNAYuggzKZ|Q66?cYj6sZEZqR&Hb4US{sy?sAqf?2GGgH|m7 z@#1zS%Fh0=Z~?&<5{xPBe+1nAXg;HYui!`bclg9^RP1NMJ;V&e@`ZM2z;f(1wK&Gx z2sO+0+Nd)@Q4#NtS4YwirPU1eH?r!+6TH~&gb=9BXqen%#fH{rY$;b>?KZwC@BzJ| zG1Lgn?CLtxj#JC4l9IKzABN>I~$;p-G&PUBN1DI`lTPTYh^@z~U`DT%~ zSCX?v*+tmBGCjFd`yQhgZc0vxj!yQ8#Se~+eM-QkXsxiGiWeed73@vdbz@Nhr>JxW zjFlCKjA&XThSGano z=3`p-(RBm9IKIlXXV;0xi_d;5O?Nimt zM*e2e!hbr)@ZQh*PF}vX3>I0*U%n|+S@ZG!zuK<+AL_U3k9|Ux5JHxQrb$s>*<~4oC^52ch44l8 zkWmz4Y*{kOzLaG|_L>w)DffIcBufZoZ-^m;%5&zqpTFRF-S@oamwCNrK672?x~_Ab z^FHUiPi#yrLGXgpC;dF?O1jcf?r0j8;$-~LscU)~1vo)^k4N3r-54eh4KgmuGaYb( zTM=_w6O_ruK7#*f<;CjCyHvC-=ACG=Sl+#xHZ`wW{Mq#prkTZ6m?m|U0xuiUqv6ka z3!5^$JYygBMO1xVrkL&ig%3jPENN4E!f%XZuX=m1JHmj_)ZQrzM%(g`Yh$u~iQ7u+ zbKo{Bu`|SCJ9GK|S}^Cdwzfu#aGNOX3$!hBcwQ$Ii_-VITC_W+`5Uj4)VDmqmy%DX 
zAI}-Fx~6?+NY%pJ{9LIey4VR*;yVgFlW!aWi4#5b%j@SM&X;@7Wj=^%eoUck<-5*N zj+068WFPS7;LGm5Q3t#p#`!jL(>4V;S%i&Z%YBr24{;|o`(FCBoLjC>da$J`%iywm zTAcaLb1qp3xcfEBtatH1{_!|^i>e;{1Ffm0mC^0do8B!`a1J}P?!BifymJlzGe~cm z9%6*jyPISb8X8u!(C!75C%m06LDDULpPy}ya`-OKG`^^qQa$;HD74}dBW`t7X}ZR- ztXsC(`gP6;UhX)=sd3{B+KjTMYJ$OO>4NXso}rr?r4>(dmcu|(^F+vsaQGPMYZe%f zYUrN9s;3Fr2ptu{{mT=F5izIz^Rhv4n8Yy@R#s@8WaS!sP7#^ZJQ*?QImf}0LG7VF z-oRDc%vUsrg3r+boVe06!QwMkdqP4&pcn&GeW0^wZaGxI!K@BR5jt@g4(B1+L4_l) zS=ufJZlKf<2!2yjLw%ga^xq9JcaLTp`Ns~*eO4}(;N*^Hl^k`6JbOF-Gki#GU{4y7q5^?T&Q_XN&{ELQI<~uA+!b z6%=>}_}KSnJHu>`rpO?BRrbP=t{3JdE$;xa&s+h>B|@F=bO{WQynW9zcnsv&xR!1{Us>@1d=Idu$3q#VXt~m*hKTk*90-I`C;Y)4L*mj4xzuXNMIJn$NaPR}u!_ zly}bRhfuaBGLwCiGB(kt}U(N4K5sq@7 zuh0S#IJ))a?i*gcssx#wRtqZpp#p3&HBdS~faP*<=-CQOn|j&ee=nOG6J#k_lyBJV zU^LJ$`9_VUdB5=aYK1{e5~vW;%wBw=k$@4dM-V})xR~LubxblLX!j|gH^0Ut%ddC^ z{`eV$vk-C@18oB}&JV1QcNCkJG&esOsjuY+dd9U6Ofu20<3)#jDWOKlt9WWrOm?FYN?oBH1t><53%$P+i$ zO*7eLYaqMWe=CWu8jEiOuOk*B zeZ3SMx~375g|eNuS_L>^5XImSb`5FXiuxC)j<(bc@t4Dk3X$6-G3*Y>(-E zGzdR)=B-csLuN&cnC;%f3WtNH=P%%34{=Wg+Eq(__y#!cL#sU_ z4+FSwu)uw;3{A{?z6Vq?jtueqzK`^}BCC*fv-VJvKa%m&%=^GEMi8!jRb&`=B4vsQ zme(U1{7tmNWw!e680<8=<`qT0z)+NO-yW^T80zZ{G`Qs|1XCo9&zx~@J+8{nJNXR1 zBh!5nD-GUuSFH#5Z=g#H*XYp`1gRVM2FXMUMT7TF3LMR+Y1Xdc7i~_FqZgutgTMCo z(D9!rzV_5wM6HSwbE&M3fZJZg(1gP*%)>Fg7MYTgRvJ1lKqhgR3Ih>Rx|sr8@ad)% zpnRatvlL^q|0dmi>&YzHA}hMUuR}1`t7%$-rp9kBw7pkN58t0xTKAM&hpkijwxVhJ zsbl0H@87!z1k|?uHn6c-9C?wq7C~f5_okpslj59C5=n!DAu$D0>UCd>?T%+#R^gOY zPp6T6jP?80WDs-OOZn!gV)}NjQ+SIa57F>zLpxtHd19eLNN30>QeRygI(nN?c&RP2 zZz~3@t8hAEUMI!H5?Jgn>~EfZit_gxypFj0ppxUC1G|#C}l>6k?tD&4$Vba z)1L?Q$s+fE2#fQlXGRDv-_R6Sw*ec4)>&h2RJ^4^A%y=HNcJ)QEkyN%j<_Yhxq)9jub5_jBV5|QelCT-@gtdd#W9@k+~3I| z9dU@XDaBmYhS}4K!W^Ic_QM6v^eE*)s zn=~B;xXFOVhT8LC_Pz4>ZVX!Mnz4-z0tjEypZLWVlh+iGXH4I&u9}%R}a`h{$`eND~>V%A#F@XEG@0#d+U(lad zU*j?+m7Kn#Hr}cRC;u{(PT??68z_1UPIZQP1D=ODy&bi7MJ&k^=yaM2=jcFm=x={v z`E|sNOEG@DfgxJ8-)YMu!Ts9^!xWdk-EG_%b<0_rok2a*HE_aeIDs>n34hSd6 zhA)l8&lTbKvpKdXF42CT9s?tDp@U11M70cfF1QW8vNA7N$?062lSP^wn^$yi4UhA= zf5BQwt{PBT*<}yNA4Z<#xzQZ?oKba6m-kESabOgQg%F%xrYCQ}L>frHGKn=zO6r{~ z>|lpCh%60S)(rvnD_vQj%j-kcd(z`4^Zio)sHrD!j zK?AW@f3}B8q}1yo%=+SXX{7}65HiL}n|Npik0aZFMoURcyA&NQ`KestIU_*=a3sKa z&aZwDIg9)>mYg`40fRb72$IM%D7k4>b(`$Nnm96y%Ic0{TMSjWX5R+I(RWgz%w7{>5x7q_K=fiFIooDl{CQS$I zSWu;5M#68R%2s>&XQw&a?j-q?3M{Khic4S@R@{e4HWLyK^dMlowB#pzdv%bILrG0V znW9CK0U4&Ru7hiItfnSn9TYm%6AtV0DTn&owG!*UL-Oat1lUbpyb#dgA8wEzS*!5r z)3sg6oo8r|8yqJvx=n>wx6~uK=f5p<{0R#mPVhce$7*6I5x2a9&Y`X|??}y-y{4IN zV#fG(cQATznyDUgLEWRn-p|rRT_RXA6)$fV>d;pm97*w5h#Fbg)L$N|mA7G`;d3;X zXvY{N=QZ?h=2~FZMF@tk z^?vSWqpH}taA?~qr_3=fbu50lRC4psnLC#h3g2E|3L6^WcV_5ufoUVymkDm_EQu7- zQhy}d?7~c70AFWWS!J#lw9s2pHuKjsBM(lkMEm>2At+AY8_vg5Ea6o2?qzFiY@Gl8 z&7pb4w&<;d(tEA6TVzl7<2jvGC9?NgQvDsDd>x_J-a?tKt1213Mwl2jVSN4l9B3!o z>cXukmS`C@H9{5^t1Q}7U$w6O=t;fl!(uX~tD;<4`(&^Ydkz#Dny1|}E#KR1CpH-$X)y_H<1SFko!@?VGO{+O6rGL}srA1Mkz0xr zAIY<06aV0YkKcHM=Vx1Uq8NghsEVH0bkBwAr5_?|Iy$>!Ap!_U{4yY+ep1EyE*8Db zqfLEWQ4WSu(6Cdy9*y>)n?C|wrJ3>-tT~k(EPft33k$-n(~p4M2mvGW zl+-Uk-L7mQv=lT-GFP~@XwOF%=vxishaWntbmO>7QSRYhoWyY#B+t3ULB7zB)e)u0 ziLjvQcY^uqL99uoZq&gV*q%27#QY`aOX~M{?TY6TB z!-$C=?0WYO>EPhV=%PP9BX|EcI!U|x?7x3z0qntrf8HpiQ)hZd204+ee*Lc($0U=s zw7r5gz?OqKO>VGpK`p~8Wv9<=v4%~^P1ve^Mw^R|W8!TgIc;a-d;E-gow-mNT@H!; z0d?t6%q|LH`}D-bT4-JT^Z(GdUAf~$rJ#;pf4uP(h zlanhwOO#R}CMG5_ Date: Wed, 28 Jan 2015 13:20:04 +0300 Subject: [PATCH 94/98] Shape module tests refactored - common operations moved to separate class - debug console messages removed - test results are stored in memory instead of file --- 
 modules/shape/test/test_emdl1.cpp     | 263 -----------------
 modules/shape/test/test_hausdorff.cpp | 280 ------------------
 modules/shape/test/test_precomp.cpp   |   1 -
 modules/shape/test/test_precomp.hpp   |   2 -
 modules/shape/test/test_shape.cpp     | 406 ++++++++++++++------------
 5 files changed, 221 insertions(+), 731 deletions(-)
 delete mode 100644 modules/shape/test/test_emdl1.cpp
 delete mode 100644 modules/shape/test/test_hausdorff.cpp
 delete mode 100644 modules/shape/test/test_precomp.cpp

diff --git a/modules/shape/test/test_emdl1.cpp b/modules/shape/test/test_emdl1.cpp
deleted file mode 100644
index e52351bcf..000000000
--- a/modules/shape/test/test_emdl1.cpp
+++ /dev/null
@@ -1,263 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-// -//M*/ - -#include "test_precomp.hpp" - -using namespace cv; -using namespace std; - -const int angularBins=12; -const int radialBins=4; -const float minRad=0.2f; -const float maxRad=2; -const int NSN=5;//10;//20; //number of shapes per class -const int NP=100; //number of points sympliying the contour -const float CURRENT_MAX_ACCUR=95; //98% and 99% reached in several tests, 95 is fixed as minimum boundary - -class CV_ShapeEMDTest : public cvtest::BaseTest -{ -public: - CV_ShapeEMDTest(); - ~CV_ShapeEMDTest(); -protected: - void run(int); - -private: - void mpegTest(); - void listShapeNames(vector &listHeaders); - vector convertContourType(const Mat &, int n=0 ); - float computeShapeDistance(vector & queryNormal, - vector & queryFlipped1, - vector & queryFlipped2, - vector& testq); - void displayMPEGResults(); -}; - -CV_ShapeEMDTest::CV_ShapeEMDTest() -{ -} -CV_ShapeEMDTest::~CV_ShapeEMDTest() -{ -} - -vector CV_ShapeEMDTest::convertContourType(const Mat& currentQuery, int n) -{ - vector > _contoursQuery; - vector contoursQuery; - findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE); - for (size_t border=0; border<_contoursQuery.size(); border++) - { - for (size_t p=0; p<_contoursQuery[border].size(); p++) - { - contoursQuery.push_back(Point2f((float)_contoursQuery[border][p].x, - (float)_contoursQuery[border][p].y)); - } - } - - // In case actual number of points is less than n - int dum=0; - for (int add=(int)contoursQuery.size()-1; add cont; - for (int i=0; i &listHeaders) -{ - listHeaders.push_back("apple"); //ok - listHeaders.push_back("children"); // ok - listHeaders.push_back("device7"); // ok - listHeaders.push_back("Heart"); // ok - listHeaders.push_back("teddy"); // ok -} -float CV_ShapeEMDTest::computeShapeDistance(vector & query1, vector & query2, - vector & query3, vector & testq) -{ - //waitKey(0); - Ptr mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); - //Ptr cost = createNormHistogramCostExtractor(cv::DIST_L1); - //Ptr cost = createChiHistogramCostExtractor(30,0.15); - //Ptr cost = createEMDHistogramCostExtractor(); - // Ptr cost = createEMDL1HistogramCostExtractor(); - mysc->setIterations(1); //(3) - mysc->setCostExtractor( createEMDL1HistogramCostExtractor() ); - //mysc->setTransformAlgorithm(createAffineTransformer(true)); - mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); - //mysc->setImageAppearanceWeight(1.6); - //mysc->setImageAppearanceWeight(0.0); - //mysc->setImages(im1,imtest); - return ( std::min( mysc->computeDistance(query1, testq), - std::min(mysc->computeDistance(query2, testq), mysc->computeDistance(query3, testq) ))); -} - -void CV_ShapeEMDTest::mpegTest() -{ - string baseTestFolder="shape/mpeg_test/"; - string path = cvtest::TS::ptr()->get_data_path() + baseTestFolder; - vector namesHeaders; - listShapeNames(namesHeaders); - - // distance matrix // - Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F); - - // query contours (normal v flipped, h flipped) and testing contour // - vector contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; - - // reading query and computing its properties // - int counter=0; - const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size(); - for (size_t n=0; n origContour; - contoursQuery1=convertContourType(currentQuery, NP); - origContour=contoursQuery1; - contoursQuery2=convertContourType(flippedHQuery, NP); - contoursQuery3=convertContourType(flippedVQuery, NP); - - // 
compare with all the rest of the images: testing // - for (size_t nt=0; nt(NSN*(int)n+i-1, - NSN*(int)nt+it-1)=0; - continue; - } - // read testing image // - stringstream thetestpathandname; - thetestpathandname<(NSN*(int)n+i-1, NSN*(int)nt+it-1)= - computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting); - std::cout<(NSN*(int)n+i-1, NSN*(int)nt+it-1)<get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::WRITE); - fs << "distanceMat" << distanceMat; -} - -const int FIRST_MANY=2*NSN; -void CV_ShapeEMDTest::displayMPEGResults() -{ - string baseTestFolder="shape/mpeg_test/"; - Mat distanceMat; - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::READ); - vector namesHeaders; - listShapeNames(namesHeaders); - - // Read generated MAT // - fs["distanceMat"]>>distanceMat; - - int corrects=0; - int divi=0; - for (int row=0; row(row,col)>distanceMat.at(row,i)) - { - nsmall++; - } - } - if (nsmall<=FIRST_MANY) - { - corrects++; - } - } - } - float porc = 100*float(corrects)/(NSN*distanceMat.rows); - std::cout<<"%="<= CURRENT_MAX_ACCUR) - ts->set_failed_test_info(cvtest::TS::OK); - else - ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); - -} - -void CV_ShapeEMDTest::run( int /*start_from*/ ) -{ - mpegTest(); - displayMPEGResults(); -} - -TEST(ShapeEMD_SCD, regression) { CV_ShapeEMDTest test; test.safe_run(); } diff --git a/modules/shape/test/test_hausdorff.cpp b/modules/shape/test/test_hausdorff.cpp deleted file mode 100644 index ec33436f0..000000000 --- a/modules/shape/test/test_hausdorff.cpp +++ /dev/null @@ -1,280 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" -#include - -using namespace cv; -using namespace std; - -const int NSN=5;//10;//20; //number of shapes per class -const float CURRENT_MAX_ACCUR=85; //90% and 91% reached in several tests, 85 is fixed as minimum boundary - -class CV_HaussTest : public cvtest::BaseTest -{ -public: - CV_HaussTest(); - ~CV_HaussTest(); -protected: - void run(int); -private: - float computeShapeDistance(vector &query1, vector &query2, - vector &query3, vector &testq); - vector convertContourType(const Mat& currentQuery, int n=180); - vector normalizeContour(const vector & contour); - void listShapeNames( vector &listHeaders); - void mpegTest(); - void displayMPEGResults(); -}; - -CV_HaussTest::CV_HaussTest() -{ -} -CV_HaussTest::~CV_HaussTest() -{ -} - -vector CV_HaussTest::normalizeContour(const vector &contour) -{ - vector output(contour.size()); - Mat disMat((int)contour.size(),(int)contour.size(),CV_32F); - Point2f meanpt(0,0); - float meanVal=1; - - for (int ii=0, end1 = (int)contour.size(); ii(ii,jj)=0; - else - { - disMat.at(ii,jj)= - float(fabs(double(contour[ii].x*contour[jj].x)))+float(fabs(double(contour[ii].y*contour[jj].y))); - } - } - meanpt.x+=contour[ii].x; - meanpt.y+=contour[ii].y; - } - meanpt.x/=contour.size(); - meanpt.y/=contour.size(); - meanVal=float(cv::mean(disMat)[0]); - for (size_t ii=0; ii &listHeaders) -{ - listHeaders.push_back("apple"); //ok - listHeaders.push_back("children"); // ok - listHeaders.push_back("device7"); // ok - listHeaders.push_back("Heart"); // ok - listHeaders.push_back("teddy"); // ok -} - - -vector CV_HaussTest::convertContourType(const Mat& currentQuery, int n) -{ - vector > _contoursQuery; - vector contoursQuery; - findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE); - for (size_t border=0; border<_contoursQuery.size(); border++) - { - for (size_t p=0; p<_contoursQuery[border].size(); p++) - { - contoursQuery.push_back(_contoursQuery[border][p]); - } - } - - // In case actual number of points is less than n - for (int add=(int)contoursQuery.size()-1; add cont; - for (int i=0; i& query1, vector & query2, - vector & query3, vector & testq) -{ - Ptr haus = createHausdorffDistanceExtractor(); - return std::min(haus->computeDistance(query1,testq), std::min(haus->computeDistance(query2,testq), - haus->computeDistance(query3,testq))); -} - -void CV_HaussTest::mpegTest() -{ - string baseTestFolder="shape/mpeg_test/"; - string path = cvtest::TS::ptr()->get_data_path() + baseTestFolder; - vector namesHeaders; - listShapeNames(namesHeaders); - - // distance matrix // - Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F); - - // query contours (normal v flipped, h flipped) and testing contour // - vector contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; - - // reading query and computing its properties // - int counter=0; - const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size(); - for (size_t n=0; 
n origContour; - contoursQuery1=convertContourType(currentQuery); - origContour=contoursQuery1; - contoursQuery2=convertContourType(flippedHQuery); - contoursQuery3=convertContourType(flippedVQuery); - - // compare with all the rest of the images: testing // - for (size_t nt=0; nt(NSN*(int)n+i-1, - NSN*(int)nt+it-1)=0; - continue; - } - // read testing image // - stringstream thetestpathandname; - thetestpathandname<(NSN*(int)n+i-1, NSN*(int)nt+it-1)= - computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting); - std::cout<(NSN*(int)n+i-1, NSN*(int)nt+it-1)<get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::WRITE); - fs << "distanceMat" << distanceMat; -} - -const int FIRST_MANY=2*NSN; -void CV_HaussTest::displayMPEGResults() -{ - string baseTestFolder="shape/mpeg_test/"; - Mat distanceMat; - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::READ); - vector namesHeaders; - listShapeNames(namesHeaders); - - // Read generated MAT // - fs["distanceMat"]>>distanceMat; - - int corrects=0; - int divi=0; - for (int row=0; row(row,col)>distanceMat.at(row,i)) - { - nsmall++; - } - } - if (nsmall<=FIRST_MANY) - { - corrects++; - } - } - } - float porc = 100*float(corrects)/(NSN*distanceMat.rows); - std::cout<<"%="<= CURRENT_MAX_ACCUR) - ts->set_failed_test_info(cvtest::TS::OK); - else - ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); - -} - - -void CV_HaussTest::run(int /* */) -{ - mpegTest(); - displayMPEGResults(); - ts->set_failed_test_info(cvtest::TS::OK); -} - -TEST(Hauss, regression) { CV_HaussTest test; test.safe_run(); } diff --git a/modules/shape/test/test_precomp.cpp b/modules/shape/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3..000000000 --- a/modules/shape/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/shape/test/test_precomp.hpp b/modules/shape/test/test_precomp.hpp index 819d711e8..af10e8008 100644 --- a/modules/shape/test/test_precomp.hpp +++ b/modules/shape/test/test_precomp.hpp @@ -16,6 +16,4 @@ #include "opencv2/imgcodecs.hpp" #include "opencv2/shape.hpp" -#include "opencv2/opencv_modules.hpp" - #endif diff --git a/modules/shape/test/test_shape.cpp b/modules/shape/test/test_shape.cpp index 04e89fe6b..6a84f2bc4 100644 --- a/modules/shape/test/test_shape.cpp +++ b/modules/shape/test/test_shape.cpp @@ -44,222 +44,258 @@ using namespace cv; using namespace std; -const int angularBins=12; -const int radialBins=4; -const float minRad=0.2f; -const float maxRad=2; -const int NSN=5;//10;//20; //number of shapes per class -const int NP=120; //number of points sympliying the contour -const float CURRENT_MAX_ACCUR=95; //99% and 100% reached in several tests, 95 is fixed as minimum boundary - -class CV_ShapeTest : public cvtest::BaseTest +template +class ShapeBaseTest : public cvtest::BaseTest { public: - CV_ShapeTest(); - ~CV_ShapeTest(); -protected: - void run(int); - -private: - void mpegTest(); - void listShapeNames(vector &listHeaders); - vector convertContourType(const Mat &, int n=0 ); - float computeShapeDistance(vector & queryNormal, - vector & queryFlipped1, - vector & queryFlipped2, - vector& testq); - void displayMPEGResults(); -}; - -CV_ShapeTest::CV_ShapeTest() -{ -} -CV_ShapeTest::~CV_ShapeTest() -{ -} - -vector CV_ShapeTest::convertContourType(const Mat& currentQuery, int n) -{ - vector > _contoursQuery; - vector contoursQuery; - findContours(currentQuery, _contoursQuery, RETR_LIST, 
CHAIN_APPROX_NONE); - for (size_t border=0; border<_contoursQuery.size(); border++) + typedef Point_ PointType; + ShapeBaseTest(int _NSN, int _NP, float _CURRENT_MAX_ACCUR) + : NSN(_NSN), NP(_NP), CURRENT_MAX_ACCUR(_CURRENT_MAX_ACCUR) { - for (size_t p=0; p<_contoursQuery[border].size(); p++) + // generate file list + vector shapeNames; + shapeNames.push_back("apple"); //ok + shapeNames.push_back("children"); // ok + shapeNames.push_back("device7"); // ok + shapeNames.push_back("Heart"); // ok + shapeNames.push_back("teddy"); // ok + for (vector::const_iterator i = shapeNames.begin(); i != shapeNames.end(); ++i) { - contoursQuery.push_back(Point2f((float)_contoursQuery[border][p].x, - (float)_contoursQuery[border][p].y)); + for (int j = 0; j < NSN; ++j) + { + stringstream filename; + filename << cvtest::TS::ptr()->get_data_path() + << "shape/mpeg_test/" << *i << "-" << j + 1 << ".png"; + filenames.push_back(filename.str()); + } } + // distance matrix + const int totalCount = (int)filenames.size(); + distanceMat = Mat::zeros(totalCount, totalCount, CV_32F); } - // In case actual number of points is less than n - for (int add=(int)contoursQuery.size()-1; add cont; - for (int i=0; i convertContourType(const Mat& currentQuery) const { - cont.push_back(contoursQuery[i]); - } - return cont; -} + vector > _contoursQuery; + findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE); -void CV_ShapeTest::listShapeNames( vector &listHeaders) -{ - listHeaders.push_back("apple"); //ok - listHeaders.push_back("children"); // ok - listHeaders.push_back("device7"); // ok - listHeaders.push_back("Heart"); // ok - listHeaders.push_back("teddy"); // ok -} - -float CV_ShapeTest::computeShapeDistance(vector & query1, vector & query2, - vector & query3, vector & testq) -{ - //waitKey(0); - Ptr mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); - //Ptr cost = createNormHistogramCostExtractor(cv::DIST_L1); - Ptr cost = createChiHistogramCostExtractor(30,0.15f); - //Ptr cost = createEMDHistogramCostExtractor(); - //Ptr cost = createEMDL1HistogramCostExtractor(); - mysc->setIterations(1); - mysc->setCostExtractor( cost ); - //mysc->setTransformAlgorithm(createAffineTransformer(true)); - mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); - //mysc->setImageAppearanceWeight(1.6); - //mysc->setImageAppearanceWeight(0.0); - //mysc->setImages(im1,imtest); - return ( std::min( mysc->computeDistance(query1, testq), - std::min(mysc->computeDistance(query2, testq), mysc->computeDistance(query3, testq) ))); -} - -void CV_ShapeTest::mpegTest() -{ - string baseTestFolder="shape/mpeg_test/"; - string path = cvtest::TS::ptr()->get_data_path() + baseTestFolder; - vector namesHeaders; - listShapeNames(namesHeaders); - - // distance matrix // - Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F); - - // query contours (normal v flipped, h flipped) and testing contour // - vector contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; - - // reading query and computing its properties // - int counter=0; - const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size(); - for (size_t n=0; n contoursQuery; + for (size_t border=0; border<_contoursQuery.size(); border++) { - // read current image // - stringstream thepathandname; - thepathandname< cont; + for (int i=0; i contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; + // reading query and computing its properties + for 
(vector::const_iterator a = filenames.begin(); a != filenames.end(); ++a) + { + // read current image + int aIndex = a - filenames.begin(); + Mat currentQuery = imread(*a, IMREAD_GRAYSCALE); + Mat flippedHQuery, flippedVQuery; flip(currentQuery, flippedHQuery, 0); flip(currentQuery, flippedVQuery, 1); - // compute border of the query and its flipped versions // - vector origContour; - contoursQuery1=convertContourType(currentQuery, NP); - origContour=contoursQuery1; - contoursQuery2=convertContourType(flippedHQuery, NP); - contoursQuery3=convertContourType(flippedVQuery, NP); - - // compare with all the rest of the images: testing // - for (size_t nt=0; nt::const_iterator b = filenames.begin(); b != filenames.end(); ++b) { - for (int it=1; it<=NSN; it++) + int bIndex = b - filenames.begin(); + float distance = 0; + // skip self-comparisson + if (a != b) { - // skip self-comparisson // - counter++; - if (nt==n && it==i) - { - distanceMat.at(NSN*(int)n+i-1, - NSN*(int)nt+it-1)=0; - continue; - } - // read testing image // - stringstream thetestpathandname; - thetestpathandname<(NSN*(int)n+i-1, NSN*(int)nt+it-1)= - computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting); - std::cout<(NSN*(int)n+i-1, NSN*(int)nt+it-1)<(aIndex, bIndex) = distance; } } } - // save distance matrix // - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::WRITE); - fs << "distanceMat" << distanceMat; -} -const int FIRST_MANY=2*NSN; -void CV_ShapeTest::displayMPEGResults() -{ - string baseTestFolder="shape/mpeg_test/"; - Mat distanceMat; - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::READ); - vector namesHeaders; - listShapeNames(namesHeaders); - - // Read generated MAT // - fs["distanceMat"]>>distanceMat; - - int corrects=0; - int divi=0; - for (int row=0; row(row,col)>distanceMat.at(row,i)) + divi+=NSN; + } + for (int col=divi-NSN; col(row,col) > distanceMat.at(row,i)) + { + nsmall++; + } + } + if (nsmall<=FIRST_MANY) + { + corrects++; } } - if (nsmall<=FIRST_MANY) - { - corrects++; - } } + float porc = 100*float(corrects)/(NSN*distanceMat.rows); + std::cout << "Test result: " << porc << "%" << std::endl; + if (porc >= CURRENT_MAX_ACCUR) + ts->set_failed_test_info(cvtest::TS::OK); + else + ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); } - float porc = 100*float(corrects)/(NSN*distanceMat.rows); - std::cout<<"%="<= CURRENT_MAX_ACCUR) - ts->set_failed_test_info(cvtest::TS::OK); - else - ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); - //done -} -void CV_ShapeTest::run( int /*start_from*/ ) +protected: + int NSN; + int NP; + float CURRENT_MAX_ACCUR; + vector filenames; + Mat distanceMat; + compute cmp; +}; + +//------------------------------------------------------------------------ +// Test Shape_SCD.regression +//------------------------------------------------------------------------ + +class computeShapeDistance_Chi { - mpegTest(); - displayMPEGResults(); - ts->set_failed_test_info(cvtest::TS::OK); + Ptr mysc; +public: + computeShapeDistance_Chi() + { + const int angularBins=12; + const int radialBins=4; + const float minRad=0.2f; + const float maxRad=2; + mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); + mysc->setIterations(1); + mysc->setCostExtractor(createChiHistogramCostExtractor(30,0.15f)); + mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); + } + float operator()(vector & 
query1, vector & query2, + vector & query3, vector & testq) + { + return std::min(mysc->computeDistance(query1, testq), + std::min(mysc->computeDistance(query2, testq), + mysc->computeDistance(query3, testq))); + } +}; + +TEST(Shape_SCD, regression) +{ + const int NSN_val=5;//10;//20; //number of shapes per class + const int NP_val=120; //number of points simplifying the contour + const float CURRENT_MAX_ACCUR_val=95; //99% and 100% reached in several tests, 95 is fixed as minimum boundary + ShapeBaseTest test(NSN_val, NP_val, CURRENT_MAX_ACCUR_val); + test.safe_run(); } -TEST(Shape_SCD, regression) { CV_ShapeTest test; test.safe_run(); } +//------------------------------------------------------------------------ +// Test ShapeEMD_SCD.regression +//------------------------------------------------------------------------ + +class computeShapeDistance_EMD +{ + Ptr mysc; +public: + computeShapeDistance_EMD() + { + const int angularBins=12; + const int radialBins=4; + const float minRad=0.2f; + const float maxRad=2; + mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); + mysc->setIterations(1); + mysc->setCostExtractor( createEMDL1HistogramCostExtractor() ); + mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); + } + float operator()(vector & query1, vector & query2, + vector & query3, vector & testq) + { + return std::min(mysc->computeDistance(query1, testq), + std::min(mysc->computeDistance(query2, testq), + mysc->computeDistance(query3, testq))); + } +}; + +TEST(ShapeEMD_SCD, regression) +{ + const int NSN_val=5;//10;//20; //number of shapes per class + const int NP_val=100; //number of points simplifying the contour + const float CURRENT_MAX_ACCUR_val=95; //98% and 99% reached in several tests, 95 is fixed as minimum boundary + ShapeBaseTest test(NSN_val, NP_val, CURRENT_MAX_ACCUR_val); + test.safe_run(); +} + +//------------------------------------------------------------------------ +// Test Hauss.regression +//------------------------------------------------------------------------ + +class computeShapeDistance_Haussdorf +{ + Ptr haus; +public: + computeShapeDistance_Haussdorf() + { + haus = createHausdorffDistanceExtractor(); + } + float operator()(vector &query1, vector &query2, + vector &query3, vector &testq) + { + return std::min(haus->computeDistance(query1,testq), + std::min(haus->computeDistance(query2,testq), + haus->computeDistance(query3,testq))); + } +}; + +TEST(Hauss, regression) +{ + const int NSN_val=5;//10;//20; //number of shapes per class + const int NP_val = 180; //number of points simplifying the contour + const float CURRENT_MAX_ACCUR_val=85; //90% and 91% reached in several tests, 85 is fixed as minimum boundary + ShapeBaseTest test(NSN_val, NP_val, CURRENT_MAX_ACCUR_val); + test.safe_run(); +} From cf0a29ce4dda5a49d5a1732db7b7d3519324d158 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Wed, 28 Jan 2015 18:11:53 +0300 Subject: [PATCH 95/98] Fixed win64 compile warning --- modules/shape/test/test_shape.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shape/test/test_shape.cpp b/modules/shape/test/test_shape.cpp index 6a84f2bc4..0601594f0 100644 --- a/modules/shape/test/test_shape.cpp +++ b/modules/shape/test/test_shape.cpp @@ -121,7 +121,7 @@ protected: for (vector::const_iterator a = filenames.begin(); a != filenames.end(); ++a) { // read current image - int aIndex = a - filenames.begin(); + int aIndex = (int)(a - filenames.begin()); Mat currentQuery = imread(*a, 
IMREAD_GRAYSCALE);
             Mat flippedHQuery, flippedVQuery;
             flip(currentQuery, flippedHQuery, 0);
@@ -133,7 +133,7 @@ protected:
             // compare with all the rest of the images: testing
             for (vector<string>::const_iterator b = filenames.begin(); b != filenames.end(); ++b)
             {
-                int bIndex = b - filenames.begin();
+                int bIndex = (int)(b - filenames.begin());
                 float distance = 0;
                 // skip self-comparisson
                 if (a != b)

From f282fd0ebf2aa299a270354f1d0544d4ee83b008 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Thu, 29 Jan 2015 13:16:31 +0300
Subject: [PATCH 96/98] ocl: print missing error message only if OPENCV_OPENCL_RUNTIME is used

---
 modules/core/src/opencl/runtime/opencl_core.cpp | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/modules/core/src/opencl/runtime/opencl_core.cpp b/modules/core/src/opencl/runtime/opencl_core.cpp
index 5dd174709..43f6b13b6 100644
--- a/modules/core/src/opencl/runtime/opencl_core.cpp
+++ b/modules/core/src/opencl/runtime/opencl_core.cpp
@@ -70,7 +70,8 @@ static void* AppleCLGetProcAddress(const char* name)
         handle = dlopen(oclpath, RTLD_LAZY | RTLD_GLOBAL);
         if (handle == NULL)
         {
-            fprintf(stderr, ERROR_MSG_CANT_LOAD);
+            if (envPath)
+                fprintf(stderr, ERROR_MSG_CANT_LOAD);
         }
         else if (dlsym(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL)
         {
@@ -108,7 +109,8 @@ static void* WinGetProcAddress(const char* name)
         handle = LoadLibraryA(path);
         if (!handle)
         {
-            fprintf(stderr, ERROR_MSG_CANT_LOAD);
+            if (envPath)
+                fprintf(stderr, ERROR_MSG_CANT_LOAD);
         }
         else if (GetProcAddress(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL)
         {
@@ -145,7 +147,8 @@ static void* GetProcAddress(const char* name)
         handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL);
         if (handle == NULL)
         {
-            fprintf(stderr, ERROR_MSG_CANT_LOAD);
+            if (envPath)
+                fprintf(stderr, ERROR_MSG_CANT_LOAD);
         }
         else if (dlsym(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL)
         {

From 0d72420480a6584a3fec734909f6f40267a556c2 Mon Sep 17 00:00:00 2001
From: sergei
Date: Fri, 30 Jan 2015 11:57:35 +0300
Subject: [PATCH 97/98] HAVE_TEGRA_OPTIMIZATION was fixed

---
 modules/imgproc/src/corner.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp
index 358cd5802..2e9400409 100644
--- a/modules/imgproc/src/corner.cpp
+++ b/modules/imgproc/src/corner.cpp
@@ -271,7 +271,8 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size,
 #ifdef HAVE_TEGRA_OPTIMIZATION
     if (tegra::cornerEigenValsVecs(src, eigenv, block_size, aperture_size, op_type, k, borderType))
         return;
-#elif CV_SSE2
+#endif
+#if CV_SSE2
     bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
 #endif

From 9944f46b47f17bd512c2c48fa537b8b1253e6e24 Mon Sep 17 00:00:00 2001
From: StevenPuttemans
Date: Sun, 1 Feb 2015 15:08:43 +0100
Subject: [PATCH 98/98] fixing wrong model sizes

---
 data/haarcascades/haarcascade_fullbody.xml  | 4 ++--
 data/haarcascades/haarcascade_lowerbody.xml | 4 ++--
 data/haarcascades/haarcascade_smile.xml     | 4 ++--
 data/haarcascades/haarcascade_upperbody.xml | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/data/haarcascades/haarcascade_fullbody.xml b/data/haarcascades/haarcascade_fullbody.xml
index 1f4e3a70d..831f4708e 100644
--- a/data/haarcascades/haarcascade_fullbody.xml
+++ b/data/haarcascades/haarcascade_fullbody.xml
@@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie.
   <stageType>BOOST</stageType>
   <featureType>HAAR</featureType>
-  <height>14</height>
-  <width>28</width>
+  <height>28</height>
+  <width>14</width>
   <stageParams>
     <maxWeakCount>107</maxWeakCount></stageParams>
diff --git a/data/haarcascades/haarcascade_lowerbody.xml b/data/haarcascades/haarcascade_lowerbody.xml
index 59949b0d8..db6d8d89f 100644
--- a/data/haarcascades/haarcascade_lowerbody.xml
+++ b/data/haarcascades/haarcascade_lowerbody.xml
@@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie.
   <stageType>BOOST</stageType>
   <featureType>HAAR</featureType>
-  <height>19</height>
-  <width>23</width>
+  <height>23</height>
+  <width>19</width>
   <stageParams>
     <maxWeakCount>89</maxWeakCount></stageParams>
diff --git a/data/haarcascades/haarcascade_smile.xml b/data/haarcascades/haarcascade_smile.xml
index b7df2217b..569db9910 100644
--- a/data/haarcascades/haarcascade_smile.xml
+++ b/data/haarcascades/haarcascade_smile.xml
@@ -47,8 +47,8 @@
   <stageType>BOOST</stageType>
   <featureType>HAAR</featureType>
-  <height>36</height>
-  <width>18</width>
+  <height>18</height>
+  <width>36</width>
   <stageParams>
     <maxWeakCount>53</maxWeakCount></stageParams>
diff --git a/data/haarcascades/haarcascade_upperbody.xml b/data/haarcascades/haarcascade_upperbody.xml
index 778687fbc..6070a7471 100644
--- a/data/haarcascades/haarcascade_upperbody.xml
+++ b/data/haarcascades/haarcascade_upperbody.xml
@@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie.
   <stageType>BOOST</stageType>
   <featureType>HAAR</featureType>
-  <height>22</height>
-  <width>18</width>
+  <height>18</height>
+  <width>22</width>
   <stageParams>
     <maxWeakCount>152</maxWeakCount></stageParams>
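
A quick way to check the corrected detection-window sizes from [PATCH 98/98] is to load each cascade and print the window it reports. The sketch below is illustrative rather than part of the patch series: it assumes an OpenCV 3.x build with the objdetect module and that the four XML files are reachable at the repository-relative paths used in the patch.

// Illustrative sketch (not part of the patch series): print the detection
// window stored in each cascade touched by PATCH 98/98.
#include <cstdio>
#include <opencv2/objdetect.hpp>

int main()
{
    const char* files[] = {
        "data/haarcascades/haarcascade_fullbody.xml",
        "data/haarcascades/haarcascade_lowerbody.xml",
        "data/haarcascades/haarcascade_smile.xml",
        "data/haarcascades/haarcascade_upperbody.xml"
    };
    for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); ++i)
    {
        cv::CascadeClassifier cascade;
        if (!cascade.load(files[i]))
        {
            std::printf("failed to load %s\n", files[i]);
            continue;
        }
        // getOriginalWindowSize() returns the training window read from the XML header.
        cv::Size win = cascade.getOriginalWindowSize();
        std::printf("%s: width=%d height=%d\n", files[i], win.width, win.height);
    }
    return 0;
}

With the fix applied, the body cascades are expected to report windows taller than they are wide (for example 14x28 for the full-body model), while the smile and upper-body cascades remain wider than tall.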