diff --git a/cmake/templates/OpenCVConfig.cmake.in b/cmake/templates/OpenCVConfig.cmake.in index 7dee995f5..edc0264e8 100644 --- a/cmake/templates/OpenCVConfig.cmake.in +++ b/cmake/templates/OpenCVConfig.cmake.in @@ -64,7 +64,11 @@ set(OpenCV_USE_CUFFT @HAVE_CUFFT@) set(OpenCV_USE_NVCUVID @HAVE_NVCUVID@) # Android API level from which OpenCV has been compiled is remembered -set(OpenCV_ANDROID_NATIVE_API_LEVEL @OpenCV_ANDROID_NATIVE_API_LEVEL_CONFIGCMAKE@) +if(ANDROID) + set(OpenCV_ANDROID_NATIVE_API_LEVEL @OpenCV_ANDROID_NATIVE_API_LEVEL_CONFIGCMAKE@) +else() + set(OpenCV_ANDROID_NATIVE_API_LEVEL 0) +endif() # Some additional settings are required if OpenCV is built as static libs set(OpenCV_SHARED @BUILD_SHARED_LIBS@) @@ -75,8 +79,8 @@ set(OpenCV_USE_MANGLED_PATHS @OpenCV_USE_MANGLED_PATHS_CONFIGCMAKE@) # Extract the directory where *this* file has been installed (determined at cmake run-time) get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH CACHE) -if(NOT WIN32 OR OpenCV_ANDROID_NATIVE_API_LEVEL GREATER 0) - if(OpenCV_ANDROID_NATIVE_API_LEVEL GREATER 0) +if(NOT WIN32 OR ANDROID) + if(ANDROID) set(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../../..") else() set(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../..") diff --git a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst index 75c262868..5a5597a75 100644 --- a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst +++ b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst @@ -187,7 +187,7 @@ Explanation image.convertTo(new_image, -1, alpha, beta); - where :convert_to:`convertTo <>` would effectively perform *new_image = a*image + beta*. However, we wanted to show you how to access each pixel. In any case, both methods give the same result. + where :convert_to:`convertTo <>` would effectively perform *new_image = a*image + beta*. However, we wanted to show you how to access each pixel. In any case, both methods give the same result but convertTo is more optimized and works a lot faster. Result ======= diff --git a/doc/tutorials/introduction/linux_install/linux_install.rst b/doc/tutorials/introduction/linux_install/linux_install.rst index d31c68a88..1801aef14 100644 --- a/doc/tutorials/introduction/linux_install/linux_install.rst +++ b/doc/tutorials/introduction/linux_install/linux_install.rst @@ -7,22 +7,24 @@ These steps have been tested for Ubuntu 10.04 but should work with other distros Required Packages ================= - * GCC 4.4.x or later. This can be installed with: + * GCC 4.4.x or later + * CMake 2.8.7 or higher + * Git + * GTK+2.x or higher, including headers (libgtk2.0-dev) + * pkg-config + * Python 2.6 or later and Numpy 1.5 or later with developer packages (python-dev, python-numpy) + * ffmpeg or libav development packages: libavcodec-dev, libavformat-dev, libswscale-dev + * [optional] libtbb2 libtbb-dev + * [optional] libdc1394 2.x + * [optional] libjpeg-dev, libpng-dev, libtiff-dev, libjasper-dev, libdc1394-22-dev + +The packages can be installed using a terminal and the following commands or by using Synaptic Manager: .. 
code-block:: bash - sudo apt-get install build-essential - - * CMake 2.8.7 or higher; - * Git; - * GTK+2.x or higher, including headers (libgtk2.0-dev); - * pkg-config; - * Python 2.6 or later and Numpy 1.5 or later with developer packages (python-dev, python-numpy); - * ffmpeg or libav development packages: libavcodec-dev, libavformat-dev, libswscale-dev; - * [optional] libdc1394 2.x; - * [optional] libjpeg-dev, libpng-dev, libtiff-dev, libjasper-dev. - -All the libraries above can be installed via Terminal or by using Synaptic Manager. + [compiler] sudo apt-get install build-essential + [required] sudo apt-get install cmake git libgtk2-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev + [optional] sudo apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev Getting OpenCV Source Code ========================== diff --git a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst index 91b411682..5a68dad5c 100644 --- a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst +++ b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst @@ -78,6 +78,8 @@ Make sure your active solution configuration (:menuselection:`Build --> Configur Build your solution (:menuselection:`Build --> Build Solution`, or press *F7*). +Before continuing, do not forget to add the command line argument of your input image to your project (:menuselection:`Right click on project --> Properties --> Configuration Properties --> Debugging` and then set the field ``Command Arguments`` with the location of the image). + Now set a breakpoint on the source line that says .. code-block:: c++ diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst index 50f734803..574071de7 100644 --- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst +++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst @@ -105,8 +105,8 @@ Explanation .. code-block:: cpp - Mat trainingDataMat(3, 2, CV_32FC1, trainingData); - Mat labelsMat (3, 1, CV_32FC1, labels); + Mat trainingDataMat(4, 2, CV_32FC1, trainingData); + Mat labelsMat (4, 1, CV_32FC1, labels); 2. **Set up SVM's parameters** diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index 36af8362f..201f9bd47 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -224,9 +224,9 @@ Computes useful camera characteristics from the camera matrix. :param imageSize: Input image size in pixels. - :param apertureWidth: Physical width of the sensor. + :param apertureWidth: Physical width in mm of the sensor. - :param apertureHeight: Physical height of the sensor. + :param apertureHeight: Physical height in mm of the sensor. :param fovx: Output field of view in degrees along the horizontal sensor axis. @@ -234,13 +234,15 @@ Computes useful camera characteristics from the camera matrix. :param focalLength: Focal length of the lens in mm. - :param principalPoint: Principal point in pixels. + :param principalPoint: Principal point in mm. 
:param aspectRatio: :math:`f_y/f_x` The function computes various useful camera characteristics from the previously estimated camera matrix. +.. note:: + Do keep in mind that the unity measure 'mm' stands for whatever unit of measure one chooses for the chessboard pitch (it can thus be any value). composeRT ------------- @@ -1490,6 +1492,10 @@ Reconstructs points by triangulation. The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from :ocv:func:`stereoRectify`. +.. note:: + + Keep in mind that all input data should be of float type in order for this function to work. + .. seealso:: :ocv:func:`reprojectImageTo3D` diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index 06e51cf41..60b744f6d 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -585,7 +585,7 @@ Draws a text string. :param font: ``CvFont`` structure initialized using :ocv:cfunc:`InitFont`. :param fontFace: Font type. One of ``FONT_HERSHEY_SIMPLEX``, ``FONT_HERSHEY_PLAIN``, ``FONT_HERSHEY_DUPLEX``, ``FONT_HERSHEY_COMPLEX``, ``FONT_HERSHEY_TRIPLEX``, ``FONT_HERSHEY_COMPLEX_SMALL``, ``FONT_HERSHEY_SCRIPT_SIMPLEX``, or ``FONT_HERSHEY_SCRIPT_COMPLEX``, - where each of the font ID's can be combined with ``FONT_HERSHEY_ITALIC`` to get the slanted letters. + where each of the font ID's can be combined with ``FONT_ITALIC`` to get the slanted letters. :param fontScale: Font scale factor that is multiplied by the font-specific base size. diff --git a/modules/core/doc/operations_on_arrays.rst b/modules/core/doc/operations_on_arrays.rst index c936457af..c2121fc6f 100644 --- a/modules/core/doc/operations_on_arrays.rst +++ b/modules/core/doc/operations_on_arrays.rst @@ -1216,9 +1216,9 @@ gemm ---- Performs generalized matrix multiplication. -.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double gamma, OutputArray dst, int flags=0 ) +.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double beta, OutputArray dst, int flags=0 ) -.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, gamma[, dst[, flags]]) -> dst +.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, beta[, dst[, flags]]) -> dst .. ocv:cfunction:: void cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, const CvArr* src3, double beta, CvArr* dst, int tABC=0) diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index 12d11b006..9c409482c 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -389,7 +389,7 @@ CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0); //! implements generalized matrix product algorithm GEMM from BLAS CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, - InputArray src3, double gamma, OutputArray dst, int flags = 0); + InputArray src3, double beta, OutputArray dst, int flags = 0); //! 
multiplies matrix by its transposition from the left or from the right CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 9bb3f31c8..6c6832a31 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -4050,16 +4050,18 @@ double cv::kmeans( InputArray _data, int K, int flags, OutputArray _centers ) { const int SPP_TRIALS = 3; - Mat data = _data.getMat(); - bool isrow = data.rows == 1 && data.channels() > 1; - int N = !isrow ? data.rows : data.cols; - int dims = (!isrow ? data.cols : 1)*data.channels(); - int type = data.depth(); + Mat data0 = _data.getMat(); + bool isrow = data0.rows == 1 && data0.channels() > 1; + int N = !isrow ? data0.rows : data0.cols; + int dims = (!isrow ? data0.cols : 1)*data0.channels(); + int type = data0.depth(); attempts = std::max(attempts, 1); - CV_Assert( data.dims <= 2 && type == CV_32F && K > 0 ); + CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 ); CV_Assert( N >= K ); + Mat data(N, dims, CV_32F, data0.data, isrow ? dims * sizeof(float) : static_cast(data0.step)); + _bestLabels.create(N, 1, CV_32S, -1, true); Mat _labels, best_labels = _bestLabels.getMat(); diff --git a/modules/core/test/test_math.cpp b/modules/core/test/test_math.cpp index 0d64fe8c0..6b51d8af9 100644 --- a/modules/core/test/test_math.cpp +++ b/modules/core/test/test_math.cpp @@ -2627,6 +2627,15 @@ TEST(Core_SVD, flt) // TODO: eigenvv, invsqrt, cbrt, fastarctan, (round, floor, ceil(?)), +enum +{ + MAT_N_DIM_C1, + MAT_N_1_CDIM, + MAT_1_N_CDIM, + MAT_N_DIM_C1_NONCONT, + MAT_N_1_CDIM_NONCONT, + VECTOR +}; class CV_KMeansSingularTest : public cvtest::BaseTest { @@ -2634,7 +2643,7 @@ public: CV_KMeansSingularTest() {} ~CV_KMeansSingularTest() {} protected: - void run(int) + void run(int inVariant) { int i, iter = 0, N = 0, N0 = 0, K = 0, dims = 0; Mat labels; @@ -2646,20 +2655,70 @@ protected: for( iter = 0; iter < maxIter; iter++ ) { ts->update_context(this, iter, true); - dims = rng.uniform(1, MAX_DIM+1); + dims = rng.uniform(inVariant == MAT_1_N_CDIM ? 
2 : 1, MAX_DIM+1); N = rng.uniform(1, MAX_POINTS+1); N0 = rng.uniform(1, MAX(N/10, 2)); K = rng.uniform(1, N+1); - Mat data0(N0, dims, CV_32F); - rng.fill(data0, RNG::UNIFORM, -1, 1); + if (inVariant == VECTOR) + { + dims = 2; - Mat data(N, dims, CV_32F); - for( i = 0; i < N; i++ ) - data0.row(rng.uniform(0, N0)).copyTo(data.row(i)); + std::vector data0(N0); + rng.fill(data0, RNG::UNIFORM, -1, 1); - kmeans(data, K, labels, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 0), - 5, KMEANS_PP_CENTERS); + std::vector data(N); + for( i = 0; i < N; i++ ) + data[i] = data0[rng.uniform(0, N0)]; + + kmeans(data, K, labels, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 0), + 5, KMEANS_PP_CENTERS); + } + else + { + Mat data0(N0, dims, CV_32F); + rng.fill(data0, RNG::UNIFORM, -1, 1); + + Mat data; + + switch (inVariant) + { + case MAT_N_DIM_C1: + data.create(N, dims, CV_32F); + for( i = 0; i < N; i++ ) + data0.row(rng.uniform(0, N0)).copyTo(data.row(i)); + break; + + case MAT_N_1_CDIM: + data.create(N, 1, CV_32FC(dims)); + for( i = 0; i < N; i++ ) + memcpy(data.ptr(i), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float)); + break; + + case MAT_1_N_CDIM: + data.create(1, N, CV_32FC(dims)); + for( i = 0; i < N; i++ ) + memcpy(data.data + i * dims * sizeof(float), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float)); + break; + + case MAT_N_DIM_C1_NONCONT: + data.create(N, dims + 5, CV_32F); + data = data(Range(0, N), Range(0, dims)); + for( i = 0; i < N; i++ ) + data0.row(rng.uniform(0, N0)).copyTo(data.row(i)); + break; + + case MAT_N_1_CDIM_NONCONT: + data.create(N, 3, CV_32FC(dims)); + data = data.colRange(0, 1); + for( i = 0; i < N; i++ ) + memcpy(data.ptr(i), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float)); + break; + } + + kmeans(data, K, labels, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 0), + 5, KMEANS_PP_CENTERS); + } Mat hist(K, 1, CV_32S, Scalar(0)); for( i = 0; i < N; i++ ) @@ -2683,7 +2742,19 @@ protected: } }; -TEST(Core_KMeans, singular) { CV_KMeansSingularTest test; test.safe_run(); } +TEST(Core_KMeans, singular) { CV_KMeansSingularTest test; test.safe_run(MAT_N_DIM_C1); } + +CV_ENUM(KMeansInputVariant, MAT_N_DIM_C1, MAT_N_1_CDIM, MAT_1_N_CDIM, MAT_N_DIM_C1_NONCONT, MAT_N_1_CDIM_NONCONT, VECTOR) + +typedef testing::TestWithParam Core_KMeans_InputVariants; + +TEST_P(Core_KMeans_InputVariants, singular) +{ + CV_KMeansSingularTest test; + test.safe_run(GetParam()); +} + +INSTANTIATE_TEST_CASE_P(AllVariants, Core_KMeans_InputVariants, KMeansInputVariant::all()); TEST(CovariationMatrixVectorOfMat, accuracy) { diff --git a/modules/cudawarping/src/cuda/resize.cu b/modules/cudawarping/src/cuda/resize.cu index dc91df383..7285a4748 100644 --- a/modules/cudawarping/src/cuda/resize.cu +++ b/modules/cudawarping/src/cuda/resize.cu @@ -213,7 +213,7 @@ namespace cv { namespace cuda { namespace device const dim3 block(32, 8); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); - resize_linear<<>>(src, dst, fy, fx); + resize_linear<<>>(src, dst, fy, fx); cudaSafeCall( cudaGetLastError() ); if (stream == 0) diff --git a/modules/flann/include/opencv2/flann/autotuned_index.h b/modules/flann/include/opencv2/flann/autotuned_index.h index b0beac499..0670d19e2 100644 --- a/modules/flann/include/opencv2/flann/autotuned_index.h +++ b/modules/flann/include/opencv2/flann/autotuned_index.h @@ -99,18 +99,22 @@ public: */ virtual void buildIndex() { + std::ostringstream stream; bestParams_ = estimateBuildParams(); + print_params(bestParams_, stream); 
Logger::info("----------------------------------------------------\n"); Logger::info("Autotuned parameters:\n"); - print_params(bestParams_); + Logger::info("%s", stream.str().c_str()); Logger::info("----------------------------------------------------\n"); bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_); bestIndex_->buildIndex(); speedup_ = estimateSearchParams(bestSearchParams_); + stream.str(std::string()); + print_params(bestSearchParams_, stream); Logger::info("----------------------------------------------------\n"); Logger::info("Search parameters:\n"); - print_params(bestSearchParams_); + Logger::info("%s", stream.str().c_str()); Logger::info("----------------------------------------------------\n"); } diff --git a/modules/flann/include/opencv2/flann/params.h b/modules/flann/include/opencv2/flann/params.h index 2f1d5e7ca..95ef4cd23 100644 --- a/modules/flann/include/opencv2/flann/params.h +++ b/modules/flann/include/opencv2/flann/params.h @@ -79,16 +79,19 @@ T get_param(const IndexParams& params, cv::String name) } } -inline void print_params(const IndexParams& params) +inline void print_params(const IndexParams& params, std::ostream& stream) { IndexParams::const_iterator it; for(it=params.begin(); it!=params.end(); ++it) { - std::cout << it->first << " : " << it->second << std::endl; + stream << it->first << " : " << it->second << std::endl; } } - +inline void print_params(const IndexParams& params) +{ + print_params(params, std::cout); +} } diff --git a/modules/highgui/src/cap_giganetix.cpp b/modules/highgui/src/cap_giganetix.cpp index ccbe7020f..e252a1643 100644 --- a/modules/highgui/src/cap_giganetix.cpp +++ b/modules/highgui/src/cap_giganetix.cpp @@ -711,13 +711,13 @@ CvCaptureCAM_Giganetix::setProperty( int property_id, double value ) INT64 w, wmax, val = (INT64)value; if((b_ret = m_device->GetIntegerNodeValue ("Width", w))) if((b_ret = m_device->GetIntegerNodeValue ("WidthMax", wmax))) - b_ret = m_device->SetIntegerNodeValue ("OffsetX", val w > wmax ? wmax - w : val); + b_ret = m_device->SetIntegerNodeValue ("OffsetX", (val + w) > wmax ? (wmax - w) : val); } break; case CV_CAP_PROP_GIGA_FRAME_OFFSET_Y: { INT64 h, hmax, val = (INT64)value; if((b_ret = m_device->GetIntegerNodeValue ("Height", h))) if((b_ret = m_device->GetIntegerNodeValue ("HeightMax", hmax))) - b_ret = m_device->SetIntegerNodeValue ("OffsetY", val h > hmax ? hmax - h : val); + b_ret = m_device->SetIntegerNodeValue ("OffsetY", (val + h) > hmax ? (hmax - h) : val); b_ret = m_device->SetIntegerNodeValue ("OffsetY", (INT64)value); } break; diff --git a/modules/highgui/src/cap_gstreamer.cpp b/modules/highgui/src/cap_gstreamer.cpp index 2aec14c4a..00562aa41 100644 --- a/modules/highgui/src/cap_gstreamer.cpp +++ b/modules/highgui/src/cap_gstreamer.cpp @@ -735,6 +735,7 @@ bool CvCapture_GStreamer::open( int type, const char* filename ) #if GST_VERSION_MAJOR == 0 caps = gst_caps_new_simple("video/x-raw-rgb", + "bpp", G_TYPE_INT, 24, "red_mask", G_TYPE_INT, 0x0000FF, "green_mask", G_TYPE_INT, 0x00FF00, "blue_mask", G_TYPE_INT, 0xFF0000, diff --git a/modules/highgui/src/window_w32.cpp b/modules/highgui/src/window_w32.cpp index 0c397fd14..8b317dff7 100644 --- a/modules/highgui/src/window_w32.cpp +++ b/modules/highgui/src/window_w32.cpp @@ -1472,8 +1472,6 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM if( window->on_mouse ) { POINT pt; - RECT rect; - SIZE size = {0,0}; int flags = (wParam & MK_LBUTTON ? CV_EVENT_FLAG_LBUTTON : 0)| (wParam & MK_RBUTTON ? 
CV_EVENT_FLAG_RBUTTON : 0)| @@ -1499,12 +1497,23 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM pt.x = GET_X_LPARAM( lParam ); pt.y = GET_Y_LPARAM( lParam ); - GetClientRect( window->hwnd, &rect ); - icvGetBitmapData( window, &size, 0, 0 ); + if (window->flags & CV_WINDOW_AUTOSIZE) + { + // As user can't change window size, do not scale window coordinates. Underlying windowing system + // may prevent full window from being displayed and in this case coordinates should not be scaled. + window->on_mouse( event, pt.x, pt.y, flags, window->on_mouse_param ); + } else { + // Full window is displayed using different size. Scale coordinates to match underlying positions. + RECT rect; + SIZE size = {0, 0}; - window->on_mouse( event, pt.x*size.cx/MAX(rect.right - rect.left,1), - pt.y*size.cy/MAX(rect.bottom - rect.top,1), flags, - window->on_mouse_param ); + GetClientRect( window->hwnd, &rect ); + icvGetBitmapData( window, &size, 0, 0 ); + + window->on_mouse( event, pt.x*size.cx/MAX(rect.right - rect.left,1), + pt.y*size.cy/MAX(rect.bottom - rect.top,1), flags, + window->on_mouse_param ); + } } break; diff --git a/modules/imgproc/doc/geometric_transformations.rst b/modules/imgproc/doc/geometric_transformations.rst index c94178f6c..b65b76cee 100644 --- a/modules/imgproc/doc/geometric_transformations.rst +++ b/modules/imgproc/doc/geometric_transformations.rst @@ -249,6 +249,57 @@ The function computes an inverse affine transformation represented by The result is also a :math:`2 \times 3` matrix of the same type as ``M`` . +LinearPolar +----------- +Remaps an image to polar space. + +.. ocv:cfunction:: void cvLinearPolar( const CvArr* src, CvArr* dst, CvPoint2D32f center, double maxRadius, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS ) + + :param src: Source image + + :param dst: Destination image + + :param center: The transformation center; + + :param maxRadius: Inverse magnitude scale parameter. See below + + :param flags: A combination of interpolation methods and the following optional flags: + + * **CV_WARP_FILL_OUTLIERS** fills all of the destination image pixels. If some of them correspond to outliers in the source image, they are set to zero + + * **CV_WARP_INVERSE_MAP** See below + +The function ``cvLinearPolar`` transforms the source image using the following transformation: + + * + Forward transformation (``CV_WARP_INVERSE_MAP`` is not set): + + .. math:: + + dst( \phi , \rho ) = src(x,y) + + + * + Inverse transformation (``CV_WARP_INVERSE_MAP`` is set): + + .. math:: + + dst(x,y) = src( \phi , \rho ) + + +where + + .. math:: + + \rho = (src.width/maxRadius) \cdot \sqrt{x^2 + y^2} , \phi =atan(y/x) + + +The function can not operate in-place. + +.. note:: + + * An example using the LinearPolar operation can be found at opencv_source_code/samples/c/polar_transforms.c + LogPolar diff --git a/modules/imgproc/doc/miscellaneous_transformations.rst b/modules/imgproc/doc/miscellaneous_transformations.rst index c8f7d1579..b8e9061dc 100644 --- a/modules/imgproc/doc/miscellaneous_transformations.rst +++ b/modules/imgproc/doc/miscellaneous_transformations.rst @@ -500,7 +500,7 @@ Fills a connected component with the given color. :param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below. 
- :param mask: (For the second function only) Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function uses and updates the mask, so you take responsibility of initializing the ``mask`` content. Flood-filling cannot go across non-zero pixels in the mask. For example, an edge detector output can be used as a mask to stop filling at edges. It is possible to use the same mask in multiple calls to the function to make sure the filled area does not overlap. + :param mask: Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller than ``image``. Since this is both an input and output parameter, you must take responsibility of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example, an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the mask corresponding to filled pixels in the image are set to 1 or to the a value specified in ``flags`` as described below. It is therefore possible to use the same mask in multiple calls to the function to make sure the filled areas do not overlap. .. note:: Since the mask is larger than the filled image, a pixel :math:`(x, y)` in ``image`` corresponds to the pixel :math:`(x+1, y+1)` in the ``mask`` . @@ -514,11 +514,11 @@ Fills a connected component with the given color. :param rect: Optional output parameter set by the function to the minimum bounding rectangle of the repainted domain. - :param flags: Operation flags. Lower bits contain a connectivity value, 4 (default) or 8, used within the function. Connectivity determines which neighbors of a pixel are considered. Upper bits can be 0 or a combination of the following flags: + :param flags: Operation flags. The first 8 bits contain a connectivity value. The default value of 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill the ``mask`` (the default value is 1). For example, ``4 | ( 255 << 8 )`` will consider 4 nearest neighbours and fill the mask with a value of 255. The following additional options occupy higher bits and therefore may be further combined with the connectivity and mask fill values using bit-wise or (``|``): * **FLOODFILL_FIXED_RANGE** If set, the difference between the current pixel and seed pixel is considered. Otherwise, the difference between neighbor pixels is considered (that is, the range is floating). - * **FLOODFILL_MASK_ONLY** If set, the function does not change the image ( ``newVal`` is ignored), but fills the mask with the value in bits 8-16 of ``flags`` (that is, the fill value is set to newValue by adding (newValue << 8) to the ``flags``). The flag can be used for the second variant only. + * **FLOODFILL_MASK_ONLY** If set, the function does not change the image ( ``newVal`` is ignored), and only fills the mask with the value specified in bits 8-16 of ``flags`` as described above. This option only make sense in function variants that have the ``mask`` parameter. The functions ``floodFill`` fill a connected component starting from the seed point with the specified color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. 
The pixel at :math:`(x,y)` is considered to belong to the repainted domain if: diff --git a/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst b/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst index 9dd1d9f2f..9c5a89c75 100644 --- a/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst +++ b/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst @@ -159,7 +159,7 @@ Finds contours in a binary image. .. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) ) - :param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours. + :param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours. If mode equals to ``CV_RETR_CCOMP`` or ``CV_RETR_FLOODFILL``, the input can also be a 32-bit integer image of labels (``CV_32SC1``). :param contours: Detected contours. Each contour is stored as a vector of points. diff --git a/modules/imgproc/src/filter.cpp b/modules/imgproc/src/filter.cpp index ed177583d..d1af4065f 100644 --- a/modules/imgproc/src/filter.cpp +++ b/modules/imgproc/src/filter.cpp @@ -410,8 +410,11 @@ void FilterEngine::apply(const Mat& src, Mat& dst, dstOfs.y + srcRoi.height <= dst.rows ); int y = start(src, srcRoi, isolated); - proceed( src.data + y*src.step, (int)src.step, endY - startY, - dst.data + dstOfs.y*dst.step + dstOfs.x*dst.elemSize(), (int)dst.step ); + proceed( src.data + y*src.step + + srcRoi.x*src.elemSize(), + (int)src.step, endY - startY, + dst.data + dstOfs.y*dst.step + + dstOfs.x*dst.elemSize(), (int)dst.step ); } } diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index ace0deb07..bfbbcf728 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -1154,7 +1154,7 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize, Size size = _src.size(); _dst.create( size, type ); - if( borderType != BORDER_CONSTANT ) + if( borderType != BORDER_CONSTANT && (borderType & BORDER_ISOLATED) != 0 ) { if( size.height == 1 ) ksize.height = 1; diff --git a/modules/ml/src/tree.cpp b/modules/ml/src/tree.cpp index 3a67cdd37..41d2553a4 100644 --- a/modules/ml/src/tree.cpp +++ b/modules/ml/src/tree.cpp @@ -1445,8 +1445,6 @@ void CvDTreeTrainData::read_params( CvFileStorage* fs, CvFileNode* node ) var_type->data.i[var_count] = cat_var_count; ord_var_count = ~ord_var_count; - if( cat_var_count != cat_var_count || ord_var_count != ord_var_count ) - CV_ERROR( CV_StsParseError, "var_type is inconsistent with cat_var_count and ord_var_count" ); ////// if( cat_var_count > 0 || is_classifier ) diff --git a/modules/nonfree/doc/feature_detection.rst b/modules/nonfree/doc/feature_detection.rst index 22c1d97a9..b61b85cd8 
100644 --- a/modules/nonfree/doc/feature_detection.rst +++ b/modules/nonfree/doc/feature_detection.rst @@ -121,6 +121,8 @@ Detects keypoints and computes SURF descriptors for them. .. ocv:pyfunction:: cv2.SURF.compute(image, keypoints[, descriptors]) -> keypoints, descriptors .. ocv:pyfunction:: cv2.SURF.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors +.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image[, mask]) -> keypoints, descriptors + .. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params ) :param image: Input 8-bit grayscale image diff --git a/modules/video/src/bgfg_gaussmix2.cpp b/modules/video/src/bgfg_gaussmix2.cpp index 098310f6b..a5c48cb1f 100644 --- a/modules/video/src/bgfg_gaussmix2.cpp +++ b/modules/video/src/bgfg_gaussmix2.cpp @@ -861,54 +861,48 @@ void BackgroundSubtractorMOG2Impl::getBackgroundImage(OutputArray backgroundImag } int nchannels = CV_MAT_CN(frameType); - CV_Assert( nchannels == 3 ); - Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0)); - + CV_Assert(nchannels == 1 || nchannels == 3); + Mat meanBackground(frameSize, CV_MAKETYPE(CV_8U, nchannels), Scalar::all(0)); int firstGaussianIdx = 0; const GMM* gmm = (GMM*)bgmodel.data; - const Vec3f* mean = reinterpret_cast(gmm + frameSize.width*frameSize.height*nmixtures); + const float* mean = reinterpret_cast(gmm + frameSize.width*frameSize.height*nmixtures); + std::vector meanVal(nchannels, 0.f); for(int row=0; row(row, col); - Vec3f meanVal; float totalWeight = 0.f; for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nmodes; gaussianIdx++) { GMM gaussian = gmm[gaussianIdx]; - meanVal += gaussian.weight * mean[gaussianIdx]; + size_t meanPosition = gaussianIdx*nchannels; + for(int chn = 0; chn < nchannels; chn++) + { + meanVal[chn] += gaussian.weight * mean[meanPosition + chn]; + } totalWeight += gaussian.weight; if(totalWeight > backgroundRatio) break; } - - meanVal *= (1.f / totalWeight); - meanBackground.at(row, col) = Vec3b(meanVal); + float invWeight = 1.f/totalWeight; + switch(nchannels) + { + case 1: + meanBackground.at(row, col) = (uchar)(meanVal[0] * invWeight); + meanVal[0] = 0.f; + break; + case 3: + Vec3f& meanVec = *reinterpret_cast(&meanVal[0]); + meanBackground.at(row, col) = Vec3b(meanVec * invWeight); + meanVec = 0.f; + break; + } firstGaussianIdx += nmixtures; } } - - switch(CV_MAT_CN(frameType)) - { - case 1: - { - std::vector channels; - split(meanBackground, channels); - channels[0].copyTo(backgroundImage); - break; - } - - case 3: - { - meanBackground.copyTo(backgroundImage); - break; - } - - default: - CV_Error(Error::StsUnsupportedFormat, ""); - } + meanBackground.copyTo(backgroundImage); } Ptr createBackgroundSubtractorMOG2(int _history, double _varThreshold, diff --git a/samples/cpp/kmeans.cpp b/samples/cpp/kmeans.cpp index 5a5140280..1737883be 100644 --- a/samples/cpp/kmeans.cpp +++ b/samples/cpp/kmeans.cpp @@ -33,7 +33,7 @@ int main( int /*argc*/, char** /*argv*/ ) { int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1); int i, sampleCount = rng.uniform(1, 1001); - Mat points(sampleCount, 2, CV_32F), labels; + Mat points(sampleCount, 1, CV_32FC2), labels; clusterCount = MIN(clusterCount, sampleCount); Mat centers;
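
A small illustration of the ``convertTo`` call discussed in the basic_linear_transform tutorial change above; the image path and the alpha/beta values are placeholders, not part of the patch:

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat image = cv::imread("lena.jpg");   // hypothetical input image
        if (image.empty())
            return 1;

        double alpha = 1.5;  // contrast gain
        int    beta  = 30;   // brightness offset

        // Saturating per-pixel transform new_image = alpha*image + beta,
        // performed in optimized native code instead of a hand-written pixel loop.
        cv::Mat new_image;
        image.convertTo(new_image, -1, alpha, beta);
        return 0;
    }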
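
For the ``calibrationMatrixValues`` unit clarifications above, a minimal sketch; the intrinsics and the sensor dimensions below are invented for illustration only:

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/calib3d.hpp>

    int main()
    {
        // Hypothetical intrinsics for a 640x480 camera.
        cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                                 0, 800, 240,
                                                 0,   0,   1);

        double fovx, fovy, focalLength, aspectRatio;
        cv::Point2d principalPoint;

        // The aperture is given here in mm, so focalLength and principalPoint come back in mm.
        // Passing the aperture in chessboard-square units returns them in those units instead.
        cv::calibrationMatrixValues(K, cv::Size(640, 480), 5.76, 4.29,
                                    fovx, fovy, focalLength, principalPoint, aspectRatio);
        return 0;
    }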
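
The rewritten ``floodFill`` flags description above combines a connectivity value, a mask fill value, and option bits; a hedged sketch of that composition (image content, seed point, and tolerances are arbitrary):

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    int main()
    {
        cv::Mat img(100, 100, CV_8UC3, cv::Scalar(50, 50, 50));

        // The mask must be 2 pixels wider and taller than the image and zero-initialized.
        cv::Mat mask = cv::Mat::zeros(img.rows + 2, img.cols + 2, CV_8UC1);

        // 8-connectivity, write 255 into the mask, leave the image itself untouched.
        int flags = 8 | (255 << 8) | cv::FLOODFILL_MASK_ONLY | cv::FLOODFILL_FIXED_RANGE;
        cv::floodFill(img, mask, cv::Point(50, 50), cv::Scalar(), 0,
                      cv::Scalar(10, 10, 10), cv::Scalar(10, 10, 10), flags);
        return 0;
    }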
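
The ``getBackgroundImage`` change above extends the MOG2 background readout to single-channel frames. A minimal usage sketch, assuming synthetic grayscale frames in place of a real video source:

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/video.hpp>

    int main()
    {
        cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();

        cv::Mat frame(240, 320, CV_8UC1), fgMask, background;
        for (int i = 0; i < 50; i++)
        {
            cv::randu(frame, cv::Scalar(0), cv::Scalar(255));  // stand-in for real grayscale frames
            mog2->apply(frame, fgMask);
        }

        // With this patch the background image matches the input type (CV_8UC1 here);
        // previously this call asserted on anything but 3-channel frames.
        mog2->getBackgroundImage(background);
        return 0;
    }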
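
The new kmeans test variants and the updated samples/cpp/kmeans.cpp above exercise several input layouts (N x dims single-channel, N x 1 multi-channel, non-continuous ROIs, and std::vector). A sketch of calling ``cv::kmeans`` on a ``std::vector<cv::Point2f>``; the point count, cluster count, and termination criteria are illustrative only:

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        // Two-dimensional samples stored as a vector of points; kmeans sees this as
        // N rows with 2 channels, the same layout as Mat(N, 1, CV_32FC2).
        std::vector<cv::Point2f> points(100);
        cv::RNG rng(12345);
        for (size_t i = 0; i < points.size(); i++)
            points[i] = cv::Point2f(rng.uniform(0.f, 1.f), rng.uniform(0.f, 1.f));

        cv::Mat labels, centers;
        cv::kmeans(points, 3, labels,
                   cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 30, 0.1),
                   5, cv::KMEANS_PP_CENTERS, centers);

        // labels is a 100x1 CV_32S matrix; centers holds one row per cluster.
        return 0;
    }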