diff --git a/CMakeLists.txt b/CMakeLists.txt index efb9e12db..45bc8f218 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -135,7 +135,7 @@ OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF OCV_OPTION(WITH_CSTRIPES "Include C= support" OFF IF WIN32 ) OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) ) OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) -OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) ) +OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) ) OCV_OPTION(WITH_VIDEOINPUT "Build HighGUI with DirectShow support" ON IF WIN32 ) OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID AND NOT APPLE) ) OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) @@ -148,6 +148,7 @@ OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" OFF # OpenCV build components # =================================================== OCV_OPTION(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" NOT (ANDROID OR IOS) ) +OCV_OPTION(BUILD_opencv_apps "Build utility applications (used for example to train classifiers)" (NOT ANDROID) IF (NOT IOS) ) OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" ON IF ANDROID ) OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON ) OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF ) @@ -452,7 +453,9 @@ add_subdirectory(doc) add_subdirectory(data) # extra applications -add_subdirectory(apps) +if(BUILD_opencv_apps) + add_subdirectory(apps) +endif() # examples if(BUILD_EXAMPLES OR BUILD_ANDROID_EXAMPLES OR INSTALL_PYTHON_EXAMPLES) @@ -720,11 +723,13 @@ if(DEFINED WITH_V4L) endif() if(HAVE_CAMV4L2) set(HAVE_CAMV4L2_STR "YES") + elseif(HAVE_VIDEOIO) + set(HAVE_CAMV4L2_STR "YES(videoio)") else() set(HAVE_CAMV4L2_STR "NO") endif() status(" V4L/V4L2:" HAVE_LIBV4L THEN "Using 
libv4l (ver ${ALIASOF_libv4l1_VERSION})" - ELSE "${HAVE_CAMV4L_STR}/${HAVE_CAMV4L2_STR}") + ELSE "${HAVE_CAMV4L_STR}/${HAVE_CAMV4L2_STR}") endif(DEFINED WITH_V4L) if(DEFINED WITH_VIDEOINPUT) diff --git a/apps/haartraining/CMakeLists.txt b/apps/haartraining/CMakeLists.txt index 22349ed6b..953be3b7e 100644 --- a/apps/haartraining/CMakeLists.txt +++ b/apps/haartraining/CMakeLists.txt @@ -1,7 +1,3 @@ -if(IOS OR ANDROID) - return() -endif() - SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) ocv_check_dependencies(${OPENCV_HAARTRAINING_DEPS}) diff --git a/apps/traincascade/CMakeLists.txt b/apps/traincascade/CMakeLists.txt index b92beb69b..f6fa679a9 100644 --- a/apps/traincascade/CMakeLists.txt +++ b/apps/traincascade/CMakeLists.txt @@ -1,7 +1,3 @@ -if(IOS OR ANDROID) - return() -endif() - set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) ocv_check_dependencies(${OPENCV_TRAINCASCADE_DEPS}) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 5eeff178f..6827fa975 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -72,11 +72,12 @@ if(WITH_XINE) endif(WITH_XINE) # --- V4L --- -ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2) +ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2 HAVE_VIDEOIO) if(WITH_V4L) CHECK_MODULE(libv4l1 HAVE_LIBV4L) CHECK_INCLUDE_FILE(linux/videodev.h HAVE_CAMV4L) CHECK_INCLUDE_FILE(linux/videodev2.h HAVE_CAMV4L2) + CHECK_INCLUDE_FILE(sys/videoio.h HAVE_VIDEOIO) endif(WITH_V4L) # --- OpenNI --- diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 879d33253..f40cc6d19 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -64,6 +64,13 @@ MACRO(ocv_check_compiler_flag LANG FLAG RESULT) else() FILE(WRITE "${_fname}" 
"#pragma\nint main(void) { return 0; }\n") endif() + elseif("_${LANG}_" MATCHES "_OBJCXX_") + set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.mm") + if("${CMAKE_CXX_FLAGS} ${FLAG} " MATCHES "-Werror " OR "${CMAKE_CXX_FLAGS} ${FLAG} " MATCHES "-Werror=unknown-pragmas ") + FILE(WRITE "${_fname}" "int main() { return 0; }\n") + else() + FILE(WRITE "${_fname}" "#pragma\nint main() { return 0; }\n") + endif() else() unset(_fname) endif() @@ -100,6 +107,8 @@ macro(ocv_check_flag_support lang flag varname) set(_lang CXX) elseif("_${lang}_" MATCHES "_C_") set(_lang C) + elseif("_${lang}_" MATCHES "_OBJCXX_") + set(_lang OBJCXX) else() set(_lang ${lang}) endif() diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake index 6d4d2184f..62c8b68b3 100644 --- a/cmake/templates/cvconfig.h.cmake +++ b/cmake/templates/cvconfig.h.cmake @@ -19,6 +19,9 @@ /* V4L2 capturing support */ #cmakedefine HAVE_CAMV4L2 +/* V4L2 capturing support in videoio.h */ +#cmakedefine HAVE_VIDEOIO + /* V4L/V4L2 capturing support via libv4l */ #cmakedefine HAVE_LIBV4L diff --git a/doc/tutorials/features2d/feature_description/feature_description.rst b/doc/tutorials/features2d/feature_description/feature_description.rst index 9ba777500..fe9b7cdbf 100644 --- a/doc/tutorials/features2d/feature_description/feature_description.rst +++ b/doc/tutorials/features2d/feature_description/feature_description.rst @@ -32,6 +32,7 @@ This tutorial code's is shown lines below. 
You can also download it from `here < #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" + #include "opencv2/nonfree/features2d.hpp" using namespace cv; diff --git a/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst b/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst index 5dabe6004..465ff216c 100644 --- a/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst +++ b/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst @@ -22,127 +22,8 @@ Code This tutorial code's is shown lines below. You can also download it from `here `_ -.. code-block:: cpp - - #include "opencv2/highgui/highgui.hpp" - #include "opencv2/imgproc/imgproc.hpp" - #include - #include - #include - - using namespace cv; - using namespace std; - - /// Global variables - Mat src, src_gray; - Mat myHarris_dst; Mat myHarris_copy; Mat Mc; - Mat myShiTomasi_dst; Mat myShiTomasi_copy; - - int myShiTomasi_qualityLevel = 50; - int myHarris_qualityLevel = 50; - int max_qualityLevel = 100; - - double myHarris_minVal; double myHarris_maxVal; - double myShiTomasi_minVal; double myShiTomasi_maxVal; - - RNG rng(12345); - - char* myHarris_window = "My Harris corner detector"; - char* myShiTomasi_window = "My Shi Tomasi corner detector"; - - /// Function headers - void myShiTomasi_function( int, void* ); - void myHarris_function( int, void* ); - - /** @function main */ - int main( int argc, char** argv ) - { - /// Load source image and convert it to gray - src = imread( argv[1], 1 ); - cvtColor( src, src_gray, CV_BGR2GRAY ); - - /// Set some parameters - int blockSize = 3; int apertureSize = 3; - - /// My Harris matrix -- Using cornerEigenValsAndVecs - myHarris_dst = Mat::zeros( src_gray.size(), CV_32FC(6) ); - Mc = Mat::zeros( src_gray.size(), CV_32FC1 ); - - cornerEigenValsAndVecs( src_gray, 
myHarris_dst, blockSize, apertureSize, BORDER_DEFAULT ); - - /* calculate Mc */ - for( int j = 0; j < src_gray.rows; j++ ) - { for( int i = 0; i < src_gray.cols; i++ ) - { - float lambda_1 = myHarris_dst.at( j, i, 0 ); - float lambda_2 = myHarris_dst.at( j, i, 1 ); - Mc.at(j,i) = lambda_1*lambda_2 - 0.04*pow( ( lambda_1 + lambda_2 ), 2 ); - } - } - - minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() ); - - /* Create Window and Trackbar */ - namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE ); - createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, - myHarris_function ); - myHarris_function( 0, 0 ); - - /// My Shi-Tomasi -- Using cornerMinEigenVal - myShiTomasi_dst = Mat::zeros( src_gray.size(), CV_32FC1 ); - cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize, BORDER_DEFAULT ); - - minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() ); - - /* Create Window and Trackbar */ - namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE ); - createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, - myShiTomasi_function ); - myShiTomasi_function( 0, 0 ); - - waitKey(0); - return(0); - } - - /** @function myShiTomasi_function */ - void myShiTomasi_function( int, void* ) - { - myShiTomasi_copy = src.clone(); - - if( myShiTomasi_qualityLevel < 1 ) { myShiTomasi_qualityLevel = 1; } - - for( int j = 0; j < src_gray.rows; j++ ) - { for( int i = 0; i < src_gray.cols; i++ ) - { - if( myShiTomasi_dst.at(j,i) > myShiTomasi_minVal + ( myShiTomasi_maxVal - - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel ) - { circle( myShiTomasi_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), - rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); } - } - } - imshow( myShiTomasi_window, myShiTomasi_copy ); - } - - /** @function myHarris_function */ - void myHarris_function( int, void* ) - { - myHarris_copy = src.clone(); - - if( 
myHarris_qualityLevel < 1 ) { myHarris_qualityLevel = 1; } - - for( int j = 0; j < src_gray.rows; j++ ) - { for( int i = 0; i < src_gray.cols; i++ ) - { - if( Mc.at(j,i) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal ) - *myHarris_qualityLevel/max_qualityLevel ) - { circle( myHarris_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255), - rng.uniform(0,255) ), -1, 8, 0 ); } - } - } - imshow( myHarris_window, myHarris_copy ); - } - - +.. literalinclude:: ../../../../../samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp + :language: cpp Explanation ============ diff --git a/doc/tutorials/introduction/load_save_image/load_save_image.rst b/doc/tutorials/introduction/load_save_image/load_save_image.rst index 7dfd8c9d8..50fb9ea37 100644 --- a/doc/tutorials/introduction/load_save_image/load_save_image.rst +++ b/doc/tutorials/introduction/load_save_image/load_save_image.rst @@ -15,7 +15,7 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare * Load an image using :imread:`imread <>` - * Transform an image from RGB to Grayscale format by using :cvt_color:`cvtColor <>` + * Transform an image from BGR to Grayscale format by using :cvt_color:`cvtColor <>` * Save your transformed image in a file on disk (using :imwrite:`imwrite <>`) Code @@ -45,7 +45,7 @@ Here it is: } Mat gray_image; - cvtColor( image, gray_image, CV_RGB2GRAY ); + cvtColor( image, gray_image, CV_BGR2GRAY ); imwrite( "../../images/Gray_Image.jpg", gray_image ); @@ -68,11 +68,11 @@ Explanation * Creating a Mat object to store the image information * Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image. -#. Now we are going to convert our image from RGB to Grayscale format. OpenCV has a really nice function to do this kind of transformations: +#. Now we are going to convert our image from BGR to Grayscale format. 
OpenCV has a really nice function to do this kind of transformations: .. code-block:: cpp - cvtColor( image, gray_image, CV_RGB2GRAY ); + cvtColor( image, gray_image, CV_BGR2GRAY ); As you can see, :cvt_color:`cvtColor <>` takes as arguments: @@ -80,7 +80,7 @@ Explanation * a source image (*image*) * a destination image (*gray_image*), in which we will save the converted image. - * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_RGB2GRAY** (self-explanatory). + * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images). #. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :imread:`imread <>`: :imwrite:`imwrite <>` diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index 6478bae50..0d9502563 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -1319,7 +1319,7 @@ StereoSGBM::StereoSGBM :param speckleWindowSize: Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range. - :param speckleRange: Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, multiple of 16. Normally, 16 or 32 is good enough. + :param speckleRange: Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16. Normally, 1 or 2 is good enough. 
:param fullDP: Set it to ``true`` to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to ``false`` . diff --git a/modules/calib3d/src/_modelest.h b/modules/calib3d/src/_modelest.h index 2a94add86..a467e1cc0 100644 --- a/modules/calib3d/src/_modelest.h +++ b/modules/calib3d/src/_modelest.h @@ -43,9 +43,9 @@ #ifndef _CV_MODEL_EST_H_ #define _CV_MODEL_EST_H_ -#include "precomp.hpp" +#include "opencv2/calib3d/calib3d.hpp" -class CvModelEstimator2 +class CV_EXPORTS CvModelEstimator2 { public: CvModelEstimator2(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions); diff --git a/modules/calib3d/src/calibration.cpp b/modules/calib3d/src/calibration.cpp index 21b702532..a63448506 100644 --- a/modules/calib3d/src/calibration.cpp +++ b/modules/calib3d/src/calibration.cpp @@ -3735,13 +3735,13 @@ float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1, OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3, OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3, OutputArray _Qmat, - double alpha, Size /*newImgSize*/, + double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags ) { // first, rectify the 1-2 stereo pair stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2, imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat, - flags, alpha, imageSize, roi1, roi2 ); + flags, alpha, newImgSize, roi1, roi2 ); Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat(); diff --git a/modules/calib3d/src/modelest.cpp b/modules/calib3d/src/modelest.cpp index 4aec4b6c5..8cd504979 100644 --- a/modules/calib3d/src/modelest.cpp +++ b/modules/calib3d/src/modelest.cpp @@ -331,6 +331,9 @@ bool CvModelEstimator2::getSubset( const CvMat* m1, const CvMat* m2, bool CvModelEstimator2::checkSubset( const CvMat* m, int count ) { + if( count <= 
2 ) + return true; + int j, k, i, i0, i1; CvPoint2D64f* ptr = (CvPoint2D64f*)m->data.ptr; @@ -363,7 +366,7 @@ bool CvModelEstimator2::checkSubset( const CvMat* m, int count ) break; } - return i >= i1; + return i > i1; } diff --git a/modules/calib3d/test/test_modelest.cpp b/modules/calib3d/test/test_modelest.cpp new file mode 100644 index 000000000..e27c12d49 --- /dev/null +++ b/modules/calib3d/test/test_modelest.cpp @@ -0,0 +1,226 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "test_precomp.hpp" +#include "_modelest.h" + +using namespace cv; + +class BareModelEstimator : public CvModelEstimator2 +{ +public: + BareModelEstimator(int modelPoints, CvSize modelSize, int maxBasicSolutions); + + virtual int runKernel( const CvMat*, const CvMat*, CvMat* ); + virtual void computeReprojError( const CvMat*, const CvMat*, + const CvMat*, CvMat* ); + + bool checkSubsetPublic( const CvMat* ms1, int count, bool checkPartialSubset ); +}; + +BareModelEstimator::BareModelEstimator(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions) + :CvModelEstimator2(_modelPoints, _modelSize, _maxBasicSolutions) +{ +} + +int BareModelEstimator::runKernel( const CvMat*, const CvMat*, CvMat* ) +{ + return 0; +} + +void BareModelEstimator::computeReprojError( const CvMat*, const CvMat*, + const CvMat*, CvMat* ) +{ +} + +bool BareModelEstimator::checkSubsetPublic( const CvMat* ms1, int count, bool checkPartialSubset ) +{ + checkPartialSubsets = checkPartialSubset; + return checkSubset(ms1, count); +} + +class CV_ModelEstimator2_Test : public cvtest::ArrayTest +{ +public: + CV_ModelEstimator2_Test(); + +protected: + void get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ); + void fill_array( int test_case_idx, int i, int j, Mat& arr ); + double get_success_error_level( int test_case_idx, int i, int j ); + void run_func(); + void prepare_to_validation( int test_case_idx ); + + 
bool checkPartialSubsets; + int usedPointsCount; + + bool checkSubsetResult; + int generalPositionsCount; + int maxPointsCount; +}; + +CV_ModelEstimator2_Test::CV_ModelEstimator2_Test() +{ + generalPositionsCount = get_test_case_count() / 2; + maxPointsCount = 100; + + test_array[INPUT].push_back(NULL); + test_array[OUTPUT].push_back(NULL); + test_array[REF_OUTPUT].push_back(NULL); +} + +void CV_ModelEstimator2_Test::get_test_array_types_and_sizes( int /*test_case_idx*/, + vector > &sizes, vector > &types ) +{ + RNG &rng = ts->get_rng(); + checkPartialSubsets = (cvtest::randInt(rng) % 2 == 0); + + int pointsCount = cvtest::randInt(rng) % maxPointsCount; + usedPointsCount = pointsCount == 0 ? 0 : cvtest::randInt(rng) % pointsCount; + + sizes[INPUT][0] = cvSize(1, pointsCount); + types[INPUT][0] = CV_64FC2; + + sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(1, 1); + types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_8UC1; +} + +void CV_ModelEstimator2_Test::fill_array( int test_case_idx, int i, int j, Mat& arr ) +{ + if( i != INPUT ) + { + cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr ); + return; + } + + if (test_case_idx < generalPositionsCount) + { + //generate points in a general position (i.e. no three points can lie on the same line.) 
+ + bool isGeneralPosition; + do + { + ArrayTest::fill_array(test_case_idx, i, j, arr); + + //a simple check that the position is general: + // for each line check that all other points don't belong to it + isGeneralPosition = true; + for (int startPointIndex = 0; startPointIndex < usedPointsCount && isGeneralPosition; startPointIndex++) + { + for (int endPointIndex = startPointIndex + 1; endPointIndex < usedPointsCount && isGeneralPosition; endPointIndex++) + { + + for (int testPointIndex = 0; testPointIndex < usedPointsCount && isGeneralPosition; testPointIndex++) + { + if (testPointIndex == startPointIndex || testPointIndex == endPointIndex) + { + continue; + } + + CV_Assert(arr.type() == CV_64FC2); + Point2d tangentVector_1 = arr.at(endPointIndex) - arr.at(startPointIndex); + Point2d tangentVector_2 = arr.at(testPointIndex) - arr.at(startPointIndex); + + const float eps = 1e-4f; + //TODO: perhaps it is better to normalize the cross product by norms of the tangent vectors + if (fabs(tangentVector_1.cross(tangentVector_2)) < eps) + { + isGeneralPosition = false; + } + } + } + } + } + while(!isGeneralPosition); + } + else + { + //create points in a degenerate position (there are at least 3 points belonging to the same line) + + ArrayTest::fill_array(test_case_idx, i, j, arr); + if (usedPointsCount <= 2) + { + return; + } + + RNG &rng = ts->get_rng(); + int startPointIndex, endPointIndex, modifiedPointIndex; + do + { + startPointIndex = cvtest::randInt(rng) % usedPointsCount; + endPointIndex = cvtest::randInt(rng) % usedPointsCount; + modifiedPointIndex = checkPartialSubsets ? 
usedPointsCount - 1 : cvtest::randInt(rng) % usedPointsCount; + } + while (startPointIndex == endPointIndex || startPointIndex == modifiedPointIndex || endPointIndex == modifiedPointIndex); + + double startWeight = cvtest::randReal(rng); + CV_Assert(arr.type() == CV_64FC2); + arr.at(modifiedPointIndex) = startWeight * arr.at(startPointIndex) + (1.0 - startWeight) * arr.at(endPointIndex); + } +} + + +double CV_ModelEstimator2_Test::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ ) +{ + return 0; +} + +void CV_ModelEstimator2_Test::prepare_to_validation( int test_case_idx ) +{ + test_mat[OUTPUT][0].at(0) = checkSubsetResult; + test_mat[REF_OUTPUT][0].at(0) = test_case_idx < generalPositionsCount || usedPointsCount <= 2; +} + +void CV_ModelEstimator2_Test::run_func() +{ + //make the input continuous + Mat input = test_mat[INPUT][0].clone(); + CvMat _input = input; + + RNG &rng = ts->get_rng(); + int modelPoints = cvtest::randInt(rng); + CvSize modelSize = cvSize(2, modelPoints); + int maxBasicSolutions = cvtest::randInt(rng); + BareModelEstimator modelEstimator(modelPoints, modelSize, maxBasicSolutions); + checkSubsetResult = modelEstimator.checkSubsetPublic(&_input, usedPointsCount, checkPartialSubsets); +} + +TEST(Calib3d_ModelEstimator2, accuracy) { CV_ModelEstimator2_Test test; test.safe_run(); } diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index ad9921c17..0d0f7c732 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -716,12 +716,12 @@ template struct CV_EXPORTS Matx_DetOp double operator ()(const Matx<_Tp, m, m>& a) const { Matx<_Tp, m, m> temp = a; - double p = LU(temp.val, m, m, 0, 0, 0); + double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0); if( p == 0 ) return p; for( int i = 0; i < m; i++ ) p *= temp(i, i); - return p; + return 1./p; } }; diff --git a/modules/core/include/opencv2/core/types_c.h 
b/modules/core/include/opencv2/core/types_c.h index cbc7872e6..33e7fe993 100644 --- a/modules/core/include/opencv2/core/types_c.h +++ b/modules/core/include/opencv2/core/types_c.h @@ -342,9 +342,8 @@ CV_INLINE int cvFloor( double value ) return i - (i > value); #else int i = cvRound(value); - Cv32suf diff; - diff.f = (float)(value - i); - return i - (diff.i < 0); + float diff = (float)(value - i); + return i - (diff < 0); #endif } @@ -360,9 +359,8 @@ CV_INLINE int cvCeil( double value ) return i + (i < value); #else int i = cvRound(value); - Cv32suf diff; - diff.f = (float)(i - value); - return i + (diff.i < 0); + float diff = (float)(i - value); + return i + (diff < 0); #endif } @@ -371,31 +369,19 @@ CV_INLINE int cvCeil( double value ) CV_INLINE int cvIsNaN( double value ) { -#if 1/*defined _MSC_VER || defined __BORLANDC__ - return _isnan(value); -#elif defined __GNUC__ - return isnan(value); -#else*/ Cv64suf ieee754; ieee754.f = value; return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + ((unsigned)ieee754.u != 0) > 0x7ff00000; -#endif } CV_INLINE int cvIsInf( double value ) { -#if 1/*defined _MSC_VER || defined __BORLANDC__ - return !_finite(value); -#elif defined __GNUC__ - return isinf(value); -#else*/ Cv64suf ieee754; ieee754.f = value; return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && (unsigned)ieee754.u == 0; -#endif } diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index ebee0def9..0feae2682 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -841,7 +841,8 @@ int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) con { return (depth() == _depth || _depth <= 0) && (isContinuous() || !_requireContinuous) && - ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) || (cols == _elemChannels && channels() == 1))) || + ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) || + (cols == _elemChannels && channels() == 1))) || (dims == 3 && 
channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) && (isContinuous() || step.p[1] == step.p[2]*size.p[2]))) ? (int)(total()*channels()/_elemChannels) : -1; diff --git a/modules/core/test/test_operations.cpp b/modules/core/test/test_operations.cpp index ad201ea0c..6b36883cf 100644 --- a/modules/core/test/test_operations.cpp +++ b/modules/core/test/test_operations.cpp @@ -998,6 +998,23 @@ bool CV_OperationsTest::operations1() add(Mat::zeros(6, 1, CV_64F), 1, c, noArray(), c.type()); CV_Assert( norm(Matx61f(1.f, 1.f, 1.f, 1.f, 1.f, 1.f), c, CV_C) == 0 ); + + vector pt2d(3); + vector pt3d(2); + + CV_Assert( Mat(pt2d).checkVector(2) == 3 && Mat(pt2d).checkVector(3) < 0 && + Mat(pt3d).checkVector(2) < 0 && Mat(pt3d).checkVector(3) == 2 ); + + Matx44f m44(0.8147f, 0.6324f, 0.9575f, 0.9572f, + 0.9058f, 0.0975f, 0.9649f, 0.4854f, + 0.1270f, 0.2785f, 0.1576f, 0.8003f, + 0.9134f, 0.5469f, 0.9706f, 0.1419f); + double d = determinant(m44); + CV_Assert( fabs(d - (-0.0262)) <= 0.001 ); + + Cv32suf z; + z.i = 0x80000000; + CV_Assert( cvFloor(z.f) == 0 && cvCeil(z.f) == 0 && cvRound(z.f) == 0 ); } catch(const test_excep&) { diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index f9c03fca5..6e1c53cf9 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -131,7 +131,7 @@ endif(HAVE_UNICAP) if(HAVE_LIBV4L) list(APPEND highgui_srcs src/cap_libv4l.cpp) -elseif(HAVE_CAMV4L OR HAVE_CAMV4L2) +elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO) list(APPEND highgui_srcs src/cap_v4l.cpp) endif() @@ -215,6 +215,24 @@ ocv_module_include_directories() ocv_create_module(${GRFMT_LIBS} ${HIGHGUI_LIBRARIES}) +if(APPLE) + ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS) + if(HAVE_OBJC_EXCEPTIONS) + foreach(source ${OPENCV_MODULE_${the_module}_SOURCES}) + if("${source}" MATCHES "\\.mm$") + get_source_file_property(flags "${source}" COMPILE_FLAGS) + if(flags) + set(flags "${flags} 
-fobjc-exceptions") + else() + set(flags "-fobjc-exceptions") + endif() + + set_source_files_properties("${source}" PROPERTIES COMPILE_FLAGS "${flags}") + endif() + endforeach() + endif() +endif() + if(BUILD_SHARED_LIBS) add_definitions(-DHIGHGUI_EXPORTS) endif() diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index 6150a8ef8..57a215660 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -169,7 +169,9 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) defined(HAVE_TYZX) || \ defined(HAVE_VFW) || \ defined(HAVE_LIBV4L) || \ - (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \ + defined(HAVE_CAMV4L) || \ + defined(HAVE_CAMV4L2) || \ + defined(HAVE_VIDEOIO) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_DC1394_2) || \ defined(HAVE_DC1394) || \ @@ -212,7 +214,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) return capture; #endif -#if defined HAVE_LIBV4L || (defined (HAVE_CAMV4L) && defined (HAVE_CAMV4L2)) +#if defined HAVE_LIBV4L || defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO capture = cvCreateCameraCapture_V4L (index); if (capture) return capture; diff --git a/modules/highgui/src/cap_gstreamer.cpp b/modules/highgui/src/cap_gstreamer.cpp index 863ddad09..cafc803db 100644 --- a/modules/highgui/src/cap_gstreamer.cpp +++ b/modules/highgui/src/cap_gstreamer.cpp @@ -461,12 +461,19 @@ protected: void CvVideoWriter_GStreamer::init() { - encs[CV_FOURCC('H','F','Y','U')]=(char*)"ffenc_huffyuv"; encs[CV_FOURCC('D','R','A','C')]=(char*)"diracenc"; - encs[CV_FOURCC('X','V','I','D')]=(char*)"xvidenc"; - encs[CV_FOURCC('X','2','6','4')]=(char*)"x264enc"; + encs[CV_FOURCC('H','F','Y','U')]=(char*)"ffenc_huffyuv"; + encs[CV_FOURCC('J','P','E','G')]=(char*)"jpegenc"; + encs[CV_FOURCC('M','J','P','G')]=(char*)"jpegenc"; encs[CV_FOURCC('M','P','1','V')]=(char*)"mpeg2enc"; - //encs[CV_FOURCC('M','P','2','V')]=(char*)"mpeg2enc"; + encs[CV_FOURCC('M','P','2','V')]=(char*)"mpeg2enc"; + 
encs[CV_FOURCC('T','H','E','O')]=(char*)"theoraenc"; + encs[CV_FOURCC('V','P','8','0')]=(char*)"vp8enc"; + encs[CV_FOURCC('H','2','6','4')]=(char*)"x264enc"; + encs[CV_FOURCC('X','2','6','4')]=(char*)"x264enc"; + encs[CV_FOURCC('X','V','I','D')]=(char*)"xvidenc"; + encs[CV_FOURCC('F','F','Y','U')]=(char*)"y4menc"; + //encs[CV_FOURCC('H','F','Y','U')]=(char*)"y4menc"; pipeline=0; buffer=0; } diff --git a/modules/highgui/src/cap_libv4l.cpp b/modules/highgui/src/cap_libv4l.cpp index 01b611c66..63a2ff96b 100644 --- a/modules/highgui/src/cap_libv4l.cpp +++ b/modules/highgui/src/cap_libv4l.cpp @@ -346,7 +346,11 @@ static int numCameras = 0; static int indexList = 0; // IOCTL handling for V4L2 +#ifdef HAVE_IOCTL_ULONG +static int xioctl( int fd, unsigned long request, void *arg) +#else static int xioctl( int fd, int request, void *arg) +#endif { int r; diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index 2ab4567d1..fdbd120fa 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -202,7 +202,7 @@ make & enjoy! #include "precomp.hpp" -#if !defined WIN32 && defined HAVE_CAMV4L && defined HAVE_CAMV4L2 +#if !defined WIN32 && (defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO) #define CLEAR(x) memset (&(x), 0, sizeof (x)) @@ -214,19 +214,26 @@ make & enjoy! #include #include +#ifdef HAVE_CAMV4L #include +#endif #include #include -#include /* for videodev2.h */ #include #include #include #ifdef HAVE_CAMV4L2 +#include /* for videodev2.h */ #include #endif +#ifdef HAVE_VIDEOIO +#include +#define HAVE_CAMV4L2 +#endif + /* Defaults - If your board can do better, set it here. Set for the most common type inputs. 
*/ #define DEFAULT_V4L_WIDTH 640 #define DEFAULT_V4L_HEIGHT 480 @@ -288,11 +295,13 @@ typedef struct CvCaptureCAM_V4L int deviceHandle; int bufferIndex; int FirstCapture; +#ifdef HAVE_CAMV4L struct video_capability capability; struct video_window captureWindow; struct video_picture imageProperties; struct video_mbuf memoryBuffer; struct video_mmap *mmaps; +#endif /* HAVE_CAMV4L */ char *memoryMap; IplImage frame; @@ -345,24 +354,6 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h); static int numCameras = 0; static int indexList = 0; -#ifdef HAVE_CAMV4L2 - -// IOCTL handling for V4L2 -static int xioctl( int fd, int request, void *arg) -{ - - int r; - - - do r = ioctl (fd, request, arg); - while (-1 == r && EINTR == errno); - - return r; - -} - -#endif /* HAVE_CAMV4L2 */ - /* Simple test program: Find number of Video Sources available. Start from 0 and go to MAX_CAMERAS while checking for the device with that name. If it fails on the first attempt of /dev/video0, then check if /dev/video is valid. 
@@ -393,6 +384,8 @@ static void icvInitCapture_V4L() { }; /* End icvInitCapture_V4L */ +#ifdef HAVE_CAMV4L + static int try_palette(int fd, struct video_picture *cam_pic, @@ -410,6 +403,8 @@ try_palette(int fd, return 0; } +#endif /* HAVE_CAMV4L */ + #ifdef HAVE_CAMV4L2 static int try_palette_v4l2(CvCaptureCAM_V4L* capture, unsigned long colorspace) @@ -422,7 +417,7 @@ static int try_palette_v4l2(CvCaptureCAM_V4L* capture, unsigned long colorspace) capture->form.fmt.pix.width = DEFAULT_V4L_WIDTH; capture->form.fmt.pix.height = DEFAULT_V4L_HEIGHT; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) return -1; @@ -434,6 +429,8 @@ static int try_palette_v4l2(CvCaptureCAM_V4L* capture, unsigned long colorspace) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) { @@ -449,7 +446,6 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) /* No matter what the name - it still must be opened! 
*/ capture->deviceHandle = open(deviceName, O_RDWR); - if (capture->deviceHandle == 0) { detect = -1; @@ -463,7 +459,6 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) if (ioctl(capture->deviceHandle, VIDIOCGCAP, &capture->capability) < 0) { detect = 0; - icvCloseCAM_V4L(capture); } else @@ -476,54 +471,64 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) } +#endif /* HAVE_CAMV4L */ + #ifdef HAVE_CAMV4L2 static int try_init_v4l2(CvCaptureCAM_V4L* capture, char *deviceName) { - - // if detect = -1 then unable to open device - // if detect = 0 then detected nothing - // if detect = 1 then V4L2 device - int detect = 0; - - // Test device for V4L2 compability + // Return value: + // -1 then unable to open device + // 0 then detected nothing + // 1 then V4L2 device + + int deviceIndex; /* Open and test V4L2 device */ capture->deviceHandle = open (deviceName, O_RDWR /* required */ | O_NONBLOCK, 0); - - - - if (capture->deviceHandle == 0) + if (-1 == capture->deviceHandle) { - detect = -1; - +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 open \"%s\": %s\n", deviceName, strerror(errno)); +#endif icvCloseCAM_V4L(capture); + return -1; } - if (detect == 0) + CLEAR (capture->cap); + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QUERYCAP, &capture->cap)) { - CLEAR (capture->cap); - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QUERYCAP, &capture->cap)) - { - detect = 0; - - icvCloseCAM_V4L(capture); - } - else - { - CLEAR (capture->capability); - capture->capability.type = capture->cap.capabilities; - - /* Query channels number */ - if (-1 != xioctl (capture->deviceHandle, VIDIOC_G_INPUT, &capture->capability.channels)) - { - detect = 1; - } - } +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_QUERYCAP \"%s\": %s\n", deviceName, strerror(errno)); +#endif + icvCloseCAM_V4L(capture); + return 0; } - return detect; + /* Query channels number */ + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_INPUT, 
&deviceIndex)) + { +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_G_INPUT \"%s\": %s\n", deviceName, strerror(errno)); +#endif + icvCloseCAM_V4L(capture); + return 0; + } + + /* Query information about current input */ + CLEAR (capture->inp); + capture->inp.index = deviceIndex; + if (-1 == ioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) + { +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_ENUMINPUT \"%s\": %s\n", deviceName, strerror(errno)); +#endif + icvCloseCAM_V4L(capture); + return 0; + } + + return 1; } @@ -546,17 +551,12 @@ static int autosetup_capture_mode_v4l2(CvCaptureCAM_V4L* capture) else #ifdef HAVE_JPEG -#ifdef __USE_GNU - /* support for MJPEG is only available with libjpeg and gcc, - because it's use libjepg and fmemopen() - */ if (try_palette_v4l2(capture, V4L2_PIX_FMT_MJPEG) == 0 || try_palette_v4l2(capture, V4L2_PIX_FMT_JPEG) == 0) { capture->palette = PALETTE_MJPEG; } else -#endif #endif if (try_palette_v4l2(capture, V4L2_PIX_FMT_YUYV) == 0) @@ -593,6 +593,8 @@ static int autosetup_capture_mode_v4l2(CvCaptureCAM_V4L* capture) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture) { @@ -626,6 +628,8 @@ static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture) } +#endif /* HAVE_CAMV4L */ + #ifdef HAVE_CAMV4L2 static void v4l2_scan_controls_enumerate_menu(CvCaptureCAM_V4L* capture) @@ -637,7 +641,7 @@ static void v4l2_scan_controls_enumerate_menu(CvCaptureCAM_V4L* capture) (int)capture->querymenu.index <= capture->queryctrl.maximum; capture->querymenu.index++) { - if (0 == xioctl (capture->deviceHandle, VIDIOC_QUERYMENU, + if (0 == ioctl (capture->deviceHandle, VIDIOC_QUERYMENU, &capture->querymenu)) { // printf (" %s\n", capture->querymenu.name); @@ -661,7 +665,7 @@ static void v4l2_scan_controls(CvCaptureCAM_V4L* capture) CLEAR (capture->queryctrl); capture->queryctrl.id = ctrl_id; - if (0 == xioctl (capture->deviceHandle, 
VIDIOC_QUERYCTRL, + if (0 == ioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, &capture->queryctrl)) { @@ -731,7 +735,7 @@ static void v4l2_scan_controls(CvCaptureCAM_V4L* capture) CLEAR (capture->queryctrl); capture->queryctrl.id = ctrl_id; - if (0 == xioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, + if (0 == ioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, &capture->queryctrl)) { @@ -854,7 +858,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->inp.index = CHANNEL_NUMBER; /* Set only channel number to CHANNEL_NUMBER */ /* V4L2 have a status field from selected video mode */ - if (-1 == xioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) { fprintf (stderr, "HIGHGUI ERROR: V4L2: Aren't able to set channel number\n"); icvCloseCAM_V4L (capture); @@ -866,7 +870,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) CLEAR (capture->form); capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { fprintf( stderr, "HIGHGUI ERROR: V4L2: Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); return -1; @@ -904,7 +908,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; capture->req.memory = V4L2_MEMORY_MMAP; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_REQBUFS, &capture->req)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_REQBUFS, &capture->req)) { if (EINVAL == errno) { @@ -944,7 +948,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) buf.memory = V4L2_MEMORY_MMAP; buf.index = n_buffers; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QUERYBUF, &buf)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QUERYBUF, &buf)) { perror ("VIDIOC_QUERYBUF"); /* free capture, 
and returns an error code */ @@ -976,8 +980,8 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) /* Set up Image data */ cvInitImageHeader( &capture->frame, - cvSize( capture->captureWindow.width, - capture->captureWindow.height ), + cvSize( capture->form.fmt.pix.width, + capture->form.fmt.pix.height ), IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 ); /* Allocate space for RGBA data */ capture->frame.imageData = (char *)cvAlloc(capture->frame.imageSize); @@ -987,6 +991,8 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { int detect_v4l = 0; @@ -1105,6 +1111,8 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) return 1; }; /* End _capture_V4L */ +#endif /* HAVE_CAMV4L */ + static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index) { static int autoindex; @@ -1154,10 +1162,12 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index) icvCloseCAM_V4L(capture); V4L2_SUPPORT = 0; #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L if (_capture_V4L (capture, deviceName) == -1) { icvCloseCAM_V4L(capture); return NULL; } +#endif /* HAVE_CAMV4L */ #ifdef HAVE_CAMV4L2 } else { V4L2_SUPPORT = 1; @@ -1177,7 +1187,7 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_DQBUF, &buf)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_DQBUF, &buf)) { switch (errno) { case EAGAIN: return 0; @@ -1185,7 +1195,7 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { case EIO: if (!(buf.flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))) { - if (xioctl(capture->deviceHandle, VIDIOC_QBUF, &buf) == -1) + if (ioctl(capture->deviceHandle, VIDIOC_QBUF, &buf) == -1) { return 0; } @@ -1208,7 +1218,7 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { //printf("got data in buff %d, 
len=%d, flags=0x%X, seq=%d, used=%d)\n", // buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused); - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) perror ("VIDIOC_QBUF"); return 1; @@ -1266,7 +1276,9 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { #ifdef HAVE_CAMV4L2 +#ifdef HAVE_CAMV4L if (V4L2_SUPPORT == 1) +#endif { for (capture->bufferIndex = 0; @@ -1282,7 +1294,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { buf.memory = V4L2_MEMORY_MMAP; buf.index = (unsigned long)capture->bufferIndex; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) { perror ("VIDIOC_QBUF"); return 0; } @@ -1290,14 +1302,18 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { /* enable the streaming */ capture->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_STREAMON, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_STREAMON, &capture->type)) { /* error enabling the stream */ perror ("VIDIOC_STREAMON"); return 0; } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { for (capture->bufferIndex = 0; @@ -1316,6 +1332,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { } } +#endif /* HAVE_CAMV4L */ #if defined(V4L_ABORT_BADJPEG) && defined(HAVE_CAMV4L2) if (V4L2_SUPPORT == 1) @@ -1337,8 +1354,12 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { mainloop_v4l2(capture); - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { capture->mmaps[capture->bufferIndex].frame = capture->bufferIndex; @@ -1358,6 +1379,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { } } +#endif /* HAVE_CAMV4L */ return(1); } @@ -1393,7 
+1415,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { static inline void move_420_block(int yTL, int yTR, int yBL, int yBR, int u, int v, - int rowPixels, unsigned char * rgb) + int rowPixels, unsigned char * rgb) { const int rvScale = 91881; const int guScale = -22553; @@ -1432,7 +1454,7 @@ move_420_block(int yTL, int yTR, int yBL, int yBR, int u, int v, static inline void move_411_block(int yTL, int yTR, int yBL, int yBR, int u, int v, - int /*rowPixels*/, unsigned char * rgb) + int /*rowPixels*/, unsigned char * rgb) { const int rvScale = 91881; const int guScale = -22553; @@ -1524,6 +1546,7 @@ yuv420p_to_rgb24(int width, int height, // /* Converts from interlaced YUV420 to RGB24. */ /* [FD] untested... */ +#ifdef HAVE_CAMV4L static void yuv420_to_rgb24(int width, int height, unsigned char *pIn0, unsigned char *pOut0) @@ -1568,6 +1591,7 @@ yuv420_to_rgb24(int width, int height, pOut += width * bytes; } } +#endif //HAVE_CAMV4L // Consider a YUV411P image of 8x2 pixels. 
// @@ -1619,6 +1643,8 @@ yuv411p_to_rgb24(int width, int height, /* based on ccvt_yuyv_bgr32() from camstream */ #define SAT(c) \ if (c & (~255)) { if (c < 0) c = 0; else c = 255; } + +#ifdef HAVE_CAMV4L2 static void yuyv_to_rgb24 (int width, int height, unsigned char *src, unsigned char *dst) { @@ -1710,6 +1736,7 @@ uyvy_to_rgb24 (int width, int height, unsigned char *src, unsigned char *dst) } } } +#endif //HAVE_CAMV4L2 #ifdef HAVE_JPEG @@ -1736,6 +1763,7 @@ mjpeg_to_rgb24 (int width, int height, * */ +#ifdef HAVE_CAMV4L2 static void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst) { long int i; @@ -1897,7 +1925,6 @@ static void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, uns } } - #define CLAMP(x) ((x)<0?0:((x)>255)?255:(x)) typedef struct { @@ -2068,13 +2095,14 @@ static int sonix_decompress(int width, int height, unsigned char *inp, unsigned return 0; } - +#endif //HAVE_CAMV4L2 static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { #ifdef HAVE_CAMV4L2 if (V4L2_SUPPORT == 0) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { /* [FD] this really belongs here */ @@ -2083,6 +2111,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { } } +#endif /* HAVE_CAMV4L */ /* Now get what has already been captured as a IplImage return */ @@ -2103,8 +2132,12 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { capture->frame.imageData = (char *)cvAlloc(capture->frame.imageSize); } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { if((capture->frame.width != capture->mmaps[capture->bufferIndex].width) @@ -2118,118 +2151,118 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { } } +#endif /* HAVE_CAMV4L */ #ifdef HAVE_CAMV4L2 if (V4L2_SUPPORT == 1) { switch (capture->palette) - { - case PALETTE_BGR24: - memcpy((char 
*)capture->frame.imageData, - (char *)capture->buffers[capture->bufferIndex].start, - capture->frame.imageSize); - break; + { + case PALETTE_BGR24: + memcpy((char *)capture->frame.imageData, + (char *)capture->buffers[capture->bufferIndex].start, + capture->frame.imageSize); + break; - case PALETTE_YVU420: - yuv420p_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; + case PALETTE_YVU420: + yuv420p_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; - case PALETTE_YUV411P: - yuv411p_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; + case PALETTE_YUV411P: + yuv411p_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; #ifdef HAVE_JPEG -#ifdef __USE_GNU - /* support for MJPEG is only available with libjpeg and gcc, - because it's use libjepg and fmemopen() - */ - case PALETTE_MJPEG: - if (!mjpeg_to_rgb24(capture->form.fmt.pix.width, + case PALETTE_MJPEG: + if (!mjpeg_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex] + .start), + capture->buffers[capture->bufferIndex].length, + (unsigned char*)capture->frame.imageData)) + return 0; + break; +#endif + + case PALETTE_YUYV: + yuyv_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; + case PALETTE_UYVY: + uyvy_to_rgb24(capture->form.fmt.pix.width, + 
capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; + case PALETTE_SBGGR8: + bayer2rgb24(capture->form.fmt.pix.width, capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex] - .start), - capture->buffers[capture->bufferIndex].length, - (unsigned char*)capture->frame.imageData)) - return 0; - break; -#endif -#endif + (unsigned char*)capture->buffers[capture->bufferIndex].start, + (unsigned char*)capture->frame.imageData); + break; - case PALETTE_YUYV: - yuyv_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; + case PALETTE_SN9C10X: + sonix_decompress_init(); + sonix_decompress(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[capture->bufferIndex].start, + (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start); - case PALETTE_UYVY: - uyvy_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; - case PALETTE_SBGGR8: - bayer2rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[capture->bufferIndex].start, - (unsigned char*)capture->frame.imageData); - break; + bayer2rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, + (unsigned char*)capture->frame.imageData); + break; - case PALETTE_SN9C10X: - sonix_decompress_init(); - sonix_decompress(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[capture->bufferIndex].start, - (unsigned char*)capture->buffers[(capture->bufferIndex+1) % 
capture->req.count].start); - - bayer2rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, - (unsigned char*)capture->frame.imageData); - break; - - case PALETTE_SGBRG: - sgbrg2rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, - (unsigned char*)capture->frame.imageData); - break; - } - } else + case PALETTE_SGBRG: + sgbrg2rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, + (unsigned char*)capture->frame.imageData); + break; + } + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { - switch(capture->imageProperties.palette) { - case VIDEO_PALETTE_RGB24: + switch(capture->imageProperties.palette) + { + case VIDEO_PALETTE_RGB24: memcpy((char *)capture->frame.imageData, (char *)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), capture->frame.imageSize); break; - case VIDEO_PALETTE_YUV420P: + case VIDEO_PALETTE_YUV420P: yuv420p_to_rgb24(capture->captureWindow.width, capture->captureWindow.height, (unsigned char*)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), (unsigned char*)capture->frame.imageData); break; - case VIDEO_PALETTE_YUV420: + case VIDEO_PALETTE_YUV420: yuv420_to_rgb24(capture->captureWindow.width, capture->captureWindow.height, (unsigned char*)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), (unsigned char*)capture->frame.imageData); break; - case VIDEO_PALETTE_YUV411P: + case VIDEO_PALETTE_YUV411P: yuv411p_to_rgb24(capture->captureWindow.width, capture->captureWindow.height, (unsigned char*)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), 
(unsigned char*)capture->frame.imageData); break; - default: + default: fprintf( stderr, "HIGHGUI ERROR: V4L: Cannot convert from palette %d to RGB\n", capture->imageProperties.palette); @@ -2238,6 +2271,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { } } +#endif /* HAVE_CAMV4L */ return(&capture->frame); } @@ -2247,7 +2281,9 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, #ifdef HAVE_CAMV4L2 +#ifdef HAVE_CAMV4L if (V4L2_SUPPORT == 1) +#endif { /* default value for min and max */ @@ -2256,7 +2292,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, CLEAR (capture->form); capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { /* display an error message, and return an error code */ perror ("VIDIOC_G_FMT"); return -1; @@ -2297,7 +2333,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, return -1; } - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_CTRL, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL, &capture->control)) { fprintf( stderr, "HIGHGUI ERROR: V4L2: "); @@ -2358,8 +2394,12 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, /* all was OK, so convert to 0.0 - 1.0 range, and return the value */ return ((float)capture->control.value - v4l2_min + 1) / (v4l2_max - v4l2_min); - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { int retval = -1; @@ -2417,6 +2457,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, return float (retval) / 0xFFFF; } +#endif /* HAVE_CAMV4L */ }; @@ -2430,7 +2471,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { CLEAR (capture->cropcap); capture->cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (xioctl (capture->deviceHandle, 
VIDIOC_CROPCAP, &capture->cropcap) < 0) { + if (ioctl (capture->deviceHandle, VIDIOC_CROPCAP, &capture->cropcap) < 0) { fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_CROPCAP\n"); } else { @@ -2439,7 +2480,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->crop.c= capture->cropcap.defrect; /* set the crop area, but don't exit if the device don't support croping */ - if (xioctl (capture->deviceHandle, VIDIOC_S_CROP, &capture->crop) < 0) { + if (ioctl (capture->deviceHandle, VIDIOC_S_CROP, &capture->crop) < 0) { fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_S_CROP\n"); } } @@ -2448,7 +2489,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* read the current setting, mainly to retreive the pixelformat information */ - xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form); + ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form); /* set the values we want to change */ capture->form.fmt.pix.width = w; @@ -2463,7 +2504,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { * don't test if the set of the size is ok, because some device * don't allow changing the size, and we will get the real size * later */ - xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form); + ioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form); /* try to set framerate to 30 fps */ struct v4l2_streamparm setfps; @@ -2471,14 +2512,14 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { setfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; setfps.parm.capture.timeperframe.numerator = 1; setfps.parm.capture.timeperframe.denominator = 30; - xioctl (capture->deviceHandle, VIDIOC_S_PARM, &setfps); + ioctl (capture->deviceHandle, VIDIOC_S_PARM, &setfps); /* we need to re-initialize some things, like buffers, because the size has * changed */ capture->FirstCapture = 1; /* Get window info again, to get the real value */ - if (-1 == 
xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n"); @@ -2489,8 +2530,12 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { return 0; - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { if (capture==0) return 0; @@ -2517,6 +2562,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->FirstCapture = 1; } +#endif /* HAVE_CAMV4L */ return 0; @@ -2573,7 +2619,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, } /* get the min and max values */ - if (-1 == xioctl (capture->deviceHandle, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL, &capture->control)) { // perror ("VIDIOC_G_CTRL for getting min/max values"); return -1; @@ -2643,13 +2689,17 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, capture->control.value = (int)(value * (v4l2_max - v4l2_min) + v4l2_min); /* The driver may clamp the value or return ERANGE, ignored here */ - if (-1 == xioctl (capture->deviceHandle, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_S_CTRL, &capture->control) && errno != ERANGE) { perror ("VIDIOC_S_CTRL"); return -1; } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { int v4l_value; @@ -2694,6 +2744,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, return -1; } } +#endif /* HAVE_CAMV4L */ /* all was OK */ return 0; @@ -2754,6 +2805,7 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){ #ifdef HAVE_CAMV4L2 if (V4L2_SUPPORT == 0) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { if (capture->mmaps) @@ -2762,10 +2814,14 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){ munmap(capture->memoryMap, 
capture->memoryBuffer.size); } +#endif /* HAVE_CAMV4L */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ #ifdef HAVE_CAMV4L2 - else { + { capture->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (ioctl(capture->deviceHandle, VIDIOC_STREAMOFF, &capture->type) < 0) { + if (-1 == ioctl(capture->deviceHandle, VIDIOC_STREAMOFF, &capture->type)) { perror ("Unable to stop the stream."); } diff --git a/modules/highgui/src/grfmt_tiff.cpp b/modules/highgui/src/grfmt_tiff.cpp index 7da9320fc..5179531f5 100644 --- a/modules/highgui/src/grfmt_tiff.cpp +++ b/modules/highgui/src/grfmt_tiff.cpp @@ -168,7 +168,6 @@ bool TiffDecoder::readData( Mat& img ) bool result = false; bool color = img.channels() > 1; uchar* data = img.data; - int step = (int)img.step; if( img.depth() != CV_8U && img.depth() != CV_16U && img.depth() != CV_32F && img.depth() != CV_64F ) return false; @@ -211,14 +210,14 @@ bool TiffDecoder::readData( Mat& img ) if( tile_height0 <= 0 ) tile_height0 = m_height; - AutoBuffer _buffer(tile_height0*tile_width0*8); + AutoBuffer _buffer( size_t(8) * tile_height0*tile_width0); uchar* buffer = _buffer; ushort* buffer16 = (ushort*)buffer; float* buffer32 = (float*)buffer; double* buffer64 = (double*)buffer; int tileidx = 0; - for( y = 0; y < m_height; y += tile_height0, data += step*tile_height0 ) + for( y = 0; y < m_height; y += tile_height0, data += img.step*tile_height0 ) { int tile_height = tile_height0; @@ -250,11 +249,11 @@ bool TiffDecoder::readData( Mat& img ) for( i = 0; i < tile_height; i++ ) if( color ) icvCvt_BGRA2BGR_8u_C4C3R( buffer + i*tile_width*4, 0, - data + x*3 + step*(tile_height - i - 1), 0, + data + x*3 + img.step*(tile_height - i - 1), 0, cvSize(tile_width,1), 2 ); else icvCvt_BGRA2Gray_8u_C4C1R( buffer + i*tile_width*4, 0, - data + x + step*(tile_height - i - 1), 0, + data + x + img.step*(tile_height - i - 1), 0, cvSize(tile_width,1), 2 ); break; } @@ -279,19 +278,19 @@ bool TiffDecoder::readData( Mat& 
img ) if( ncn == 1 ) { icvCvt_Gray2BGR_16u_C1C3R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x*3, 0, + (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1) ); } else if( ncn == 3 ) { icvCvt_RGB2BGR_16u_C3R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x*3, 0, + (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1) ); } else { icvCvt_BGRA2BGR_16u_C4C3R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x*3, 0, + (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1), 2 ); } } @@ -299,14 +298,14 @@ bool TiffDecoder::readData( Mat& img ) { if( ncn == 1 ) { - memcpy((ushort*)(data + step*i)+x, + memcpy((ushort*)(data + img.step*i)+x, buffer16 + i*tile_width*ncn, tile_width*sizeof(buffer16[0])); } else { icvCvt_BGRA2Gray_16u_CnC1R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x, 0, + (ushort*)(data + img.step*i) + x, 0, cvSize(tile_width,1), ncn, 2 ); } } @@ -332,13 +331,13 @@ bool TiffDecoder::readData( Mat& img ) { if(dst_bpp == 32) { - memcpy((float*)(data + step*i)+x, + memcpy((float*)(data + img.step*i)+x, buffer32 + i*tile_width*ncn, tile_width*sizeof(buffer32[0])); } else { - memcpy((double*)(data + step*i)+x, + memcpy((double*)(data + img.step*i)+x, buffer64 + i*tile_width*ncn, tile_width*sizeof(buffer64[0])); } @@ -402,7 +401,18 @@ void TiffEncoder::writeTag( WLByteStream& strm, TiffTag tag, } #ifdef HAVE_TIFF -bool TiffEncoder::writeLibTiff( const Mat& img, const vector& /*params*/) + +static void readParam(const vector& params, int key, int& value) +{ + for(size_t i = 0; i + 1 < params.size(); i += 2) + if(params[i] == key) + { + value = params[i+1]; + break; + } +} + +bool TiffEncoder::writeLibTiff( const Mat& img, const vector& params) { int channels = img.channels(); int width = img.cols, height = img.rows; @@ -429,7 +439,9 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector& /*params*/) const int bitsPerByte = 8; size_t fileStep = (width * channels * bitsPerChannel) / 
bitsPerByte; + int rowsPerStrip = (int)((1 << 13)/fileStep); + readParam(params, TIFFTAG_ROWSPERSTRIP, rowsPerStrip); if( rowsPerStrip < 1 ) rowsPerStrip = 1; @@ -450,6 +462,9 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector& /*params*/) int compression = COMPRESSION_LZW; int predictor = PREDICTOR_HORIZONTAL; + readParam(params, TIFFTAG_COMPRESSION, compression); + readParam(params, TIFFTAG_PREDICTOR, predictor); + int colorspace = channels > 1 ? PHOTOMETRIC_RGB : PHOTOMETRIC_MINISBLACK; if ( !TIFFSetField(pTiffHandle, TIFFTAG_IMAGEWIDTH, width) diff --git a/modules/highgui/test/test_grfmt.cpp b/modules/highgui/test/test_grfmt.cpp index 7226ebf7c..28d30c9b0 100644 --- a/modules/highgui/test/test_grfmt.cpp +++ b/modules/highgui/test/test_grfmt.cpp @@ -291,3 +291,22 @@ TEST(Highgui_Jpeg, encode_empty) ASSERT_THROW(cv::imencode(".jpg", img, jpegImg), cv::Exception); } #endif + + +#ifdef HAVE_TIFF +#include "tiff.h" +TEST(Highgui_Tiff, decode_tile16384x16384) +{ + // see issue #2161 + cv::Mat big(16384, 16384, CV_8UC1, cv::Scalar::all(0)); + string file = cv::tempfile(".tiff"); + std::vector params; + params.push_back(TIFFTAG_ROWSPERSTRIP); + params.push_back(big.rows); + cv::imwrite(file, big, params); + big.release(); + + EXPECT_NO_THROW(cv::imread(file)); + remove(file.c_str()); +} +#endif diff --git a/modules/nonfree/CMakeLists.txt b/modules/nonfree/CMakeLists.txt index 8c7bd0efd..eeaf53a6f 100644 --- a/modules/nonfree/CMakeLists.txt +++ b/modules/nonfree/CMakeLists.txt @@ -3,4 +3,4 @@ if(BUILD_ANDROID_PACKAGE) endif() set(the_description "Functionality with possible limitations on the use") -ocv_define_module(nonfree opencv_imgproc opencv_features2d) +ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d) diff --git a/modules/nonfree/src/sift.cpp b/modules/nonfree/src/sift.cpp index aca8020cc..58ebd3101 100644 --- a/modules/nonfree/src/sift.cpp +++ b/modules/nonfree/src/sift.cpp @@ -162,8 +162,24 @@ static const float 
SIFT_DESCR_MAG_THR = 0.2f; // factor used to convert floating-point descriptor to unsigned char static const float SIFT_INT_DESCR_FCTR = 512.f; +#if 0 +// intermediate type used for DoG pyramids +typedef short sift_wt; static const int SIFT_FIXPT_SCALE = 48; +#else +// intermediate type used for DoG pyramids +typedef float sift_wt; +static const int SIFT_FIXPT_SCALE = 1; +#endif +static inline void +unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale) +{ + octave = kpt.octave & 255; + layer = (kpt.octave >> 8) & 255; + octave = octave < 128 ? octave : (-128 | octave); + scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave); +} static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma ) { @@ -172,7 +188,7 @@ static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma cvtColor(img, gray, COLOR_BGR2GRAY); else img.copyTo(gray); - gray.convertTo(gray_fpt, CV_16S, SIFT_FIXPT_SCALE, 0); + gray.convertTo(gray_fpt, DataType::type, SIFT_FIXPT_SCALE, 0); float sig_diff; @@ -245,7 +261,7 @@ void SIFT::buildDoGPyramid( const vector& gpyr, vector& dogpyr ) const const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i]; const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1]; Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i]; - subtract(src2, src1, dst, noArray(), CV_16S); + subtract(src2, src1, dst, noArray(), DataType::type); } } } @@ -276,8 +292,8 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius, if( x <= 0 || x >= img.cols - 1 ) continue; - float dx = (float)(img.at(y, x+1) - img.at(y, x-1)); - float dy = (float)(img.at(y-1, x) - img.at(y+1, x)); + float dx = (float)(img.at(y, x+1) - img.at(y, x-1)); + float dy = (float)(img.at(y-1, x) - img.at(y+1, x)); X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale; k++; @@ -323,7 +339,7 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius, // // Interpolates a scale-space extremum's location and scale to subpixel -// accuracy to 
form an image feature. Rejects features with low contrast. +// accuracy to form an image feature. Rejects features with low contrast. // Based on Section 4 of Lowe's paper. static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int octv, int& layer, int& r, int& c, int nOctaveLayers, @@ -334,7 +350,7 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o const float second_deriv_scale = img_scale; const float cross_deriv_scale = img_scale*0.25f; - float xi=0, xr=0, xc=0, contr; + float xi=0, xr=0, xc=0, contr=0; int i = 0; for( ; i < SIFT_MAX_INTERP_STEPS; i++ ) @@ -344,20 +360,20 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o const Mat& prev = dog_pyr[idx-1]; const Mat& next = dog_pyr[idx+1]; - Vec3f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, - (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, - (next.at(r, c) - prev.at(r, c))*deriv_scale); + Vec3f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, + (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, + (next.at(r, c) - prev.at(r, c))*deriv_scale); - float v2 = (float)img.at(r, c)*2; - float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; - float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; - float dss = (next.at(r, c) + prev.at(r, c) - v2)*second_deriv_scale; - float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - - img.at(r-1, c+1) + img.at(r-1, c-1))*cross_deriv_scale; - float dxs = (next.at(r, c+1) - next.at(r, c-1) - - prev.at(r, c+1) + prev.at(r, c-1))*cross_deriv_scale; - float dys = (next.at(r+1, c) - next.at(r-1, c) - - prev.at(r+1, c) + prev.at(r-1, c))*cross_deriv_scale; + float v2 = (float)img.at(r, c)*2; + float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; + float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; + float dss = (next.at(r, c) + prev.at(r, c) - v2)*second_deriv_scale; + float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - + img.at(r-1, c+1) + 
img.at(r-1, c-1))*cross_deriv_scale; + float dxs = (next.at(r, c+1) - next.at(r, c-1) - + prev.at(r, c+1) + prev.at(r, c-1))*cross_deriv_scale; + float dys = (next.at(r+1, c) - next.at(r-1, c) - + prev.at(r+1, c) + prev.at(r-1, c))*cross_deriv_scale; Matx33f H(dxx, dxy, dxs, dxy, dyy, dys, @@ -369,20 +385,25 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o xr = -X[1]; xc = -X[0]; - if( std::abs( xi ) < 0.5f && std::abs( xr ) < 0.5f && std::abs( xc ) < 0.5f ) + if( std::abs(xi) < 0.5f && std::abs(xr) < 0.5f && std::abs(xc) < 0.5f ) break; - c += cvRound( xc ); - r += cvRound( xr ); - layer += cvRound( xi ); + if( std::abs(xi) > (float)(INT_MAX/3) || + std::abs(xr) > (float)(INT_MAX/3) || + std::abs(xc) > (float)(INT_MAX/3) ) + return false; + + c += cvRound(xc); + r += cvRound(xr); + layer += cvRound(xi); if( layer < 1 || layer > nOctaveLayers || - c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER || - r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER ) + c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER || + r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER ) return false; } - /* ensure convergence of interpolation */ + // ensure convergence of interpolation if( i >= SIFT_MAX_INTERP_STEPS ) return false; @@ -391,21 +412,21 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o const Mat& img = dog_pyr[idx]; const Mat& prev = dog_pyr[idx-1]; const Mat& next = dog_pyr[idx+1]; - Matx31f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, - (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, - (next.at(r, c) - prev.at(r, c))*deriv_scale); + Matx31f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, + (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, + (next.at(r, c) - prev.at(r, c))*deriv_scale); float t = dD.dot(Matx31f(xc, xr, xi)); - contr = img.at(r, c)*img_scale + t * 0.5f; + contr = img.at(r, c)*img_scale + t * 0.5f; if( std::abs( contr ) * nOctaveLayers < contrastThreshold ) return false; - /* 
principal curvatures are computed using the trace and det of Hessian */ - float v2 = img.at(r, c)*2.f; - float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; - float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; - float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - - img.at(r-1, c+1) + img.at(r-1, c-1)) * cross_deriv_scale; + // principal curvatures are computed using the trace and det of Hessian + float v2 = img.at(r, c)*2.f; + float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; + float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; + float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - + img.at(r-1, c+1) + img.at(r-1, c-1)) * cross_deriv_scale; float tr = dxx + dyy; float det = dxx * dyy - dxy * dxy; @@ -417,6 +438,7 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o kpt.pt.y = (r + xr) * (1 << octv); kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16); kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2; + kpt.response = std::abs(contr); return true; } @@ -448,13 +470,13 @@ void SIFT::findScaleSpaceExtrema( const vector& gauss_pyr, const vector(r); - const short* prevptr = prev.ptr(r); - const short* nextptr = next.ptr(r); + const sift_wt* currptr = img.ptr(r); + const sift_wt* prevptr = prev.ptr(r); + const sift_wt* nextptr = next.ptr(r); for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++) { - int val = currptr[c]; + sift_wt val = currptr[c]; // find local extrema with pixel accuracy if( std::abs(val) > threshold && @@ -541,11 +563,9 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc for( i = -radius, k = 0; i <= radius; i++ ) for( j = -radius; j <= radius; j++ ) { - /* - Calculate sample's histogram array coords rotated relative to ori. - Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e. - r_rot = 1.5) have full weight placed in row 1 after interpolation. 
- */ + // Calculate sample's histogram array coords rotated relative to ori. + // Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e. + // r_rot = 1.5) have full weight placed in row 1 after interpolation. float c_rot = j * cos_t - i * sin_t; float r_rot = j * sin_t + i * cos_t; float rbin = r_rot + d/2 - 0.5f; @@ -553,10 +573,10 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc int r = pt.y + i, c = pt.x + j; if( rbin > -1 && rbin < d && cbin > -1 && cbin < d && - r > 0 && r < rows - 1 && c > 0 && c < cols - 1 ) + r > 0 && r < rows - 1 && c > 0 && c < cols - 1 ) { - float dx = (float)(img.at(r, c+1) - img.at(r, c-1)); - float dy = (float)(img.at(r-1, c) - img.at(r+1, c)); + float dx = (float)(img.at(r, c+1) - img.at(r, c-1)); + float dy = (float)(img.at(r-1, c) - img.at(r+1, c)); X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin; W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale; k++; @@ -632,29 +652,46 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc nrm2 += val*val; } nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON); + +#if 1 for( k = 0; k < len; k++ ) { dst[k] = saturate_cast(dst[k]*nrm2); } +#else + float nrm1 = 0; + for( k = 0; k < len; k++ ) + { + dst[k] *= nrm2; + nrm1 += dst[k]; + } + nrm1 = 1.f/std::max(nrm1, FLT_EPSILON); + for( k = 0; k < len; k++ ) + { + dst[k] = std::sqrt(dst[k] * nrm1);//saturate_cast(std::sqrt(dst[k] * nrm1)*SIFT_INT_DESCR_FCTR); + } +#endif } static void calcDescriptors(const vector& gpyr, const vector& keypoints, - Mat& descriptors, int nOctaveLayers ) + Mat& descriptors, int nOctaveLayers, int firstOctave ) { int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS; for( size_t i = 0; i < keypoints.size(); i++ ) { KeyPoint kpt = keypoints[i]; - int octv=kpt.octave & 255, layer=(kpt.octave >> 8) & 255; - float scale = 1.f/(1 << octv); + int octave, layer; + float scale; + unpackOctave(kpt, octave, layer, scale); + CV_Assert(octave >= 
firstOctave && layer <= nOctaveLayers+2); float size=kpt.size*scale; Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale); - const Mat& img = gpyr[octv*(nOctaveLayers + 3) + layer]; + const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer]; float angle = 360.f - kpt.angle; if(std::abs(angle - 360.f) < FLT_EPSILON) - angle = 0.f; + angle = 0.f; calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr((int)i)); } } @@ -691,6 +728,7 @@ void SIFT::operator()(InputArray _image, InputArray _mask, OutputArray _descriptors, bool useProvidedKeypoints) const { + int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0; Mat image = _image.getMat(), mask = _mask.getMat(); if( image.empty() || image.depth() != CV_8U ) @@ -699,9 +737,28 @@ void SIFT::operator()(InputArray _image, InputArray _mask, if( !mask.empty() && mask.type() != CV_8UC1 ) CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" ); - Mat base = createInitialImage(image, false, (float)sigma); + if( useProvidedKeypoints ) + { + firstOctave = 0; + int maxOctave = INT_MIN; + for( size_t i = 0; i < keypoints.size(); i++ ) + { + int octave, layer; + float scale; + unpackOctave(keypoints[i], octave, layer, scale); + firstOctave = std::min(firstOctave, octave); + maxOctave = std::max(maxOctave, octave); + actualNLayers = std::max(actualNLayers, layer-2); + } + + firstOctave = std::min(firstOctave, 0); + CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers ); + actualNOctaves = maxOctave - firstOctave + 1; + } + + Mat base = createInitialImage(image, firstOctave < 0, (float)sigma); vector gpyr, dogpyr; - int nOctaves = cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2); + int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) 
- 2) - firstOctave; //double t, tf = getTickFrequency(); //t = (double)getTickCount(); @@ -724,6 +781,16 @@ void SIFT::operator()(InputArray _image, InputArray _mask, KeyPointsFilter::retainBest(keypoints, nfeatures); //t = (double)getTickCount() - t; //printf("keypoint detection time: %g\n", t*1000./tf); + + if( firstOctave < 0 ) + for( size_t i = 0; i < keypoints.size(); i++ ) + { + KeyPoint& kpt = keypoints[i]; + float scale = 1.f/(float)(1 << -firstOctave); + kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255); + kpt.pt *= scale; + kpt.size *= scale; + } } else { @@ -738,7 +805,7 @@ void SIFT::operator()(InputArray _image, InputArray _mask, _descriptors.create((int)keypoints.size(), dsize, CV_32F); Mat descriptors = _descriptors.getMat(); - calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers); + calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave); //t = (double)getTickCount() - t; //printf("descriptor extraction time: %g\n", t*1000./tf); } diff --git a/modules/nonfree/test/test_features2d.cpp b/modules/nonfree/test/test_features2d.cpp index eb8f44b8d..de993661e 100644 --- a/modules/nonfree/test/test_features2d.cpp +++ b/modules/nonfree/test/test_features2d.cpp @@ -40,6 +40,7 @@ //M*/ #include "test_precomp.hpp" +#include "opencv2/calib3d/calib3d.hpp" using namespace std; using namespace cv; @@ -1085,4 +1086,58 @@ TEST(Features2d_BruteForceDescriptorMatcher_knnMatch, regression) Ptr s = DescriptorExtractor::create("SURF"); ASSERT_STREQ(s->paramHelp("extended").c_str(), ""); } -*/ \ No newline at end of file +*/ + +class CV_DetectPlanarTest : public cvtest::BaseTest +{ +public: + CV_DetectPlanarTest(const string& _fname, int _min_ninliers) : fname(_fname), min_ninliers(_min_ninliers) {} + +protected: + void run(int) + { + Ptr f = Algorithm::create("Feature2D." 
+ fname); + if(f.empty()) + return; + string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/"; + string imgname1 = path + "box.png"; + string imgname2 = path + "box_in_scene.png"; + Mat img1 = imread(imgname1, 0); + Mat img2 = imread(imgname2, 0); + if( img1.empty() || img2.empty() ) + { + ts->printf( cvtest::TS::LOG, "missing %s and/or %s\n", imgname1.c_str(), imgname2.c_str()); + ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); + return; + } + vector kpt1, kpt2; + Mat d1, d2; + f->operator()(img1, Mat(), kpt1, d1); + f->operator()(img1, Mat(), kpt2, d2); + + vector matches; + BFMatcher(NORM_L2, true).match(d1, d2, matches); + + vector pt1, pt2; + for( size_t i = 0; i < matches.size(); i++ ) { + pt1.push_back(kpt1[matches[i].queryIdx].pt); + pt2.push_back(kpt2[matches[i].trainIdx].pt); + } + + Mat inliers, H = findHomography(pt1, pt2, RANSAC, 10, inliers); + int ninliers = countNonZero(inliers); + + if( ninliers < min_ninliers ) + { + ts->printf( cvtest::TS::LOG, "too little inliers (%d) vs expected %d\n", ninliers, min_ninliers); + ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); + return; + } + } + + string fname; + int min_ninliers; +}; + +TEST(Features2d_SIFTHomographyTest, regression) { CV_DetectPlanarTest test("SIFT", 80); test.safe_run(); } +//TEST(Features2d_SURFHomographyTest, regression) { CV_DetectPlanarTest test("SURF", 80); test.safe_run(); } diff --git a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp index 3479be72a..6262456d9 100644 --- a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp +++ b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp @@ -186,6 +186,20 @@ void matchKeyPoints(const vector& keypoints0, const Mat& H, } } +static void removeVerySmallKeypoints(vector& keypoints) +{ + size_t i, j = 0, n = keypoints.size(); + for( i = 0; i < n; i++ ) + { + if( (keypoints[i].octave & 128) != 0 ) 
+ ; + else + keypoints[j++] = keypoints[i]; + } + keypoints.resize(j); +} + + class DetectorRotationInvarianceTest : public cvtest::BaseTest { public: @@ -216,6 +230,7 @@ protected: vector keypoints0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); @@ -226,6 +241,7 @@ protected: vector keypoints1; featureDetector->detect(image1, keypoints1, mask1); + removeVerySmallKeypoints(keypoints1); vector matches; matchKeyPoints(keypoints0, H, keypoints1, matches); @@ -329,6 +345,7 @@ protected: vector keypoints0; Mat descriptors0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); descriptorExtractor->compute(image0, keypoints0, descriptors0); @@ -382,6 +399,7 @@ protected: float minDescInliersRatio; }; + class DetectorScaleInvarianceTest : public cvtest::BaseTest { public: @@ -412,6 +430,7 @@ protected: vector keypoints0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); @@ -423,6 +442,7 @@ protected: vector keypoints1, osiKeypoints1; // osi - original size image featureDetector->detect(image1, keypoints1); + removeVerySmallKeypoints(keypoints1); if(keypoints1.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); @@ -531,6 +551,7 @@ protected: vector keypoints0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); Mat descriptors0; @@ -603,8 +624,8 @@ TEST(Features2d_RotationInvariance_Detector_SURF, regression) TEST(Features2d_RotationInvariance_Detector_SIFT, regression) { DetectorRotationInvarianceTest 
test(Algorithm::create("Feature2D.SIFT"), - 0.75f, - 0.76f); + 0.45f, + 0.70f); test.safe_run(); } @@ -665,7 +686,7 @@ TEST(Features2d_ScaleInvariance_Descriptor_SIFT, regression) DescriptorScaleInvarianceTest test(Algorithm::create("Feature2D.SIFT"), Algorithm::create("Feature2D.SIFT"), NORM_L1, - 0.87f); + 0.78f); test.safe_run(); } diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index 4ae9e7ea4..7ce37752d 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -47,8 +47,12 @@ #include #if defined WIN32 || defined _WIN32 || defined WIN64 || defined _WIN64 #include -#define NOMINMAX + #include +#undef small +#undef min +#undef max +#undef abs #ifdef _MSC_VER #include diff --git a/samples/cpp/descriptor_extractor_matcher.cpp b/samples/cpp/descriptor_extractor_matcher.cpp index 9902e2f70..7aa529910 100644 --- a/samples/cpp/descriptor_extractor_matcher.cpp +++ b/samples/cpp/descriptor_extractor_matcher.cpp @@ -221,6 +221,8 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective, drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask, DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); #endif + + cout << "Number of inliers: " << countNonZero(matchesMask) << endl; } else drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );