Merge branch 'master' of https://github.com/Itseez/opencv
commit 7e2bb63378
@ -162,6 +162,7 @@ OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON
|
||||
OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF WIN32 )
|
||||
OCV_OPTION(WITH_INTELPERC "Include Intel Perceptual Computing support" OFF IF WIN32 )
|
||||
OCV_OPTION(WITH_IPP_A "Include Intel IPP_A support" OFF IF (MSVC OR X86 OR X86_64) )
|
||||
OCV_OPTION(WITH_GDAL "Include GDAL Support" OFF IF (NOT ANDROID AND NOT IOS) )
|
||||
|
||||
# OpenCV build components
|
||||
# ===================================================
|
||||
@ -358,7 +359,7 @@ set(OPENCV_EXTRA_MODULES_PATH "" CACHE PATH "Where to look for additional OpenCV
|
||||
find_host_package(Git QUIET)
|
||||
|
||||
if(GIT_FOUND)
|
||||
execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --always --dirty --match "2.[0-9].[0-9]*"
|
||||
execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --always --dirty --match "[0-9].[0-9].[0-9]*"
|
||||
WORKING_DIRECTORY "${OpenCV_SOURCE_DIR}"
|
||||
OUTPUT_VARIABLE OPENCV_VCSVERSION
|
||||
RESULT_VARIABLE GIT_RESULT
|
||||
@ -813,6 +814,12 @@ else()
|
||||
status(" OpenEXR:" "NO")
|
||||
endif()
|
||||
|
||||
if( WITH_GDAL )
|
||||
status(" GDAL:" GDAL_FOUND THEN "${GDAL_LIBRARY}")
|
||||
else()
|
||||
status(" GDAL:" "NO")
|
||||
endif()
|
||||
|
||||
# ========================== VIDEO IO ==========================
|
||||
status("")
|
||||
status(" Video I/O:")
|
||||
|
@ -187,11 +187,11 @@ void CvHOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, M
|
||||
|
||||
for( y = 0; y < gradSize.height; y++ )
|
||||
{
|
||||
const uchar* currPtr = img.data + img.step*ymap[y];
|
||||
const uchar* prevPtr = img.data + img.step*ymap[y-1];
|
||||
const uchar* nextPtr = img.data + img.step*ymap[y+1];
|
||||
float* gradPtr = (float*)grad.ptr(y);
|
||||
uchar* qanglePtr = (uchar*)qangle.ptr(y);
|
||||
const uchar* currPtr = img.ptr(ymap[y]);
|
||||
const uchar* prevPtr = img.ptr(ymap[y-1]);
|
||||
const uchar* nextPtr = img.ptr(ymap[y+1]);
|
||||
float* gradPtr = grad.ptr<float>(y);
|
||||
uchar* qanglePtr = qangle.ptr(y);
|
||||
|
||||
for( x = 0; x < width; x++ )
|
||||
{
|
||||
@ -226,9 +226,9 @@ void CvHOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, M
|
||||
int magStep = (int)( grad.step / sizeof(float) );
|
||||
for( binIdx = 0; binIdx < nbins; binIdx++ )
|
||||
{
|
||||
histBuf = (float*)histogram[binIdx].data;
|
||||
magBuf = (const float*)grad.data;
|
||||
binsBuf = (const uchar*)qangle.data;
|
||||
histBuf = histogram[binIdx].ptr<float>();
|
||||
magBuf = grad.ptr<float>();
|
||||
binsBuf = qangle.ptr();
|
||||
|
||||
memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );
|
||||
histBuf += histStep + 1;
|
||||
|
@ -13,9 +13,9 @@ float calcNormFactor( const Mat& sum, const Mat& sqSum )
|
||||
size_t p0, p1, p2, p3;
|
||||
CV_SUM_OFFSETS( p0, p1, p2, p3, normrect, sum.step1() )
|
||||
double area = normrect.width * normrect.height;
|
||||
const int *sp = (const int*)sum.data;
|
||||
const int *sp = sum.ptr<int>();
|
||||
int valSum = sp[p0] - sp[p1] - sp[p2] + sp[p3];
|
||||
const double *sqp = (const double *)sqSum.data;
|
||||
const double *sqp = sqSum.ptr<double>();
|
||||
double valSqSum = sqp[p0] - sqp[p1] - sqp[p2] + sqp[p3];
|
||||
return (float) sqrt( (double) (area * valSqSum - (double)valSum * valSum) );
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ bool CvCascadeImageReader::NegReader::get( Mat& _img )
|
||||
return false;
|
||||
|
||||
Mat mat( winSize.height, winSize.width, CV_8UC1,
|
||||
(void*)(img.data + point.y * img.step + point.x * img.elemSize()), img.step );
|
||||
(void*)(img.ptr(point.y) + point.x * img.elemSize()), img.step );
|
||||
mat.copyTo(_img);
|
||||
|
||||
if( (int)( point.x + (1.0F + stepFactor ) * winSize.width ) < img.cols )
|
||||
|
@ -188,6 +188,8 @@ if(CUDA_FOUND)
|
||||
# we remove -frtti because it's used for C++ compiler
|
||||
# but NVCC uses C compiler by default
|
||||
string(REPLACE "-frtti" "" ${var} "${${var}}")
|
||||
|
||||
string(REPLACE "-fvisibility-inlines-hidden" "" ${var} "${${var}}")
|
||||
endforeach()
|
||||
|
||||
if(BUILD_SHARED_LIBS)
|
||||
|
@ -198,3 +198,15 @@ if(WITH_OPENEXR)
|
||||
|
||||
set(HAVE_OPENEXR YES)
|
||||
endif()
|
||||
|
||||
# --- GDAL (optional) ---
|
||||
if(WITH_GDAL)
|
||||
find_package(GDAL)
|
||||
|
||||
if(NOT GDAL_FOUND)
|
||||
ocv_clear_vars(GDAL_LIBRARY GDAL_INCLUDE_DIR)
|
||||
set(HAVE_GDAL NO)
|
||||
else()
|
||||
set(HAVE_GDAL YES)
|
||||
endif()
|
||||
endif()
|
||||
|
@ -585,25 +585,16 @@ macro(ocv_glob_module_sources)
|
||||
ocv_source_group("Src" DIRBASE "${CMAKE_CURRENT_LIST_DIR}/src" FILES ${lib_srcs} ${lib_int_hdrs})
|
||||
ocv_source_group("Include" DIRBASE "${CMAKE_CURRENT_LIST_DIR}/include" FILES ${lib_hdrs} ${lib_hdrs_detail})
|
||||
|
||||
if (exclude_cuda EQUAL -1)
|
||||
set(lib_cuda_srcs "")
|
||||
set(lib_cuda_hdrs "")
|
||||
if(HAVE_CUDA AND exclude_cuda EQUAL -1)
|
||||
file(GLOB lib_cuda_srcs
|
||||
"${CMAKE_CURRENT_LIST_DIR}/src/cuda/*.cu"
|
||||
)
|
||||
set(cuda_objs "")
|
||||
set(lib_cuda_hdrs "")
|
||||
if(HAVE_CUDA)
|
||||
ocv_include_directories(${CUDA_INCLUDE_DIRS})
|
||||
file(GLOB lib_cuda_hdrs
|
||||
"${CMAKE_CURRENT_LIST_DIR}/src/cuda/*.hpp"
|
||||
)
|
||||
|
||||
ocv_cuda_compile(cuda_objs ${lib_cuda_srcs} ${lib_cuda_hdrs})
|
||||
source_group("Src\\Cuda" FILES ${lib_cuda_srcs} ${lib_cuda_hdrs})
|
||||
endif()
|
||||
else()
|
||||
set(cuda_objs "")
|
||||
set(lib_cuda_srcs "")
|
||||
set(lib_cuda_hdrs "")
|
||||
file(GLOB lib_cuda_hdrs
|
||||
"${CMAKE_CURRENT_LIST_DIR}/src/cuda/*.hpp"
|
||||
)
|
||||
source_group("Src\\Cuda" FILES ${lib_cuda_srcs} ${lib_cuda_hdrs})
|
||||
endif()
|
||||
|
||||
file(GLOB cl_kernels
|
||||
@ -622,7 +613,7 @@ macro(ocv_glob_module_sources)
|
||||
endif()
|
||||
|
||||
ocv_set_module_sources(${_argn} HEADERS ${lib_hdrs} ${lib_hdrs_detail}
|
||||
SOURCES ${lib_srcs} ${lib_int_hdrs} ${cuda_objs} ${lib_cuda_srcs} ${lib_cuda_hdrs})
|
||||
SOURCES ${lib_srcs} ${lib_int_hdrs} ${lib_cuda_srcs} ${lib_cuda_hdrs})
|
||||
endmacro()
|
||||
|
||||
# creates OpenCV module in current folder
|
||||
|
@ -94,7 +94,7 @@ function(ocv_target_include_directories target)
|
||||
list(APPEND __params "${dir}")
|
||||
endif()
|
||||
endforeach()
|
||||
if(CMAKE_VERSION VERSION_LESS 2.8.11)
|
||||
if(HAVE_CUDA OR CMAKE_VERSION VERSION_LESS 2.8.11)
|
||||
include_directories(${__params})
|
||||
else()
|
||||
if(TARGET ${target})
|
||||
@ -748,6 +748,22 @@ function(ocv_add_executable target)
|
||||
endfunction()
|
||||
|
||||
function(ocv_add_library target)
|
||||
add_library(${target} ${ARGN})
|
||||
set(cuda_objs "")
|
||||
if(HAVE_CUDA)
|
||||
set(cuda_srcs "")
|
||||
|
||||
foreach(var ${ARGN})
|
||||
if(var MATCHES ".cu")
|
||||
list(APPEND cuda_srcs ${var})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
if(cuda_srcs)
|
||||
ocv_include_directories(${CUDA_INCLUDE_DIRS})
|
||||
ocv_cuda_compile(cuda_objs ${lib_cuda_srcs} ${lib_cuda_hdrs})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
add_library(${target} ${ARGN} ${cuda_objs})
|
||||
_ocv_append_target_includes(${target})
|
||||
endfunction()
|
||||
endfunction()
|
||||
|
@ -76,6 +76,9 @@
|
||||
/* ffmpeg in Gentoo */
|
||||
#cmakedefine HAVE_GENTOO_FFMPEG
|
||||
|
||||
/* Geospatial Data Abstraction Library */
|
||||
#cmakedefine HAVE_GDAL
|
||||
|
||||
/* GStreamer multimedia framework */
|
||||
#cmakedefine HAVE_GSTREAMER
|
||||
|
||||
|
@ -0,0 +1,112 @@
.. _Bindings_Basics:


How OpenCV-Python Bindings Work?
************************************

Goal
=====

Learn:

* How OpenCV-Python bindings are generated?
* How to extend new OpenCV modules to Python?

How OpenCV-Python bindings are generated?
=========================================

In OpenCV, all algorithms are implemented in C++. But these algorithms can be used from different languages like Python, Java etc. This is made possible by the bindings generators. These generators create a bridge between C++ and Python which enables users to call C++ functions from Python. To get a complete picture of what is happening in the background, a good knowledge of the Python/C API is required. A simple example of extending C++ functions to Python can be found in the official Python documentation [1]. Extending all functions in OpenCV to Python by writing their wrapper functions manually would be a time-consuming task, so OpenCV does it in a more intelligent way: it generates these wrapper functions automatically from the C++ headers using some Python scripts which are located in ``modules/python/src2``. We will look into what they do.

First, ``modules/python/CMakeFiles.txt`` is a CMake script which checks the modules to be extended to Python. It will automatically check all the modules to be extended and grab their header files. These header files contain the list of all classes, functions, constants etc. for those particular modules.

Second, these header files are passed to a Python script, ``modules/python/src2/gen2.py``. This is the Python bindings generator script. It calls another Python script, ``modules/python/src2/hdr_parser.py``. This is the header parser script. The header parser splits the complete header file into small Python lists, so these lists contain all details about a particular function, class etc. For example, a function will be parsed to get a list containing the function name, return type, input arguments, argument types etc. The final list contains details of all the functions, structs, classes etc. in that header file.

But the header parser doesn't parse all the functions/classes in the header file. The developer has to specify which functions should be exported to Python. For that, there are certain macros added to the beginning of these declarations which enable the header parser to identify the functions to be parsed. These macros are added by the developer who programs the particular function. In short, the developer decides which functions should be extended to Python and which should not. Details of those macros are given in the next section.

So the header parser returns a final big list of parsed functions. Our generator script (gen2.py) will create wrapper functions for all the functions/classes/enums/structs parsed by the header parser (you can find these header files during compilation in the ``build/modules/python/`` folder as ``pyopencv_generated_*.h`` files). But there may be some basic OpenCV datatypes like Mat, Vec4i, Size. They need to be extended manually. For example, a Mat type should be extended to a Numpy array, Size should be extended to a tuple of two integers etc. Similarly, there may be some complex structs/classes/functions etc. which need to be extended manually. All such manual wrapper functions are placed in ``modules/python/src2/pycv2.hpp``.

So now the only thing left is the compilation of these wrapper files, which gives us the **cv2** module. So when you call a function, say ``res = equalizeHist(img1,img2)`` in Python, you pass two numpy arrays and you expect another numpy array as the output. These numpy arrays are converted to ``cv::Mat``, and then the ``equalizeHist()`` function in C++ is called. The final result ``res`` will be converted back into a Numpy array. So in short, almost all operations are done in C++, which gives us almost the same speed as that of C++.

So this is the basic version of how OpenCV-Python bindings are generated.


How to extend new modules to Python?
=====================================

The header parser parses the header files based on some wrapper macros added to the function declarations. Enumeration constants don't need any wrapper macros; they are automatically wrapped. But the remaining functions, classes etc. need wrapper macros.

Functions are extended using the ``CV_EXPORTS_W`` macro. An example is shown below.

.. code-block:: cpp

    CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );

The header parser can understand the input and output arguments from keywords like ``InputArray, OutputArray`` etc. But sometimes, we may need to hardcode inputs and outputs. For that, macros like ``CV_OUT, CV_IN_OUT`` etc. are used.

.. code-block:: cpp

    CV_EXPORTS_W void minEnclosingCircle( InputArray points,
                                          CV_OUT Point2f& center, CV_OUT float& radius );

For large classes too, ``CV_EXPORTS_W`` is used. To extend class methods, ``CV_WRAP`` is used. Similarly, ``CV_PROP`` is used for class fields.

.. code-block:: cpp

    class CV_EXPORTS_W CLAHE : public Algorithm
    {
    public:
        CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0;

        CV_WRAP virtual void setClipLimit(double clipLimit) = 0;
        CV_WRAP virtual double getClipLimit() const = 0;
    };

Overloaded functions can be extended using ``CV_EXPORTS_AS``. But we need to pass a new name so that each function will be called by that name in Python. Take the case of the integral function below. Three functions are available, so each one is named with a suffix in Python. Similarly, ``CV_WRAP_AS`` can be used to wrap overloaded methods.

.. code-block:: cpp

    //! computes the integral image
    CV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth = -1 );

    //! computes the integral image and integral for the squared image
    CV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum,
                                            OutputArray sqsum, int sdepth = -1, int sqdepth = -1 );

    //! computes the integral image, integral for the squared image and the tilted integral image
    CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,
                                            OutputArray sqsum, OutputArray tilted,
                                            int sdepth = -1, int sqdepth = -1 );

Small classes/structs are extended using ``CV_EXPORTS_W_SIMPLE``. These structs are passed by value to C++ functions. Examples are KeyPoint, Match etc. Their methods are extended by ``CV_WRAP`` and their fields are extended by ``CV_PROP_RW``.

.. code-block:: cpp

    class CV_EXPORTS_W_SIMPLE DMatch
    {
    public:
        CV_WRAP DMatch();
        CV_WRAP DMatch(int _queryIdx, int _trainIdx, float _distance);
        CV_WRAP DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance);

        CV_PROP_RW int queryIdx; // query descriptor index
        CV_PROP_RW int trainIdx; // train descriptor index
        CV_PROP_RW int imgIdx;   // train image index

        CV_PROP_RW float distance;
    };

Some other small classes/structs can be exported using ``CV_EXPORTS_W_MAP``, where they are exported to a Python native dictionary. Moments() is an example of it.

.. code-block:: cpp

    class CV_EXPORTS_W_MAP Moments
    {
    public:
        //! spatial moments
        CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
        //! central moments
        CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
        //! central normalized moments
        CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
    };

So these are the major extension macros available in OpenCV. Typically, a developer has to put the proper macros in their appropriate positions. The rest is done by the generator scripts. Sometimes, there may be exceptional cases where the generator scripts cannot create the wrappers. Such functions need to be handled manually. But most of the time, code written according to the OpenCV coding guidelines will be automatically wrapped by the generator scripts.
@ -0,0 +1,36 @@
.. _PY_Table-Of-Content-Bindings:


OpenCV-Python Bindings
--------------------------------

Here, you will learn how OpenCV-Python bindings are generated.


* :ref:`Bindings_Basics`

.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv

=========== ======================================================
|bind1|     Learn how OpenCV-Python bindings are generated.

=========== ======================================================

.. |bind1| image:: images/nlm_icon.jpg
   :height: 90pt
   :width: 90pt


.. raw:: latex

   \pagebreak

.. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it.
.. toctree::
   :hidden:

   ../py_bindings_basics/py_bindings_basics
@ -159,6 +159,20 @@ OpenCV-Python Tutorials
   :alt: OD Icon

* :ref:`PY_Table-Of-Content-Bindings`

   .. tabularcolumns:: m{100pt} m{300pt}
   .. cssclass:: toctableopencv

   =========== =====================================================================
   |PyBin|     In this section, we will see how OpenCV-Python bindings are generated

   =========== =====================================================================

   .. |PyBin| image:: images/obj_icon.jpg
      :height: 80pt
      :width: 80pt
      :alt: OD Icon

.. raw:: latex

@ -178,3 +192,4 @@ OpenCV-Python Tutorials

   py_ml/py_table_of_contents_ml/py_table_of_contents_ml
   py_photo/py_table_of_contents_photo/py_table_of_contents_photo
   py_objdetect/py_table_of_contents_objdetect/py_table_of_contents_objdetect
   py_bindings/py_table_of_contents_bindings/py_table_of_contents_bindings
BIN  doc/tutorials/highgui/raster-gdal/images/flood-zone.jpg (new binary file, 111 KiB; not shown)
BIN  doc/tutorials/highgui/raster-gdal/images/heat-map.jpg (new binary file, 53 KiB; not shown)
BIN  doc/tutorials/highgui/raster-gdal/images/output.jpg (new binary file, 120 KiB; not shown)
113  doc/tutorials/highgui/raster-gdal/raster_io_gdal.rst (new file)
@ -0,0 +1,113 @@
.. _Raster_IO_GDAL:


Reading Geospatial Raster files with GDAL
*****************************************

Geospatial raster data is a heavily used product in Geographic Information
Systems and Photogrammetry. Raster data typically can represent imagery
and Digital Elevation Models (DEM). The standard library for loading
GIS imagery is the Geospatial Data Abstraction Library (GDAL). In this example, we
will show techniques for loading GIS raster formats using native OpenCV functions.
In addition, we will show an example of how OpenCV can use this data for
novel and interesting purposes.

Goals
=====

The primary objectives for this tutorial:

.. container:: enumeratevisibleitemswithsquare

   + How to use OpenCV imread to load satellite imagery.
   + How to use OpenCV imread to load SRTM Digital Elevation Models
   + Given the corner coordinates of both the image and DEM, correlate the elevation data to the image to find elevations for each pixel.
   + Show a basic, easy-to-implement example of a terrain heat map.
   + Show a basic use of DEM data coupled with ortho-rectified imagery.

To implement these goals, the following code takes a Digital Elevation Model as well as a GeoTiff image of San Francisco as input.
The image and DEM data are processed to generate a terrain heat map of the image and to label areas of the city which would
be affected should the water level of the bay rise 10, 50, and 100 meters.

Code
====

.. literalinclude:: ../../../../samples/cpp/tutorial_code/HighGUI/GDAL_IO/gdal-image.cpp
   :language: cpp
   :linenos:
   :tab-width: 4


How to Read Raster Data using GDAL
======================================

This demonstration uses the default OpenCV :ocv:func:`imread` function. The primary difference is that in order to force GDAL to load the
image, you must use the appropriate flag.

.. code-block:: cpp

   cv::Mat image = cv::imread( argv[1], cv::IMREAD_LOAD_GDAL );

When loading digital elevation models, the actual numeric value of each pixel is essential
and cannot be scaled or truncated. For example, with image data a pixel represented as a double with a value of 1 has
an equal appearance to a pixel which is represented as an unsigned character with a value of 255.
With terrain data, the pixel value represents the elevation in meters. In order to ensure that OpenCV preserves the native value,
use the GDAL flag in imread with the ANYDEPTH flag.

.. code-block:: cpp

   cv::Mat dem = cv::imread( argv[2], cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH );


If you know beforehand the type of DEM model you are loading, then it may be a safe bet to test the ``Mat::type()`` or ``Mat::depth()``
using an assert or other mechanism. NASA or DOD specification documents can provide the input types for various
elevation models. The major types, SRTM and DTED, are both signed shorts.
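
For instance, a minimal sketch of such a sanity check for an SRTM tile (which GDAL reads as 16-bit signed integers) might look like the snippet below; the variable names and indices are illustrative only.

.. code-block:: cpp

   // Illustrative sketch: verify that the DEM was loaded with its native depth.
   // SRTM elevations are 16-bit signed integers, so a single-channel CV_16S Mat is expected.
   cv::Mat dem = cv::imread( argv[2], cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH );
   CV_Assert( !dem.empty() && dem.type() == CV_16SC1 );

   // Elevation, in meters, of the pixel at row 100, column 200.
   short elevation = dem.at<short>( 100, 200 );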

Notes
=====

Lat/Lon (Geodetic) Coordinates should normally be avoided
---------------------------------------------------------

The Geodetic Coordinate System is a spherical coordinate system, meaning that using it with Cartesian mathematics is technically incorrect. This
demo uses it to increase readability and is accurate enough to make the point. A better coordinate system would be Universal Transverse Mercator.

Finding the corner coordinates
------------------------------

One easy method to find the corner coordinates of an image is to use the command-line tool ``gdalinfo``. For imagery which is ortho-rectified and contains
the projection information, you can use the `USGS EarthExplorer <http://earthexplorer.usgs.gov>`_.

.. code-block:: bash

   $> gdalinfo N37W123.hgt

   Driver: SRTMHGT/SRTMHGT File Format
   Files: N37W123.hgt
   Size is 3601, 3601
   Coordinate System is:
   GEOGCS["WGS 84",
       DATUM["WGS_1984",

   ... more output ...

   Corner Coordinates:
   Upper Left  (-123.0001389,  38.0001389) (123d 0' 0.50"W, 38d 0' 0.50"N)
   Lower Left  (-123.0001389,  36.9998611) (123d 0' 0.50"W, 36d59'59.50"N)
   Upper Right (-121.9998611,  38.0001389) (121d59'59.50"W, 38d 0' 0.50"N)
   Lower Right (-121.9998611,  36.9998611) (121d59'59.50"W, 36d59'59.50"N)
   Center      (-122.5000000,  37.5000000) (122d30' 0.00"W, 37d30' 0.00"N)

   ... more output ...
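
Given these corner coordinates, a pixel can be mapped to an approximate latitude/longitude by linear interpolation. The helper below is only an illustrative sketch (the function name is hypothetical, the hard-coded corners come from the ``gdalinfo`` output above, and the linear lat/lon interpolation is the simplification discussed in the note on geodetic coordinates); the tutorial sample performs an equivalent conversion.

.. code-block:: cpp

   // Illustrative only: linearly interpolate lat/lon from pixel coordinates,
   // using the corner coordinates reported by gdalinfo for N37W123.hgt.
   cv::Point2d pixelToLatLon( const cv::Point& pix, const cv::Size& imgSize )
   {
       const double topLat  =   38.0001389, bottomLat =   36.9998611;
       const double leftLon = -123.0001389, rightLon  = -121.9998611;

       double lat = topLat  + ( bottomLat - topLat  ) * pix.y / (double)imgSize.height;
       double lon = leftLon + ( rightLon  - leftLon ) * pix.x / (double)imgSize.width;
       return cv::Point2d( lon, lat );   // x = longitude, y = latitude
   }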

Results
=======

Below is the output of the program. Use the first image as the input. For the DEM model, download the SRTM file located at the USGS here. `http://dds.cr.usgs.gov/srtm/version2_1/SRTM1/Region_04/N37W123.hgt.zip <http://dds.cr.usgs.gov/srtm/version2_1/SRTM1/Region_04/N37W123.hgt.zip>`_

.. image:: images/output.jpg

.. image:: images/heat-map.jpg

.. image:: images/flood-zone.jpg
@ -64,6 +64,26 @@ This section contains valuable tutorials about how to read/save your image/video
   :height: 90pt
   :width: 90pt

+
  .. tabularcolumns:: m{100pt} m{300pt}
  .. cssclass:: toctableopencv

  =============== ======================================================
  |hGDAL_IO|      *Title:* :ref:`Raster_IO_GDAL`

                  *Compatibility:* > OpenCV 2.0

                  *Author:* |Author_MarvinS|

                  Read common GIS Raster and DEM files to display and manipulate geographic data.

  =============== ======================================================

  .. |hGDAL_IO| image:: images/gdal-io.jpg
     :height: 90pt
     :width: 90pt


.. raw:: latex

@ -75,3 +95,4 @@ This section contains valuable tutorials about how to read/save your image/video

   ../trackbar/trackbar
   ../video-input-psnr-ssim/video-input-psnr-ssim
   ../video-write/video-write
   ../raster-gdal/raster_io_gdal
@ -398,7 +398,7 @@ Now modify src/main/java/HelloOpenCV.java so it contains the following Java code
|
||||
|
||||
// Draw a bounding box around each face.
|
||||
for (Rect rect : faceDetections.toArray()) {
|
||||
Core.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 255, 0));
|
||||
Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 255, 0));
|
||||
}
|
||||
|
||||
// Save the visualized detection.
|
||||
|
@ -1939,7 +1939,7 @@ void cv::drawChessboardCorners( InputOutputArray _image, Size patternSize,
|
||||
Mat image = _image.getMat(); CvMat c_image = _image.getMat();
|
||||
int nelems = corners.checkVector(2, CV_32F, true);
|
||||
CV_Assert(nelems >= 0);
|
||||
cvDrawChessboardCorners( &c_image, patternSize, (CvPoint2D32f*)corners.data,
|
||||
cvDrawChessboardCorners( &c_image, patternSize, corners.ptr<CvPoint2D32f>(),
|
||||
nelems, patternWasFound );
|
||||
}
|
||||
|
||||
|
@ -2998,15 +2998,15 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
|
||||
int ni1 = imgpt1.checkVector(2, CV_32F);
|
||||
CV_Assert( ni > 0 && ni == ni1 );
|
||||
npoints.at<int>(i) = ni;
|
||||
memcpy( objPtData + j, objpt.data, ni*sizeof(objPtData[0]) );
|
||||
memcpy( imgPtData1 + j, imgpt1.data, ni*sizeof(imgPtData1[0]) );
|
||||
memcpy( objPtData + j, objpt.ptr(), ni*sizeof(objPtData[0]) );
|
||||
memcpy( imgPtData1 + j, imgpt1.ptr(), ni*sizeof(imgPtData1[0]) );
|
||||
|
||||
if( imgPtData2 )
|
||||
{
|
||||
Mat imgpt2 = imagePoints2.getMat(i);
|
||||
int ni2 = imgpt2.checkVector(2, CV_32F);
|
||||
CV_Assert( ni == ni2 );
|
||||
memcpy( imgPtData2 + j, imgpt2.data, ni*sizeof(imgPtData2[0]) );
|
||||
memcpy( imgPtData2 + j, imgpt2.ptr(), ni*sizeof(imgPtData2[0]) );
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3245,13 +3245,13 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
|
||||
{
|
||||
_rvecs.create(3, 1, CV_64F, i, true);
|
||||
Mat rv = _rvecs.getMat(i);
|
||||
memcpy(rv.data, rvecM.ptr<double>(i), 3*sizeof(double));
|
||||
memcpy(rv.ptr(), rvecM.ptr<double>(i), 3*sizeof(double));
|
||||
}
|
||||
if( tvecs_needed )
|
||||
{
|
||||
_tvecs.create(3, 1, CV_64F, i, true);
|
||||
Mat tv = _tvecs.getMat(i);
|
||||
memcpy(tv.data, tvecM.ptr<double>(i), 3*sizeof(double));
|
||||
memcpy(tv.ptr(), tvecM.ptr<double>(i), 3*sizeof(double));
|
||||
}
|
||||
}
|
||||
cameraMatrix.copyTo(_cameraMatrix);
|
||||
@ -3472,7 +3472,7 @@ void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraM
|
||||
if( _eulerAngles.needed() )
|
||||
{
|
||||
_eulerAngles.create(3, 1, CV_64F, -1, true);
|
||||
p_eulerAngles = (CvPoint3D64f*)_eulerAngles.getMat().data;
|
||||
p_eulerAngles = _eulerAngles.getMat().ptr<CvPoint3D64f>();
|
||||
}
|
||||
|
||||
cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix,
|
||||
|
@ -61,7 +61,7 @@ public:
|
||||
Mat EE = Mat(Vt.t()).colRange(5, 9) * 1.0;
|
||||
Mat A(10, 20, CV_64F);
|
||||
EE = EE.t();
|
||||
getCoeffMat((double*)EE.data, (double*)A.data);
|
||||
getCoeffMat(EE.ptr<double>(), A.ptr<double>());
|
||||
EE = EE.t();
|
||||
|
||||
A = A.colRange(0, 10).inv() * A.colRange(10, 20);
|
||||
@ -137,7 +137,7 @@ public:
|
||||
cv::Mat Evec = EE.col(0) * xs.back() + EE.col(1) * ys.back() + EE.col(2) * zs.back() + EE.col(3);
|
||||
Evec /= norm(Evec);
|
||||
|
||||
memcpy(e + count * 9, Evec.data, 9 * sizeof(double));
|
||||
memcpy(e + count * 9, Evec.ptr(), 9 * sizeof(double));
|
||||
count++;
|
||||
}
|
||||
|
||||
|
@ -767,8 +767,8 @@ void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
|
||||
|
||||
if( depth == CV_32S || depth == CV_32F )
|
||||
{
|
||||
const Point* ptsi = (const Point*)points.data;
|
||||
const Point2f* ptsf = (const Point2f*)points.data;
|
||||
const Point* ptsi = points.ptr<Point>();
|
||||
const Point2f* ptsf = points.ptr<Point2f>();
|
||||
Point3f* dstf = lines.ptr<Point3f>();
|
||||
for( int i = 0; i < npoints; i++ )
|
||||
{
|
||||
@ -784,7 +784,7 @@ void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
|
||||
}
|
||||
else
|
||||
{
|
||||
const Point2d* ptsd = (const Point2d*)points.data;
|
||||
const Point2d* ptsd = points.ptr<Point2d>();
|
||||
Point3d* dstd = lines.ptr<Point3d>();
|
||||
for( int i = 0; i < npoints; i++ )
|
||||
{
|
||||
@ -829,8 +829,8 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
|
||||
{
|
||||
if( cn == 3 )
|
||||
{
|
||||
const Point3i* sptr = (const Point3i*)src.data;
|
||||
Point2f* dptr = (Point2f*)dst.data;
|
||||
const Point3i* sptr = src.ptr<Point3i>();
|
||||
Point2f* dptr = dst.ptr<Point2f>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
{
|
||||
float scale = sptr[i].z != 0 ? 1.f/sptr[i].z : 1.f;
|
||||
@ -839,8 +839,8 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
|
||||
}
|
||||
else
|
||||
{
|
||||
const Vec4i* sptr = (const Vec4i*)src.data;
|
||||
Point3f* dptr = (Point3f*)dst.data;
|
||||
const Vec4i* sptr = src.ptr<Vec4i>();
|
||||
Point3f* dptr = dst.ptr<Point3f>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
{
|
||||
float scale = sptr[i][3] != 0 ? 1.f/sptr[i][3] : 1.f;
|
||||
@ -852,8 +852,8 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
|
||||
{
|
||||
if( cn == 3 )
|
||||
{
|
||||
const Point3f* sptr = (const Point3f*)src.data;
|
||||
Point2f* dptr = (Point2f*)dst.data;
|
||||
const Point3f* sptr = src.ptr<Point3f>();
|
||||
Point2f* dptr = dst.ptr<Point2f>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
{
|
||||
float scale = sptr[i].z != 0.f ? 1.f/sptr[i].z : 1.f;
|
||||
@ -862,8 +862,8 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
|
||||
}
|
||||
else
|
||||
{
|
||||
const Vec4f* sptr = (const Vec4f*)src.data;
|
||||
Point3f* dptr = (Point3f*)dst.data;
|
||||
const Vec4f* sptr = src.ptr<Vec4f>();
|
||||
Point3f* dptr = dst.ptr<Point3f>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
{
|
||||
float scale = sptr[i][3] != 0.f ? 1.f/sptr[i][3] : 1.f;
|
||||
@ -875,8 +875,8 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
|
||||
{
|
||||
if( cn == 3 )
|
||||
{
|
||||
const Point3d* sptr = (const Point3d*)src.data;
|
||||
Point2d* dptr = (Point2d*)dst.data;
|
||||
const Point3d* sptr = src.ptr<Point3d>();
|
||||
Point2d* dptr = dst.ptr<Point2d>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
{
|
||||
double scale = sptr[i].z != 0. ? 1./sptr[i].z : 1.;
|
||||
@ -885,8 +885,8 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
|
||||
}
|
||||
else
|
||||
{
|
||||
const Vec4d* sptr = (const Vec4d*)src.data;
|
||||
Point3d* dptr = (Point3d*)dst.data;
|
||||
const Vec4d* sptr = src.ptr<Vec4d>();
|
||||
Point3d* dptr = dst.ptr<Point3d>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
{
|
||||
double scale = sptr[i][3] != 0.f ? 1./sptr[i][3] : 1.;
|
||||
@ -928,15 +928,15 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
|
||||
{
|
||||
if( cn == 2 )
|
||||
{
|
||||
const Point2i* sptr = (const Point2i*)src.data;
|
||||
Point3i* dptr = (Point3i*)dst.data;
|
||||
const Point2i* sptr = src.ptr<Point2i>();
|
||||
Point3i* dptr = dst.ptr<Point3i>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
dptr[i] = Point3i(sptr[i].x, sptr[i].y, 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
const Point3i* sptr = (const Point3i*)src.data;
|
||||
Vec4i* dptr = (Vec4i*)dst.data;
|
||||
const Point3i* sptr = src.ptr<Point3i>();
|
||||
Vec4i* dptr = dst.ptr<Vec4i>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
dptr[i] = Vec4i(sptr[i].x, sptr[i].y, sptr[i].z, 1);
|
||||
}
|
||||
@ -945,15 +945,15 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
|
||||
{
|
||||
if( cn == 2 )
|
||||
{
|
||||
const Point2f* sptr = (const Point2f*)src.data;
|
||||
Point3f* dptr = (Point3f*)dst.data;
|
||||
const Point2f* sptr = src.ptr<Point2f>();
|
||||
Point3f* dptr = dst.ptr<Point3f>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
dptr[i] = Point3f(sptr[i].x, sptr[i].y, 1.f);
|
||||
}
|
||||
else
|
||||
{
|
||||
const Point3f* sptr = (const Point3f*)src.data;
|
||||
Vec4f* dptr = (Vec4f*)dst.data;
|
||||
const Point3f* sptr = src.ptr<Point3f>();
|
||||
Vec4f* dptr = dst.ptr<Vec4f>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
dptr[i] = Vec4f(sptr[i].x, sptr[i].y, sptr[i].z, 1.f);
|
||||
}
|
||||
@ -962,15 +962,15 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
|
||||
{
|
||||
if( cn == 2 )
|
||||
{
|
||||
const Point2d* sptr = (const Point2d*)src.data;
|
||||
Point3d* dptr = (Point3d*)dst.data;
|
||||
const Point2d* sptr = src.ptr<Point2d>();
|
||||
Point3d* dptr = dst.ptr<Point3d>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
dptr[i] = Point3d(sptr[i].x, sptr[i].y, 1.);
|
||||
}
|
||||
else
|
||||
{
|
||||
const Point3d* sptr = (const Point3d*)src.data;
|
||||
Vec4d* dptr = (Vec4d*)dst.data;
|
||||
const Point3d* sptr = src.ptr<Point3d>();
|
||||
Vec4d* dptr = dst.ptr<Vec4d>();
|
||||
for( i = 0; i < npoints; i++ )
|
||||
dptr[i] = Vec4d(sptr[i].x, sptr[i].y, sptr[i].z, 1.);
|
||||
}
|
||||
|
@ -113,12 +113,12 @@ public:
|
||||
int d1 = m1.channels() > 1 ? m1.channels() : m1.cols;
|
||||
int d2 = m2.channels() > 1 ? m2.channels() : m2.cols;
|
||||
int count = m1.checkVector(d1), count2 = m2.checkVector(d2);
|
||||
const int *m1ptr = (const int*)m1.data, *m2ptr = (const int*)m2.data;
|
||||
const int *m1ptr = m1.ptr<int>(), *m2ptr = m2.ptr<int>();
|
||||
|
||||
ms1.create(modelPoints, 1, CV_MAKETYPE(m1.depth(), d1));
|
||||
ms2.create(modelPoints, 1, CV_MAKETYPE(m2.depth(), d2));
|
||||
|
||||
int *ms1ptr = (int*)ms1.data, *ms2ptr = (int*)ms2.data;
|
||||
int *ms1ptr = ms1.ptr<int>(), *ms2ptr = ms2.ptr<int>();
|
||||
|
||||
CV_Assert( count >= modelPoints && count == count2 );
|
||||
CV_Assert( (esz1 % sizeof(int)) == 0 && (esz2 % sizeof(int)) == 0 );
|
||||
@ -343,7 +343,7 @@ public:
|
||||
else
|
||||
errf = err;
|
||||
CV_Assert( errf.isContinuous() && errf.type() == CV_32F && (int)errf.total() == count );
|
||||
std::sort((int*)errf.data, (int*)errf.data + count);
|
||||
std::sort(errf.ptr<int>(), errf.ptr<int>() + count);
|
||||
|
||||
double median = count % 2 != 0 ?
|
||||
errf.at<float>(count/2) : (errf.at<float>(count/2-1) + errf.at<float>(count/2))*0.5;
|
||||
|
@ -114,7 +114,7 @@ static void prefilterNorm( const Mat& src, Mat& dst, int winsize, int ftzero, uc
|
||||
int scale_g = winsize*winsize/8, scale_s = (1024 + scale_g)/(scale_g*2);
|
||||
const int OFS = 256*5, TABSZ = OFS*2 + 256;
|
||||
uchar tab[TABSZ];
|
||||
const uchar* sptr = src.data;
|
||||
const uchar* sptr = src.ptr();
|
||||
int srcstep = (int)src.step;
|
||||
Size size = src.size();
|
||||
|
||||
@ -294,10 +294,10 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right,
|
||||
ushort *sad, *hsad0, *hsad, *hsad_sub;
|
||||
int *htext;
|
||||
uchar *cbuf0, *cbuf;
|
||||
const uchar* lptr0 = left.data + lofs;
|
||||
const uchar* rptr0 = right.data + rofs;
|
||||
const uchar* lptr0 = left.ptr() + lofs;
|
||||
const uchar* rptr0 = right.ptr() + rofs;
|
||||
const uchar *lptr, *lptr_sub, *rptr;
|
||||
short* dptr = (short*)disp.data;
|
||||
short* dptr = disp.ptr<short>();
|
||||
int sstep = (int)left.step;
|
||||
int dstep = (int)(disp.step/sizeof(dptr[0]));
|
||||
int cstep = (height + dy0 + dy1)*ndisp;
|
||||
@ -357,7 +357,7 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right,
|
||||
|
||||
for( x = 0; x < width1; x++, dptr++ )
|
||||
{
|
||||
short* costptr = cost.data ? (short*)cost.data + lofs + x : &costbuf;
|
||||
short* costptr = cost.data ? cost.ptr<short>() + lofs + x : &costbuf;
|
||||
int x0 = x - wsz2 - 1, x1 = x + wsz2;
|
||||
const uchar* cbuf_sub = cbuf0 + ((x0 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
|
||||
cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
|
||||
@ -542,10 +542,10 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
|
||||
|
||||
int *sad, *hsad0, *hsad, *hsad_sub, *htext;
|
||||
uchar *cbuf0, *cbuf;
|
||||
const uchar* lptr0 = left.data + lofs;
|
||||
const uchar* rptr0 = right.data + rofs;
|
||||
const uchar* lptr0 = left.ptr() + lofs;
|
||||
const uchar* rptr0 = right.ptr() + rofs;
|
||||
const uchar *lptr, *lptr_sub, *rptr;
|
||||
short* dptr = (short*)disp.data;
|
||||
short* dptr = disp.ptr<short>();
|
||||
int sstep = (int)left.step;
|
||||
int dstep = (int)(disp.step/sizeof(dptr[0]));
|
||||
int cstep = (height+dy0+dy1)*ndisp;
|
||||
@ -596,7 +596,7 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
|
||||
|
||||
for( x = 0; x < width1; x++, dptr++ )
|
||||
{
|
||||
int* costptr = cost.data ? (int*)cost.data + lofs + x : &costbuf;
|
||||
int* costptr = cost.data ? cost.ptr<int>() + lofs + x : &costbuf;
|
||||
int x0 = x - wsz2 - 1, x1 = x + wsz2;
|
||||
const uchar* cbuf_sub = cbuf0 + ((x0 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
|
||||
cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
|
||||
@ -803,7 +803,7 @@ struct FindStereoCorrespInvoker : public ParallelLoopBody
|
||||
int cols = left->cols, rows = left->rows;
|
||||
int _row0 = std::min(cvRound(range.start * rows / nstripes), rows);
|
||||
int _row1 = std::min(cvRound(range.end * rows / nstripes), rows);
|
||||
uchar *ptr = slidingSumBuf->data + range.start * stripeBufSize;
|
||||
uchar *ptr = slidingSumBuf->ptr() + range.start * stripeBufSize;
|
||||
int FILTERED = (state->minDisparity - 1)*16;
|
||||
|
||||
Rect roi = validDisparityRect & Rect(0, _row0, cols, _row1 - _row0);
|
||||
@ -988,7 +988,7 @@ public:
|
||||
if( slidingSumBuf.cols < bufSize )
|
||||
slidingSumBuf.create( 1, bufSize, CV_8U );
|
||||
|
||||
uchar *_buf = slidingSumBuf.data;
|
||||
uchar *_buf = slidingSumBuf.ptr();
|
||||
|
||||
parallel_for_(Range(0, 2), PrefilterInvoker(left0, right0, left, right, _buf, _buf + bufSize1, ¶ms), 1);
|
||||
|
||||
|
@ -383,12 +383,12 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
|
||||
width*16*img1.channels()*sizeof(PixType) + // temp buffer for computing per-pixel cost
|
||||
width*(sizeof(CostType) + sizeof(DispType)) + 1024; // disp2cost + disp2
|
||||
|
||||
if( !buffer.data || !buffer.isContinuous() ||
|
||||
if( buffer.empty() || !buffer.isContinuous() ||
|
||||
buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize )
|
||||
buffer.create(1, (int)totalBufSize, CV_8U);
|
||||
|
||||
// summary cost over different (nDirs) directions
|
||||
CostType* Cbuf = (CostType*)alignPtr(buffer.data, ALIGN);
|
||||
CostType* Cbuf = (CostType*)alignPtr(buffer.ptr(), ALIGN);
|
||||
CostType* Sbuf = Cbuf + CSBufSize;
|
||||
CostType* hsumBuf = Sbuf + CSBufSize;
|
||||
CostType* pixDiff = hsumBuf + costBufSize*hsumBufNRows;
|
||||
@ -982,10 +982,10 @@ void filterSpecklesImpl(cv::Mat& img, int newVal, int maxSpeckleSize, int maxDif
|
||||
|
||||
int width = img.cols, height = img.rows, npixels = width*height;
|
||||
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
|
||||
if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
|
||||
if( !_buf.isContinuous() || _buf.empty() || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
|
||||
_buf.create(1, (int)bufSize, CV_8U);
|
||||
|
||||
uchar* buf = _buf.data;
|
||||
uchar* buf = _buf.ptr();
|
||||
int i, j, dstep = (int)(img.step/sizeof(T));
|
||||
int* labels = (int*)buf;
|
||||
buf += npixels*sizeof(labels[0]);
|
||||
@ -1097,10 +1097,10 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
|
||||
if ((int)status >= 0)
|
||||
{
|
||||
if (type == CV_8UC1)
|
||||
status = ippiMarkSpeckles_8u_C1IR((Ipp8u *)img.data, (int)img.step, roisize,
|
||||
status = ippiMarkSpeckles_8u_C1IR(img.ptr<Ipp8u>(), (int)img.step, roisize,
|
||||
(Ipp8u)newVal, maxSpeckleSize, (Ipp8u)maxDiff, ippiNormL1, buffer);
|
||||
else
|
||||
status = ippiMarkSpeckles_16s_C1IR((Ipp16s *)img.data, (int)img.step, roisize,
|
||||
status = ippiMarkSpeckles_16s_C1IR(img.ptr<Ipp16s>(), (int)img.step, roisize,
|
||||
(Ipp16s)newVal, maxSpeckleSize, (Ipp16s)maxDiff, ippiNormL1, buffer);
|
||||
}
|
||||
|
||||
|
@ -773,10 +773,10 @@ void CV_CameraCalibrationTest_CPP::calibrate( int imageCount, int* pointCounts,
|
||||
flags );
|
||||
|
||||
assert( cameraMatrix.type() == CV_64FC1 );
|
||||
memcpy( _cameraMatrix, cameraMatrix.data, 9*sizeof(double) );
|
||||
memcpy( _cameraMatrix, cameraMatrix.ptr(), 9*sizeof(double) );
|
||||
|
||||
assert( cameraMatrix.type() == CV_64FC1 );
|
||||
memcpy( _distortionCoeffs, distCoeffs.data, 4*sizeof(double) );
|
||||
memcpy( _distortionCoeffs, distCoeffs.ptr(), 4*sizeof(double) );
|
||||
|
||||
vector<Mat>::iterator rvecsIt = rvecs.begin();
|
||||
vector<Mat>::iterator tvecsIt = tvecs.begin();
|
||||
@ -788,8 +788,8 @@ void CV_CameraCalibrationTest_CPP::calibrate( int imageCount, int* pointCounts,
|
||||
{
|
||||
Mat r9( 3, 3, CV_64FC1 );
|
||||
Rodrigues( *rvecsIt, r9 );
|
||||
memcpy( rm, r9.data, 9*sizeof(double) );
|
||||
memcpy( tm, tvecsIt->data, 3*sizeof(double) );
|
||||
memcpy( rm, r9.ptr(), 9*sizeof(double) );
|
||||
memcpy( tm, tvecsIt->ptr(), 3*sizeof(double) );
|
||||
}
|
||||
}
|
||||
|
||||
@ -1430,7 +1430,7 @@ void CV_StereoCalibrationTest::run( int )
|
||||
{
|
||||
Mat left = imread(imglist[i*2]);
|
||||
Mat right = imread(imglist[i*2+1]);
|
||||
if(!left.data || !right.data)
|
||||
if(left.empty() || right.empty())
|
||||
{
|
||||
ts->printf( cvtest::TS::LOG, "Can not load images %s and %s, testcase %d\n",
|
||||
imglist[i*2].c_str(), imglist[i*2+1].c_str(), testcase );
|
||||
@ -1722,7 +1722,7 @@ double CV_StereoCalibrationTest_C::calibrateStereoCamera( const vector<vector<Po
|
||||
for( int i = 0, ni = 0, j = 0; i < nimages; i++, j += ni )
|
||||
{
|
||||
ni = (int)objectPoints[i].size();
|
||||
((int*)npoints.data)[i] = ni;
|
||||
npoints.ptr<int>()[i] = ni;
|
||||
std::copy(objectPoints[i].begin(), objectPoints[i].end(), objPtData + j);
|
||||
std::copy(imagePoints1[i].begin(), imagePoints1[i].end(), imgPtData + j);
|
||||
std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
|
||||
|
@ -1021,12 +1021,12 @@ void CV_FundamentalMatTest::prepare_to_validation( int test_case_idx )
|
||||
cv::gemm( T, invA2, 1, Mat(), 0, F0 );
|
||||
F0 *= 1./f0[8];
|
||||
|
||||
uchar* status = test_mat[TEMP][1].data;
|
||||
uchar* status = test_mat[TEMP][1].ptr();
|
||||
double err_level = method <= CV_FM_8POINT ? 1 : get_success_error_level( test_case_idx, OUTPUT, 1 );
|
||||
uchar* mtfm1 = test_mat[REF_OUTPUT][1].data;
|
||||
uchar* mtfm2 = test_mat[OUTPUT][1].data;
|
||||
double* f_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
|
||||
double* f_prop2 = (double*)test_mat[OUTPUT][0].data;
|
||||
uchar* mtfm1 = test_mat[REF_OUTPUT][1].ptr();
|
||||
uchar* mtfm2 = test_mat[OUTPUT][1].ptr();
|
||||
double* f_prop1 = test_mat[REF_OUTPUT][0].ptr<double>();
|
||||
double* f_prop2 = test_mat[OUTPUT][0].ptr<double>();
|
||||
|
||||
int i, pt_count = test_mat[INPUT][2].cols;
|
||||
Mat p1( 1, pt_count, CV_64FC2 );
|
||||
@ -1357,12 +1357,12 @@ void CV_EssentialMatTest::prepare_to_validation( int test_case_idx )
|
||||
cv::gemm( T1, T2, 1, Mat(), 0, F0 );
|
||||
F0 *= 1./f0[8];
|
||||
|
||||
uchar* status = test_mat[TEMP][1].data;
|
||||
uchar* status = test_mat[TEMP][1].ptr();
|
||||
double err_level = get_success_error_level( test_case_idx, OUTPUT, 1 );
|
||||
uchar* mtfm1 = test_mat[REF_OUTPUT][1].data;
|
||||
uchar* mtfm2 = test_mat[OUTPUT][1].data;
|
||||
double* e_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
|
||||
double* e_prop2 = (double*)test_mat[OUTPUT][0].data;
|
||||
uchar* mtfm1 = test_mat[REF_OUTPUT][1].ptr();
|
||||
uchar* mtfm2 = test_mat[OUTPUT][1].ptr();
|
||||
double* e_prop1 = test_mat[REF_OUTPUT][0].ptr<double>();
|
||||
double* e_prop2 = test_mat[OUTPUT][0].ptr<double>();
|
||||
Mat E_prop2 = Mat(3, 1, CV_64F, e_prop2);
|
||||
|
||||
int i, pt_count = test_mat[INPUT][2].cols;
|
||||
@ -1407,8 +1407,8 @@ void CV_EssentialMatTest::prepare_to_validation( int test_case_idx )
|
||||
|
||||
|
||||
|
||||
double* pose_prop1 = (double*)test_mat[REF_OUTPUT][2].data;
|
||||
double* pose_prop2 = (double*)test_mat[OUTPUT][2].data;
|
||||
double* pose_prop1 = test_mat[REF_OUTPUT][2].ptr<double>();
|
||||
double* pose_prop2 = test_mat[OUTPUT][2].ptr<double>();
|
||||
double terr1 = cvtest::norm(Rt0.col(3) / norm(Rt0.col(3)) + test_mat[TEMP][3], NORM_L2);
|
||||
double terr2 = cvtest::norm(Rt0.col(3) / norm(Rt0.col(3)) - test_mat[TEMP][3], NORM_L2);
|
||||
Mat rvec;
|
||||
|
@ -142,7 +142,7 @@ protected:
|
||||
Mat_<double> res = Q * Mat_<double>(4, 1, from);
|
||||
res /= res(3, 0);
|
||||
|
||||
out3d_t pixel_exp = *(Vec3d*)res.data;
|
||||
out3d_t pixel_exp = *res.ptr<Vec3d>();
|
||||
out3d_t pixel_out = _3dImg(y, x);
|
||||
|
||||
const int largeZValue = 10000; /* see documentation */
|
||||
|
@ -10,10 +10,10 @@ core. The Core Functionality
    old_basic_structures
    dynamic_structures
    operations_on_arrays
    drawing_functions
    xml_yaml_persistence
    old_xml_yaml_persistence
    clustering
    utility_and_system_functions_and_macros
    opengl_interop
    ipp_async_converters
    optim
@ -3405,12 +3405,11 @@ and want to compute value of the "virtual" pixel ``Point(-5, 100)`` in a floatin
        borderInterpolate(-5, img.cols, BORDER_WRAP));


Normally, the function is not called directly. It is used inside :ocv:class:`FilterEngine`
and :ocv:func:`copyMakeBorder` to compute tables for quick extrapolation.
Normally, the function is not called directly. It is used inside filtering functions
and also in :ocv:func:`copyMakeBorder`.

.. seealso::

    :ocv:class:`FilterEngine`,
    :ocv:func:`copyMakeBorder`


@ -3443,7 +3442,7 @@ Forms a border around an image.

The function copies the source image into the middle of the destination image. The areas to the
left, to the right, above and below the copied source image will be filled with extrapolated pixels.
This is not what :ocv:class:`FilterEngine` or filtering functions based on it do (they extrapolate
This is not what filtering functions based on it do (they extrapolate
pixels on-fly), but what other more complex functions, including your own, may do to simplify image
boundary handling.
341  modules/core/doc/optim.rst (new file)
@ -0,0 +1,341 @@
Optimization Algorithms
=======================

.. highlight:: cpp

The algorithms in this section minimize or maximize a function value within specified constraints, or without any constraints.

solveLP
--------------------
Solve a given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method).
What we mean here by "linear programming problem" (or LP problem, for short) can be
formulated as:

.. math::
    \mbox{Maximize } c\cdot x\\
    \mbox{Subject to:}\\
    Ax\leq b\\
    x\geq 0

where :math:`c` is a fixed *1*-by-*n* row-vector, :math:`A` is a fixed *m*-by-*n* matrix, :math:`b` is a fixed *m*-by-*1* column vector and
:math:`x` is an arbitrary *n*-by-*1* column vector which satisfies the constraints.

The simplex algorithm is one of many algorithms that are designed to handle this sort of problem efficiently. Although it is not optimal in a theoretical
sense (there exist algorithms that can solve any problem written as above in polynomial time, while the simplex method degenerates to exponential time
for some special cases), it is well-studied, easy to implement and has been shown to work well for real-life purposes.

The particular implementation is taken almost verbatim from **Introduction to Algorithms, third edition**
by T. H. Cormen, C. E. Leiserson, R. L. Rivest and Clifford Stein. In particular, Bland's rule
(`http://en.wikipedia.org/wiki/Bland%27s\_rule <http://en.wikipedia.org/wiki/Bland%27s_rule>`_) is used to prevent cycling.

.. ocv:function:: int solveLP(const Mat& Func, const Mat& Constr, Mat& z)

    :param Func: This row-vector corresponds to :math:`c` in the LP problem formulation (see above). It should contain 32- or 64-bit floating point numbers. As a convenience, a column-vector may also be submitted; in the latter case it is understood to correspond to :math:`c^T`.

    :param Constr: *m*-by-*n\+1* matrix, whose rightmost column corresponds to :math:`b` in the formulation above and the remaining columns to :math:`A`. It should contain 32- or 64-bit floating point numbers.

    :param z: The solution will be returned here as a column-vector - it corresponds to :math:`x` in the formulation above. It will contain 64-bit floating point numbers.

    :return: One of the return codes:

    ::

        //! the return codes for the solveLP() function
        enum
        {
            SOLVELP_UNBOUNDED  = -2, //problem is unbounded (target function can achieve arbitrarily high values)
            SOLVELP_UNFEASIBLE = -1, //problem is unfeasible (there are no points that satisfy all the constraints imposed)
            SOLVELP_SINGLE     = 0,  //there is only one maximum for the target function
            SOLVELP_MULTI      = 1   //there are multiple maxima for the target function - an arbitrary one is returned
        };
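
As a usage illustration, the following sketch maximizes :math:`x+y` subject to :math:`x\leq 2`, :math:`y\leq 3` and :math:`x,y\geq 0`; the expected optimum is :math:`(2,3)`. It assumes only the prototype documented above (namespace qualification omitted, as in the prototype).

.. code-block:: cpp

    // Maximize x + y subject to x <= 2, y <= 3, x >= 0, y >= 0.
    // The optimum is unique (x = 2, y = 3), so solveLP() should return SOLVELP_SINGLE.
    double func[]   = { 1, 1 };          // c
    double constr[] = { 1, 0, 2,         // x      <= 2
                        0, 1, 3 };       //      y <= 3
    Mat Func(1, 2, CV_64FC1, func);
    Mat Constr(2, 3, CV_64FC1, constr);
    Mat z;
    int res = solveLP(Func, Constr, z);  // z becomes the column-vector (2, 3)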
|
||||
|
||||
DownhillSolver
|
||||
---------------------------------
|
||||
|
||||
.. ocv:class:: DownhillSolver
|
||||
|
||||
This class is used to perform the non-linear non-constrained *minimization* of a function, defined on an *n*-dimensional Euclidean space,
|
||||
using the **Nelder-Mead method**, also known as **downhill simplex method**. The basic idea about the method can be obtained from
|
||||
(`http://en.wikipedia.org/wiki/Nelder-Mead\_method <http://en.wikipedia.org/wiki/Nelder-Mead_method>`_). It should be noted, that
|
||||
this method, although deterministic, is rather a heuristic and therefore may converge to a local minima, not necessary a global one.
|
||||
It is iterative optimization technique, which at each step uses an information about the values of a function evaluated only at
|
||||
*n+1* points, arranged as a *simplex* in *n*-dimensional space (hence the second name of the method). At each step new point is
|
||||
chosen to evaluate function at, obtained value is compared with previous ones and based on this information simplex changes it's shape
|
||||
, slowly moving to the local minimum. Thus this method is using *only* function values to make decision, on contrary to, say, Nonlinear
|
||||
Conjugate Gradient method (which is also implemented in ``optim``).
|
||||
|
||||
Algorithm stops when the number of function evaluations done exceeds ``termcrit.maxCount``, when the function values at the
|
||||
vertices of simplex are within ``termcrit.epsilon`` range or simplex becomes so small that it
|
||||
can enclosed in a box with ``termcrit.epsilon`` sides, whatever comes first, for some defined by user
|
||||
positive integer ``termcrit.maxCount`` and positive non-integer ``termcrit.epsilon``.
|
||||
|
||||
::
|
||||
|
||||
class CV_EXPORTS Solver : public Algorithm
|
||||
{
|
||||
public:
|
||||
class CV_EXPORTS Function
|
||||
{
|
||||
public:
|
||||
virtual ~Function() {}
|
||||
virtual double calc(const double* x) const = 0;
|
||||
virtual void getGradient(const double* /*x*/,double* /*grad*/) {}
|
||||
};
|
||||
|
||||
virtual Ptr<Function> getFunction() const = 0;
|
||||
virtual void setFunction(const Ptr<Function>& f) = 0;
|
||||
|
||||
virtual TermCriteria getTermCriteria() const = 0;
|
||||
virtual void setTermCriteria(const TermCriteria& termcrit) = 0;
|
||||
|
||||
// x contain the initial point before the call and the minima position (if algorithm converged) after. x is assumed to be (something that
|
||||
// after getMat() will return) row-vector or column-vector. *It's size and should
|
||||
// be consisted with previous dimensionality data given, if any (otherwise, it determines dimensionality)*
|
||||
virtual double minimize(InputOutputArray x) = 0;
|
||||
};
|
||||
|
||||
class CV_EXPORTS DownhillSolver : public Solver
|
||||
{
|
||||
public:
|
||||
//! returns row-vector, even if the column-vector was given
|
||||
virtual void getInitStep(OutputArray step) const=0;
|
||||
//!This should be called at least once before the first call to minimize() and step is assumed to be (something that
|
||||
//! after getMat() will return) row-vector or column-vector. *It's dimensionality determines the dimensionality of a problem.*
|
||||
virtual void setInitStep(InputArray step)=0;
|
||||
};
|
||||
|
||||
It should be noted, that ``DownhillSolver`` is a derivative of the abstract interface ``Solver``, which in
|
||||
turn is derived from the ``Algorithm`` interface and is used to encapsulate the functionality, common to all non-linear optimization
|
||||
algorithms in the ``optim`` module.
|
||||
|
||||
DownhillSolver::getFunction
|
||||
--------------------------------------------
|
||||
|
||||
Getter for the optimized function. The optimized function is represented by ``Solver::Function`` interface, which requires
|
||||
derivatives to implement the sole method ``calc(double*)`` to evaluate the function.
|
||||
|
||||
.. ocv:function:: Ptr<Solver::Function> DownhillSolver::getFunction()
|
||||
|
||||
:return: Smart-pointer to an object that implements ``Solver::Function`` interface - it represents the function that is being optimized. It can be empty, if no function was given so far.
|
||||
|
||||
DownhillSolver::setFunction
|
||||
-----------------------------------------------
|
||||
|
||||
Setter for the optimized function. *It should be called at least once before the call to* ``DownhillSolver::minimize()``, as
|
||||
default value is not usable.
|
||||
|
||||
.. ocv:function:: void DownhillSolver::setFunction(const Ptr<Solver::Function>& f)
|
||||
|
||||
:param f: The new function to optimize.
|
||||
|
||||
DownhillSolver::getTermCriteria
|
||||
----------------------------------------------------
|
||||
|
||||
Getter for the previously set terminal criteria for this algorithm.
|
||||
|
||||
.. ocv:function:: TermCriteria DownhillSolver::getTermCriteria()
|
||||
|
||||
:return: Deep copy of the terminal criteria used at the moment.
|
||||
|
||||
DownhillSolver::setTermCriteria
------------------------------------------

Sets the termination criteria for the downhill simplex method. Two things should be noted. First, it is *not necessary* to call this method
before the first call to ``DownhillSolver::minimize()``, as the default value is sensible. Second, the method will raise an error
if ``termcrit.type!=(TermCriteria::MAX_ITER+TermCriteria::EPS)``, ``termcrit.epsilon<=0`` or ``termcrit.maxCount<=0``. That is,
both ``epsilon`` and ``maxCount`` should be set to positive values (floating-point and integer, respectively); they represent
the tolerance and the maximal allowed number of function evaluations.

The algorithm stops when the number of function evaluations exceeds ``termcrit.maxCount``, when the function values at the
vertices of the simplex lie within a ``termcrit.epsilon`` range, or when the simplex becomes so small that it
can be enclosed in a box with sides of length ``termcrit.epsilon``, whichever comes first.

.. ocv:function:: void DownhillSolver::setTermCriteria(const TermCriteria& termcrit)

:param termcrit: Termination criteria to be used, represented as a ``TermCriteria`` structure (defined elsewhere in OpenCV). Note that it should satisfy ``(termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0)``, otherwise an error will be raised.
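
For instance, to reproduce the default criteria used by ``createDownhillSolver()`` (at most 5000 function evaluations and a tolerance of 1e-6), one could write the following minimal sketch, where ``solver`` is assumed to be an already created ``DownhillSolver``:

::

    solver->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS,
                                             5000,    // maximal number of function evaluations
                                             1e-6));  // tolerance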

DownhillSolver::getInitStep
-----------------------------------

Returns the initial step that will be used in the downhill simplex algorithm. See the description
of the corresponding setter (which follows next) for the meaning of this parameter.

.. ocv:function:: void getInitStep(OutputArray step)

:param step: Initial step that will be used in the algorithm. Note that although the corresponding setter accepts column-vectors as well as row-vectors, this method will always return a row-vector.

DownhillSolver::setInitStep
----------------------------------

Sets the initial step that will be used in the downhill simplex algorithm. The step, together with the initial point (given in ``DownhillSolver::minimize``),
are two *n*-dimensional vectors that determine the shape of the initial simplex. Roughly speaking, the initial point determines the position
of the simplex (it will become the simplex's centroid), while the step determines its spread (size in each dimension). To be more precise,
if :math:`s,x_0\in\mathbb{R}^n` are the initial step and the initial point respectively, the vertices of the simplex will be
:math:`v_0:=x_0-\frac{1}{2}s` and :math:`v_i:=x_0+s_i` for :math:`i=1,2,\dots,n`, where :math:`s_i` denotes the projection of the initial step onto the *i*-th coordinate
(the result of the projection is the vector :math:`s_i:=e_i\left\langle e_i, s\right\rangle`, where the :math:`e_i` form the canonical basis).

.. ocv:function:: void setInitStep(InputArray step)

:param step: Initial step that will be used in the algorithm. Roughly speaking, it determines the spread (size in each dimension) of the initial simplex.
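
As a small worked illustration of the formula above: with initial point :math:`x_0=(0,0)` and step :math:`s=(1,2)`, the simplex vertices become :math:`v_0=(-0.5,-1)`, :math:`v_1=(1,0)` and :math:`v_2=(0,2)`. A minimal sketch of setting such a step (``solver`` is assumed to be an already created ``DownhillSolver``):

::

    cv::Mat step = (cv::Mat_<double>(1, 2) << 1.0, 2.0); // spread of the initial simplex in each dimension
    solver->setInitStep(step);                           // a row- or column-vector is accepted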

DownhillSolver::minimize
-----------------------------------

The main method of ``DownhillSolver``. It actually runs the algorithm and performs the minimization. The sole input parameter determines the
centroid of the starting simplex (roughly, it tells where to start); all the others (termination criteria, initial step, function to be minimized)
are supposed to be set via the setters before the call to this method, or the default values (not always sensible) will be used.

.. ocv:function:: double DownhillSolver::minimize(InputOutputArray x)

:param x: The initial point, which will become the centroid of the initial simplex. After the algorithm terminates, it is set to the point where the algorithm stopped, the point of a possible minimum.

:return: The value of the function at the point found.

createDownhillSolver
------------------------------------

This function returns a reference to a ready-to-use ``DownhillSolver`` object. All the parameters are optional, so this procedure can be called
even without parameters at all, in which case the default values will be used. Since the default values for the termination criteria are the only sensible ones,
``DownhillSolver::setFunction()`` and ``DownhillSolver::setInitStep()`` should be called on the obtained object, if the respective parameters
were not given to ``createDownhillSolver()``. Otherwise, the two ways (passing the parameters to ``createDownhillSolver()`` or omitting them and calling
``DownhillSolver::setFunction()`` and ``DownhillSolver::setInitStep()``) are absolutely equivalent (and will raise the same errors in the same way,
should invalid input be detected).

.. ocv:function:: Ptr<DownhillSolver> createDownhillSolver(const Ptr<Solver::Function>& f, InputArray initStep, TermCriteria termcrit)

:param f: Pointer to the function that will be minimized, similarly to the one you submit via ``DownhillSolver::setFunction``.
:param initStep: Initial step, which will be used to construct the initial simplex, similarly to the one you submit via ``DownhillSolver::setInitStep``.
:param termcrit: Termination criteria for the algorithm, similarly to the one you submit via ``DownhillSolver::setTermCriteria``.
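
A minimal usage sketch, written against the interface as documented above (namespace qualifiers are omitted because, depending on the OpenCV version, these classes live either in ``cv`` or in ``cv::optim``), which minimizes :math:`f(x,y)=(x-1)^2+(y+2)^2`:

::

    class SphereFunction : public Solver::Function
    {
    public:
        double calc(const double* x) const
        {
            // f(x, y) = (x - 1)^2 + (y + 2)^2, minimum at (1, -2)
            return (x[0] - 1.0) * (x[0] - 1.0) + (x[1] + 2.0) * (x[1] + 2.0);
        }
    };

    Mat step = (Mat_<double>(1, 2) << 0.5, 0.5); // spread of the initial simplex
    Mat x    = (Mat_<double>(1, 2) << 0.0, 0.0); // centroid of the initial simplex

    Ptr<DownhillSolver> solver = createDownhillSolver(makePtr<SphereFunction>(), step);
    double fmin = solver->minimize(x);           // x now holds the point found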

ConjGradSolver
---------------------------------

.. ocv:class:: ConjGradSolver

This class is used to perform the non-linear non-constrained *minimization* of a function with *known gradient*,
defined on an *n*-dimensional Euclidean space,
using the **Nonlinear Conjugate Gradient method**. The implementation is based on the beautifully clear explanatory article `An Introduction to the Conjugate Gradient Method Without the Agonizing Pain <http://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf>`_
by Jonathan Richard Shewchuk. The method can be seen as an adaptation of the standard Conjugate Gradient method (see, for example,
`http://en.wikipedia.org/wiki/Conjugate_gradient_method <http://en.wikipedia.org/wiki/Conjugate_gradient_method>`_) for numerically solving
systems of linear equations.

It should be noted that
this method, although deterministic, is rather a heuristic and therefore may converge to a local minimum, not necessarily the global one. What
is even more disastrous, most of its behaviour is governed by the gradient, so it essentially cannot distinguish between local minima and maxima:
if it starts sufficiently near a local maximum, it may converge to it. Another obvious restriction is that it should be possible
to compute the gradient of the function at any point, so it is preferable to have an analytic expression for the gradient; the computational burden
should be borne by the user.

The latter responsibility is accomplished via the ``getGradient(const double* x, double* grad)`` method of the
``Solver::Function`` interface (which represents the function being optimized). This method takes a point in *n*-dimensional space
(the first argument represents the array of coordinates of that point) and computes its gradient (which should be stored in the second argument as an array).

::

class CV_EXPORTS Solver : public Algorithm
{
public:
class CV_EXPORTS Function
{
public:
virtual ~Function() {}
virtual double calc(const double* x) const = 0;
virtual void getGradient(const double* /*x*/,double* /*grad*/) {}
};

virtual Ptr<Function> getFunction() const = 0;
virtual void setFunction(const Ptr<Function>& f) = 0;

virtual TermCriteria getTermCriteria() const = 0;
virtual void setTermCriteria(const TermCriteria& termcrit) = 0;

// x contains the initial point before the call and the position of the minimum (if the algorithm converged) after. x is assumed to be (something that
// after getMat() will return) row-vector or column-vector. *Its size should
// be consistent with previously given dimensionality data, if any (otherwise, it determines the dimensionality)*
virtual double minimize(InputOutputArray x) = 0;
};

class CV_EXPORTS ConjGradSolver : public Solver{
};

Note that the ``ConjGradSolver`` class thus does not add any new methods to the basic ``Solver`` interface.
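
For example, a minimal sketch of a function object that provides both the value and the analytic gradient of :math:`f(x,y)=x^2+y^2` (namespace qualifiers omitted, as above) might look as follows:

::

    class SphereWithGradient : public Solver::Function
    {
    public:
        double calc(const double* x) const
        {
            return x[0]*x[0] + x[1]*x[1];  // f(x, y) = x^2 + y^2
        }
        void getGradient(const double* x, double* grad)
        {
            grad[0] = 2.0*x[0];  // df/dx
            grad[1] = 2.0*x[1];  // df/dy
        }
    };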

ConjGradSolver::getFunction
--------------------------------------------

Getter for the optimized function. The optimized function is represented by the ``Solver::Function`` interface, which requires
derived classes to implement the method ``calc(const double*)`` to evaluate the function. It should be emphasized once more that, since the Nonlinear
Conjugate Gradient method requires the gradient to be computable in addition to the function values,
the ``getGradient(const double* x, double* grad)`` method of the ``Solver::Function`` interface should also be implemented meaningfully.

.. ocv:function:: Ptr<Solver::Function> ConjGradSolver::getFunction()

:return: Smart-pointer to an object that implements the ``Solver::Function`` interface - it represents the function that is being optimized. It can be empty, if no function was given so far.

ConjGradSolver::setFunction
-----------------------------------------------

Setter for the optimized function. *It should be called at least once before the call to* ``ConjGradSolver::minimize()``, as
the default value is not usable.

.. ocv:function:: void ConjGradSolver::setFunction(const Ptr<Solver::Function>& f)

:param f: The new function to optimize.

ConjGradSolver::getTermCriteria
----------------------------------------------------

Getter for the previously set termination criteria for this algorithm.

.. ocv:function:: TermCriteria ConjGradSolver::getTermCriteria()

:return: Deep copy of the termination criteria used at the moment.

ConjGradSolver::setTermCriteria
------------------------------------------

Sets the termination criteria for the nonlinear conjugate gradient method. Two things should be noted. First, it is *not necessary* to call this method
before the first call to ``ConjGradSolver::minimize()``, as the default value is sensible. Second, the method will raise an error
if ``termcrit.type!=(TermCriteria::MAX_ITER+TermCriteria::EPS)`` and ``termcrit.type!=TermCriteria::MAX_ITER``. This means that the termination criteria
have to restrict the maximum number of iterations and may optionally allow the algorithm to stop earlier, if a certain tolerance
is achieved (what we mean by "tolerance is achieved" is clarified below). If ``termcrit`` restricts both the tolerance and the maximum iteration
number, both ``termcrit.epsilon`` and ``termcrit.maxCount`` should be positive. If ``termcrit.type==TermCriteria::MAX_ITER``,
only ``termcrit.maxCount`` is required to be positive, and in this case the algorithm will just run for the required number of iterations.

In the current implementation, "tolerance is achieved" means that we have arrived at a point where the :math:`L_2`-norm of the gradient is less
than the tolerance value.

.. ocv:function:: void ConjGradSolver::setTermCriteria(const TermCriteria& termcrit)

:param termcrit: Termination criteria to be used, represented as a ``TermCriteria`` structure (defined elsewhere in OpenCV). Note that it should satisfy either ``(termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0)`` or ``(termcrit.type==TermCriteria::MAX_ITER && termcrit.maxCount>0)``, otherwise an error will be raised.
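
For example, to run the algorithm for a fixed number of iterations without a tolerance check, a ``MAX_ITER``-only criteria can be used (``solver`` is assumed to be an already created ``ConjGradSolver``):

::

    solver->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 1000, 0));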

ConjGradSolver::minimize
-----------------------------------

The main method of ``ConjGradSolver``. It actually runs the algorithm and performs the minimization. The sole input parameter is the
starting point (roughly, it tells where to start); all the others (termination criteria and function to be minimized)
are supposed to be set via the setters before the call to this method, or the default values (not always sensible) will be used. Sometimes it may
throw an error, if these default values cannot be used (say, you forgot to set the function to minimize, and the default value, that is, an empty function,
cannot be used).

.. ocv:function:: double ConjGradSolver::minimize(InputOutputArray x)

:param x: The initial point. It is hard to overemphasize how important the choice of the initial point is when you are using a heuristic algorithm like this one. A badly chosen initial point can make the algorithm converge to a (local) maximum instead of a minimum, fail to converge at all, or converge to a local minimum instead of the global one.

:return: The value of the function at the point found.

createConjGradSolver
------------------------------------

This function returns a reference to a ready-to-use ``ConjGradSolver`` object. All the parameters are optional, so this procedure can be called
even without parameters at all, in which case the default values will be used. Since the default values for the termination criteria are the only sensible ones,
``ConjGradSolver::setFunction()`` should be called on the obtained object, if the function
was not given to ``createConjGradSolver()``. Otherwise, the two ways (submit it to ``createConjGradSolver()`` or omit it and call
``ConjGradSolver::setFunction()``) are absolutely equivalent (and will raise the same errors in the same way,
should invalid input be detected).

.. ocv:function:: Ptr<ConjGradSolver> createConjGradSolver(const Ptr<Solver::Function>& f, TermCriteria termcrit)

:param f: Pointer to the function that will be minimized, similarly to the one you submit via ``ConjGradSolver::setFunction``.
:param termcrit: Termination criteria for the algorithm, similarly to the one you submit via ``ConjGradSolver::setTermCriteria``.
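
Putting it together, a minimal sketch (namespace qualifiers omitted; ``SphereWithGradient`` is the hypothetical function object defined in the sketch after the ``ConjGradSolver`` class description) might look like this:

::

    Ptr<ConjGradSolver> solver = createConjGradSolver(makePtr<SphereWithGradient>());
    Mat x = (Mat_<double>(1, 2) << 3.0, -7.0);  // starting point
    double fmin = solver->minimize(x);          // x holds the point found, fmin the value there
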
@ -506,96 +506,6 @@ CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev
|
||||
//! shuffles the input array elements
|
||||
CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0);
|
||||
|
||||
//! draws the line segment (pt1, pt2) in the image
|
||||
CV_EXPORTS_W void line(InputOutputArray img, Point pt1, Point pt2, const Scalar& color,
|
||||
int thickness = 1, int lineType = LINE_8, int shift = 0);
|
||||
|
||||
//! draws an arrow from pt1 to pt2 in the image
|
||||
CV_EXPORTS_W void arrowedLine(InputOutputArray img, Point pt1, Point pt2, const Scalar& color,
|
||||
int thickness=1, int line_type=8, int shift=0, double tipLength=0.1);
|
||||
|
||||
//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
|
||||
CV_EXPORTS_W void rectangle(InputOutputArray img, Point pt1, Point pt2,
|
||||
const Scalar& color, int thickness = 1,
|
||||
int lineType = LINE_8, int shift = 0);
|
||||
|
||||
//! draws the rectangle outline or a solid rectangle covering rec in the image
|
||||
CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
|
||||
const Scalar& color, int thickness = 1,
|
||||
int lineType = LINE_8, int shift = 0);
|
||||
|
||||
//! draws the circle outline or a solid circle in the image
|
||||
CV_EXPORTS_W void circle(InputOutputArray img, Point center, int radius,
|
||||
const Scalar& color, int thickness = 1,
|
||||
int lineType = LINE_8, int shift = 0);
|
||||
|
||||
//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image
|
||||
CV_EXPORTS_W void ellipse(InputOutputArray img, Point center, Size axes,
|
||||
double angle, double startAngle, double endAngle,
|
||||
const Scalar& color, int thickness = 1,
|
||||
int lineType = LINE_8, int shift = 0);
|
||||
|
||||
//! draws a rotated ellipse in the image
|
||||
CV_EXPORTS_W void ellipse(InputOutputArray img, const RotatedRect& box, const Scalar& color,
|
||||
int thickness = 1, int lineType = LINE_8);
|
||||
|
||||
//! draws a filled convex polygon in the image
|
||||
CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts,
|
||||
const Scalar& color, int lineType = LINE_8,
|
||||
int shift = 0);
|
||||
|
||||
CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points,
|
||||
const Scalar& color, int lineType = LINE_8,
|
||||
int shift = 0);
|
||||
|
||||
//! fills an area bounded by one or more polygons
|
||||
CV_EXPORTS void fillPoly(Mat& img, const Point** pts,
|
||||
const int* npts, int ncontours,
|
||||
const Scalar& color, int lineType = LINE_8, int shift = 0,
|
||||
Point offset = Point() );
|
||||
|
||||
CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts,
|
||||
const Scalar& color, int lineType = LINE_8, int shift = 0,
|
||||
Point offset = Point() );
|
||||
|
||||
//! draws one or more polygonal curves
|
||||
CV_EXPORTS void polylines(Mat& img, const Point* const* pts, const int* npts,
|
||||
int ncontours, bool isClosed, const Scalar& color,
|
||||
int thickness = 1, int lineType = LINE_8, int shift = 0 );
|
||||
|
||||
CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts,
|
||||
bool isClosed, const Scalar& color,
|
||||
int thickness = 1, int lineType = LINE_8, int shift = 0 );
|
||||
|
||||
//! draws contours in the image
|
||||
CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours,
|
||||
int contourIdx, const Scalar& color,
|
||||
int thickness = 1, int lineType = LINE_8,
|
||||
InputArray hierarchy = noArray(),
|
||||
int maxLevel = INT_MAX, Point offset = Point() );
|
||||
|
||||
//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height)
|
||||
CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2);
|
||||
|
||||
//! clips the line segment by the rectangle imgRect
|
||||
CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2);
|
||||
|
||||
//! converts elliptic arc to a polygonal curve
|
||||
CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle,
|
||||
int arcStart, int arcEnd, int delta,
|
||||
CV_OUT std::vector<Point>& pts );
|
||||
|
||||
//! renders text string in the image
|
||||
CV_EXPORTS_W void putText( InputOutputArray img, const String& text, Point org,
|
||||
int fontFace, double fontScale, Scalar color,
|
||||
int thickness = 1, int lineType = LINE_8,
|
||||
bool bottomLeftOrigin = false );
|
||||
|
||||
//! returns bounding box of the text string
|
||||
CV_EXPORTS_W Size getTextSize(const String& text, int fontFace,
|
||||
double fontScale, int thickness,
|
||||
CV_OUT int* baseLine);
|
||||
|
||||
/*!
|
||||
Principal Component Analysis
|
||||
|
||||
@ -1319,5 +1229,7 @@ template<> struct ParamType<uchar>
|
||||
|
||||
#include "opencv2/core/operations.hpp"
|
||||
#include "opencv2/core/cvstd.inl.hpp"
|
||||
#include "opencv2/core/utility.hpp"
|
||||
#include "opencv2/core/optim.hpp"
|
||||
|
||||
#endif /*__OPENCV_CORE_HPP__*/
|
||||
|
@ -1262,192 +1262,6 @@ CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner );
|
||||
/* Creates a copy of graph */
|
||||
CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage );
|
||||
|
||||
/****************************************************************************************\
|
||||
* Drawing *
|
||||
\****************************************************************************************/
|
||||
|
||||
/****************************************************************************************\
|
||||
* Drawing functions work with images/matrices of arbitrary type. *
|
||||
* For color images the channel order is BGR[A] *
|
||||
* Antialiasing is supported only for 8-bit image now. *
|
||||
* All the functions include parameter color that means rgb value (that may be *
|
||||
* constructed with CV_RGB macro) for color images and brightness *
|
||||
* for grayscale images. *
|
||||
* If a drawn figure is partially or completely outside of the image, it is clipped.*
|
||||
\****************************************************************************************/
|
||||
|
||||
#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 )
|
||||
#define CV_FILLED -1
|
||||
|
||||
#define CV_AA 16
|
||||
|
||||
/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */
|
||||
CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2,
|
||||
CvScalar color, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );
|
||||
|
||||
/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2),
|
||||
if thickness<0 (e.g. thickness == CV_FILLED), the filled box is drawn */
|
||||
CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2,
|
||||
CvScalar color, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8),
|
||||
int shift CV_DEFAULT(0));
|
||||
|
||||
/* Draws a rectangle specified by a CvRect structure */
|
||||
CVAPI(void) cvRectangleR( CvArr* img, CvRect r,
|
||||
CvScalar color, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8),
|
||||
int shift CV_DEFAULT(0));
|
||||
|
||||
|
||||
/* Draws a circle with specified center and radius.
|
||||
Thickness works in the same way as with cvRectangle */
|
||||
CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius,
|
||||
CvScalar color, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));
|
||||
|
||||
/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector,
|
||||
depending on <thickness>, <start_angle> and <end_angle> parameters. The resultant figure
|
||||
is rotated by <angle>. All the angles are in degrees */
|
||||
CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes,
|
||||
double angle, double start_angle, double end_angle,
|
||||
CvScalar color, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));
|
||||
|
||||
CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color,
|
||||
int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) )
|
||||
{
|
||||
CvSize axes;
|
||||
axes.width = cvRound(box.size.width*0.5);
|
||||
axes.height = cvRound(box.size.height*0.5);
|
||||
|
||||
cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle,
|
||||
0, 360, color, thickness, line_type, shift );
|
||||
}
|
||||
|
||||
/* Fills convex or monotonous polygon. */
|
||||
CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color,
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));
|
||||
|
||||
/* Fills an area bounded by one or more arbitrary polygons */
|
||||
CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts,
|
||||
int contours, CvScalar color,
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );
|
||||
|
||||
/* Draws one or more polygonal curves */
|
||||
CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours,
|
||||
int is_closed, CvScalar color, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );
|
||||
|
||||
#define cvDrawRect cvRectangle
|
||||
#define cvDrawLine cvLine
|
||||
#define cvDrawCircle cvCircle
|
||||
#define cvDrawEllipse cvEllipse
|
||||
#define cvDrawPolyLine cvPolyLine
|
||||
|
||||
/* Clips the line segment connecting *pt1 and *pt2
|
||||
by the rectangular window
|
||||
(0<=x<img_size.width, 0<=y<img_size.height). */
|
||||
CVAPI(int) cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 );
|
||||
|
||||
/* Initializes line iterator. Initially, line_iterator->ptr will point
|
||||
to pt1 (or pt2, see left_to_right description) location in the image.
|
||||
Returns the number of pixels on the line between the ending points. */
|
||||
CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2,
|
||||
CvLineIterator* line_iterator,
|
||||
int connectivity CV_DEFAULT(8),
|
||||
int left_to_right CV_DEFAULT(0));
|
||||
|
||||
/* Moves iterator to the next line point */
|
||||
#define CV_NEXT_LINE_POINT( line_iterator ) \
|
||||
{ \
|
||||
int _line_iterator_mask = (line_iterator).err < 0 ? -1 : 0; \
|
||||
(line_iterator).err += (line_iterator).minus_delta + \
|
||||
((line_iterator).plus_delta & _line_iterator_mask); \
|
||||
(line_iterator).ptr += (line_iterator).minus_step + \
|
||||
((line_iterator).plus_step & _line_iterator_mask); \
|
||||
}
|
||||
|
||||
|
||||
/* basic font types */
|
||||
#define CV_FONT_HERSHEY_SIMPLEX 0
|
||||
#define CV_FONT_HERSHEY_PLAIN 1
|
||||
#define CV_FONT_HERSHEY_DUPLEX 2
|
||||
#define CV_FONT_HERSHEY_COMPLEX 3
|
||||
#define CV_FONT_HERSHEY_TRIPLEX 4
|
||||
#define CV_FONT_HERSHEY_COMPLEX_SMALL 5
|
||||
#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6
|
||||
#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7
|
||||
|
||||
/* font flags */
|
||||
#define CV_FONT_ITALIC 16
|
||||
|
||||
#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX
|
||||
|
||||
|
||||
/* Font structure */
|
||||
typedef struct CvFont
|
||||
{
|
||||
const char* nameFont; //Qt:nameFont
|
||||
CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component])
|
||||
int font_face; //Qt: bool italic /* =CV_FONT_* */
|
||||
const int* ascii; /* font data and metrics */
|
||||
const int* greek;
|
||||
const int* cyrillic;
|
||||
float hscale, vscale;
|
||||
float shear; /* slope coefficient: 0 - normal, >0 - italic */
|
||||
int thickness; //Qt: weight /* letters thickness */
|
||||
float dx; /* horizontal interval between letters */
|
||||
int line_type; //Qt: PointSize
|
||||
}
|
||||
CvFont;
|
||||
|
||||
/* Initializes font structure used further in cvPutText */
|
||||
CVAPI(void) cvInitFont( CvFont* font, int font_face,
|
||||
double hscale, double vscale,
|
||||
double shear CV_DEFAULT(0),
|
||||
int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8));
|
||||
|
||||
CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) )
|
||||
{
|
||||
CvFont font;
|
||||
cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA );
|
||||
return font;
|
||||
}
|
||||
|
||||
/* Renders text stroke with specified font and color at specified location.
|
||||
CvFont should be initialized with cvInitFont */
|
||||
CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org,
|
||||
const CvFont* font, CvScalar color );
|
||||
|
||||
/* Calculates bounding box of text stroke (useful for alignment) */
|
||||
CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font,
|
||||
CvSize* text_size, int* baseline );
|
||||
|
||||
|
||||
|
||||
/* Unpacks color value, if arrtype is CV_8UC?, <color> is treated as
|
||||
packed color value, otherwise the first channels (depending on arrtype)
|
||||
of destination scalar are set to the same value = <color> */
|
||||
CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype );
|
||||
|
||||
/* Returns the polygon points which make up the given ellipse. The ellipse is define by
|
||||
the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep
|
||||
of the ellipse arc can be done by specifying arc_start and arc_end to be something
|
||||
other than 0 and 360, respectively. The input array 'pts' must be large enough to
|
||||
hold the result. The total number of points stored into 'pts' is returned by this
|
||||
function. */
|
||||
CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes,
|
||||
int angle, int arc_start, int arc_end, CvPoint * pts, int delta );
|
||||
|
||||
/* Draws contour outlines or filled interiors on the image */
|
||||
CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour,
|
||||
CvScalar external_color, CvScalar hole_color,
|
||||
int max_level, int thickness CV_DEFAULT(1),
|
||||
int line_type CV_DEFAULT(8),
|
||||
CvPoint offset CV_DEFAULT(cvPoint(0,0)));
|
||||
|
||||
/* Does look-up transformation. Elements of the source array
|
||||
(that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */
|
||||
|
@ -261,7 +261,7 @@ public:
|
||||
int* refcount;
|
||||
|
||||
//! helper fields used in locateROI and adjustROI
|
||||
const uchar* datastart;
|
||||
uchar* datastart;
|
||||
const uchar* dataend;
|
||||
|
||||
//! allocator
|
||||
@ -349,7 +349,7 @@ public:
|
||||
uchar* data;
|
||||
int* refcount;
|
||||
|
||||
const uchar* datastart;
|
||||
uchar* datastart;
|
||||
const uchar* dataend;
|
||||
|
||||
AllocType alloc_type;
|
||||
|
@ -219,8 +219,14 @@ template<typename _Tp, int n> static inline
|
||||
std::ostream& operator << (std::ostream& out, const Vec<_Tp, n>& vec)
|
||||
{
|
||||
out << "[";
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( push )
|
||||
#pragma warning( disable: 4127 )
|
||||
#endif
|
||||
if(Vec<_Tp, n>::depth < CV_32F)
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( pop )
|
||||
#endif
|
||||
{
|
||||
for (int i = 0; i < n - 1; ++i) {
|
||||
out << (int)vec[i] << ", ";
|
||||
|
@ -2121,7 +2121,7 @@ MatConstIterator::MatConstIterator(const Mat* _m)
|
||||
{
|
||||
if( m && m->isContinuous() )
|
||||
{
|
||||
sliceStart = m->data;
|
||||
sliceStart = m->ptr();
|
||||
sliceEnd = sliceStart + m->total()*elemSize;
|
||||
}
|
||||
seek((const int*)0);
|
||||
@ -2134,7 +2134,7 @@ MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col)
|
||||
CV_Assert(m && m->dims <= 2);
|
||||
if( m->isContinuous() )
|
||||
{
|
||||
sliceStart = m->data;
|
||||
sliceStart = m->ptr();
|
||||
sliceEnd = sliceStart + m->total()*elemSize;
|
||||
}
|
||||
int idx[] = {_row, _col};
|
||||
@ -2148,7 +2148,7 @@ MatConstIterator::MatConstIterator(const Mat* _m, Point _pt)
|
||||
CV_Assert(m && m->dims <= 2);
|
||||
if( m->isContinuous() )
|
||||
{
|
||||
sliceStart = m->data;
|
||||
sliceStart = m->ptr();
|
||||
sliceEnd = sliceStart + m->total()*elemSize;
|
||||
}
|
||||
int idx[] = {_pt.y, _pt.x};
|
||||
|
@ -44,9 +44,10 @@
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
|
||||
namespace cv{namespace optim
|
||||
namespace cv
|
||||
{
|
||||
class CV_EXPORTS Solver : public Algorithm
|
||||
|
||||
class CV_EXPORTS MinProblemSolver : public Algorithm
|
||||
{
|
||||
public:
|
||||
class CV_EXPORTS Function
|
||||
@ -70,7 +71,7 @@ public:
|
||||
};
|
||||
|
||||
//! downhill simplex class
|
||||
class CV_EXPORTS DownhillSolver : public Solver
|
||||
class CV_EXPORTS DownhillSolver : public MinProblemSolver
|
||||
{
|
||||
public:
|
||||
//! returns row-vector, even if the column-vector was given
|
||||
@ -78,20 +79,22 @@ public:
|
||||
//!This should be called at least once before the first call to minimize() and step is assumed to be (something that
|
||||
//! after getMat() will return) row-vector or column-vector. *It's dimensionality determines the dimensionality of a problem.*
|
||||
virtual void setInitStep(InputArray step)=0;
|
||||
};
|
||||
|
||||
// both minRange & minError are specified by termcrit.epsilon; In addition, user may specify the number of iterations that the algorithm does.
|
||||
CV_EXPORTS_W Ptr<DownhillSolver> createDownhillSolver(const Ptr<Solver::Function>& f=Ptr<Solver::Function>(),
|
||||
InputArray initStep=Mat_<double>(1,1,0.0),
|
||||
TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
|
||||
// both minRange & minError are specified by termcrit.epsilon;
|
||||
// In addition, user may specify the number of iterations that the algorithm does.
|
||||
static Ptr<DownhillSolver> create(const Ptr<MinProblemSolver::Function>& f=Ptr<MinProblemSolver::Function>(),
|
||||
InputArray initStep=Mat_<double>(1,1,0.0),
|
||||
TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
|
||||
};
|
||||
|
||||
//! conjugate gradient method
|
||||
class CV_EXPORTS ConjGradSolver : public Solver{
|
||||
class CV_EXPORTS ConjGradSolver : public MinProblemSolver
|
||||
{
|
||||
public:
|
||||
static Ptr<ConjGradSolver> create(const Ptr<MinProblemSolver::Function>& f=Ptr<ConjGradSolver::Function>(),
|
||||
TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
|
||||
};
|
||||
|
||||
CV_EXPORTS_W Ptr<ConjGradSolver> createConjGradSolver(const Ptr<Solver::Function>& f=Ptr<ConjGradSolver::Function>(),
|
||||
TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
|
||||
|
||||
//!the return codes for solveLP() function
|
||||
enum
|
||||
{
|
||||
@ -102,7 +105,7 @@ enum
|
||||
};
|
||||
|
||||
CV_EXPORTS_W int solveLP(const Mat& Func, const Mat& Constr, Mat& z);
|
||||
CV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations,Mat& result, double lambda=1.0, int niters=30);
|
||||
}}// cv
|
||||
|
||||
}// cv
|
||||
|
||||
#endif
|
@ -75,7 +75,7 @@ OCL_PERF_TEST_P(DftFixture, Dft, ::testing::Combine(Values(C2C, R2R, C2R, R2C),
|
||||
const Size srcSize = get<1>(params);
|
||||
int flags = get<2>(params);
|
||||
|
||||
int in_cn, out_cn;
|
||||
int in_cn = 0, out_cn = 0;
|
||||
switch (dft_type)
|
||||
{
|
||||
case R2R: flags |= cv::DFT_REAL_OUTPUT; in_cn = 1; out_cn = 1; break;
|
||||
|
@ -1076,7 +1076,7 @@ void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t bl
|
||||
{
|
||||
int scn = (int)sc.total(), cn = CV_MAT_CN(buftype);
|
||||
size_t esz = CV_ELEM_SIZE(buftype);
|
||||
getConvertFunc(sc.depth(), buftype)(sc.data, 1, 0, 1, scbuf, 1, Size(std::min(cn, scn), 1), 0);
|
||||
getConvertFunc(sc.depth(), buftype)(sc.ptr(), 1, 0, 1, scbuf, 1, Size(std::min(cn, scn), 1), 0);
|
||||
// unroll the scalar
|
||||
if( scn < cn )
|
||||
{
|
||||
@ -1215,7 +1215,7 @@ static void binary_op( InputArray _src1, InputArray _src2, OutputArray _dst,
|
||||
if( len == (size_t)(int)len )
|
||||
{
|
||||
sz.width = (int)len;
|
||||
func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0);
|
||||
func(src1.ptr(), src1.step, src2.ptr(), src2.step, dst.ptr(), dst.step, sz, 0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -1491,9 +1491,6 @@ static bool ocl_arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
|
||||
if (!doubleSupport && (depth2 == CV_64F || depth1 == CV_64F))
|
||||
return false;
|
||||
|
||||
if( (oclop == OCL_OP_MUL_SCALE || oclop == OCL_OP_DIV_SCALE) && (depth1 >= CV_32F || depth2 >= CV_32F || ddepth >= CV_32F) )
|
||||
return false;
|
||||
|
||||
int kercn = haveMask || haveScalar ? cn : ocl::predictOptimalVectorWidth(_src1, _src2, _dst);
|
||||
int scalarcn = kercn == 3 ? 4 : kercn, rowsPerWI = d.isIntel() ? 4 : 1;
|
||||
|
||||
@ -1625,7 +1622,7 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
|
||||
|
||||
Mat src1 = psrc1->getMat(), src2 = psrc2->getMat(), dst = _dst.getMat();
|
||||
Size sz = getContinuousSize(src1, src2, dst, src1.channels());
|
||||
tab[depth1](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, usrdata);
|
||||
tab[depth1](src1.ptr(), src1.step, src2.ptr(), src2.step, dst.ptr(), dst.step, sz, usrdata);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2988,7 +2985,7 @@ static bool ocl_compare(InputArray _src1, InputArray _src2, OutputArray _dst, in
|
||||
else
|
||||
{
|
||||
double fval = 0;
|
||||
getConvertFunc(depth2, CV_64F)(src2.data, 1, 0, 1, (uchar *)&fval, 1, Size(1, 1), 0);
|
||||
getConvertFunc(depth2, CV_64F)(src2.ptr(), 1, 0, 1, (uchar *)&fval, 1, Size(1, 1), 0);
|
||||
if( fval < getMinVal(depth1) )
|
||||
return dst.setTo(Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0)), true;
|
||||
|
||||
@ -3068,7 +3065,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op)
|
||||
_dst.create(src1.size(), CV_8UC(cn));
|
||||
Mat dst = _dst.getMat();
|
||||
Size sz = getContinuousSize(src1, src2, dst, src1.channels());
|
||||
getCmpFunc(src1.depth())(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op);
|
||||
getCmpFunc(src1.depth())(src1.ptr(), src1.step, src2.ptr(), src2.step, dst.ptr(), dst.step, sz, &op);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -3109,7 +3106,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op)
|
||||
else
|
||||
{
|
||||
double fval=0;
|
||||
getConvertFunc(depth2, CV_64F)(src2.data, 1, 0, 1, (uchar*)&fval, 1, Size(1,1), 0);
|
||||
getConvertFunc(depth2, CV_64F)(src2.ptr(), 1, 0, 1, (uchar*)&fval, 1, Size(1,1), 0);
|
||||
if( fval < getMinVal(depth1) )
|
||||
{
|
||||
dst = Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0);
|
||||
@ -3679,8 +3676,8 @@ static bool ocl_inRange( InputArray _src, InputArray _lowerb,
|
||||
int* iubuf = ilbuf + cn;
|
||||
|
||||
BinaryFunc sccvtfunc = getConvertFunc(ldepth, CV_32S);
|
||||
sccvtfunc(lscalar.data, 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
|
||||
sccvtfunc(uscalar.data, 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
|
||||
sccvtfunc(lscalar.ptr(), 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
|
||||
sccvtfunc(uscalar.ptr(), 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
|
||||
int minval = cvRound(getMinVal(sdepth)), maxval = cvRound(getMaxVal(sdepth));
|
||||
|
||||
for( int k = 0; k < cn; k++ )
|
||||
@ -3790,8 +3787,8 @@ void cv::inRange(InputArray _src, InputArray _lowerb,
|
||||
int* iubuf = ilbuf + cn;
|
||||
|
||||
BinaryFunc sccvtfunc = getConvertFunc(scdepth, CV_32S);
|
||||
sccvtfunc(lb.data, 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
|
||||
sccvtfunc(ub.data, 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
|
||||
sccvtfunc(lb.ptr(), 1, 0, 1, (uchar*)ilbuf, 1, Size(cn, 1), 0);
|
||||
sccvtfunc(ub.ptr(), 1, 0, 1, (uchar*)iubuf, 1, Size(cn, 1), 0);
|
||||
int minval = cvRound(getMinVal(depth)), maxval = cvRound(getMaxVal(depth));
|
||||
|
||||
for( int k = 0; k < cn; k++ )
|
||||
|
@ -40,10 +40,12 @@
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#undef ALEX_DEBUG
|
||||
#include "debug.hpp"
|
||||
|
||||
namespace cv{namespace optim{
|
||||
#define dprintf(x)
|
||||
#define print_matrix(x)
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
#define SEC_METHOD_ITERATIONS 4
|
||||
#define INITIAL_SEC_METHOD_SIGMA 0.1
|
||||
@ -57,15 +59,15 @@ namespace cv{namespace optim{
|
||||
void setTermCriteria(const TermCriteria& termcrit);
|
||||
double minimize(InputOutputArray x);
|
||||
protected:
|
||||
Ptr<Solver::Function> _Function;
|
||||
Ptr<MinProblemSolver::Function> _Function;
|
||||
TermCriteria _termcrit;
|
||||
Mat_<double> d,r,buf_x,r_old;
|
||||
Mat_<double> minimizeOnTheLine_buf1,minimizeOnTheLine_buf2;
|
||||
private:
|
||||
static void minimizeOnTheLine(Ptr<Solver::Function> _f,Mat_<double>& x,const Mat_<double>& d,Mat_<double>& buf1,Mat_<double>& buf2);
|
||||
static void minimizeOnTheLine(Ptr<MinProblemSolver::Function> _f,Mat_<double>& x,const Mat_<double>& d,Mat_<double>& buf1,Mat_<double>& buf2);
|
||||
};
|
||||
|
||||
void ConjGradSolverImpl::minimizeOnTheLine(Ptr<Solver::Function> _f,Mat_<double>& x,const Mat_<double>& d,Mat_<double>& buf1,
|
||||
void ConjGradSolverImpl::minimizeOnTheLine(Ptr<MinProblemSolver::Function> _f,Mat_<double>& x,const Mat_<double>& d,Mat_<double>& buf1,
|
||||
Mat_<double>& buf2){
|
||||
double sigma=INITIAL_SEC_METHOD_SIGMA;
|
||||
buf1=0.0;
|
||||
@ -119,13 +121,13 @@ namespace cv{namespace optim{
|
||||
Mat_<double> proxy_x;
|
||||
if(x_mat.rows>1){
|
||||
buf_x.create(1,ndim);
|
||||
Mat_<double> proxy(ndim,1,(double*)buf_x.data);
|
||||
Mat_<double> proxy(ndim,1,buf_x.ptr<double>());
|
||||
x_mat.copyTo(proxy);
|
||||
proxy_x=buf_x;
|
||||
}else{
|
||||
proxy_x=x_mat;
|
||||
}
|
||||
_Function->getGradient((double*)proxy_x.data,(double*)d.data);
|
||||
_Function->getGradient(proxy_x.ptr<double>(),d.ptr<double>());
|
||||
d*=-1.0;
|
||||
d.copyTo(r);
|
||||
|
||||
@ -138,7 +140,7 @@ namespace cv{namespace optim{
|
||||
for(int count=0;count<_termcrit.maxCount;count++){
|
||||
minimizeOnTheLine(_Function,proxy_x,d,minimizeOnTheLine_buf1,minimizeOnTheLine_buf2);
|
||||
r.copyTo(r_old);
|
||||
_Function->getGradient((double*)proxy_x.data,(double*)r.data);
|
||||
_Function->getGradient(proxy_x.ptr<double>(),r.ptr<double>());
|
||||
r*=-1.0;
|
||||
double r_norm_sq=norm(r);
|
||||
if(_termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && r_norm_sq<_termcrit.epsilon){
|
||||
@ -152,15 +154,15 @@ namespace cv{namespace optim{
|
||||
|
||||
|
||||
if(x_mat.rows>1){
|
||||
Mat(ndim, 1, CV_64F, (double*)proxy_x.data).copyTo(x);
|
||||
Mat(ndim, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
|
||||
}
|
||||
return _Function->calc((double*)proxy_x.data);
|
||||
return _Function->calc(proxy_x.ptr<double>());
|
||||
}
|
||||
|
||||
ConjGradSolverImpl::ConjGradSolverImpl(){
|
||||
_Function=Ptr<Function>();
|
||||
}
|
||||
Ptr<Solver::Function> ConjGradSolverImpl::getFunction()const{
|
||||
Ptr<MinProblemSolver::Function> ConjGradSolverImpl::getFunction()const{
|
||||
return _Function;
|
||||
}
|
||||
void ConjGradSolverImpl::setFunction(const Ptr<Function>& f){
|
||||
@ -175,10 +177,10 @@ namespace cv{namespace optim{
|
||||
_termcrit=termcrit;
|
||||
}
|
||||
// both minRange & minError are specified by termcrit.epsilon; In addition, user may specify the number of iterations that the algorithm does.
|
||||
Ptr<ConjGradSolver> createConjGradSolver(const Ptr<Solver::Function>& f, TermCriteria termcrit){
|
||||
ConjGradSolver *CG=new ConjGradSolverImpl();
|
||||
Ptr<ConjGradSolver> ConjGradSolver::create(const Ptr<MinProblemSolver::Function>& f, TermCriteria termcrit){
|
||||
Ptr<ConjGradSolver> CG = makePtr<ConjGradSolverImpl>();
|
||||
CG->setFunction(f);
|
||||
CG->setTermCriteria(termcrit);
|
||||
return Ptr<ConjGradSolver>(CG);
|
||||
return CG;
|
||||
}
|
||||
}}
|
||||
}
|
@ -43,6 +43,11 @@
|
||||
#include "precomp.hpp"
|
||||
#include "opencl_kernels_core.hpp"
|
||||
|
||||
#ifdef __APPLE__
|
||||
#undef CV_NEON
|
||||
#define CV_NEON 0
|
||||
#endif
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
@ -1760,13 +1765,12 @@ static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha
|
||||
kercn = ocl::predictOptimalVectorWidth(_src, _dst), rowsPerWI = d.isIntel() ? 4 : 1;
|
||||
bool doubleSupport = d.doubleFPConfig() > 0;
|
||||
|
||||
if (depth == CV_32F || depth == CV_64F)
|
||||
if (!doubleSupport && depth == CV_64F)
|
||||
return false;
|
||||
|
||||
char cvt[2][50];
|
||||
int wdepth = std::max(depth, CV_32F);
|
||||
ocl::Kernel k("KF", ocl::core::arithm_oclsrc,
|
||||
format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D srcT1=%s"
|
||||
String build_opt = format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D srcT1=%s"
|
||||
" -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s"
|
||||
" -D workT1=%s -D rowsPerWI=%d%s",
|
||||
ocl::typeToStr(CV_8UC(kercn)),
|
||||
@ -1775,7 +1779,8 @@ static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha
|
||||
ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
|
||||
ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
|
||||
ocl::typeToStr(wdepth), rowsPerWI,
|
||||
doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
|
||||
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
|
||||
ocl::Kernel k("KF", ocl::core::arithm_oclsrc, build_opt);
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
@ -1815,7 +1820,7 @@ void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, doubl
|
||||
if( src.dims <= 2 )
|
||||
{
|
||||
Size sz = getContinuousSize(src, dst, cn);
|
||||
func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
|
||||
func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -2054,7 +2059,7 @@ public:
|
||||
CV_DbgAssert(lutcn == 3 || lutcn == 4);
|
||||
if (lutcn == 3)
|
||||
{
|
||||
IppStatus status = ippiCopy_8u_C3P3R(lut.data, (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
|
||||
IppStatus status = ippiCopy_8u_C3P3R(lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
|
||||
if (status < 0)
|
||||
{
|
||||
setIppErrorStatus();
|
||||
@ -2063,7 +2068,7 @@ public:
|
||||
}
|
||||
else if (lutcn == 4)
|
||||
{
|
||||
IppStatus status = ippiCopy_8u_C4P4R(lut.data, (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
|
||||
IppStatus status = ippiCopy_8u_C4P4R(lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
|
||||
if (status < 0)
|
||||
{
|
||||
setIppErrorStatus();
|
||||
@ -2096,14 +2101,14 @@ public:
|
||||
if (lutcn == 3)
|
||||
{
|
||||
if (ippiLUTPalette_8u_C3R(
|
||||
src.data, (int)src.step[0], dst.data, (int)dst.step[0],
|
||||
src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
|
||||
ippiSize(dst.size()), lutTable, 8) >= 0)
|
||||
return;
|
||||
}
|
||||
else if (lutcn == 4)
|
||||
{
|
||||
if (ippiLUTPalette_8u_C4R(
|
||||
src.data, (int)src.step[0], dst.data, (int)dst.step[0],
|
||||
src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
|
||||
ippiSize(dst.size()), lutTable, 8) >= 0)
|
||||
return;
|
||||
}
|
||||
@ -2153,7 +2158,7 @@ public:
|
||||
int len = (int)it.size;
|
||||
|
||||
for( size_t i = 0; i < it.nplanes; i++, ++it )
|
||||
func(ptrs[0], lut_.data, ptrs[1], len, cn, lutcn);
|
||||
func(ptrs[0], lut_.ptr(), ptrs[1], len, cn, lutcn);
|
||||
}
|
||||
private:
|
||||
LUTParallelBody(const LUTParallelBody&);
|
||||
@ -2225,7 +2230,7 @@ void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst )
|
||||
int len = (int)it.size;
|
||||
|
||||
for( size_t i = 0; i < it.nplanes; i++, ++it )
|
||||
func(ptrs[0], lut.data, ptrs[1], len, cn, lutcn);
|
||||
func(ptrs[0], lut.ptr(), ptrs[1], len, cn, lutcn);
|
||||
}
|
||||
|
||||
namespace cv {
|
||||
|
@ -741,28 +741,28 @@ void flip( InputArray _src, OutputArray _dst, int flip_mode )
|
||||
|
||||
if (ippFunc != 0)
|
||||
{
|
||||
if (ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0)
|
||||
if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0)
|
||||
return;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
else if (ippFuncI != 0)
|
||||
{
|
||||
if (ippFuncI(dst.data, (int)dst.step, roisize, axis) >= 0)
|
||||
if (ippFuncI(dst.ptr(), (int)dst.step, roisize, axis) >= 0)
|
||||
return;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
#endif
|
||||
|
||||
if( flip_mode <= 0 )
|
||||
flipVert( src.data, src.step, dst.data, dst.step, src.size(), esz );
|
||||
flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
|
||||
else
|
||||
flipHoriz( src.data, src.step, dst.data, dst.step, src.size(), esz );
|
||||
flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
|
||||
|
||||
if( flip_mode < 0 )
|
||||
flipHoriz( dst.data, dst.step, dst.data, dst.step, dst.size(), esz );
|
||||
flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
|
||||
}
|
||||
|
||||
/*#ifdef HAVE_OPENCL
|
||||
#if defined HAVE_OPENCL && !defined __APPLE__
|
||||
|
||||
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
|
||||
{
|
||||
@ -790,7 +790,7 @@ static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
|
||||
return k.run(2, globalsize, NULL, false);
|
||||
}
|
||||
|
||||
#endif*/
|
||||
#endif
|
||||
|
||||
void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
|
||||
{
|
||||
@ -800,8 +800,10 @@ void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
|
||||
Size ssize = _src.size();
|
||||
_dst.create(ssize.height*ny, ssize.width*nx, _src.type());
|
||||
|
||||
/*CV_OCL_RUN(_dst.isUMat(),
|
||||
ocl_repeat(_src, ny, nx, _dst))*/
|
||||
#if !defined __APPLE__
|
||||
CV_OCL_RUN(_dst.isUMat(),
|
||||
ocl_repeat(_src, ny, nx, _dst))
|
||||
#endif
|
||||
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
Size dsize = dst.size();
|
||||
@ -812,11 +814,11 @@ void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
|
||||
for( y = 0; y < ssize.height; y++ )
|
||||
{
|
||||
for( x = 0; x < dsize.width; x += ssize.width )
|
||||
memcpy( dst.data + y*dst.step + x, src.data + y*src.step, ssize.width );
|
||||
memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
|
||||
}
|
||||
|
||||
for( ; y < dsize.height; y++ )
|
||||
memcpy( dst.data + y*dst.step, dst.data + (y - ssize.height)*dst.step, dsize.width );
|
||||
memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
|
||||
}
|
||||
|
||||
Mat repeat(const Mat& src, int ny, int nx)
|
||||
@ -1218,8 +1220,8 @@ void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
|
||||
#endif
|
||||
|
||||
if( borderType != BORDER_CONSTANT )
|
||||
copyMakeBorder_8u( src.data, src.step, src.size(),
|
||||
dst.data, dst.step, dst.size(),
|
||||
copyMakeBorder_8u( src.ptr(), src.step, src.size(),
|
||||
dst.ptr(), dst.step, dst.size(),
|
||||
top, left, (int)src.elemSize(), borderType );
|
||||
else
|
||||
{
|
||||
@ -1231,8 +1233,8 @@ void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
|
||||
cn1 = 1;
|
||||
}
|
||||
scalarToRawData(value, buf, CV_MAKETYPE(src.depth(), cn1), cn);
|
||||
copyMakeConstBorder_8u( src.data, src.step, src.size(),
|
||||
dst.data, dst.step, dst.size(),
|
||||
copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
|
||||
dst.ptr(), dst.step, dst.size(),
|
||||
top, left, (int)src.elemSize(), (uchar*)(double*)buf );
|
||||
}
|
||||
}
|
||||
|
@ -160,7 +160,7 @@ void cv::cuda::GpuMat::release()
|
||||
if (refcount && CV_XADD(refcount, -1) == 1)
|
||||
allocator->free(this);
|
||||
|
||||
data = datastart = dataend = 0;
|
||||
dataend = data = datastart = 0;
|
||||
step = rows = cols = 0;
|
||||
refcount = 0;
|
||||
}
|
||||
|
@ -49,7 +49,7 @@ using namespace cv::cuda;
|
||||
cv::cuda::GpuMat::GpuMat(int rows_, int cols_, int type_, void* data_, size_t step_) :
|
||||
flags(Mat::MAGIC_VAL + (type_ & Mat::TYPE_MASK)), rows(rows_), cols(cols_),
|
||||
step(step_), data((uchar*)data_), refcount(0),
|
||||
datastart((uchar*)data_), dataend((uchar*)data_),
|
||||
datastart((uchar*)data_), dataend((const uchar*)data_),
|
||||
allocator(defaultAllocator())
|
||||
{
|
||||
size_t minstep = cols * elemSize();
|
||||
@ -75,7 +75,7 @@ cv::cuda::GpuMat::GpuMat(int rows_, int cols_, int type_, void* data_, size_t st
|
||||
cv::cuda::GpuMat::GpuMat(Size size_, int type_, void* data_, size_t step_) :
|
||||
flags(Mat::MAGIC_VAL + (type_ & Mat::TYPE_MASK)), rows(size_.height), cols(size_.width),
|
||||
step(step_), data((uchar*)data_), refcount(0),
|
||||
datastart((uchar*)data_), dataend((uchar*)data_),
|
||||
datastart((uchar*)data_), dataend((const uchar*)data_),
|
||||
allocator(defaultAllocator())
|
||||
{
|
||||
size_t minstep = cols * elemSize();
|
||||
|
@ -175,7 +175,7 @@ void cv::cuda::CudaMem::release()
|
||||
fastFree(refcount);
|
||||
}
|
||||
|
||||
data = datastart = dataend = 0;
|
||||
dataend = data = datastart = 0;
|
||||
step = rows = cols = 0;
|
||||
refcount = 0;
|
||||
#endif
|
||||
|
@ -3528,492 +3528,9 @@ cvPrevTreeNode( CvTreeNodeIterator* treeIterator )
|
||||
return prevNode;
|
||||
}
|
||||
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// This is reimplementation of kd-trees from cvkdtree*.* by Xavier Delacour, cleaned-up and
|
||||
// adopted to work with the new OpenCV data structures. It's in cxcore to be shared by
|
||||
// both cv (CvFeatureTree) and ml (kNN).
|
||||
|
||||
// The algorithm is taken from:
|
||||
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
|
||||
// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
|
||||
// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
|
||||
|
||||
const int MAX_TREE_DEPTH = 32;
|
||||
|
||||
KDTree::KDTree()
|
||||
{
|
||||
maxDepth = -1;
|
||||
normType = NORM_L2;
|
||||
}
|
||||
|
||||
KDTree::KDTree(InputArray _points, bool _copyData)
|
||||
{
|
||||
maxDepth = -1;
|
||||
normType = NORM_L2;
|
||||
build(_points, _copyData);
|
||||
}
|
||||
|
||||
KDTree::KDTree(InputArray _points, InputArray _labels, bool _copyData)
|
||||
{
|
||||
maxDepth = -1;
|
||||
normType = NORM_L2;
|
||||
build(_points, _labels, _copyData);
|
||||
}
|
||||
|
||||
struct SubTree
|
||||
{
|
||||
SubTree() : first(0), last(0), nodeIdx(0), depth(0) {}
|
||||
SubTree(int _first, int _last, int _nodeIdx, int _depth)
|
||||
: first(_first), last(_last), nodeIdx(_nodeIdx), depth(_depth) {}
|
||||
int first;
|
||||
int last;
|
||||
int nodeIdx;
|
||||
int depth;
|
||||
};
|
||||
|
||||
|
||||
static float
|
||||
medianPartition( size_t* ofs, int a, int b, const float* vals )
|
||||
{
|
||||
int k, a0 = a, b0 = b;
|
||||
int middle = (a + b)/2;
|
||||
while( b > a )
|
||||
{
|
||||
int i0 = a, i1 = (a+b)/2, i2 = b;
|
||||
float v0 = vals[ofs[i0]], v1 = vals[ofs[i1]], v2 = vals[ofs[i2]];
|
||||
int ip = v0 < v1 ? (v1 < v2 ? i1 : v0 < v2 ? i2 : i0) :
|
||||
v0 < v2 ? i0 : (v1 < v2 ? i2 : i1);
|
||||
float pivot = vals[ofs[ip]];
|
||||
std::swap(ofs[ip], ofs[i2]);
|
||||
|
||||
for( i1 = i0, i0--; i1 <= i2; i1++ )
|
||||
if( vals[ofs[i1]] <= pivot )
|
||||
{
|
||||
i0++;
|
||||
std::swap(ofs[i0], ofs[i1]);
|
||||
}
|
||||
if( i0 == middle )
|
||||
break;
|
||||
if( i0 > middle )
|
||||
b = i0 - (b == i0);
|
||||
else
|
||||
a = i0;
|
||||
}
|
||||
|
||||
float pivot = vals[ofs[middle]];
|
||||
int less = 0, more = 0;
|
||||
for( k = a0; k < middle; k++ )
|
||||
{
|
||||
CV_Assert(vals[ofs[k]] <= pivot);
|
||||
less += vals[ofs[k]] < pivot;
|
||||
}
|
||||
for( k = b0; k > middle; k-- )
|
||||
{
|
||||
CV_Assert(vals[ofs[k]] >= pivot);
|
||||
more += vals[ofs[k]] > pivot;
|
||||
}
|
||||
CV_Assert(std::abs(more - less) <= 1);
|
||||
|
||||
return vals[ofs[middle]];
|
||||
}
|
||||
|
||||
static void
|
||||
computeSums( const Mat& points, const size_t* ofs, int a, int b, double* sums )
|
||||
{
|
||||
int i, j, dims = points.cols;
|
||||
const float* data = points.ptr<float>(0);
|
||||
for( j = 0; j < dims; j++ )
|
||||
sums[j*2] = sums[j*2+1] = 0;
|
||||
for( i = a; i <= b; i++ )
|
||||
{
|
||||
const float* row = data + ofs[i];
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
double t = row[j], s = sums[j*2] + t, s2 = sums[j*2+1] + t*t;
|
||||
sums[j*2] = s; sums[j*2+1] = s2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void KDTree::build(InputArray _points, bool _copyData)
|
||||
{
|
||||
build(_points, noArray(), _copyData);
|
||||
}
|
||||
|
||||
|
||||
void KDTree::build(InputArray __points, InputArray __labels, bool _copyData)
|
||||
{
|
||||
Mat _points = __points.getMat(), _labels = __labels.getMat();
|
||||
CV_Assert(_points.type() == CV_32F && !_points.empty());
|
||||
std::vector<KDTree::Node>().swap(nodes);
|
||||
|
||||
if( !_copyData )
|
||||
points = _points;
|
||||
else
|
||||
{
|
||||
points.release();
|
||||
points.create(_points.size(), _points.type());
|
||||
}
|
||||
|
||||
int i, j, n = _points.rows, ptdims = _points.cols, top = 0;
|
||||
const float* data = _points.ptr<float>(0);
|
||||
float* dstdata = points.ptr<float>(0);
|
||||
size_t step = _points.step1();
|
||||
size_t dstep = points.step1();
|
||||
int ptpos = 0;
|
||||
labels.resize(n);
|
||||
const int* _labels_data = 0;
|
||||
|
||||
if( !_labels.empty() )
|
||||
{
|
||||
int nlabels = _labels.checkVector(1, CV_32S, true);
|
||||
CV_Assert(nlabels == n);
|
||||
_labels_data = (const int*)_labels.data;
|
||||
}
|
||||
|
||||
Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F);
|
||||
SubTree stack[MAX_TREE_DEPTH*2];
|
||||
|
||||
std::vector<size_t> _ptofs(n);
|
||||
size_t* ptofs = &_ptofs[0];
|
||||
|
||||
for( i = 0; i < n; i++ )
|
||||
ptofs[i] = i*step;
|
||||
|
||||
nodes.push_back(Node());
|
||||
computeSums(points, ptofs, 0, n-1, sumstack.ptr<double>(top));
|
||||
stack[top++] = SubTree(0, n-1, 0, 0);
|
||||
int _maxDepth = 0;
|
||||
|
||||
while( --top >= 0 )
|
||||
{
|
||||
int first = stack[top].first, last = stack[top].last;
|
||||
int depth = stack[top].depth, nidx = stack[top].nodeIdx;
|
||||
int count = last - first + 1, dim = -1;
|
||||
const double* sums = sumstack.ptr<double>(top);
|
||||
double invCount = 1./count, maxVar = -1.;
|
||||
|
||||
if( count == 1 )
|
||||
{
|
||||
int idx0 = (int)(ptofs[first]/step);
|
||||
int idx = _copyData ? ptpos++ : idx0;
|
||||
nodes[nidx].idx = ~idx;
|
||||
if( _copyData )
|
||||
{
|
||||
const float* src = data + ptofs[first];
|
||||
float* dst = dstdata + idx*dstep;
|
||||
for( j = 0; j < ptdims; j++ )
|
||||
dst[j] = src[j];
|
||||
}
|
||||
labels[idx] = _labels_data ? _labels_data[idx0] : idx0;
|
||||
_maxDepth = std::max(_maxDepth, depth);
|
||||
continue;
|
||||
}
|
||||
|
||||
// find the dimensionality with the biggest variance
|
||||
for( j = 0; j < ptdims; j++ )
|
||||
{
|
||||
double m = sums[j*2]*invCount;
|
||||
double varj = sums[j*2+1]*invCount - m*m;
|
||||
if( maxVar < varj )
|
||||
{
|
||||
maxVar = varj;
|
||||
dim = j;
|
||||
}
|
||||
}
|
||||
|
||||
int left = (int)nodes.size(), right = left + 1;
|
||||
nodes.push_back(Node());
|
||||
nodes.push_back(Node());
|
||||
nodes[nidx].idx = dim;
|
||||
nodes[nidx].left = left;
|
||||
nodes[nidx].right = right;
|
||||
nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim);
|
||||
|
||||
int middle = (first + last)/2;
|
||||
double *lsums = (double*)sums, *rsums = lsums + ptdims*2;
|
||||
computeSums(points, ptofs, middle+1, last, rsums);
|
||||
for( j = 0; j < ptdims*2; j++ )
|
||||
lsums[j] = sums[j] - rsums[j];
|
||||
stack[top++] = SubTree(first, middle, left, depth+1);
|
||||
stack[top++] = SubTree(middle+1, last, right, depth+1);
|
||||
}
|
||||
maxDepth = _maxDepth;
|
||||
}
|
||||
|
||||
|
||||
struct PQueueElem
|
||||
{
|
||||
PQueueElem() : dist(0), idx(0) {}
|
||||
PQueueElem(float _dist, int _idx) : dist(_dist), idx(_idx) {}
|
||||
float dist;
|
||||
int idx;
|
||||
};
|
||||
|
||||
|
||||
int KDTree::findNearest(InputArray _vec, int K, int emax,
|
||||
OutputArray _neighborsIdx, OutputArray _neighbors,
|
||||
OutputArray _dist, OutputArray _labels) const
|
||||
|
||||
{
|
||||
Mat vecmat = _vec.getMat();
|
||||
CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols );
|
||||
const float* vec = vecmat.ptr<float>();
|
||||
K = std::min(K, points.rows);
|
||||
int ptdims = points.cols;
|
||||
|
||||
CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1));
|
||||
|
||||
AutoBuffer<uchar> _buf((K+1)*(sizeof(float) + sizeof(int)));
|
||||
int* idx = (int*)(uchar*)_buf;
|
||||
float* dist = (float*)(idx + K + 1);
|
||||
int i, j, ncount = 0, e = 0;
|
||||
|
||||
int qsize = 0, maxqsize = 1 << 10;
|
||||
AutoBuffer<uchar> _pqueue(maxqsize*sizeof(PQueueElem));
|
||||
PQueueElem* pqueue = (PQueueElem*)(uchar*)_pqueue;
|
||||
emax = std::max(emax, 1);
|
||||
|
||||
for( e = 0; e < emax; )
|
||||
{
|
||||
float d, alt_d = 0.f;
|
||||
int nidx;
|
||||
|
||||
if( e == 0 )
|
||||
nidx = 0;
|
||||
else
|
||||
{
|
||||
// take the next node from the priority queue
|
||||
if( qsize == 0 )
|
||||
break;
|
||||
nidx = pqueue[0].idx;
|
||||
alt_d = pqueue[0].dist;
|
||||
if( --qsize > 0 )
|
||||
{
|
||||
std::swap(pqueue[0], pqueue[qsize]);
|
||||
d = pqueue[0].dist;
|
||||
for( i = 0;;)
|
||||
{
|
||||
int left = i*2 + 1, right = i*2 + 2;
|
||||
if( left >= qsize )
|
||||
break;
|
||||
if( right < qsize && pqueue[right].dist < pqueue[left].dist )
|
||||
left = right;
|
||||
if( pqueue[left].dist >= d )
|
||||
break;
|
||||
std::swap(pqueue[i], pqueue[left]);
|
||||
i = left;
|
||||
}
|
||||
}
|
||||
|
||||
if( ncount == K && alt_d > dist[ncount-1] )
|
||||
continue;
|
||||
}
|
||||
|
||||
for(;;)
|
||||
{
|
||||
if( nidx < 0 )
|
||||
break;
|
||||
const Node& n = nodes[nidx];
|
||||
|
||||
if( n.idx < 0 )
|
||||
{
|
||||
i = ~n.idx;
|
||||
const float* row = points.ptr<float>(i);
|
||||
if( normType == NORM_L2 )
|
||||
for( j = 0, d = 0.f; j < ptdims; j++ )
|
||||
{
|
||||
float t = vec[j] - row[j];
|
||||
d += t*t;
|
||||
}
|
||||
else
|
||||
for( j = 0, d = 0.f; j < ptdims; j++ )
|
||||
d += std::abs(vec[j] - row[j]);
|
||||
|
||||
dist[ncount] = d;
|
||||
idx[ncount] = i;
|
||||
for( i = ncount-1; i >= 0; i-- )
|
||||
{
|
||||
if( dist[i] <= d )
|
||||
break;
|
||||
std::swap(dist[i], dist[i+1]);
|
||||
std::swap(idx[i], idx[i+1]);
|
||||
}
|
||||
ncount += ncount < K;
|
||||
e++;
|
||||
break;
|
||||
}
|
||||
|
||||
int alt;
|
||||
if( vec[n.idx] <= n.boundary )
|
||||
{
|
||||
nidx = n.left;
|
||||
alt = n.right;
|
||||
}
|
||||
else
|
||||
{
|
||||
nidx = n.right;
|
||||
alt = n.left;
|
||||
}
|
||||
|
||||
d = vec[n.idx] - n.boundary;
|
||||
if( normType == NORM_L2 )
|
||||
d = d*d + alt_d;
|
||||
else
|
||||
d = std::abs(d) + alt_d;
|
||||
// subtree pruning
|
||||
if( ncount == K && d > dist[ncount-1] )
|
||||
continue;
|
||||
// add alternative subtree to the priority queue
|
||||
pqueue[qsize] = PQueueElem(d, alt);
|
||||
for( i = qsize; i > 0; )
|
||||
{
|
||||
int parent = (i-1)/2;
|
||||
if( parent < 0 || pqueue[parent].dist <= d )
|
||||
break;
|
||||
std::swap(pqueue[i], pqueue[parent]);
|
||||
i = parent;
|
||||
}
|
||||
qsize += qsize+1 < maxqsize;
|
||||
}
|
||||
}
|
||||
|
||||
K = std::min(K, ncount);
|
||||
if( _neighborsIdx.needed() )
|
||||
{
|
||||
_neighborsIdx.create(K, 1, CV_32S, -1, true);
|
||||
Mat nidx = _neighborsIdx.getMat();
|
||||
Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx);
|
||||
}
|
||||
if( _dist.needed() )
|
||||
sqrt(Mat(K, 1, CV_32F, dist), _dist);
|
||||
|
||||
if( _neighbors.needed() || _labels.needed() )
|
||||
getPoints(Mat(K, 1, CV_32S, idx), _neighbors, _labels);
|
||||
return K;
|
||||
}
|
||||
|
||||
|
||||
void KDTree::findOrthoRange(InputArray _lowerBound,
|
||||
InputArray _upperBound,
|
||||
OutputArray _neighborsIdx,
|
||||
OutputArray _neighbors,
|
||||
OutputArray _labels ) const
|
||||
{
|
||||
int ptdims = points.cols;
|
||||
Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat();
|
||||
CV_Assert( lowerBound.size == upperBound.size &&
|
||||
lowerBound.isContinuous() &&
|
||||
upperBound.isContinuous() &&
|
||||
lowerBound.type() == upperBound.type() &&
|
||||
lowerBound.type() == CV_32F &&
|
||||
lowerBound.total() == (size_t)ptdims );
|
||||
const float* L = lowerBound.ptr<float>();
|
||||
const float* R = upperBound.ptr<float>();
|
||||
|
||||
std::vector<int> idx;
|
||||
AutoBuffer<int> _stack(MAX_TREE_DEPTH*2 + 1);
|
||||
int* stack = _stack;
|
||||
int top = 0;
|
||||
|
||||
stack[top++] = 0;
|
||||
|
||||
while( --top >= 0 )
|
||||
{
|
||||
int nidx = stack[top];
|
||||
if( nidx < 0 )
|
||||
break;
|
||||
const Node& n = nodes[nidx];
|
||||
if( n.idx < 0 )
|
||||
{
|
||||
int j, i = ~n.idx;
|
||||
const float* row = points.ptr<float>(i);
|
||||
for( j = 0; j < ptdims; j++ )
|
||||
if( row[j] < L[j] || row[j] >= R[j] )
|
||||
break;
|
||||
if( j == ptdims )
|
||||
idx.push_back(i);
|
||||
continue;
|
||||
}
|
||||
if( L[n.idx] <= n.boundary )
|
||||
stack[top++] = n.left;
|
||||
if( R[n.idx] > n.boundary )
|
||||
stack[top++] = n.right;
|
||||
}
|
||||
|
||||
if( _neighborsIdx.needed() )
|
||||
{
|
||||
_neighborsIdx.create((int)idx.size(), 1, CV_32S, -1, true);
|
||||
Mat nidx = _neighborsIdx.getMat();
|
||||
Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx);
|
||||
}
|
||||
getPoints( idx, _neighbors, _labels );
|
||||
}
|
||||
|
||||
|
||||
void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const
|
||||
{
|
||||
Mat idxmat = _idx.getMat(), pts, labelsmat;
|
||||
CV_Assert( idxmat.isContinuous() && idxmat.type() == CV_32S &&
|
||||
(idxmat.cols == 1 || idxmat.rows == 1) );
|
||||
const int* idx = idxmat.ptr<int>();
|
||||
int* dstlabels = 0;
|
||||
|
||||
int ptdims = points.cols;
|
||||
int i, nidx = (int)idxmat.total();
|
||||
if( nidx == 0 )
|
||||
{
|
||||
_pts.release();
|
||||
_labels.release();
|
||||
return;
|
||||
}
|
||||
|
||||
if( _pts.needed() )
|
||||
{
|
||||
_pts.create( nidx, ptdims, points.type());
|
||||
pts = _pts.getMat();
|
||||
}
|
||||
|
||||
if(_labels.needed())
|
||||
{
|
||||
_labels.create(nidx, 1, CV_32S, -1, true);
|
||||
labelsmat = _labels.getMat();
|
||||
CV_Assert( labelsmat.isContinuous() );
|
||||
dstlabels = labelsmat.ptr<int>();
|
||||
}
|
||||
const int* srclabels = !labels.empty() ? &labels[0] : 0;
|
||||
|
||||
for( i = 0; i < nidx; i++ )
|
||||
{
|
||||
int k = idx[i];
|
||||
CV_Assert( (unsigned)k < (unsigned)points.rows );
|
||||
const float* src = points.ptr<float>(k);
|
||||
if( pts.data )
|
||||
std::copy(src, src + ptdims, pts.ptr<float>(i));
|
||||
if( dstlabels )
|
||||
dstlabels[i] = srclabels ? srclabels[k] : k;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const float* KDTree::getPoint(int ptidx, int* label) const
|
||||
{
|
||||
CV_Assert( (unsigned)ptidx < (unsigned)points.rows);
|
||||
if(label)
|
||||
*label = labels[ptidx];
|
||||
return points.ptr<float>(ptidx);
|
||||
}
|
||||
|
||||
|
||||
int KDTree::dims() const
|
||||
{
|
||||
return !points.empty() ? points.cols : 0;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
schar* seqPush( CvSeq* seq, const void* element )
|
||||
|
@ -39,8 +39,9 @@
|
||||
//
|
||||
//M*/
|
||||
#include "precomp.hpp"
|
||||
#include "debug.hpp"
|
||||
#include "opencv2/core/core_c.h"
|
||||
|
||||
#define dprintf(x)
|
||||
#define print_matrix(x)
|
||||
|
||||
/*
|
||||
|
||||
@ -83,13 +84,13 @@ Created by @SareeAlnaghy
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
void test(Ptr<optim::DownhillSolver> solver, Ptr<optim::Solver::Function> ptr_F, Mat &P, Mat &step)
|
||||
void test(Ptr<optim::DownhillSolver> MinProblemSolver, Ptr<optim::MinProblemSolver::Function> ptr_F, Mat &P, Mat &step)
|
||||
{
|
||||
try{
|
||||
|
||||
solver->setFunction(ptr_F);
|
||||
solver->setInitStep(step);
|
||||
double res = solver->minimize(P);
|
||||
MinProblemSolver->setFunction(ptr_F);
|
||||
MinProblemSolver->setInitStep(step);
|
||||
double res = MinProblemSolver->minimize(P);
|
||||
|
||||
cout << "res " << res << endl;
|
||||
}
|
||||
@ -102,7 +103,7 @@ cerr << "Error:: " << e.what() << endl;
|
||||
int main()
|
||||
{
|
||||
|
||||
class DistanceToLines :public optim::Solver::Function {
|
||||
class DistanceToLines :public optim::MinProblemSolver::Function {
|
||||
public:
|
||||
double calc(const double* x)const{
|
||||
|
||||
@ -114,10 +115,10 @@ return x[0] * x[0] + x[1] * x[1];
|
||||
Mat P = (Mat_<double>(1, 2) << 1.0, 1.0);
|
||||
Mat step = (Mat_<double>(2, 1) << -0.5, 0.5);
|
||||
|
||||
Ptr<optim::Solver::Function> ptr_F(new DistanceToLines());
|
||||
Ptr<optim::DownhillSolver> solver = optim::createDownhillSolver();
|
||||
Ptr<optim::MinProblemSolver::Function> ptr_F(new DistanceToLines());
|
||||
Ptr<optim::DownhillSolver> MinProblemSolver = optim::createDownhillSolver();
|
||||
|
||||
test(solver, ptr_F, P, step);
|
||||
test(MinProblemSolver, ptr_F, P, step);
|
||||
|
||||
system("pause");
|
||||
return 0;
|
||||
@ -131,11 +132,8 @@ multiple lines in three dimensions as not all lines intersect in three dimension
|
||||
|
||||
*/
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
namespace cv{namespace optim{
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class DownhillSolverImpl : public DownhillSolver
|
||||
{
|
||||
@ -149,7 +147,7 @@ namespace cv{namespace optim{
|
||||
void setTermCriteria(const TermCriteria& termcrit);
|
||||
double minimize(InputOutputArray x);
|
||||
protected:
|
||||
Ptr<Solver::Function> _Function;
|
||||
Ptr<MinProblemSolver::Function> _Function;
|
||||
TermCriteria _termcrit;
|
||||
Mat _step;
|
||||
Mat_<double> buf_x;
|
||||
@ -157,8 +155,8 @@ namespace cv{namespace optim{
|
||||
private:
|
||||
inline void createInitialSimplex(Mat_<double>& simplex,Mat& step);
|
||||
inline double innerDownhillSimplex(cv::Mat_<double>& p,double MinRange,double MinError,int& nfunk,
|
||||
const Ptr<Solver::Function>& f,int nmax);
|
||||
inline double tryNewPoint(Mat_<double>& p,Mat_<double>& y,Mat_<double>& coord_sum,const Ptr<Solver::Function>& f,int ihi,
|
||||
const Ptr<MinProblemSolver::Function>& f,int nmax);
|
||||
inline double tryNewPoint(Mat_<double>& p,Mat_<double>& y,Mat_<double>& coord_sum,const Ptr<MinProblemSolver::Function>& f,int ihi,
|
||||
double fac,Mat_<double>& ptry);
|
||||
};
|
||||
|
||||
@ -166,7 +164,7 @@ namespace cv{namespace optim{
|
||||
Mat_<double>& p,
|
||||
Mat_<double>& y,
|
||||
Mat_<double>& coord_sum,
|
||||
const Ptr<Solver::Function>& f,
|
||||
const Ptr<MinProblemSolver::Function>& f,
|
||||
int ihi,
|
||||
double fac,
|
||||
Mat_<double>& ptry
|
||||
@ -182,7 +180,7 @@ namespace cv{namespace optim{
|
||||
{
|
||||
ptry(j)=coord_sum(j)*fac1-p(ihi,j)*fac2;
|
||||
}
|
||||
ytry=f->calc((double*)ptry.data);
|
||||
ytry=f->calc(ptry.ptr<double>());
|
||||
if (ytry < y(ihi))
|
||||
{
|
||||
y(ihi)=ytry;
|
||||
@ -197,7 +195,7 @@ namespace cv{namespace optim{
|
||||
}
|
||||
|
||||
/*
|
||||
Performs the actual minimization of Solver::Function f (after the initialization was done)
|
||||
Performs the actual minimization of MinProblemSolver::Function f (after the initialization was done)
|
||||
|
||||
The matrix p[ndim+1][1..ndim] represents ndim+1 vertices that
|
||||
form a simplex - each row is an ndim vector.
|
||||
@ -208,7 +206,7 @@ namespace cv{namespace optim{
|
||||
double MinRange,
|
||||
double MinError,
|
||||
int& nfunk,
|
||||
const Ptr<Solver::Function>& f,
|
||||
const Ptr<MinProblemSolver::Function>& f,
|
||||
int nmax
|
||||
)
|
||||
{
|
||||
@ -302,7 +300,7 @@ namespace cv{namespace optim{
|
||||
{
|
||||
p(i,j) = coord_sum(j) = 0.5*(p(i,j)+p(ilo,j));
|
||||
}
|
||||
y(i)=f->calc((double*)coord_sum.data);
|
||||
y(i)=f->calc(coord_sum.ptr<double>());
|
||||
}
|
||||
}
|
||||
nfunk += ndim;
|
||||
@ -345,7 +343,7 @@ namespace cv{namespace optim{
|
||||
|
||||
if(x_mat.rows>1){
|
||||
buf_x.create(1,_step.cols);
|
||||
Mat_<double> proxy(_step.cols,1,(double*)buf_x.data);
|
||||
Mat_<double> proxy(_step.cols,1,buf_x.ptr<double>());
|
||||
x_mat.copyTo(proxy);
|
||||
proxy_x=buf_x;
|
||||
}else{
|
||||
@ -365,7 +363,7 @@ namespace cv{namespace optim{
|
||||
dprintf(("%d iterations done\n",count));
|
||||
|
||||
if(x_mat.rows>1){
|
||||
Mat(x_mat.rows, 1, CV_64F, (double*)proxy_x.data).copyTo(x);
|
||||
Mat(x_mat.rows, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
@ -373,7 +371,7 @@ namespace cv{namespace optim{
|
||||
_Function=Ptr<Function>();
|
||||
_step=Mat_<double>();
|
||||
}
|
||||
Ptr<Solver::Function> DownhillSolverImpl::getFunction()const{
|
||||
Ptr<MinProblemSolver::Function> DownhillSolverImpl::getFunction()const{
|
||||
return _Function;
|
||||
}
|
||||
void DownhillSolverImpl::setFunction(const Ptr<Function>& f){
|
||||
@ -387,12 +385,12 @@ namespace cv{namespace optim{
|
||||
_termcrit=termcrit;
|
||||
}
|
||||
// both minRange & minError are specified by termcrit.epsilon; In addition, user may specify the number of iterations that the algorithm does.
|
||||
Ptr<DownhillSolver> createDownhillSolver(const Ptr<Solver::Function>& f, InputArray initStep, TermCriteria termcrit){
|
||||
DownhillSolver *DS=new DownhillSolverImpl();
|
||||
Ptr<DownhillSolver> DownhillSolver::create(const Ptr<MinProblemSolver::Function>& f, InputArray initStep, TermCriteria termcrit){
|
||||
Ptr<DownhillSolver> DS = makePtr<DownhillSolverImpl>();
|
||||
DS->setFunction(f);
|
||||
DS->setInitStep(initStep);
|
||||
DS->setTermCriteria(termcrit);
|
||||
return Ptr<DownhillSolver>(DS);
|
||||
return DS;
|
||||
}
|
||||
void DownhillSolverImpl::getInitStep(OutputArray step)const{
|
||||
_step.copyTo(step);
|
||||
@ -408,4 +406,4 @@ namespace cv{namespace optim{
|
||||
transpose(m,_step);
|
||||
}
|
||||
}
|
||||
}}
|
||||
}
|
@ -1547,7 +1547,7 @@ public:
|
||||
}
|
||||
|
||||
for( int i = range.start; i < range.end; ++i)
|
||||
if(!ippidft((Ipp32fc*)(src.data+i*src.step), (int)src.step,(Ipp32fc*)(dst.data+i*dst.step), (int)dst.step, pDFTSpec, (Ipp8u*)pBuffer))
|
||||
if(!ippidft(src.ptr<Ipp32fc>(i), (int)src.step,dst.ptr<Ipp32fc>(i), (int)dst.step, pDFTSpec, (Ipp8u*)pBuffer))
|
||||
{
|
||||
*ok = false;
|
||||
}
|
||||
@ -1718,9 +1718,9 @@ static bool ippi_DFT_C_32F(const Mat& src, Mat& dst, bool inv, int norm_flag)
|
||||
}
|
||||
|
||||
if (!inv)
|
||||
status = ippiDFTFwd_CToC_32fc_C1R( (Ipp32fc*)src.data, (int)src.step, (Ipp32fc*)dst.data, (int)dst.step, pDFTSpec, pBuffer );
|
||||
status = ippiDFTFwd_CToC_32fc_C1R( src.ptr<Ipp32fc>(), (int)src.step, dst.ptr<Ipp32fc>(), (int)dst.step, pDFTSpec, pBuffer );
|
||||
else
|
||||
status = ippiDFTInv_CToC_32fc_C1R( (Ipp32fc*)src.data, (int)src.step, (Ipp32fc*)dst.data, (int)dst.step, pDFTSpec, pBuffer );
|
||||
status = ippiDFTInv_CToC_32fc_C1R( src.ptr<Ipp32fc>(), (int)src.step, dst.ptr<Ipp32fc>(), (int)dst.step, pDFTSpec, pBuffer );
|
||||
|
||||
if ( sizeBuffer > 0 )
|
||||
ippFree( pBuffer );
|
||||
@ -2661,8 +2661,8 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows )
|
||||
{
|
||||
int a = 0, b = count;
|
||||
uchar *buf0, *buf1, *dbuf0, *dbuf1;
|
||||
const uchar* sptr0 = src.data;
|
||||
uchar* dptr0 = dst.data;
|
||||
const uchar* sptr0 = src.ptr();
|
||||
uchar* dptr0 = dst.ptr();
|
||||
buf0 = ptr;
|
||||
ptr += len*complex_elem_size;
|
||||
buf1 = ptr;
|
||||
@ -2911,9 +2911,9 @@ void cv::mulSpectrums( InputArray _srcA, InputArray _srcB,
|
||||
|
||||
if( depth == CV_32F )
|
||||
{
|
||||
const float* dataA = (const float*)srcA.data;
|
||||
const float* dataB = (const float*)srcB.data;
|
||||
float* dataC = (float*)dst.data;
|
||||
const float* dataA = srcA.ptr<float>();
|
||||
const float* dataB = srcB.ptr<float>();
|
||||
float* dataC = dst.ptr<float>();
|
||||
|
||||
size_t stepA = srcA.step/sizeof(dataA[0]);
|
||||
size_t stepB = srcB.step/sizeof(dataB[0]);
|
||||
@ -2978,9 +2978,9 @@ void cv::mulSpectrums( InputArray _srcA, InputArray _srcB,
|
||||
}
|
||||
else
|
||||
{
|
||||
const double* dataA = (const double*)srcA.data;
|
||||
const double* dataB = (const double*)srcB.data;
|
||||
double* dataC = (double*)dst.data;
|
||||
const double* dataA = srcA.ptr<double>();
|
||||
const double* dataB = srcB.ptr<double>();
|
||||
double* dataC = dst.ptr<double>();
|
||||
|
||||
size_t stepA = srcA.step/sizeof(dataA[0]);
|
||||
size_t stepB = srcB.step/sizeof(dataB[0]);
|
||||
@ -3299,7 +3299,7 @@ public:
|
||||
pBuffer = (uchar*)buf;
|
||||
|
||||
for( int i = range.start; i < range.end; ++i)
|
||||
if(!(*ippidct)((float*)(src->data+i*src->step), (int)src->step,(float*)(dst->data+i*dst->step), (int)dst->step, pDCTSpec, (Ipp8u*)pBuffer))
|
||||
if(!(*ippidct)(src->ptr<float>(i), (int)src->step,dst->ptr<float>(i), (int)dst->step, pDCTSpec, (Ipp8u*)pBuffer))
|
||||
*ok = false;
|
||||
}
|
||||
else
|
||||
@ -3368,7 +3368,7 @@ static bool ippi_DCT_32f(const Mat& src, Mat& dst, bool inv, bool row)
|
||||
buf.allocate( bufSize );
|
||||
pBuffer = (uchar*)buf;
|
||||
|
||||
status = ippFunc((float*)src.data, (int)src.step, (float*)dst.data, (int)dst.step, pDCTSpec, (Ipp8u*)pBuffer);
|
||||
status = ippFunc(src.ptr<float>(), (int)src.step, dst.ptr<float>(), (int)dst.step, pDCTSpec, (Ipp8u*)pBuffer);
|
||||
}
|
||||
|
||||
if (pDCTSpec)
|
||||
@ -3438,7 +3438,8 @@ void cv::dct( InputArray _src0, OutputArray _dst, int flags )
|
||||
|
||||
for( ; stage <= end_stage; stage++ )
|
||||
{
|
||||
uchar *sptr = src.data, *dptr = dst.data;
|
||||
const uchar* sptr = src.ptr();
|
||||
uchar* dptr = dst.ptr();
|
||||
size_t sstep0, sstep1, dstep0, dstep1;
|
||||
|
||||
if( stage == 0 )
|
||||
|
modules/core/src/kdtree.cpp (new file)
@ -0,0 +1,531 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// This is a reimplementation of kd-trees from cvkdtree*.* by Xavier Delacour, cleaned up and
|
||||
// adapted to work with the new OpenCV data structures. It's in cxcore to be shared by
|
||||
// both cv (CvFeatureTree) and ml (kNN).
|
||||
|
||||
// The algorithm is taken from:
|
||||
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
|
||||
// in high-dimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
|
||||
// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
|
||||
|
||||
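// Illustrative usage sketch (editorial addition, not part of the committed
// source). A minimal example of the KDTree class defined below, assuming the
// class is exposed through the OpenCV core headers of this revision:
//
//   #include <opencv2/core.hpp>
//
//   cv::Mat features(1000, 8, CV_32F);                       // 1000 points, 8 dims
//   cv::randu(features, cv::Scalar::all(0), cv::Scalar::all(1));
//   cv::KDTree tree(features, /*copyData=*/false);           // build the tree
//
//   cv::Mat query(1, 8, CV_32F);                             // one CV_32F query row
//   cv::randu(query, cv::Scalar::all(0), cv::Scalar::all(1));
//   cv::Mat nidx, ndist;
//   // 5 nearest neighbours, examining at most 32 leaves (emax)
//   tree.findNearest(query, 5, 32, nidx, cv::noArray(), ndist, cv::noArray());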
const int MAX_TREE_DEPTH = 32;
|
||||
|
||||
KDTree::KDTree()
|
||||
{
|
||||
maxDepth = -1;
|
||||
normType = NORM_L2;
|
||||
}
|
||||
|
||||
KDTree::KDTree(InputArray _points, bool _copyData)
|
||||
{
|
||||
maxDepth = -1;
|
||||
normType = NORM_L2;
|
||||
build(_points, _copyData);
|
||||
}
|
||||
|
||||
KDTree::KDTree(InputArray _points, InputArray _labels, bool _copyData)
|
||||
{
|
||||
maxDepth = -1;
|
||||
normType = NORM_L2;
|
||||
build(_points, _labels, _copyData);
|
||||
}
|
||||
|
||||
struct SubTree
|
||||
{
|
||||
SubTree() : first(0), last(0), nodeIdx(0), depth(0) {}
|
||||
SubTree(int _first, int _last, int _nodeIdx, int _depth)
|
||||
: first(_first), last(_last), nodeIdx(_nodeIdx), depth(_depth) {}
|
||||
int first;
|
||||
int last;
|
||||
int nodeIdx;
|
||||
int depth;
|
||||
};
|
||||
|
||||
|
||||
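// Quickselect with a median-of-three pivot: reorders ofs[a..b] in place so that
// the element at the middle index is the median of vals over that range, then
// returns that median value (used below as the node's splitting boundary).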
static float
|
||||
medianPartition( size_t* ofs, int a, int b, const float* vals )
|
||||
{
|
||||
int k, a0 = a, b0 = b;
|
||||
int middle = (a + b)/2;
|
||||
while( b > a )
|
||||
{
|
||||
int i0 = a, i1 = (a+b)/2, i2 = b;
|
||||
float v0 = vals[ofs[i0]], v1 = vals[ofs[i1]], v2 = vals[ofs[i2]];
|
||||
int ip = v0 < v1 ? (v1 < v2 ? i1 : v0 < v2 ? i2 : i0) :
|
||||
v0 < v2 ? i0 : (v1 < v2 ? i2 : i1);
|
||||
float pivot = vals[ofs[ip]];
|
||||
std::swap(ofs[ip], ofs[i2]);
|
||||
|
||||
for( i1 = i0, i0--; i1 <= i2; i1++ )
|
||||
if( vals[ofs[i1]] <= pivot )
|
||||
{
|
||||
i0++;
|
||||
std::swap(ofs[i0], ofs[i1]);
|
||||
}
|
||||
if( i0 == middle )
|
||||
break;
|
||||
if( i0 > middle )
|
||||
b = i0 - (b == i0);
|
||||
else
|
||||
a = i0;
|
||||
}
|
||||
|
||||
float pivot = vals[ofs[middle]];
|
||||
int less = 0, more = 0;
|
||||
for( k = a0; k < middle; k++ )
|
||||
{
|
||||
CV_Assert(vals[ofs[k]] <= pivot);
|
||||
less += vals[ofs[k]] < pivot;
|
||||
}
|
||||
for( k = b0; k > middle; k-- )
|
||||
{
|
||||
CV_Assert(vals[ofs[k]] >= pivot);
|
||||
more += vals[ofs[k]] > pivot;
|
||||
}
|
||||
CV_Assert(std::abs(more - less) <= 1);
|
||||
|
||||
return vals[ofs[middle]];
|
||||
}
|
||||
|
||||
static void
|
||||
computeSums( const Mat& points, const size_t* ofs, int a, int b, double* sums )
|
||||
{
|
||||
int i, j, dims = points.cols;
|
||||
const float* data = points.ptr<float>(0);
|
||||
for( j = 0; j < dims; j++ )
|
||||
sums[j*2] = sums[j*2+1] = 0;
|
||||
for( i = a; i <= b; i++ )
|
||||
{
|
||||
const float* row = data + ofs[i];
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
double t = row[j], s = sums[j*2] + t, s2 = sums[j*2+1] + t*t;
|
||||
sums[j*2] = s; sums[j*2+1] = s2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void KDTree::build(InputArray _points, bool _copyData)
|
||||
{
|
||||
build(_points, noArray(), _copyData);
|
||||
}
|
||||
|
||||
|
||||
void KDTree::build(InputArray __points, InputArray __labels, bool _copyData)
|
||||
{
|
||||
Mat _points = __points.getMat(), _labels = __labels.getMat();
|
||||
CV_Assert(_points.type() == CV_32F && !_points.empty());
|
||||
std::vector<KDTree::Node>().swap(nodes);
|
||||
|
||||
if( !_copyData )
|
||||
points = _points;
|
||||
else
|
||||
{
|
||||
points.release();
|
||||
points.create(_points.size(), _points.type());
|
||||
}
|
||||
|
||||
int i, j, n = _points.rows, ptdims = _points.cols, top = 0;
|
||||
const float* data = _points.ptr<float>(0);
|
||||
float* dstdata = points.ptr<float>(0);
|
||||
size_t step = _points.step1();
|
||||
size_t dstep = points.step1();
|
||||
int ptpos = 0;
|
||||
labels.resize(n);
|
||||
const int* _labels_data = 0;
|
||||
|
||||
if( !_labels.empty() )
|
||||
{
|
||||
int nlabels = _labels.checkVector(1, CV_32S, true);
|
||||
CV_Assert(nlabels == n);
|
||||
_labels_data = _labels.ptr<int>();
|
||||
}
|
||||
|
||||
Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F);
|
||||
SubTree stack[MAX_TREE_DEPTH*2];
|
||||
|
||||
std::vector<size_t> _ptofs(n);
|
||||
size_t* ptofs = &_ptofs[0];
|
||||
|
||||
for( i = 0; i < n; i++ )
|
||||
ptofs[i] = i*step;
|
||||
|
||||
nodes.push_back(Node());
|
||||
computeSums(points, ptofs, 0, n-1, sumstack.ptr<double>(top));
|
||||
stack[top++] = SubTree(0, n-1, 0, 0);
|
||||
int _maxDepth = 0;
|
||||
|
||||
while( --top >= 0 )
|
||||
{
|
||||
int first = stack[top].first, last = stack[top].last;
|
||||
int depth = stack[top].depth, nidx = stack[top].nodeIdx;
|
||||
int count = last - first + 1, dim = -1;
|
||||
const double* sums = sumstack.ptr<double>(top);
|
||||
double invCount = 1./count, maxVar = -1.;
|
||||
|
||||
if( count == 1 )
|
||||
{
|
||||
int idx0 = (int)(ptofs[first]/step);
|
||||
int idx = _copyData ? ptpos++ : idx0;
|
||||
nodes[nidx].idx = ~idx;
|
||||
if( _copyData )
|
||||
{
|
||||
const float* src = data + ptofs[first];
|
||||
float* dst = dstdata + idx*dstep;
|
||||
for( j = 0; j < ptdims; j++ )
|
||||
dst[j] = src[j];
|
||||
}
|
||||
labels[idx] = _labels_data ? _labels_data[idx0] : idx0;
|
||||
_maxDepth = std::max(_maxDepth, depth);
|
||||
continue;
|
||||
}
|
||||
|
||||
// find the dimensionality with the biggest variance
|
||||
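// var_j = E[x_j^2] - E[x_j]^2, computed from the running sums accumulated by
// computeSums(): sums[2*j] = sum(x_j), sums[2*j+1] = sum(x_j^2)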
for( j = 0; j < ptdims; j++ )
|
||||
{
|
||||
double m = sums[j*2]*invCount;
|
||||
double varj = sums[j*2+1]*invCount - m*m;
|
||||
if( maxVar < varj )
|
||||
{
|
||||
maxVar = varj;
|
||||
dim = j;
|
||||
}
|
||||
}
|
||||
|
||||
int left = (int)nodes.size(), right = left + 1;
|
||||
nodes.push_back(Node());
|
||||
nodes.push_back(Node());
|
||||
nodes[nidx].idx = dim;
|
||||
nodes[nidx].left = left;
|
||||
nodes[nidx].right = right;
|
||||
nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim);
|
||||
|
||||
int middle = (first + last)/2;
|
||||
double *lsums = (double*)sums, *rsums = lsums + ptdims*2;
|
||||
computeSums(points, ptofs, middle+1, last, rsums);
|
||||
for( j = 0; j < ptdims*2; j++ )
|
||||
lsums[j] = sums[j] - rsums[j];
|
||||
stack[top++] = SubTree(first, middle, left, depth+1);
|
||||
stack[top++] = SubTree(middle+1, last, right, depth+1);
|
||||
}
|
||||
maxDepth = _maxDepth;
|
||||
}
|
||||
|
||||
|
||||
struct PQueueElem
|
||||
{
|
||||
PQueueElem() : dist(0), idx(0) {}
|
||||
PQueueElem(float _dist, int _idx) : dist(_dist), idx(_idx) {}
|
||||
float dist;
|
||||
int idx;
|
||||
};
|
||||
|
||||
|
||||
int KDTree::findNearest(InputArray _vec, int K, int emax,
|
||||
OutputArray _neighborsIdx, OutputArray _neighbors,
|
||||
OutputArray _dist, OutputArray _labels) const
|
||||
|
||||
{
|
||||
Mat vecmat = _vec.getMat();
|
||||
CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols );
|
||||
const float* vec = vecmat.ptr<float>();
|
||||
K = std::min(K, points.rows);
|
||||
int ptdims = points.cols;
|
||||
|
||||
CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1));
|
||||
|
||||
AutoBuffer<uchar> _buf((K+1)*(sizeof(float) + sizeof(int)));
|
||||
int* idx = (int*)(uchar*)_buf;
|
||||
float* dist = (float*)(idx + K + 1);
|
||||
int i, j, ncount = 0, e = 0;
|
||||
|
||||
int qsize = 0, maxqsize = 1 << 10;
|
||||
AutoBuffer<uchar> _pqueue(maxqsize*sizeof(PQueueElem));
|
||||
PQueueElem* pqueue = (PQueueElem*)(uchar*)_pqueue;
|
||||
emax = std::max(emax, 1);
|
||||
|
||||
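// Best-Bin-First search (Beis & Lowe): visit at most emax leaves, keeping a
// binary min-heap (pqueue) of unexplored subtrees ordered by their lower-bound
// distance to the query vector.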
for( e = 0; e < emax; )
|
||||
{
|
||||
float d, alt_d = 0.f;
|
||||
int nidx;
|
||||
|
||||
if( e == 0 )
|
||||
nidx = 0;
|
||||
else
|
||||
{
|
||||
// take the next node from the priority queue
|
||||
if( qsize == 0 )
|
||||
break;
|
||||
nidx = pqueue[0].idx;
|
||||
alt_d = pqueue[0].dist;
|
||||
if( --qsize > 0 )
|
||||
{
|
||||
std::swap(pqueue[0], pqueue[qsize]);
|
||||
d = pqueue[0].dist;
|
||||
for( i = 0;;)
|
||||
{
|
||||
int left = i*2 + 1, right = i*2 + 2;
|
||||
if( left >= qsize )
|
||||
break;
|
||||
if( right < qsize && pqueue[right].dist < pqueue[left].dist )
|
||||
left = right;
|
||||
if( pqueue[left].dist >= d )
|
||||
break;
|
||||
std::swap(pqueue[i], pqueue[left]);
|
||||
i = left;
|
||||
}
|
||||
}
|
||||
|
||||
if( ncount == K && alt_d > dist[ncount-1] )
|
||||
continue;
|
||||
}
|
||||
|
||||
for(;;)
|
||||
{
|
||||
if( nidx < 0 )
|
||||
break;
|
||||
const Node& n = nodes[nidx];
|
||||
|
||||
if( n.idx < 0 )
|
||||
{
|
||||
i = ~n.idx;
|
||||
const float* row = points.ptr<float>(i);
|
||||
if( normType == NORM_L2 )
|
||||
for( j = 0, d = 0.f; j < ptdims; j++ )
|
||||
{
|
||||
float t = vec[j] - row[j];
|
||||
d += t*t;
|
||||
}
|
||||
else
|
||||
for( j = 0, d = 0.f; j < ptdims; j++ )
|
||||
d += std::abs(vec[j] - row[j]);
|
||||
|
||||
dist[ncount] = d;
|
||||
idx[ncount] = i;
|
||||
for( i = ncount-1; i >= 0; i-- )
|
||||
{
|
||||
if( dist[i] <= d )
|
||||
break;
|
||||
std::swap(dist[i], dist[i+1]);
|
||||
std::swap(idx[i], idx[i+1]);
|
||||
}
|
||||
ncount += ncount < K;
|
||||
e++;
|
||||
break;
|
||||
}
|
||||
|
||||
int alt;
|
||||
if( vec[n.idx] <= n.boundary )
|
||||
{
|
||||
nidx = n.left;
|
||||
alt = n.right;
|
||||
}
|
||||
else
|
||||
{
|
||||
nidx = n.right;
|
||||
alt = n.left;
|
||||
}
|
||||
|
||||
d = vec[n.idx] - n.boundary;
|
||||
if( normType == NORM_L2 )
|
||||
d = d*d + alt_d;
|
||||
else
|
||||
d = std::abs(d) + alt_d;
|
||||
// subtree pruning
|
||||
if( ncount == K && d > dist[ncount-1] )
|
||||
continue;
|
||||
// add alternative subtree to the priority queue
|
||||
pqueue[qsize] = PQueueElem(d, alt);
|
||||
for( i = qsize; i > 0; )
|
||||
{
|
||||
int parent = (i-1)/2;
|
||||
if( parent < 0 || pqueue[parent].dist <= d )
|
||||
break;
|
||||
std::swap(pqueue[i], pqueue[parent]);
|
||||
i = parent;
|
||||
}
|
||||
qsize += qsize+1 < maxqsize;
|
||||
}
|
||||
}
|
||||
|
||||
K = std::min(K, ncount);
|
||||
if( _neighborsIdx.needed() )
|
||||
{
|
||||
_neighborsIdx.create(K, 1, CV_32S, -1, true);
|
||||
Mat nidx = _neighborsIdx.getMat();
|
||||
Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx);
|
||||
}
|
||||
if( _dist.needed() )
|
||||
sqrt(Mat(K, 1, CV_32F, dist), _dist);
|
||||
|
||||
if( _neighbors.needed() || _labels.needed() )
|
||||
getPoints(Mat(K, 1, CV_32S, idx), _neighbors, _labels);
|
||||
return K;
|
||||
}
|
||||
|
||||
|
||||
void KDTree::findOrthoRange(InputArray _lowerBound,
|
||||
InputArray _upperBound,
|
||||
OutputArray _neighborsIdx,
|
||||
OutputArray _neighbors,
|
||||
OutputArray _labels ) const
|
||||
{
|
||||
int ptdims = points.cols;
|
||||
Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat();
|
||||
CV_Assert( lowerBound.size == upperBound.size &&
|
||||
lowerBound.isContinuous() &&
|
||||
upperBound.isContinuous() &&
|
||||
lowerBound.type() == upperBound.type() &&
|
||||
lowerBound.type() == CV_32F &&
|
||||
lowerBound.total() == (size_t)ptdims );
|
||||
const float* L = lowerBound.ptr<float>();
|
||||
const float* R = upperBound.ptr<float>();
|
||||
|
||||
std::vector<int> idx;
|
||||
AutoBuffer<int> _stack(MAX_TREE_DEPTH*2 + 1);
|
||||
int* stack = _stack;
|
||||
int top = 0;
|
||||
|
||||
stack[top++] = 0;
|
||||
|
||||
while( --top >= 0 )
|
||||
{
|
||||
int nidx = stack[top];
|
||||
if( nidx < 0 )
|
||||
break;
|
||||
const Node& n = nodes[nidx];
|
||||
if( n.idx < 0 )
|
||||
{
|
||||
int j, i = ~n.idx;
|
||||
const float* row = points.ptr<float>(i);
|
||||
for( j = 0; j < ptdims; j++ )
|
||||
if( row[j] < L[j] || row[j] >= R[j] )
|
||||
break;
|
||||
if( j == ptdims )
|
||||
idx.push_back(i);
|
||||
continue;
|
||||
}
|
||||
if( L[n.idx] <= n.boundary )
|
||||
stack[top++] = n.left;
|
||||
if( R[n.idx] > n.boundary )
|
||||
stack[top++] = n.right;
|
||||
}
|
||||
|
||||
if( _neighborsIdx.needed() )
|
||||
{
|
||||
_neighborsIdx.create((int)idx.size(), 1, CV_32S, -1, true);
|
||||
Mat nidx = _neighborsIdx.getMat();
|
||||
Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx);
|
||||
}
|
||||
getPoints( idx, _neighbors, _labels );
|
||||
}
|
||||
|
||||
|
||||
void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const
|
||||
{
|
||||
Mat idxmat = _idx.getMat(), pts, labelsmat;
|
||||
CV_Assert( idxmat.isContinuous() && idxmat.type() == CV_32S &&
|
||||
(idxmat.cols == 1 || idxmat.rows == 1) );
|
||||
const int* idx = idxmat.ptr<int>();
|
||||
int* dstlabels = 0;
|
||||
|
||||
int ptdims = points.cols;
|
||||
int i, nidx = (int)idxmat.total();
|
||||
if( nidx == 0 )
|
||||
{
|
||||
_pts.release();
|
||||
_labels.release();
|
||||
return;
|
||||
}
|
||||
|
||||
if( _pts.needed() )
|
||||
{
|
||||
_pts.create( nidx, ptdims, points.type());
|
||||
pts = _pts.getMat();
|
||||
}
|
||||
|
||||
if(_labels.needed())
|
||||
{
|
||||
_labels.create(nidx, 1, CV_32S, -1, true);
|
||||
labelsmat = _labels.getMat();
|
||||
CV_Assert( labelsmat.isContinuous() );
|
||||
dstlabels = labelsmat.ptr<int>();
|
||||
}
|
||||
const int* srclabels = !labels.empty() ? &labels[0] : 0;
|
||||
|
||||
for( i = 0; i < nidx; i++ )
|
||||
{
|
||||
int k = idx[i];
|
||||
CV_Assert( (unsigned)k < (unsigned)points.rows );
|
||||
const float* src = points.ptr<float>(k);
|
||||
if( !pts.empty() )
|
||||
std::copy(src, src + ptdims, pts.ptr<float>(i));
|
||||
if( dstlabels )
|
||||
dstlabels[i] = srclabels ? srclabels[k] : k;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const float* KDTree::getPoint(int ptidx, int* label) const
|
||||
{
|
||||
CV_Assert( (unsigned)ptidx < (unsigned)points.rows);
|
||||
if(label)
|
||||
*label = labels[ptidx];
|
||||
return points.ptr<float>(ptidx);
|
||||
}
|
||||
|
||||
|
||||
int KDTree::dims() const
|
||||
{
|
||||
return !points.empty() ? points.cols : 0;
|
||||
}
|
||||
|
||||
}
|
modules/core/src/kmeans.cpp (new file)
@ -0,0 +1,457 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
////////////////////////////////////////// kmeans ////////////////////////////////////////////
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
static void generateRandomCenter(const std::vector<Vec2f>& box, float* center, RNG& rng)
|
||||
{
|
||||
size_t j, dims = box.size();
|
||||
float margin = 1.f/dims;
|
||||
for( j = 0; j < dims; j++ )
|
||||
center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0];
|
||||
}
|
||||
|
||||
class KMeansPPDistanceComputer : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
KMeansPPDistanceComputer( float *_tdist2,
|
||||
const float *_data,
|
||||
const float *_dist,
|
||||
int _dims,
|
||||
size_t _step,
|
||||
size_t _stepci )
|
||||
: tdist2(_tdist2),
|
||||
data(_data),
|
||||
dist(_dist),
|
||||
dims(_dims),
|
||||
step(_step),
|
||||
stepci(_stepci) { }
|
||||
|
||||
void operator()( const cv::Range& range ) const
|
||||
{
|
||||
const int begin = range.start;
|
||||
const int end = range.end;
|
||||
|
||||
for ( int i = begin; i<end; i++ )
|
||||
{
|
||||
tdist2[i] = std::min(normL2Sqr_(data + step*i, data + stepci, dims), dist[i]);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
KMeansPPDistanceComputer& operator=(const KMeansPPDistanceComputer&); // to quiet MSVC
|
||||
|
||||
float *tdist2;
|
||||
const float *data;
|
||||
const float *dist;
|
||||
const int dims;
|
||||
const size_t step;
|
||||
const size_t stepci;
|
||||
};
|
||||
|
||||
/*
|
||||
k-means center initialization using the following algorithm:
|
||||
Arthur & Vassilvitskii (2007) k-means++: The Advantages of Careful Seeding
|
||||
*/
|
||||
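// Each new center is drawn with probability proportional to D(x)^2, the squared
// distance from x to its nearest already-chosen center ("D^2 weighting");
// `trials` candidate draws are made per center and the candidate giving the
// smallest total potential is kept.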
static void generateCentersPP(const Mat& _data, Mat& _out_centers,
|
||||
int K, RNG& rng, int trials)
|
||||
{
|
||||
int i, j, k, dims = _data.cols, N = _data.rows;
|
||||
const float* data = _data.ptr<float>(0);
|
||||
size_t step = _data.step/sizeof(data[0]);
|
||||
std::vector<int> _centers(K);
|
||||
int* centers = &_centers[0];
|
||||
std::vector<float> _dist(N*3);
|
||||
float* dist = &_dist[0], *tdist = dist + N, *tdist2 = tdist + N;
|
||||
double sum0 = 0;
|
||||
|
||||
centers[0] = (unsigned)rng % N;
|
||||
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
dist[i] = normL2Sqr_(data + step*i, data + step*centers[0], dims);
|
||||
sum0 += dist[i];
|
||||
}
|
||||
|
||||
for( k = 1; k < K; k++ )
|
||||
{
|
||||
double bestSum = DBL_MAX;
|
||||
int bestCenter = -1;
|
||||
|
||||
for( j = 0; j < trials; j++ )
|
||||
{
|
||||
double p = (double)rng*sum0, s = 0;
|
||||
for( i = 0; i < N-1; i++ )
|
||||
if( (p -= dist[i]) <= 0 )
|
||||
break;
|
||||
int ci = i;
|
||||
|
||||
parallel_for_(Range(0, N),
|
||||
KMeansPPDistanceComputer(tdist2, data, dist, dims, step, step*ci));
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
s += tdist2[i];
|
||||
}
|
||||
|
||||
if( s < bestSum )
|
||||
{
|
||||
bestSum = s;
|
||||
bestCenter = ci;
|
||||
std::swap(tdist, tdist2);
|
||||
}
|
||||
}
|
||||
centers[k] = bestCenter;
|
||||
sum0 = bestSum;
|
||||
std::swap(dist, tdist);
|
||||
}
|
||||
|
||||
for( k = 0; k < K; k++ )
|
||||
{
|
||||
const float* src = data + step*centers[k];
|
||||
float* dst = _out_centers.ptr<float>(k);
|
||||
for( j = 0; j < dims; j++ )
|
||||
dst[j] = src[j];
|
||||
}
|
||||
}
|
||||
|
||||
class KMeansDistanceComputer : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
KMeansDistanceComputer( double *_distances,
|
||||
int *_labels,
|
||||
const Mat& _data,
|
||||
const Mat& _centers )
|
||||
: distances(_distances),
|
||||
labels(_labels),
|
||||
data(_data),
|
||||
centers(_centers)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()( const Range& range ) const
|
||||
{
|
||||
const int begin = range.start;
|
||||
const int end = range.end;
|
||||
const int K = centers.rows;
|
||||
const int dims = centers.cols;
|
||||
|
||||
const float *sample;
|
||||
for( int i = begin; i<end; ++i)
|
||||
{
|
||||
sample = data.ptr<float>(i);
|
||||
int k_best = 0;
|
||||
double min_dist = DBL_MAX;
|
||||
|
||||
for( int k = 0; k < K; k++ )
|
||||
{
|
||||
const float* center = centers.ptr<float>(k);
|
||||
const double dist = normL2Sqr_(sample, center, dims);
|
||||
|
||||
if( min_dist > dist )
|
||||
{
|
||||
min_dist = dist;
|
||||
k_best = k;
|
||||
}
|
||||
}
|
||||
|
||||
distances[i] = min_dist;
|
||||
labels[i] = k_best;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
KMeansDistanceComputer& operator=(const KMeansDistanceComputer&); // to quiet MSVC
|
||||
|
||||
double *distances;
|
||||
int *labels;
|
||||
const Mat& data;
|
||||
const Mat& centers;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
double cv::kmeans( InputArray _data, int K,
|
||||
InputOutputArray _bestLabels,
|
||||
TermCriteria criteria, int attempts,
|
||||
int flags, OutputArray _centers )
|
||||
{
|
||||
const int SPP_TRIALS = 3;
|
||||
Mat data0 = _data.getMat();
|
||||
bool isrow = data0.rows == 1 && data0.channels() > 1;
|
||||
int N = !isrow ? data0.rows : data0.cols;
|
||||
int dims = (!isrow ? data0.cols : 1)*data0.channels();
|
||||
int type = data0.depth();
|
||||
|
||||
attempts = std::max(attempts, 1);
|
||||
CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
|
||||
CV_Assert( N >= K );
|
||||
|
||||
Mat data(N, dims, CV_32F, data0.ptr(), isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));
|
||||
|
||||
_bestLabels.create(N, 1, CV_32S, -1, true);
|
||||
|
||||
Mat _labels, best_labels = _bestLabels.getMat();
|
||||
if( flags & CV_KMEANS_USE_INITIAL_LABELS )
|
||||
{
|
||||
CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) &&
|
||||
best_labels.cols*best_labels.rows == N &&
|
||||
best_labels.type() == CV_32S &&
|
||||
best_labels.isContinuous());
|
||||
best_labels.copyTo(_labels);
|
||||
}
|
||||
else
|
||||
{
|
||||
if( !((best_labels.cols == 1 || best_labels.rows == 1) &&
|
||||
best_labels.cols*best_labels.rows == N &&
|
||||
best_labels.type() == CV_32S &&
|
||||
best_labels.isContinuous()))
|
||||
best_labels.create(N, 1, CV_32S);
|
||||
_labels.create(best_labels.size(), best_labels.type());
|
||||
}
|
||||
int* labels = _labels.ptr<int>();
|
||||
|
||||
Mat centers(K, dims, type), old_centers(K, dims, type), temp(1, dims, type);
|
||||
std::vector<int> counters(K);
|
||||
std::vector<Vec2f> _box(dims);
|
||||
Vec2f* box = &_box[0];
|
||||
double best_compactness = DBL_MAX, compactness = 0;
|
||||
RNG& rng = theRNG();
|
||||
int a, iter, i, j, k;
|
||||
|
||||
if( criteria.type & TermCriteria::EPS )
|
||||
criteria.epsilon = std::max(criteria.epsilon, 0.);
|
||||
else
|
||||
criteria.epsilon = FLT_EPSILON;
|
||||
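// max_center_shift below is accumulated as a squared distance, so epsilon is
// squared once here to keep the termination test consistent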
criteria.epsilon *= criteria.epsilon;
|
||||
|
||||
if( criteria.type & TermCriteria::COUNT )
|
||||
criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100);
|
||||
else
|
||||
criteria.maxCount = 100;
|
||||
|
||||
if( K == 1 )
|
||||
{
|
||||
attempts = 1;
|
||||
criteria.maxCount = 2;
|
||||
}
|
||||
|
||||
const float* sample = data.ptr<float>(0);
|
||||
for( j = 0; j < dims; j++ )
|
||||
box[j] = Vec2f(sample[j], sample[j]);
|
||||
|
||||
for( i = 1; i < N; i++ )
|
||||
{
|
||||
sample = data.ptr<float>(i);
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
float v = sample[j];
|
||||
box[j][0] = std::min(box[j][0], v);
|
||||
box[j][1] = std::max(box[j][1], v);
|
||||
}
|
||||
}
|
||||
|
||||
for( a = 0; a < attempts; a++ )
|
||||
{
|
||||
double max_center_shift = DBL_MAX;
|
||||
for( iter = 0;; )
|
||||
{
|
||||
swap(centers, old_centers);
|
||||
|
||||
if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) )
|
||||
{
|
||||
if( flags & KMEANS_PP_CENTERS )
|
||||
generateCentersPP(data, centers, K, rng, SPP_TRIALS);
|
||||
else
|
||||
{
|
||||
for( k = 0; k < K; k++ )
|
||||
generateRandomCenter(_box, centers.ptr<float>(k), rng);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) )
|
||||
{
|
||||
for( i = 0; i < N; i++ )
|
||||
CV_Assert( (unsigned)labels[i] < (unsigned)K );
|
||||
}
|
||||
|
||||
// compute centers
|
||||
centers = Scalar(0);
|
||||
for( k = 0; k < K; k++ )
|
||||
counters[k] = 0;
|
||||
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
sample = data.ptr<float>(i);
|
||||
k = labels[i];
|
||||
float* center = centers.ptr<float>(k);
|
||||
j=0;
|
||||
#if CV_ENABLE_UNROLLED
|
||||
for(; j <= dims - 4; j += 4 )
|
||||
{
|
||||
float t0 = center[j] + sample[j];
|
||||
float t1 = center[j+1] + sample[j+1];
|
||||
|
||||
center[j] = t0;
|
||||
center[j+1] = t1;
|
||||
|
||||
t0 = center[j+2] + sample[j+2];
|
||||
t1 = center[j+3] + sample[j+3];
|
||||
|
||||
center[j+2] = t0;
|
||||
center[j+3] = t1;
|
||||
}
|
||||
#endif
|
||||
for( ; j < dims; j++ )
|
||||
center[j] += sample[j];
|
||||
counters[k]++;
|
||||
}
|
||||
|
||||
if( iter > 0 )
|
||||
max_center_shift = 0;
|
||||
|
||||
for( k = 0; k < K; k++ )
|
||||
{
|
||||
if( counters[k] != 0 )
|
||||
continue;
|
||||
|
||||
// if some cluster appeared to be empty then:
|
||||
// 1. find the biggest cluster
|
||||
// 2. find the farthest from the center point in the biggest cluster
|
||||
// 3. exclude the farthest point from the biggest cluster and form a new 1-point cluster.
|
||||
int max_k = 0;
|
||||
for( int k1 = 1; k1 < K; k1++ )
|
||||
{
|
||||
if( counters[max_k] < counters[k1] )
|
||||
max_k = k1;
|
||||
}
|
||||
|
||||
double max_dist = 0;
|
||||
int farthest_i = -1;
|
||||
float* new_center = centers.ptr<float>(k);
|
||||
float* old_center = centers.ptr<float>(max_k);
|
||||
float* _old_center = temp.ptr<float>(); // normalized
|
||||
float scale = 1.f/counters[max_k];
|
||||
for( j = 0; j < dims; j++ )
|
||||
_old_center[j] = old_center[j]*scale;
|
||||
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
if( labels[i] != max_k )
|
||||
continue;
|
||||
sample = data.ptr<float>(i);
|
||||
double dist = normL2Sqr_(sample, _old_center, dims);
|
||||
|
||||
if( max_dist <= dist )
|
||||
{
|
||||
max_dist = dist;
|
||||
farthest_i = i;
|
||||
}
|
||||
}
|
||||
|
||||
counters[max_k]--;
|
||||
counters[k]++;
|
||||
labels[farthest_i] = k;
|
||||
sample = data.ptr<float>(farthest_i);
|
||||
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
old_center[j] -= sample[j];
|
||||
new_center[j] += sample[j];
|
||||
}
|
||||
}
|
||||
|
||||
for( k = 0; k < K; k++ )
|
||||
{
|
||||
float* center = centers.ptr<float>(k);
|
||||
CV_Assert( counters[k] != 0 );
|
||||
|
||||
float scale = 1.f/counters[k];
|
||||
for( j = 0; j < dims; j++ )
|
||||
center[j] *= scale;
|
||||
|
||||
if( iter > 0 )
|
||||
{
|
||||
double dist = 0;
|
||||
const float* old_center = old_centers.ptr<float>(k);
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
double t = center[j] - old_center[j];
|
||||
dist += t*t;
|
||||
}
|
||||
max_center_shift = std::max(max_center_shift, dist);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
|
||||
break;
|
||||
|
||||
// assign labels
|
||||
Mat dists(1, N, CV_64F);
|
||||
double* dist = dists.ptr<double>(0);
|
||||
parallel_for_(Range(0, N),
|
||||
KMeansDistanceComputer(dist, labels, data, centers));
|
||||
compactness = 0;
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
compactness += dist[i];
|
||||
}
|
||||
}
|
||||
|
||||
if( compactness < best_compactness )
|
||||
{
|
||||
best_compactness = compactness;
|
||||
if( _centers.needed() )
|
||||
centers.copyTo(_centers);
|
||||
_labels.copyTo(best_labels);
|
||||
}
|
||||
}
|
||||
|
||||
return best_compactness;
|
||||
}
|
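// Illustrative usage sketch (editorial addition, not part of the committed
// source). A minimal call of cv::kmeans as implemented above, assuming the
// OpenCV core headers of this revision:
//
//   #include <opencv2/core.hpp>
//
//   cv::Mat samples(300, 2, CV_32F);                       // 300 2-D points
//   cv::randu(samples, cv::Scalar::all(0), cv::Scalar::all(100));
//   cv::Mat labels, centers;
//   double compactness = cv::kmeans(
//       samples, 3, labels,
//       cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
//       3, cv::KMEANS_PP_CENTERS, centers);                 // k-means++ seeding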
@ -859,7 +859,7 @@ double cv::determinant( InputArray _mat )
|
||||
double result = 0;
|
||||
int type = mat.type(), rows = mat.rows;
|
||||
size_t step = mat.step;
|
||||
const uchar* m = mat.data;
|
||||
const uchar* m = mat.ptr();
|
||||
|
||||
CV_Assert( !mat.empty() );
|
||||
CV_Assert( mat.rows == mat.cols && (type == CV_32F || type == CV_64F));
|
||||
@ -882,11 +882,11 @@ double cv::determinant( InputArray _mat )
|
||||
Mat a(rows, rows, CV_32F, (uchar*)buffer);
|
||||
mat.copyTo(a);
|
||||
|
||||
result = LU((float*)a.data, a.step, rows, 0, 0, 0);
|
||||
result = LU(a.ptr<float>(), a.step, rows, 0, 0, 0);
|
||||
if( result )
|
||||
{
|
||||
for( int i = 0; i < rows; i++ )
|
||||
result *= ((const float*)(a.data + a.step*i))[i];
|
||||
result *= a.at<float>(i,i);
|
||||
result = 1./result;
|
||||
}
|
||||
}
|
||||
@ -906,11 +906,11 @@ double cv::determinant( InputArray _mat )
|
||||
Mat a(rows, rows, CV_64F, (uchar*)buffer);
|
||||
mat.copyTo(a);
|
||||
|
||||
result = LU((double*)a.data, a.step, rows, 0, 0, 0);
|
||||
result = LU(a.ptr<double>(), a.step, rows, 0, 0, 0);
|
||||
if( result )
|
||||
{
|
||||
for( int i = 0; i < rows; i++ )
|
||||
result *= ((const double*)(a.data + a.step*i))[i];
|
||||
result *= a.at<double>(i,i);
|
||||
result = 1./result;
|
||||
}
|
||||
}
|
||||
@ -949,8 +949,8 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
|
||||
AutoBuffer<uchar> _buf((m*nm + nm + nm*n)*esz + sizeof(double));
|
||||
uchar* buf = alignPtr((uchar*)_buf, (int)esz);
|
||||
Mat u(m, nm, type, buf);
|
||||
Mat w(nm, 1, type, u.data + m*nm*esz);
|
||||
Mat vt(nm, n, type, w.data + nm*esz);
|
||||
Mat w(nm, 1, type, u.ptr() + m*nm*esz);
|
||||
Mat vt(nm, n, type, w.ptr() + nm*esz);
|
||||
|
||||
SVD::compute(src, w, u, vt);
|
||||
SVD::backSubst(w, u, vt, Mat(), _dst);
|
||||
@ -968,8 +968,8 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
|
||||
AutoBuffer<uchar> _buf((n*n*2 + n)*esz + sizeof(double));
|
||||
uchar* buf = alignPtr((uchar*)_buf, (int)esz);
|
||||
Mat u(n, n, type, buf);
|
||||
Mat w(n, 1, type, u.data + n*n*esz);
|
||||
Mat vt(n, n, type, w.data + n*esz);
|
||||
Mat w(n, 1, type, u.ptr() + n*n*esz);
|
||||
Mat vt(n, n, type, w.ptr() + n*esz);
|
||||
|
||||
eigen(src, w, vt);
|
||||
transpose(vt, u);
|
||||
@ -988,8 +988,8 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
|
||||
|
||||
if( n <= 3 )
|
||||
{
|
||||
const uchar* srcdata = src.data;
|
||||
uchar* dstdata = dst.data;
|
||||
const uchar* srcdata = src.ptr();
|
||||
uchar* dstdata = dst.ptr();
|
||||
size_t srcstep = src.step;
|
||||
size_t dststep = dst.step;
|
||||
|
||||
@ -1169,13 +1169,13 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
|
||||
setIdentity(dst);
|
||||
|
||||
if( method == DECOMP_LU && type == CV_32F )
|
||||
result = LU((float*)src1.data, src1.step, n, (float*)dst.data, dst.step, n) != 0;
|
||||
result = LU(src1.ptr<float>(), src1.step, n, dst.ptr<float>(), dst.step, n) != 0;
|
||||
else if( method == DECOMP_LU && type == CV_64F )
|
||||
result = LU((double*)src1.data, src1.step, n, (double*)dst.data, dst.step, n) != 0;
|
||||
result = LU(src1.ptr<double>(), src1.step, n, dst.ptr<double>(), dst.step, n) != 0;
|
||||
else if( method == DECOMP_CHOLESKY && type == CV_32F )
|
||||
result = Cholesky((float*)src1.data, src1.step, n, (float*)dst.data, dst.step, n);
|
||||
result = Cholesky(src1.ptr<float>(), src1.step, n, dst.ptr<float>(), dst.step, n);
|
||||
else
|
||||
result = Cholesky((double*)src1.data, src1.step, n, (double*)dst.data, dst.step, n);
|
||||
result = Cholesky(src1.ptr<double>(), src1.step, n, dst.ptr<double>(), dst.step, n);
|
||||
|
||||
if( !result )
|
||||
dst = Scalar(0);
|
||||
@ -1212,9 +1212,9 @@ bool cv::solve( InputArray _src, InputArray _src2arg, OutputArray _dst, int meth
|
||||
#define bf(y) ((float*)(bdata + y*src2step))[0]
|
||||
#define bd(y) ((double*)(bdata + y*src2step))[0]
|
||||
|
||||
const uchar* srcdata = src.data;
|
||||
const uchar* bdata = _src2.data;
|
||||
uchar* dstdata = dst.data;
|
||||
const uchar* srcdata = src.ptr();
|
||||
const uchar* bdata = _src2.ptr();
|
||||
uchar* dstdata = dst.ptr();
|
||||
size_t srcstep = src.step;
|
||||
size_t src2step = _src2.step;
|
||||
size_t dststep = dst.step;
|
||||
@ -1709,23 +1709,23 @@ cvEigenVV( CvArr* srcarr, CvArr* evectsarr, CvArr* evalsarr, double,
|
||||
eigen(src, evals, evects);
|
||||
if( evects0.data != evects.data )
|
||||
{
|
||||
const uchar* p = evects0.data;
|
||||
const uchar* p = evects0.ptr();
|
||||
evects.convertTo(evects0, evects0.type());
|
||||
CV_Assert( p == evects0.data );
|
||||
CV_Assert( p == evects0.ptr() );
|
||||
}
|
||||
}
|
||||
else
|
||||
eigen(src, evals);
|
||||
if( evals0.data != evals.data )
|
||||
{
|
||||
const uchar* p = evals0.data;
|
||||
const uchar* p = evals0.ptr();
|
||||
if( evals0.size() == evals.size() )
|
||||
evals.convertTo(evals0, evals0.type());
|
||||
else if( evals0.type() == evals.type() )
|
||||
cv::transpose(evals, evals0);
|
||||
else
|
||||
cv::Mat(evals.t()).convertTo(evals0, evals0.type());
|
||||
CV_Assert( p == evals0.data );
|
||||
CV_Assert( p == evals0.ptr() );
|
||||
}
|
||||
}
|
||||
|
||||
@ -1743,7 +1743,7 @@ cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
|
||||
cv::SVD svd;
|
||||
|
||||
if( w.size() == cv::Size(nm, 1) )
|
||||
svd.w = cv::Mat(nm, 1, type, w.data );
|
||||
svd.w = cv::Mat(nm, 1, type, w.ptr() );
|
||||
else if( w.isContinuous() )
|
||||
svd.w = w;
|
||||
|
||||
@ -1766,7 +1766,7 @@ cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
|
||||
((m != n && (svd.u.size() == cv::Size(mn, mn) ||
|
||||
svd.vt.size() == cv::Size(mn, mn))) ? cv::SVD::FULL_UV : 0));
|
||||
|
||||
if( u.data )
|
||||
if( !u.empty() )
|
||||
{
|
||||
if( flags & CV_SVD_U_T )
|
||||
cv::transpose( svd.u, u );
|
||||
@ -1777,7 +1777,7 @@ cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
|
||||
}
|
||||
}
|
||||
|
||||
if( v.data )
|
||||
if( !v.empty() )
|
||||
{
|
||||
if( !(flags & CV_SVD_V_T) )
|
||||
cv::transpose( svd.vt, v );
|
||||
|
@ -42,9 +42,13 @@
|
||||
#include <climits>
|
||||
#include <algorithm>
|
||||
#include <cstdarg>
|
||||
#include <debug.hpp>
|
||||
|
||||
namespace cv{namespace optim{
|
||||
#define dprintf(x)
|
||||
#define print_matrix(x)
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
using std::vector;
|
||||
|
||||
#ifdef ALEX_DEBUG
|
||||
@ -355,4 +359,4 @@ static inline void swap_columns(Mat_<double>& A,int col1,int col2){
|
||||
A(i,col2)=tmp;
|
||||
}
|
||||
}
|
||||
}}
|
||||
}
|
@ -786,7 +786,7 @@ void polarToCart( InputArray src1, InputArray src2,
depth == CV_64F ? (ippsPolarToCart)ippsPolarToCart_64f : 0;
CV_Assert(ippFunc != 0);

IppStatus status = ippFunc(Mag.data, Angle.data, X.data, Y.data, static_cast<int>(cn * X.total()));
IppStatus status = ippFunc(Mag.ptr(), Angle.ptr(), X.ptr(), Y.ptr(), static_cast<int>(cn * X.total()));
if (status >= 0)
return;
setIppErrorStatus();
@ -2220,7 +2220,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
}
size.width *= cn;

IppStatus status = ippiSqr_32f_C1R((const Ipp32f *)src.data, srcstep, (Ipp32f *)dst.data, dststep, ippiSize(size.width, size.height));
IppStatus status = ippiSqr_32f_C1R(src.ptr<Ipp32f>(), srcstep, dst.ptr<Ipp32f>(), dststep, ippiSize(size.width, size.height));

if (status >= 0)
return;
@ -2278,8 +2278,8 @@ void pow( InputArray _src, double power, OutputArray _dst )
if (src.isContinuous() && dst.isContinuous())
{
IppStatus status = depth == CV_32F ?
ippsPowx_32f_A21((const Ipp32f *)src.data, (Ipp32f)power, (Ipp32f*)dst.data, (Ipp32s)(src.total() * cn)) :
ippsPowx_64f_A50((const Ipp64f *)src.data, power, (Ipp64f*)dst.data, (Ipp32s)(src.total() * cn));
ippsPowx_32f_A21(src.ptr<Ipp32f>(), (Ipp32f)power, dst.ptr<Ipp32f>(), (Ipp32s)(src.total() * cn)) :
ippsPowx_64f_A50(src.ptr<Ipp64f>(), power, dst.ptr<Ipp64f>(), (Ipp32s)(src.total() * cn));

if (status >= 0)
return;
@ -2451,7 +2451,7 @@ bool checkRange(InputArray _src, bool quiet, Point* pt, double minVal, double ma
{
Cv32suf a, b;
int ia, ib;
const int* isrc = (const int*)src.data;
const int* isrc = src.ptr<int>();
size_t step = src.step/sizeof(isrc[0]);

a.f = (float)std::max(minVal, (double)-FLT_MAX);
@ -2480,7 +2480,7 @@ bool checkRange(InputArray _src, bool quiet, Point* pt, double minVal, double ma
{
Cv64suf a, b;
int64 ia, ib;
const int64* isrc = (const int64*)src.data;
const int64* isrc = src.ptr<int64>();
size_t step = src.step/sizeof(isrc[0]);

a.f = minVal;
@ -781,8 +781,9 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
InputArray matC, double beta, OutputArray _matD, int flags )
{
#ifdef HAVE_CLAMDBLAS
CV_OCL_RUN(ocl::haveAmdBlas() && matA.dims() <= 2 && matB.dims() <= 2 && matC.dims() <= 2 && _matD.isUMat(),
ocl_gemm(matA, matB, alpha, matC, beta, _matD, flags))
CV_OCL_RUN(ocl::haveAmdBlas() && matA.dims() <= 2 && matB.dims() <= 2 && matC.dims() <= 2 && _matD.isUMat() &&
matA.cols() > 20 && matA.rows() > 20 && matB.cols() > 20, // since it works incorrect for small sizes
ocl_gemm(matA, matB, alpha, matC, beta, _matD, flags))
#endif

const int block_lin_size = 128;
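For reference, the routine being guarded here computes a generalized matrix product. A minimal usage sketch of the public cv::gemm interface (not part of the patch):

#include <opencv2/core.hpp>

// D = alpha*op(A)*op(B) + beta*op(C); the GEMM_*_T flags transpose an operand.
static void gemmDemo()
{
    cv::Mat A = cv::Mat::eye(3, 3, CV_32F);
    cv::Mat B = (cv::Mat_<float>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9);
    cv::Mat C = cv::Mat::ones(3, 3, CV_32F), D;
    cv::gemm(A, B, 2.0, C, 0.5, D, cv::GEMM_2_T);   // D = 2*A*B' + 0.5*C
}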
@ -821,7 +822,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
break;
}

if( C.data )
if( !C.empty() )
{
CV_Assert( C.type() == type &&
(((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) ||
@ -840,9 +841,9 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
{
if( type == CV_32F )
{
float* d = (float*)D.data;
const float *a = (const float*)A.data,
*b = (const float*)B.data,
float* d = D.ptr<float>();
const float *a = A.ptr<float>(),
*b = B.ptr<float>(),
*c = (const float*)C.data;
size_t d_step = D.step/sizeof(d[0]),
a_step = A.step/sizeof(a[0]),
@ -968,9 +969,9 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,

if( type == CV_64F )
{
double* d = (double*)D.data;
const double *a = (const double*)A.data,
*b = (const double*)B.data,
double* d = D.ptr<double>();
const double *a = A.ptr<double>(),
*b = B.ptr<double>(),
*c = (const double*)C.data;
size_t d_step = D.step/sizeof(d[0]),
a_step = A.step/sizeof(a[0]),
@ -1100,6 +1101,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
GEMMBlockMulFunc blockMulFunc;
GEMMStoreFunc storeFunc;
Mat *matD = &D, tmat;
int tmat_size = 0;
const uchar* Cdata = C.data;
size_t Cstep = C.data ? (size_t)C.step : 0;
AutoBuffer<uchar> buf;
@ -1132,8 +1134,8 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,

if( D.data == A.data || D.data == B.data )
{
buf.allocate(d_size.width*d_size.height*CV_ELEM_SIZE(type));
tmat = Mat(d_size.height, d_size.width, type, (uchar*)buf );
tmat_size = d_size.width*d_size.height*CV_ELEM_SIZE(type);
// Allocate tmat later, once the size of buf is known
matD = &tmat;
}

@ -1210,8 +1212,12 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
(d_size.width <= block_lin_size &&
d_size.height <= block_lin_size && len <= block_lin_size) )
{
singleMulFunc( A.data, A.step, B.data, b_step, Cdata, Cstep,
matD->data, matD->step, a_size, d_size, alpha, beta, flags );
if( tmat_size > 0 ) {
buf.allocate(tmat_size);
tmat = Mat(d_size.height, d_size.width, type, (uchar*)buf );
}
singleMulFunc( A.ptr(), A.step, B.ptr(), b_step, Cdata, Cstep,
matD->ptr(), matD->step, a_size, d_size, alpha, beta, flags );
}
else
{
@ -1238,7 +1244,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
else
b_step0 = elem_size, b_step1 = b_step;

if( !C.data )
if( C.empty() )
{
c_step0 = c_step1 = 0;
flags &= ~GEMM_3_T;
@ -1269,12 +1275,14 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
flags &= ~GEMM_1_T;
}

buf.allocate(a_buf_size + b_buf_size + d_buf_size);
buf.allocate(d_buf_size + b_buf_size + a_buf_size + tmat_size);
d_buf = (uchar*)buf;
b_buf = d_buf + d_buf_size;

if( is_a_t )
a_buf = b_buf + b_buf_size;
if( tmat_size > 0 )
tmat = Mat(d_size.height, d_size.width, type, b_buf + b_buf_size + a_buf_size );

for( i = 0; i < d_size.height; i += di )
{
@ -1284,7 +1292,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,

for( j = 0; j < d_size.width; j += dj )
{
uchar* _d = matD->data + i*matD->step + j*elem_size;
uchar* _d = matD->ptr() + i*matD->step + j*elem_size;
const uchar* _c = Cdata + i*c_step0 + j*c_step1;
size_t _d_step = matD->step;
dj = dn0;
@ -1301,9 +1309,9 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,

for( k = 0; k < len; k += dk )
{
const uchar* _a = A.data + i*a_step0 + k*a_step1;
const uchar* _a = A.ptr() + i*a_step0 + k*a_step1;
size_t _a_step = A.step;
const uchar* _b = B.data + k*b_step0 + j*b_step1;
const uchar* _b = B.ptr() + k*b_step0 + j*b_step1;
size_t _b_step = b_step;
Size a_bl_size;

@ -1348,7 +1356,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,

if( dk0 < len )
storeFunc( _c, Cstep, _d, _d_step,
matD->data + i*matD->step + j*elem_size,
matD->ptr(i) + j*elem_size,
matD->step, Size(dj,di), alpha, beta, flags );
}
}
@ -1857,7 +1865,7 @@ void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx )
_mbuf.allocate(dcn*(scn+1));
mbuf = (double*)_mbuf;
Mat tmp(dcn, scn+1, mtype, mbuf);
memset(tmp.data, 0, tmp.total()*tmp.elemSize());
memset(tmp.ptr(), 0, tmp.total()*tmp.elemSize());
if( m.cols == scn+1 )
m.convertTo(tmp, mtype);
else
@ -1868,7 +1876,7 @@ void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx )
m = tmp;
}
else
mbuf = (double*)m.data;
mbuf = m.ptr<double>();

if( scn == dcn )
{
@ -2038,7 +2046,7 @@ void cv::perspectiveTransform( InputArray _src, OutputArray _dst, InputArray _mt
m = tmp;
}
else
mbuf = (double*)m.data;
mbuf = m.ptr<double>();

TransformFunc func = depth == CV_32F ?
(TransformFunc)perspectiveTransform_32f :
@ -2226,7 +2234,7 @@ void cv::scaleAdd( InputArray _src1, double alpha, InputArray _src2, OutputArray
if (src1.isContinuous() && src2.isContinuous() && dst.isContinuous())
{
size_t len = src1.total()*cn;
func(src1.data, src2.data, dst.data, (int)len, palpha);
func(src1.ptr(), src2.ptr(), dst.ptr(), (int)len, palpha);
return;
}

@ -2270,7 +2278,7 @@ void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean,
{
CV_Assert( data[i].size() == size && data[i].type() == type );
if( data[i].isContinuous() )
memcpy( _data.ptr(i), data[i].data, sz*esz );
memcpy( _data.ptr(i), data[i].ptr(), sz*esz );
else
{
Mat dataRow(size.height, size.width, type, _data.ptr(i));
@ -2391,12 +2399,12 @@ double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar )

if( depth == CV_32F )
{
const float* src1 = (const float*)v1.data;
const float* src2 = (const float*)v2.data;
const float* src1 = v1.ptr<float>();
const float* src2 = v2.ptr<float>();
size_t step1 = v1.step/sizeof(src1[0]);
size_t step2 = v2.step/sizeof(src2[0]);
double* diff = buf;
const float* mat = (const float*)icovar.data;
const float* mat = icovar.ptr<float>();
size_t matstep = icovar.step/sizeof(mat[0]);

for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
@ -2422,12 +2430,12 @@ double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar )
}
else if( depth == CV_64F )
{
const double* src1 = (const double*)v1.data;
const double* src2 = (const double*)v2.data;
const double* src1 = v1.ptr<double>();
const double* src2 = v2.ptr<double>();
size_t step1 = v1.step/sizeof(src1[0]);
size_t step2 = v2.step/sizeof(src2[0]);
double* diff = buf;
const double* mat = (const double*)icovar.data;
const double* mat = icovar.ptr<double>();
size_t matstep = icovar.step/sizeof(mat[0]);

for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
@ -2468,9 +2476,9 @@ template<typename sT, typename dT> static void
MulTransposedR( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
int i, j, k;
const sT* src = (const sT*)srcmat.data;
dT* dst = (dT*)dstmat.data;
const dT* delta = (const dT*)deltamat.data;
const sT* src = srcmat.ptr<sT>();
dT* dst = dstmat.ptr<dT>();
const dT* delta = deltamat.ptr<dT>();
size_t srcstep = srcmat.step/sizeof(src[0]);
size_t dststep = dstmat.step/sizeof(dst[0]);
size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
@ -2587,9 +2595,9 @@ template<typename sT, typename dT> static void
MulTransposedL( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
int i, j, k;
const sT* src = (const sT*)srcmat.data;
dT* dst = (dT*)dstmat.data;
const dT* delta = (const dT*)deltamat.data;
const sT* src = srcmat.ptr<sT>();
dT* dst = dstmat.ptr<dT>();
const dT* delta = deltamat.ptr<dT>();
size_t srcstep = srcmat.step/sizeof(src[0]);
size_t dststep = dstmat.step/sizeof(dst[0]);
size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
@ -2668,7 +2676,7 @@ void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata,
dtype = std::max(std::max(CV_MAT_DEPTH(dtype >= 0 ? dtype : stype), delta.depth()), CV_32F);
CV_Assert( src.channels() == 1 );

if( delta.data )
if( !delta.empty() )
{
CV_Assert( delta.channels() == 1 &&
(delta.rows == src.rows || delta.rows == 1) &&
@ -2687,7 +2695,7 @@ void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata,
{
Mat src2;
const Mat* tsrc = &src;
if( delta.data )
if( !delta.empty() )
{
if( delta.size() == src.size() )
subtract( src, delta, src2 );
@ -2958,341 +2966,6 @@ double Mat::dot(InputArray _mat) const
return r;
}
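The MulTransposedR/L templates above back the public cv::mulTransposed, which forms A'*A or A*A' (optionally after subtracting a delta row/column). A minimal usage sketch, not part of the patch:

#include <opencv2/core.hpp>

static void mulTransposedDemo()
{
    cv::Mat A = (cv::Mat_<float>(2, 3) << 1, 2, 3, 4, 5, 6);
    cv::Mat AtA, AAt;
    cv::mulTransposed(A, AtA, true);    // 3x3 result = A' * A
    cv::mulTransposed(A, AAt, false);   // 2x2 result = A * A'
}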
/****************************************************************************************\
|
||||
* PCA *
|
||||
\****************************************************************************************/
|
||||
|
||||
PCA::PCA() {}
|
||||
|
||||
PCA::PCA(InputArray data, InputArray _mean, int flags, int maxComponents)
|
||||
{
|
||||
operator()(data, _mean, flags, maxComponents);
|
||||
}
|
||||
|
||||
PCA::PCA(InputArray data, InputArray _mean, int flags, double retainedVariance)
|
||||
{
|
||||
operator()(data, _mean, flags, retainedVariance);
|
||||
}
|
||||
|
||||
PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComponents)
|
||||
{
|
||||
Mat data = _data.getMat(), _mean = __mean.getMat();
|
||||
int covar_flags = CV_COVAR_SCALE;
|
||||
int i, len, in_count;
|
||||
Size mean_sz;
|
||||
|
||||
CV_Assert( data.channels() == 1 );
|
||||
if( flags & CV_PCA_DATA_AS_COL )
|
||||
{
|
||||
len = data.rows;
|
||||
in_count = data.cols;
|
||||
covar_flags |= CV_COVAR_COLS;
|
||||
mean_sz = Size(1, len);
|
||||
}
|
||||
else
|
||||
{
|
||||
len = data.cols;
|
||||
in_count = data.rows;
|
||||
covar_flags |= CV_COVAR_ROWS;
|
||||
mean_sz = Size(len, 1);
|
||||
}
|
||||
|
||||
int count = std::min(len, in_count), out_count = count;
|
||||
if( maxComponents > 0 )
|
||||
out_count = std::min(count, maxComponents);
|
||||
|
||||
// "scrambled" way to compute PCA (when cols(A)>rows(A)):
|
||||
// B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y
|
||||
if( len <= in_count )
|
||||
covar_flags |= CV_COVAR_NORMAL;
|
||||
|
||||
int ctype = std::max(CV_32F, data.depth());
|
||||
mean.create( mean_sz, ctype );
|
||||
|
||||
Mat covar( count, count, ctype );
|
||||
|
||||
if( _mean.data )
|
||||
{
|
||||
CV_Assert( _mean.size() == mean_sz );
|
||||
_mean.convertTo(mean, ctype);
|
||||
covar_flags |= CV_COVAR_USE_AVG;
|
||||
}
|
||||
|
||||
calcCovarMatrix( data, covar, mean, covar_flags, ctype );
|
||||
eigen( covar, eigenvalues, eigenvectors );
|
||||
|
||||
if( !(covar_flags & CV_COVAR_NORMAL) )
|
||||
{
|
||||
// CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A
|
||||
// CV_PCA_DATA_AS_COL: rows(A)>cols(A). x=A''*y -> x'=y'*A'
|
||||
Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
|
||||
if( data.type() != ctype || tmp_mean.data == mean.data )
|
||||
{
|
||||
data.convertTo( tmp_data, ctype );
|
||||
subtract( tmp_data, tmp_mean, tmp_data );
|
||||
}
|
||||
else
|
||||
{
|
||||
subtract( data, tmp_mean, tmp_mean );
|
||||
tmp_data = tmp_mean;
|
||||
}
|
||||
|
||||
Mat evects1(count, len, ctype);
|
||||
gemm( eigenvectors, tmp_data, 1, Mat(), 0, evects1,
|
||||
(flags & CV_PCA_DATA_AS_COL) ? CV_GEMM_B_T : 0);
|
||||
eigenvectors = evects1;
|
||||
|
||||
// normalize eigenvectors
|
||||
for( i = 0; i < out_count; i++ )
|
||||
{
|
||||
Mat vec = eigenvectors.row(i);
|
||||
normalize(vec, vec);
|
||||
}
|
||||
}
|
||||
|
||||
if( count > out_count )
|
||||
{
|
||||
// use clone() to physically copy the data and thus deallocate the original matrices
|
||||
eigenvalues = eigenvalues.rowRange(0,out_count).clone();
|
||||
eigenvectors = eigenvectors.rowRange(0,out_count).clone();
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
void PCA::write(FileStorage& fs ) const
|
||||
{
|
||||
CV_Assert( fs.isOpened() );
|
||||
|
||||
fs << "name" << "PCA";
|
||||
fs << "vectors" << eigenvectors;
|
||||
fs << "values" << eigenvalues;
|
||||
fs << "mean" << mean;
|
||||
}
|
||||
|
||||
void PCA::read(const FileNode& fs)
|
||||
{
|
||||
CV_Assert( !fs.empty() );
|
||||
String name = (String)fs["name"];
|
||||
CV_Assert( name == "PCA" );
|
||||
|
||||
cv::read(fs["vectors"], eigenvectors);
|
||||
cv::read(fs["values"], eigenvalues);
|
||||
cv::read(fs["mean"], mean);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
int computeCumulativeEnergy(const Mat& eigenvalues, double retainedVariance)
|
||||
{
|
||||
CV_DbgAssert( eigenvalues.type() == DataType<T>::type );
|
||||
|
||||
Mat g(eigenvalues.size(), DataType<T>::type);
|
||||
|
||||
for(int ig = 0; ig < g.rows; ig++)
|
||||
{
|
||||
g.at<T>(ig, 0) = 0;
|
||||
for(int im = 0; im <= ig; im++)
|
||||
{
|
||||
g.at<T>(ig,0) += eigenvalues.at<T>(im,0);
|
||||
}
|
||||
}
|
||||
|
||||
int L;
|
||||
|
||||
for(L = 0; L < eigenvalues.rows; L++)
|
||||
{
|
||||
double energy = g.at<T>(L, 0) / g.at<T>(g.rows - 1, 0);
|
||||
if(energy > retainedVariance)
|
||||
break;
|
||||
}
|
||||
|
||||
L = std::max(2, L);
|
||||
|
||||
return L;
|
||||
}
|
||||
|
||||
PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, double retainedVariance)
|
||||
{
|
||||
Mat data = _data.getMat(), _mean = __mean.getMat();
|
||||
int covar_flags = CV_COVAR_SCALE;
|
||||
int i, len, in_count;
|
||||
Size mean_sz;
|
||||
|
||||
CV_Assert( data.channels() == 1 );
|
||||
if( flags & CV_PCA_DATA_AS_COL )
|
||||
{
|
||||
len = data.rows;
|
||||
in_count = data.cols;
|
||||
covar_flags |= CV_COVAR_COLS;
|
||||
mean_sz = Size(1, len);
|
||||
}
|
||||
else
|
||||
{
|
||||
len = data.cols;
|
||||
in_count = data.rows;
|
||||
covar_flags |= CV_COVAR_ROWS;
|
||||
mean_sz = Size(len, 1);
|
||||
}
|
||||
|
||||
CV_Assert( retainedVariance > 0 && retainedVariance <= 1 );
|
||||
|
||||
int count = std::min(len, in_count);
|
||||
|
||||
// "scrambled" way to compute PCA (when cols(A)>rows(A)):
|
||||
// B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y
|
||||
if( len <= in_count )
|
||||
covar_flags |= CV_COVAR_NORMAL;
|
||||
|
||||
int ctype = std::max(CV_32F, data.depth());
|
||||
mean.create( mean_sz, ctype );
|
||||
|
||||
Mat covar( count, count, ctype );
|
||||
|
||||
if( _mean.data )
|
||||
{
|
||||
CV_Assert( _mean.size() == mean_sz );
|
||||
_mean.convertTo(mean, ctype);
|
||||
}
|
||||
|
||||
calcCovarMatrix( data, covar, mean, covar_flags, ctype );
|
||||
eigen( covar, eigenvalues, eigenvectors );
|
||||
|
||||
if( !(covar_flags & CV_COVAR_NORMAL) )
|
||||
{
|
||||
// CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A
|
||||
// CV_PCA_DATA_AS_COL: rows(A)>cols(A). x=A''*y -> x'=y'*A'
|
||||
Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
|
||||
if( data.type() != ctype || tmp_mean.data == mean.data )
|
||||
{
|
||||
data.convertTo( tmp_data, ctype );
|
||||
subtract( tmp_data, tmp_mean, tmp_data );
|
||||
}
|
||||
else
|
||||
{
|
||||
subtract( data, tmp_mean, tmp_mean );
|
||||
tmp_data = tmp_mean;
|
||||
}
|
||||
|
||||
Mat evects1(count, len, ctype);
|
||||
gemm( eigenvectors, tmp_data, 1, Mat(), 0, evects1,
|
||||
(flags & CV_PCA_DATA_AS_COL) ? CV_GEMM_B_T : 0);
|
||||
eigenvectors = evects1;
|
||||
|
||||
// normalize all eigenvectors
|
||||
for( i = 0; i < eigenvectors.rows; i++ )
|
||||
{
|
||||
Mat vec = eigenvectors.row(i);
|
||||
normalize(vec, vec);
|
||||
}
|
||||
}
|
||||
|
||||
// compute the cumulative energy content for each eigenvector
|
||||
int L;
|
||||
if (ctype == CV_32F)
|
||||
L = computeCumulativeEnergy<float>(eigenvalues, retainedVariance);
|
||||
else
|
||||
L = computeCumulativeEnergy<double>(eigenvalues, retainedVariance);
|
||||
|
||||
// use clone() to physically copy the data and thus deallocate the original matrices
|
||||
eigenvalues = eigenvalues.rowRange(0,L).clone();
|
||||
eigenvectors = eigenvectors.rowRange(0,L).clone();
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
void PCA::project(InputArray _data, OutputArray result) const
|
||||
{
|
||||
Mat data = _data.getMat();
|
||||
CV_Assert( mean.data && eigenvectors.data &&
|
||||
((mean.rows == 1 && mean.cols == data.cols) || (mean.cols == 1 && mean.rows == data.rows)));
|
||||
Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
|
||||
int ctype = mean.type();
|
||||
if( data.type() != ctype || tmp_mean.data == mean.data )
|
||||
{
|
||||
data.convertTo( tmp_data, ctype );
|
||||
subtract( tmp_data, tmp_mean, tmp_data );
|
||||
}
|
||||
else
|
||||
{
|
||||
subtract( data, tmp_mean, tmp_mean );
|
||||
tmp_data = tmp_mean;
|
||||
}
|
||||
if( mean.rows == 1 )
|
||||
gemm( tmp_data, eigenvectors, 1, Mat(), 0, result, GEMM_2_T );
|
||||
else
|
||||
gemm( eigenvectors, tmp_data, 1, Mat(), 0, result, 0 );
|
||||
}
|
||||
|
||||
Mat PCA::project(InputArray data) const
|
||||
{
|
||||
Mat result;
|
||||
project(data, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
void PCA::backProject(InputArray _data, OutputArray result) const
|
||||
{
|
||||
Mat data = _data.getMat();
|
||||
CV_Assert( mean.data && eigenvectors.data &&
|
||||
((mean.rows == 1 && eigenvectors.rows == data.cols) ||
|
||||
(mean.cols == 1 && eigenvectors.rows == data.rows)));
|
||||
|
||||
Mat tmp_data, tmp_mean;
|
||||
data.convertTo(tmp_data, mean.type());
|
||||
if( mean.rows == 1 )
|
||||
{
|
||||
tmp_mean = repeat(mean, data.rows, 1);
|
||||
gemm( tmp_data, eigenvectors, 1, tmp_mean, 1, result, 0 );
|
||||
}
|
||||
else
|
||||
{
|
||||
tmp_mean = repeat(mean, 1, data.cols);
|
||||
gemm( eigenvectors, tmp_data, 1, tmp_mean, 1, result, GEMM_1_T );
|
||||
}
|
||||
}
|
||||
|
||||
Mat PCA::backProject(InputArray data) const
|
||||
{
|
||||
Mat result;
|
||||
backProject(data, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void cv::PCACompute(InputArray data, InputOutputArray mean,
|
||||
OutputArray eigenvectors, int maxComponents)
|
||||
{
|
||||
PCA pca;
|
||||
pca(data, mean, 0, maxComponents);
|
||||
pca.mean.copyTo(mean);
|
||||
pca.eigenvectors.copyTo(eigenvectors);
|
||||
}
|
||||
|
||||
void cv::PCACompute(InputArray data, InputOutputArray mean,
|
||||
OutputArray eigenvectors, double retainedVariance)
|
||||
{
|
||||
PCA pca;
|
||||
pca(data, mean, 0, retainedVariance);
|
||||
pca.mean.copyTo(mean);
|
||||
pca.eigenvectors.copyTo(eigenvectors);
|
||||
}
|
||||
|
||||
void cv::PCAProject(InputArray data, InputArray mean,
|
||||
InputArray eigenvectors, OutputArray result)
|
||||
{
|
||||
PCA pca;
|
||||
pca.mean = mean.getMat();
|
||||
pca.eigenvectors = eigenvectors.getMat();
|
||||
pca.project(data, result);
|
||||
}
|
||||
|
||||
void cv::PCABackProject(InputArray data, InputArray mean,
|
||||
InputArray eigenvectors, OutputArray result)
|
||||
{
|
||||
PCA pca;
|
||||
pca.mean = mean.getMat();
|
||||
pca.eigenvectors = eigenvectors.getMat();
|
||||
pca.backProject(data, result);
|
||||
}
|
||||
|
||||
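The PCA class and these wrappers are removed here and reappear, with small cleanups, in the new modules/core/src/pca.cpp added later in this diff. A usage sketch of the two constructors involved (fixed component count vs. retained variance); the data layout below is an assumption, not taken from the patch:

#include <opencv2/core.hpp>

static void pcaDemo(const cv::Mat& samples)   // CV_32F, one observation per row
{
    cv::PCA pcaFixed(samples, cv::Mat(), cv::PCA::DATA_AS_ROW, 10);    // keep 10 components
    cv::PCA pcaVar(samples, cv::Mat(), cv::PCA::DATA_AS_ROW, 0.95);    // keep 95% of the variance

    cv::Mat coeffs = pcaVar.project(samples.row(0));   // into the eigenspace
    cv::Mat approx = pcaVar.backProject(coeffs);       // back to the input space
}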
/****************************************************************************************\
|
||||
@ -3426,7 +3099,7 @@ cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigen
|
||||
pca.eigenvectors = evects;
|
||||
|
||||
pca(data, (flags & CV_PCA_USE_AVG) ? mean : cv::Mat(),
|
||||
flags, evals.data ? evals.rows + evals.cols - 1 : 0);
|
||||
flags, !evals.empty() ? evals.rows + evals.cols - 1 : 0);
|
||||
|
||||
if( pca.mean.size() == mean.size() )
|
||||
pca.mean.convertTo( mean, mean.type() );
|
||||
|
@ -352,7 +352,7 @@ static void finalizeHdr(Mat& m)
m.datalimit = m.datastart + m.size[0]*m.step[0];
if( m.size[0] > 0 )
{
m.dataend = m.data + m.size[d-1]*m.step[d-1];
m.dataend = m.ptr() + m.size[d-1]*m.step[d-1];
for( int i = 0; i < d-1; i++ )
m.dataend += (m.size[i] - 1)*m.step[i];
}
@ -871,7 +871,7 @@ Mat cvarrToMat(const CvArr* arr, bool copyData,
}

Mat buf(total, 1, type);
cvCvtSeqToArray(seq, buf.data, CV_WHOLE_SEQ);
cvCvtSeqToArray(seq, buf.ptr(), CV_WHOLE_SEQ);
return buf;
}
CV_Error(CV_StsBadArg, "Unknown array type");
@ -1941,7 +1941,7 @@ size_t _InputArray::offset(int i) const
{
CV_Assert( i < 0 );
const Mat * const m = ((const Mat*)obj);
return (size_t)(m->data - m->datastart);
return (size_t)(m->ptr() - m->datastart);
}

if( k == UMAT )
@ -1960,7 +1960,7 @@ size_t _InputArray::offset(int i) const
return 1;
CV_Assert( i < (int)vv.size() );

return (size_t)(vv[i].data - vv[i].datastart);
return (size_t)(vv[i].ptr() - vv[i].datastart);
}

if( k == STD_VECTOR_UMAT )
@ -2618,7 +2618,7 @@ void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
{
Mat value = arr.getMat();
CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::GPU_MAT) );
((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>((double *)value.data)), mask);
((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>(value.ptr<double>())), mask);
}
else
CV_Error(Error::StsNotImplemented, "");
@ -2804,7 +2804,7 @@ void cv::setIdentity( InputOutputArray _m, const Scalar& s )

if( type == CV_32FC1 )
{
float* data = (float*)m.data;
float* data = m.ptr<float>();
float val = (float)s[0];
size_t step = m.step/sizeof(data[0]);

@ -2818,7 +2818,7 @@ void cv::setIdentity( InputOutputArray _m, const Scalar& s )
}
else if( type == CV_64FC1 )
{
double* data = (double*)m.data;
double* data = m.ptr<double>();
double val = s[0];
size_t step = m.step/sizeof(data[0]);

@ -2846,7 +2846,7 @@ cv::Scalar cv::trace( InputArray _m )

if( type == CV_32FC1 )
{
const float* ptr = (const float*)m.data;
const float* ptr = m.ptr<float>();
size_t step = m.step/sizeof(ptr[0]) + 1;
double _s = 0;
for( i = 0; i < nm; i++ )
@ -2856,7 +2856,7 @@ cv::Scalar cv::trace( InputArray _m )

if( type == CV_64FC1 )
{
const double* ptr = (const double*)m.data;
const double* ptr = m.ptr<double>();
size_t step = m.step/sizeof(ptr[0]) + 1;
double _s = 0;
for( i = 0; i < nm; i++ )
@ -3002,6 +3002,13 @@ static bool ocl_transpose( InputArray _src, OutputArray _dst )
CV_Assert(dst.cols == dst.rows);
kernelName += "_inplace";
}
else
{
// check required local memory size
size_t required_local_memory = (size_t) TILE_DIM*(TILE_DIM+1)*CV_ELEM_SIZE(type);
if (required_local_memory > ocl::Device::getDefault().localMemSize())
return false;
}

ocl::Kernel k(kernelName.c_str(), ocl::core::transpose_oclsrc,
format("-D T=%s -D T1=%s -D cn=%d -D TILE_DIM=%d -D BLOCK_ROWS=%d -D rowsPerWI=%d",
@ -3108,13 +3115,13 @@ void cv::transpose( InputArray _src, OutputArray _dst )
IppiSize roiSize = { src.cols, src.rows };
if (ippFunc != 0)
{
if (ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, roiSize) >= 0)
if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, roiSize) >= 0)
return;
setIppErrorStatus();
}
else if (ippFuncI != 0)
{
if (ippFuncI(dst.data, (int)dst.step, roiSize) >= 0)
if (ippFuncI(dst.ptr(), (int)dst.step, roiSize) >= 0)
return;
setIppErrorStatus();
}
@ -3125,13 +3132,13 @@ void cv::transpose( InputArray _src, OutputArray _dst )
TransposeInplaceFunc func = transposeInplaceTab[esz];
CV_Assert( func != 0 );
CV_Assert( dst.cols == dst.rows );
func( dst.data, dst.step, dst.rows );
func( dst.ptr(), dst.step, dst.rows );
}
else
{
TransposeFunc func = transposeTab[esz];
CV_Assert( func != 0 );
func( src.data, src.step, dst.data, dst.step, src.size() );
func( src.ptr(), src.step, dst.ptr(), dst.step, src.size() );
}
}
@ -3147,7 +3154,7 @@ void cv::completeSymm( InputOutputArray _m, bool LtoR )
int rows = m.rows;
int j0 = 0, j1 = rows;

uchar* data = m.data;
uchar* data = m.ptr();
for( int i = 0; i < rows; i++ )
{
if( !LtoR ) j1 = i; else j0 = i+1;
@ -3205,8 +3212,8 @@ reduceR_( const Mat& srcmat, Mat& dstmat )
size.width *= srcmat.channels();
AutoBuffer<WT> buffer(size.width);
WT* buf = buffer;
ST* dst = (ST*)dstmat.data;
const T* src = (const T*)srcmat.data;
ST* dst = dstmat.ptr<ST>();
const T* src = srcmat.ptr<T>();
size_t srcstep = srcmat.step/sizeof(src[0]);
int i;
Op op;
@ -3251,8 +3258,8 @@ reduceC_( const Mat& srcmat, Mat& dstmat )

for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcmat.step*y);
ST* dst = (ST*)(dstmat.data + dstmat.step*y);
const T* src = srcmat.ptr<T>(y);
ST* dst = dstmat.ptr<ST>(y);
if( size.width == cn )
for( k = 0; k < cn; k++ )
dst[k] = src[k];
@ -3349,7 +3356,7 @@ static inline void reduceSumC_8u16u16s32f_64f(const cv::Mat& srcmat, cv::Mat& ds
if (ippFunc)
{
for (int y = 0; y < size.height; ++y)
if (ippFunc(srcmat.data + sstep * y, sstep, roisize, dstmat.ptr<Ipp64f>(y)) < 0)
if (ippFunc(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y)) < 0)
{
setIppErrorStatus();
cv::Mat dstroi = dstmat.rowRange(y, y + 1);
@ -3360,7 +3367,7 @@ static inline void reduceSumC_8u16u16s32f_64f(const cv::Mat& srcmat, cv::Mat& ds
else if (ippFuncHint)
{
for (int y = 0; y < size.height; ++y)
if (ippFuncHint(srcmat.data + sstep * y, sstep, roisize, dstmat.ptr<Ipp64f>(y), ippAlgHintAccurate) < 0)
if (ippFuncHint(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y), ippAlgHintAccurate) < 0)
{
setIppErrorStatus();
cv::Mat dstroi = dstmat.rowRange(y, y + 1);
@ -3462,9 +3469,6 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst,
if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
return false;

if ((op == CV_REDUCE_SUM && sdepth == CV_32F) || op == CV_REDUCE_MIN || op == CV_REDUCE_MAX)
return false;

if (op == CV_REDUCE_AVG)
{
if (sdepth < CV_32S && ddepth < CV_32S)
@ -3773,10 +3777,10 @@ template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
T* ptr = bptr;
if( sortRows )
{
T* dptr = (T*)(dst.data + dst.step*i);
T* dptr = dst.ptr<T>(i);
if( !inplace )
{
const T* sptr = (const T*)(src.data + src.step*i);
const T* sptr = src.ptr<T>(i);
memcpy(dptr, sptr, sizeof(T) * len);
}
ptr = dptr;
@ -3784,7 +3788,7 @@ template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
else
{
for( j = 0; j < len; j++ )
ptr[j] = ((const T*)(src.data + src.step*j))[i];
ptr[j] = src.ptr<T>(j)[i];
}

#ifdef USE_IPP_SORT
@ -3813,7 +3817,7 @@ template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )

if( !sortRows )
for( j = 0; j < len; j++ )
((T*)(dst.data + dst.step*j))[i] = ptr[j];
dst.ptr<T>(j)[i] = ptr[j];
}
}

@ -3886,12 +3890,12 @@ template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
if( sortRows )
{
ptr = (T*)(src.data + src.step*i);
iptr = (int*)(dst.data + dst.step*i);
iptr = dst.ptr<int>(i);
}
else
{
for( j = 0; j < len; j++ )
ptr[j] = ((const T*)(src.data + src.step*j))[i];
ptr[j] = src.ptr<T>(j)[i];
}
for( j = 0; j < len; j++ )
iptr[j] = j;
@ -3921,7 +3925,7 @@ template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )

if( !sortRows )
for( j = 0; j < len; j++ )
((int*)(dst.data + dst.step*j))[i] = iptr[j];
dst.ptr<int>(j)[i] = iptr[j];
}
}

@ -3964,420 +3968,6 @@ void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
}
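sort_ and sortIdx_ above are the per-type workers behind cv::sort and cv::sortIdx. A short usage sketch of the public flags (not from the patch):

#include <opencv2/core.hpp>

static void sortDemo(const cv::Mat& scores)   // e.g. CV_32F, one row per query
{
    cv::Mat sorted, order;
    cv::sort(scores, sorted, cv::SORT_EVERY_ROW | cv::SORT_DESCENDING);     // sorted values
    cv::sortIdx(scores, order, cv::SORT_EVERY_ROW | cv::SORT_DESCENDING);   // CV_32S permutation of each row
}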
|
||||
|
||||
////////////////////////////////////////// kmeans ////////////////////////////////////////////
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
static void generateRandomCenter(const std::vector<Vec2f>& box, float* center, RNG& rng)
|
||||
{
|
||||
size_t j, dims = box.size();
|
||||
float margin = 1.f/dims;
|
||||
for( j = 0; j < dims; j++ )
|
||||
center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0];
|
||||
}
|
||||
|
||||
class KMeansPPDistanceComputer : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
KMeansPPDistanceComputer( float *_tdist2,
|
||||
const float *_data,
|
||||
const float *_dist,
|
||||
int _dims,
|
||||
size_t _step,
|
||||
size_t _stepci )
|
||||
: tdist2(_tdist2),
|
||||
data(_data),
|
||||
dist(_dist),
|
||||
dims(_dims),
|
||||
step(_step),
|
||||
stepci(_stepci) { }
|
||||
|
||||
void operator()( const cv::Range& range ) const
|
||||
{
|
||||
const int begin = range.start;
|
||||
const int end = range.end;
|
||||
|
||||
for ( int i = begin; i<end; i++ )
|
||||
{
|
||||
tdist2[i] = std::min(normL2Sqr_(data + step*i, data + stepci, dims), dist[i]);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
KMeansPPDistanceComputer& operator=(const KMeansPPDistanceComputer&); // to quiet MSVC
|
||||
|
||||
float *tdist2;
|
||||
const float *data;
|
||||
const float *dist;
|
||||
const int dims;
|
||||
const size_t step;
|
||||
const size_t stepci;
|
||||
};
|
||||
|
||||
/*
|
||||
k-means center initialization using the following algorithm:
|
||||
Arthur & Vassilvitskii (2007) k-means++: The Advantages of Careful Seeding
|
||||
*/
|
||||
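A usage sketch of the public entry point that drives this seeding, selected via KMEANS_PP_CENTERS; the data layout is an assumption (one 2-D point per row, CV_32F) and the snippet is not part of the patch:

#include <opencv2/core.hpp>

static void clusterDemo(const cv::Mat& points)   // Nx2, CV_32F
{
    cv::Mat labels, centers;
    cv::kmeans(points, 3, labels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
               3, cv::KMEANS_PP_CENTERS, centers);   // k-means++ seeding as implemented below
}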
static void generateCentersPP(const Mat& _data, Mat& _out_centers,
|
||||
int K, RNG& rng, int trials)
|
||||
{
|
||||
int i, j, k, dims = _data.cols, N = _data.rows;
|
||||
const float* data = _data.ptr<float>(0);
|
||||
size_t step = _data.step/sizeof(data[0]);
|
||||
std::vector<int> _centers(K);
|
||||
int* centers = &_centers[0];
|
||||
std::vector<float> _dist(N*3);
|
||||
float* dist = &_dist[0], *tdist = dist + N, *tdist2 = tdist + N;
|
||||
double sum0 = 0;
|
||||
|
||||
centers[0] = (unsigned)rng % N;
|
||||
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
dist[i] = normL2Sqr_(data + step*i, data + step*centers[0], dims);
|
||||
sum0 += dist[i];
|
||||
}
|
||||
|
||||
for( k = 1; k < K; k++ )
|
||||
{
|
||||
double bestSum = DBL_MAX;
|
||||
int bestCenter = -1;
|
||||
|
||||
for( j = 0; j < trials; j++ )
|
||||
{
|
||||
double p = (double)rng*sum0, s = 0;
|
||||
for( i = 0; i < N-1; i++ )
|
||||
if( (p -= dist[i]) <= 0 )
|
||||
break;
|
||||
int ci = i;
|
||||
|
||||
parallel_for_(Range(0, N),
|
||||
KMeansPPDistanceComputer(tdist2, data, dist, dims, step, step*ci));
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
s += tdist2[i];
|
||||
}
|
||||
|
||||
if( s < bestSum )
|
||||
{
|
||||
bestSum = s;
|
||||
bestCenter = ci;
|
||||
std::swap(tdist, tdist2);
|
||||
}
|
||||
}
|
||||
centers[k] = bestCenter;
|
||||
sum0 = bestSum;
|
||||
std::swap(dist, tdist);
|
||||
}
|
||||
|
||||
for( k = 0; k < K; k++ )
|
||||
{
|
||||
const float* src = data + step*centers[k];
|
||||
float* dst = _out_centers.ptr<float>(k);
|
||||
for( j = 0; j < dims; j++ )
|
||||
dst[j] = src[j];
|
||||
}
|
||||
}
|
||||
|
||||
class KMeansDistanceComputer : public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
KMeansDistanceComputer( double *_distances,
|
||||
int *_labels,
|
||||
const Mat& _data,
|
||||
const Mat& _centers )
|
||||
: distances(_distances),
|
||||
labels(_labels),
|
||||
data(_data),
|
||||
centers(_centers)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()( const Range& range ) const
|
||||
{
|
||||
const int begin = range.start;
|
||||
const int end = range.end;
|
||||
const int K = centers.rows;
|
||||
const int dims = centers.cols;
|
||||
|
||||
const float *sample;
|
||||
for( int i = begin; i<end; ++i)
|
||||
{
|
||||
sample = data.ptr<float>(i);
|
||||
int k_best = 0;
|
||||
double min_dist = DBL_MAX;
|
||||
|
||||
for( int k = 0; k < K; k++ )
|
||||
{
|
||||
const float* center = centers.ptr<float>(k);
|
||||
const double dist = normL2Sqr_(sample, center, dims);
|
||||
|
||||
if( min_dist > dist )
|
||||
{
|
||||
min_dist = dist;
|
||||
k_best = k;
|
||||
}
|
||||
}
|
||||
|
||||
distances[i] = min_dist;
|
||||
labels[i] = k_best;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
KMeansDistanceComputer& operator=(const KMeansDistanceComputer&); // to quiet MSVC
|
||||
|
||||
double *distances;
|
||||
int *labels;
|
||||
const Mat& data;
|
||||
const Mat& centers;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
double cv::kmeans( InputArray _data, int K,
|
||||
InputOutputArray _bestLabels,
|
||||
TermCriteria criteria, int attempts,
|
||||
int flags, OutputArray _centers )
|
||||
{
|
||||
const int SPP_TRIALS = 3;
|
||||
Mat data0 = _data.getMat();
|
||||
bool isrow = data0.rows == 1 && data0.channels() > 1;
|
||||
int N = !isrow ? data0.rows : data0.cols;
|
||||
int dims = (!isrow ? data0.cols : 1)*data0.channels();
|
||||
int type = data0.depth();
|
||||
|
||||
attempts = std::max(attempts, 1);
|
||||
CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
|
||||
CV_Assert( N >= K );
|
||||
|
||||
Mat data(N, dims, CV_32F, data0.data, isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));
|
||||
|
||||
_bestLabels.create(N, 1, CV_32S, -1, true);
|
||||
|
||||
Mat _labels, best_labels = _bestLabels.getMat();
|
||||
if( flags & CV_KMEANS_USE_INITIAL_LABELS )
|
||||
{
|
||||
CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) &&
|
||||
best_labels.cols*best_labels.rows == N &&
|
||||
best_labels.type() == CV_32S &&
|
||||
best_labels.isContinuous());
|
||||
best_labels.copyTo(_labels);
|
||||
}
|
||||
else
|
||||
{
|
||||
if( !((best_labels.cols == 1 || best_labels.rows == 1) &&
|
||||
best_labels.cols*best_labels.rows == N &&
|
||||
best_labels.type() == CV_32S &&
|
||||
best_labels.isContinuous()))
|
||||
best_labels.create(N, 1, CV_32S);
|
||||
_labels.create(best_labels.size(), best_labels.type());
|
||||
}
|
||||
int* labels = _labels.ptr<int>();
|
||||
|
||||
Mat centers(K, dims, type), old_centers(K, dims, type), temp(1, dims, type);
|
||||
std::vector<int> counters(K);
|
||||
std::vector<Vec2f> _box(dims);
|
||||
Vec2f* box = &_box[0];
|
||||
double best_compactness = DBL_MAX, compactness = 0;
|
||||
RNG& rng = theRNG();
|
||||
int a, iter, i, j, k;
|
||||
|
||||
if( criteria.type & TermCriteria::EPS )
|
||||
criteria.epsilon = std::max(criteria.epsilon, 0.);
|
||||
else
|
||||
criteria.epsilon = FLT_EPSILON;
|
||||
criteria.epsilon *= criteria.epsilon;
|
||||
|
||||
if( criteria.type & TermCriteria::COUNT )
|
||||
criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100);
|
||||
else
|
||||
criteria.maxCount = 100;
|
||||
|
||||
if( K == 1 )
|
||||
{
|
||||
attempts = 1;
|
||||
criteria.maxCount = 2;
|
||||
}
|
||||
|
||||
const float* sample = data.ptr<float>(0);
|
||||
for( j = 0; j < dims; j++ )
|
||||
box[j] = Vec2f(sample[j], sample[j]);
|
||||
|
||||
for( i = 1; i < N; i++ )
|
||||
{
|
||||
sample = data.ptr<float>(i);
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
float v = sample[j];
|
||||
box[j][0] = std::min(box[j][0], v);
|
||||
box[j][1] = std::max(box[j][1], v);
|
||||
}
|
||||
}
|
||||
|
||||
for( a = 0; a < attempts; a++ )
|
||||
{
|
||||
double max_center_shift = DBL_MAX;
|
||||
for( iter = 0;; )
|
||||
{
|
||||
swap(centers, old_centers);
|
||||
|
||||
if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) )
|
||||
{
|
||||
if( flags & KMEANS_PP_CENTERS )
|
||||
generateCentersPP(data, centers, K, rng, SPP_TRIALS);
|
||||
else
|
||||
{
|
||||
for( k = 0; k < K; k++ )
|
||||
generateRandomCenter(_box, centers.ptr<float>(k), rng);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) )
|
||||
{
|
||||
for( i = 0; i < N; i++ )
|
||||
CV_Assert( (unsigned)labels[i] < (unsigned)K );
|
||||
}
|
||||
|
||||
// compute centers
|
||||
centers = Scalar(0);
|
||||
for( k = 0; k < K; k++ )
|
||||
counters[k] = 0;
|
||||
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
sample = data.ptr<float>(i);
|
||||
k = labels[i];
|
||||
float* center = centers.ptr<float>(k);
|
||||
j=0;
|
||||
#if CV_ENABLE_UNROLLED
|
||||
for(; j <= dims - 4; j += 4 )
|
||||
{
|
||||
float t0 = center[j] + sample[j];
|
||||
float t1 = center[j+1] + sample[j+1];
|
||||
|
||||
center[j] = t0;
|
||||
center[j+1] = t1;
|
||||
|
||||
t0 = center[j+2] + sample[j+2];
|
||||
t1 = center[j+3] + sample[j+3];
|
||||
|
||||
center[j+2] = t0;
|
||||
center[j+3] = t1;
|
||||
}
|
||||
#endif
|
||||
for( ; j < dims; j++ )
|
||||
center[j] += sample[j];
|
||||
counters[k]++;
|
||||
}
|
||||
|
||||
if( iter > 0 )
|
||||
max_center_shift = 0;
|
||||
|
||||
for( k = 0; k < K; k++ )
|
||||
{
|
||||
if( counters[k] != 0 )
|
||||
continue;
|
||||
|
||||
// if some cluster appeared to be empty then:
|
||||
// 1. find the biggest cluster
|
||||
// 2. find the farthest from the center point in the biggest cluster
|
||||
// 3. exclude the farthest point from the biggest cluster and form a new 1-point cluster.
|
||||
int max_k = 0;
|
||||
for( int k1 = 1; k1 < K; k1++ )
|
||||
{
|
||||
if( counters[max_k] < counters[k1] )
|
||||
max_k = k1;
|
||||
}
|
||||
|
||||
double max_dist = 0;
|
||||
int farthest_i = -1;
|
||||
float* new_center = centers.ptr<float>(k);
|
||||
float* old_center = centers.ptr<float>(max_k);
|
||||
float* _old_center = temp.ptr<float>(); // normalized
|
||||
float scale = 1.f/counters[max_k];
|
||||
for( j = 0; j < dims; j++ )
|
||||
_old_center[j] = old_center[j]*scale;
|
||||
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
if( labels[i] != max_k )
|
||||
continue;
|
||||
sample = data.ptr<float>(i);
|
||||
double dist = normL2Sqr_(sample, _old_center, dims);
|
||||
|
||||
if( max_dist <= dist )
|
||||
{
|
||||
max_dist = dist;
|
||||
farthest_i = i;
|
||||
}
|
||||
}
|
||||
|
||||
counters[max_k]--;
|
||||
counters[k]++;
|
||||
labels[farthest_i] = k;
|
||||
sample = data.ptr<float>(farthest_i);
|
||||
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
old_center[j] -= sample[j];
|
||||
new_center[j] += sample[j];
|
||||
}
|
||||
}
|
||||
|
||||
for( k = 0; k < K; k++ )
|
||||
{
|
||||
float* center = centers.ptr<float>(k);
|
||||
CV_Assert( counters[k] != 0 );
|
||||
|
||||
float scale = 1.f/counters[k];
|
||||
for( j = 0; j < dims; j++ )
|
||||
center[j] *= scale;
|
||||
|
||||
if( iter > 0 )
|
||||
{
|
||||
double dist = 0;
|
||||
const float* old_center = old_centers.ptr<float>(k);
|
||||
for( j = 0; j < dims; j++ )
|
||||
{
|
||||
double t = center[j] - old_center[j];
|
||||
dist += t*t;
|
||||
}
|
||||
max_center_shift = std::max(max_center_shift, dist);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
|
||||
break;
|
||||
|
||||
// assign labels
|
||||
Mat dists(1, N, CV_64F);
|
||||
double* dist = dists.ptr<double>(0);
|
||||
parallel_for_(Range(0, N),
|
||||
KMeansDistanceComputer(dist, labels, data, centers));
|
||||
compactness = 0;
|
||||
for( i = 0; i < N; i++ )
|
||||
{
|
||||
compactness += dist[i];
|
||||
}
|
||||
}
|
||||
|
||||
if( compactness < best_compactness )
|
||||
{
|
||||
best_compactness = compactness;
|
||||
if( _centers.needed() )
|
||||
centers.copyTo(_centers);
|
||||
_labels.copyTo(best_labels);
|
||||
}
|
||||
}
|
||||
|
||||
return best_compactness;
|
||||
}
|
||||
|
||||
|
||||
CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value )
|
||||
{
|
||||
cv::Mat m = cv::cvarrToMat(arr);
|
||||
@ -4758,7 +4348,7 @@ Point MatConstIterator::pos() const
return Point();
CV_DbgAssert(m->dims <= 2);

ptrdiff_t ofs = ptr - m->data;
ptrdiff_t ofs = ptr - m->ptr();
int y = (int)(ofs/m->step[0]);
return Point((int)((ofs - y*m->step[0])/elemSize), y);
}
@ -4766,7 +4356,7 @@ Point MatConstIterator::pos() const
void MatConstIterator::pos(int* _idx) const
{
CV_Assert(m != 0 && _idx);
ptrdiff_t ofs = ptr - m->data;
ptrdiff_t ofs = ptr - m->ptr();
for( int i = 0; i < m->dims; i++ )
{
size_t s = m->step[i], v = ofs/s;
@ -4781,7 +4371,7 @@ ptrdiff_t MatConstIterator::lpos() const
return 0;
if( m->isContinuous() )
return (ptr - sliceStart)/elemSize;
ptrdiff_t ofs = ptr - m->data;
ptrdiff_t ofs = ptr - m->ptr();
int i, d = m->dims;
if( d == 2 )
{
@ -4816,13 +4406,13 @@ void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
ptrdiff_t ofs0, y;
if( relative )
{
ofs0 = ptr - m->data;
ofs0 = ptr - m->ptr();
y = ofs0/m->step[0];
ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize;
}
y = ofs/m->cols;
int y1 = std::min(std::max((int)y, 0), m->rows-1);
sliceStart = m->data + y1*m->step[0];
sliceStart = m->ptr(y1);
sliceEnd = sliceStart + m->cols*elemSize;
ptr = y < 0 ? sliceStart : y >= m->rows ? sliceEnd :
sliceStart + (ofs - y*m->cols)*elemSize;
@ -4839,8 +4429,8 @@ void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
ptrdiff_t t = ofs/szi;
int v = (int)(ofs - t*szi);
ofs = t;
ptr = m->data + v*elemSize;
sliceStart = m->data;
ptr = m->ptr() + v*elemSize;
sliceStart = m->ptr();

for( int i = d-2; i >= 0; i-- )
{
@ -4855,7 +4445,7 @@ void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
if( ofs > 0 )
ptr = sliceEnd;
else
ptr = sliceStart + (ptr - m->data);
ptr = sliceStart + (ptr - m->ptr());
}

void MatConstIterator::seek(const int* _idx, bool relative)
@ -5051,7 +4641,7 @@ SparseMat::SparseMat(const Mat& m)

int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
size_t esz = m.elemSize();
uchar* dptr = m.data;
const uchar* dptr = m.ptr();

for(;;)
{

@ -1765,7 +1765,7 @@ struct Device::Impl
if (vendorName_ == "Advanced Micro Devices, Inc." ||
vendorName_ == "AMD")
vendorID_ = VENDOR_AMD;
else if (vendorName_ == "Intel(R) Corporation")
else if (vendorName_ == "Intel(R) Corporation" || vendorName_ == "Intel" || strstr(name_.c_str(), "Iris") != 0)
vendorID_ = VENDOR_INTEL;
else if (vendorName_ == "NVIDIA Corporation")
vendorID_ = VENDOR_NVIDIA;
@ -2750,7 +2750,7 @@ KernelArg::KernelArg(int _flags, UMat* _m, int _wscale, int _iwscale, const void
KernelArg KernelArg::Constant(const Mat& m)
{
CV_Assert(m.isContinuous());
return KernelArg(CONSTANT, 0, 0, 0, m.data, m.total()*m.elemSize());
return KernelArg(CONSTANT, 0, 0, 0, m.ptr(), m.total()*m.elemSize());
}

/////////////////////////////////////////// Kernel /////////////////////////////////////////////
@ -4397,7 +4397,7 @@ template <typename T>
static std::string kerToStr(const Mat & k)
{
int width = k.cols - 1, depth = k.depth();
const T * const data = reinterpret_cast<const T *>(k.data);
const T * const data = k.ptr<T>();

std::ostringstream stream;
stream.precision(10);
@ -98,7 +98,7 @@

#ifdef OP_CALC2
#define CALC_MAX2(p) \
maxval2 = MAX(maxval2, temp.p);
maxval2 = MAX(maxval2, temp2.p);
#else
#define CALC_MAX2(p)
#endif
@ -196,7 +196,7 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off

#ifdef HAVE_SRC2
#ifdef HAVE_SRC2_CONT
src2_index = mul24(id, srcTSIZE);
src2_index = id * srcTSIZE; //mul24(id, srcTSIZE);
#else
src2_index = mad24(id / cols, src2_step, mul24(id % cols, srcTSIZE));
#endif

@ -108,7 +108,10 @@ __kernel void reduce_horz_opt(__global const uchar * srcptr, int src_step, int s
int src_index = mad24(y, src_step, mad24(x, (int)sizeof(srcT) * cn, src_offset));

__global const srcT * src = (__global const srcT *)(srcptr + src_index);
bufT tmp[cn] = { INIT_VALUE };
bufT tmp[cn];
#pragma unroll
for (int c = 0; c < cn; ++c)
tmp[c] = INIT_VALUE;

int src_step_mul = BUF_COLS * cn;
for (int idx = x; idx < cols; idx += BUF_COLS, src += src_step_mul)
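These reduce kernels stop relying on "bufT tmp[cn] = { INIT_VALUE };". With cn > 1, a single-value brace initializer assigns INIT_VALUE only to tmp[0] and zero-fills the remaining elements (standard C semantics, which OpenCL C follows), so the explicit unrolled loop gives every channel the intended identity value. That motivation is an inference from the change itself; a tiny host-side C++ illustration of the semantics difference:

#include <cstdio>

int main()
{
    const int cn = 3;
    float braced[cn] = { 42.0f };      // becomes {42, 0, 0}
    float looped[cn];
    for (int c = 0; c < cn; ++c)       // becomes {42, 42, 42}
        looped[c] = 42.0f;
    std::printf("%g %g %g vs %g %g %g\n",
                braced[0], braced[1], braced[2], looped[0], looped[1], looped[2]);
    return 0;
}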
@ -140,7 +143,10 @@ __kernel void reduce_horz_opt(__global const uchar * srcptr, int src_step, int s
int dst_index = mad24(y, dst_step, dst_offset);

__global dstT * dst = (__global dstT *)(dstptr + dst_index);
bufT tmp[cn] = { INIT_VALUE };
bufT tmp[cn];
#pragma unroll
for (int c = 0; c < cn; ++c)
tmp[c] = INIT_VALUE;

#pragma unroll
for (int xin = 0; xin < BUF_COLS / 2; xin ++)
@ -179,7 +185,10 @@ __kernel void reduce(__global const uchar * srcptr, int src_step, int src_offset
int dst_index = mad24(x, (int)sizeof(dstT0) * cn, dst_offset);

__global dstT0 * dst = (__global dstT0 *)(dstptr + dst_index);
dstT tmp[cn] = { INIT_VALUE };
dstT tmp[cn];
#pragma unroll
for (int c = 0; c < cn; ++c)
tmp[c] = INIT_VALUE;

for (int y = 0; y < rows; ++y, src_index += src_step)
{
@ -209,7 +218,10 @@ __kernel void reduce(__global const uchar * srcptr, int src_step, int src_offset

__global const srcT * src = (__global const srcT *)(srcptr + src_index);
__global dstT * dst = (__global dstT *)(dstptr + dst_index);
dstT tmp[cn] = { INIT_VALUE };
dstT tmp[cn];
#pragma unroll
for (int c = 0; c < cn; ++c)
tmp[c] = INIT_VALUE;

for (int x = 0; x < cols; ++x, src += cn)
{

@ -240,6 +240,11 @@ void cv::parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body,
{
ProxyLoopBody pbody(body, range, nstripes);
cv::Range stripeRange = pbody.stripeRange();
if( stripeRange.end - stripeRange.start == 1 )
{
body(range);
return;
}

#if defined HAVE_TBB
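The parallel_for_ change short-circuits the single-stripe case, calling the body directly instead of going through the parallel backend. A usage sketch of ParallelLoopBody (illustrative only, not part of the patch):

#include <opencv2/core.hpp>

// Increments every row of a matrix; parallel_for_ splits the row range into stripes.
struct AddOne : cv::ParallelLoopBody
{
    cv::Mat& m;
    explicit AddOne(cv::Mat& _m) : m(_m) {}
    void operator()(const cv::Range& r) const
    {
        for (int y = r.start; y < r.end; y++)
        {
            cv::Mat row = m.row(y);                  // header sharing m's data
            cv::add(row, cv::Scalar::all(1), row);   // in-place increment of the row
        }
    }
};

static void runAddOne(cv::Mat& img)
{
    cv::parallel_for_(cv::Range(0, img.rows), AddOne(img));   // may run the body inline for a single stripe
}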
modules/core/src/pca.cpp (new file, 384 lines)
@ -0,0 +1,384 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
/****************************************************************************************\
|
||||
* PCA *
|
||||
\****************************************************************************************/
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
PCA::PCA() {}
|
||||
|
||||
PCA::PCA(InputArray data, InputArray _mean, int flags, int maxComponents)
|
||||
{
|
||||
operator()(data, _mean, flags, maxComponents);
|
||||
}
|
||||
|
||||
PCA::PCA(InputArray data, InputArray _mean, int flags, double retainedVariance)
|
||||
{
|
||||
operator()(data, _mean, flags, retainedVariance);
|
||||
}
|
||||
|
||||
PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComponents)
|
||||
{
|
||||
Mat data = _data.getMat(), _mean = __mean.getMat();
|
||||
int covar_flags = CV_COVAR_SCALE;
|
||||
int i, len, in_count;
|
||||
Size mean_sz;
|
||||
|
||||
CV_Assert( data.channels() == 1 );
|
||||
if( flags & CV_PCA_DATA_AS_COL )
|
||||
{
|
||||
len = data.rows;
|
||||
in_count = data.cols;
|
||||
covar_flags |= CV_COVAR_COLS;
|
||||
mean_sz = Size(1, len);
|
||||
}
|
||||
else
|
||||
{
|
||||
len = data.cols;
|
||||
in_count = data.rows;
|
||||
covar_flags |= CV_COVAR_ROWS;
|
||||
mean_sz = Size(len, 1);
|
||||
}
|
||||
|
||||
int count = std::min(len, in_count), out_count = count;
|
||||
if( maxComponents > 0 )
|
||||
out_count = std::min(count, maxComponents);
|
||||
|
||||
// "scrambled" way to compute PCA (when cols(A)>rows(A)):
|
||||
// B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y
|
||||
if( len <= in_count )
|
||||
covar_flags |= CV_COVAR_NORMAL;
|
||||
|
||||
int ctype = std::max(CV_32F, data.depth());
|
||||
mean.create( mean_sz, ctype );
|
||||
|
||||
Mat covar( count, count, ctype );
|
||||
|
||||
if( !_mean.empty() )
|
||||
{
|
||||
CV_Assert( _mean.size() == mean_sz );
|
||||
_mean.convertTo(mean, ctype);
|
||||
covar_flags |= CV_COVAR_USE_AVG;
|
||||
}
|
||||
|
||||
calcCovarMatrix( data, covar, mean, covar_flags, ctype );
|
||||
eigen( covar, eigenvalues, eigenvectors );
|
||||
|
||||
if( !(covar_flags & CV_COVAR_NORMAL) )
|
||||
{
|
||||
// CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A
|
||||
// CV_PCA_DATA_AS_COL: rows(A)>cols(A). x=A''*y -> x'=y'*A'
|
||||
Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
|
||||
if( data.type() != ctype || tmp_mean.data == mean.data )
|
||||
{
|
||||
data.convertTo( tmp_data, ctype );
|
||||
subtract( tmp_data, tmp_mean, tmp_data );
|
||||
}
|
||||
else
|
||||
{
|
||||
subtract( data, tmp_mean, tmp_mean );
|
||||
tmp_data = tmp_mean;
|
||||
}
|
||||
|
||||
Mat evects1(count, len, ctype);
|
||||
gemm( eigenvectors, tmp_data, 1, Mat(), 0, evects1,
|
||||
(flags & CV_PCA_DATA_AS_COL) ? CV_GEMM_B_T : 0);
|
||||
eigenvectors = evects1;
|
||||
|
||||
// normalize eigenvectors
|
||||
for( i = 0; i < out_count; i++ )
|
||||
{
|
||||
Mat vec = eigenvectors.row(i);
|
||||
normalize(vec, vec);
|
||||
}
|
||||
}
|
||||
|
||||
if( count > out_count )
|
||||
{
|
||||
// use clone() to physically copy the data and thus deallocate the original matrices
|
||||
eigenvalues = eigenvalues.rowRange(0,out_count).clone();
|
||||
eigenvectors = eigenvectors.rowRange(0,out_count).clone();
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
void PCA::write(FileStorage& fs ) const
|
||||
{
|
||||
CV_Assert( fs.isOpened() );
|
||||
|
||||
fs << "name" << "PCA";
|
||||
fs << "vectors" << eigenvectors;
|
||||
fs << "values" << eigenvalues;
|
||||
fs << "mean" << mean;
|
||||
}
|
||||
|
||||
void PCA::read(const FileNode& fs)
|
||||
{
|
||||
CV_Assert( !fs.empty() );
|
||||
String name = (String)fs["name"];
|
||||
CV_Assert( name == "PCA" );
|
||||
|
||||
cv::read(fs["vectors"], eigenvectors);
|
||||
cv::read(fs["values"], eigenvalues);
|
||||
cv::read(fs["mean"], mean);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
int computeCumulativeEnergy(const Mat& eigenvalues, double retainedVariance)
|
||||
{
|
||||
CV_DbgAssert( eigenvalues.type() == DataType<T>::type );
|
||||
|
||||
Mat g(eigenvalues.size(), DataType<T>::type);
|
||||
|
||||
for(int ig = 0; ig < g.rows; ig++)
|
||||
{
|
||||
g.at<T>(ig, 0) = 0;
|
||||
for(int im = 0; im <= ig; im++)
|
||||
{
|
||||
g.at<T>(ig,0) += eigenvalues.at<T>(im,0);
|
||||
}
|
||||
}
|
||||
|
||||
int L;
|
||||
|
||||
for(L = 0; L < eigenvalues.rows; L++)
|
||||
{
|
||||
double energy = g.at<T>(L, 0) / g.at<T>(g.rows - 1, 0);
|
||||
if(energy > retainedVariance)
|
||||
break;
|
||||
}
|
||||
|
||||
L = std::max(2, L);
|
||||
|
||||
return L;
|
||||
}
|
||||
|
||||
PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, double retainedVariance)
{
    Mat data = _data.getMat(), _mean = __mean.getMat();
    int covar_flags = CV_COVAR_SCALE;
    int i, len, in_count;
    Size mean_sz;

    CV_Assert( data.channels() == 1 );
    if( flags & CV_PCA_DATA_AS_COL )
    {
        len = data.rows;
        in_count = data.cols;
        covar_flags |= CV_COVAR_COLS;
        mean_sz = Size(1, len);
    }
    else
    {
        len = data.cols;
        in_count = data.rows;
        covar_flags |= CV_COVAR_ROWS;
        mean_sz = Size(len, 1);
    }

    CV_Assert( retainedVariance > 0 && retainedVariance <= 1 );

    int count = std::min(len, in_count);

    // "scrambled" way to compute PCA (when cols(A)>rows(A)):
    // B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y
    if( len <= in_count )
        covar_flags |= CV_COVAR_NORMAL;

    int ctype = std::max(CV_32F, data.depth());
    mean.create( mean_sz, ctype );

    Mat covar( count, count, ctype );

    if( !_mean.empty() )
    {
        CV_Assert( _mean.size() == mean_sz );
        _mean.convertTo(mean, ctype);
    }

    calcCovarMatrix( data, covar, mean, covar_flags, ctype );
    eigen( covar, eigenvalues, eigenvectors );

    if( !(covar_flags & CV_COVAR_NORMAL) )
    {
        // CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A
        // CV_PCA_DATA_AS_COL: rows(A)>cols(A). x=A''*y -> x'=y'*A'
        Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
        if( data.type() != ctype || tmp_mean.data == mean.data )
        {
            data.convertTo( tmp_data, ctype );
            subtract( tmp_data, tmp_mean, tmp_data );
        }
        else
        {
            subtract( data, tmp_mean, tmp_mean );
            tmp_data = tmp_mean;
        }

        Mat evects1(count, len, ctype);
        gemm( eigenvectors, tmp_data, 1, Mat(), 0, evects1,
            (flags & CV_PCA_DATA_AS_COL) ? CV_GEMM_B_T : 0);
        eigenvectors = evects1;

        // normalize all eigenvectors
        for( i = 0; i < eigenvectors.rows; i++ )
        {
            Mat vec = eigenvectors.row(i);
            normalize(vec, vec);
        }
    }

    // compute the cumulative energy content for each eigenvector
    int L;
    if (ctype == CV_32F)
        L = computeCumulativeEnergy<float>(eigenvalues, retainedVariance);
    else
        L = computeCumulativeEnergy<double>(eigenvalues, retainedVariance);

    // use clone() to physically copy the data and thus deallocate the original matrices
    eigenvalues = eigenvalues.rowRange(0,L).clone();
    eigenvectors = eigenvectors.rowRange(0,L).clone();

    return *this;
}

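This overload differs from the maxComponents version only in how the number of kept components is chosen: every eigenvector is normalized first, then computeCumulativeEnergy() picks L from the requested variance fraction. A short sketch, reusing the illustrative samples matrix from above:

// Keep as many components as needed to retain about 95% of the variance.
cv::PCA pca95(samples, cv::Mat(), cv::PCA::DATA_AS_ROW, 0.95);
int kept = pca95.eigenvectors.rows;   // decided by computeCumulativeEnergy()
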
void PCA::project(InputArray _data, OutputArray result) const
{
    Mat data = _data.getMat();
    CV_Assert( !mean.empty() && !eigenvectors.empty() &&
        ((mean.rows == 1 && mean.cols == data.cols) || (mean.cols == 1 && mean.rows == data.rows)));
    Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
    int ctype = mean.type();
    if( data.type() != ctype || tmp_mean.data == mean.data )
    {
        data.convertTo( tmp_data, ctype );
        subtract( tmp_data, tmp_mean, tmp_data );
    }
    else
    {
        subtract( data, tmp_mean, tmp_mean );
        tmp_data = tmp_mean;
    }
    if( mean.rows == 1 )
        gemm( tmp_data, eigenvectors, 1, Mat(), 0, result, GEMM_2_T );
    else
        gemm( eigenvectors, tmp_data, 1, Mat(), 0, result, 0 );
}

Mat PCA::project(InputArray data) const
{
    Mat result;
    project(data, result);
    return result;
}

void PCA::backProject(InputArray _data, OutputArray result) const
{
    Mat data = _data.getMat();
    CV_Assert( !mean.empty() && !eigenvectors.empty() &&
        ((mean.rows == 1 && eigenvectors.rows == data.cols) ||
         (mean.cols == 1 && eigenvectors.rows == data.rows)));

    Mat tmp_data, tmp_mean;
    data.convertTo(tmp_data, mean.type());
    if( mean.rows == 1 )
    {
        tmp_mean = repeat(mean, data.rows, 1);
        gemm( tmp_data, eigenvectors, 1, tmp_mean, 1, result, 0 );
    }
    else
    {
        tmp_mean = repeat(mean, 1, data.cols);
        gemm( eigenvectors, tmp_data, 1, tmp_mean, 1, result, GEMM_1_T );
    }
}

Mat PCA::backProject(InputArray data) const
{
    Mat result;
    backProject(data, result);
    return result;
}

}

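project() subtracts the mean and expresses each sample in the eigenvector basis; backProject() applies the transpose mapping and adds the mean back, so chaining the two yields a rank-limited reconstruction. Continuing the illustrative sketch from above:

cv::Mat coeffs = pca.project(samples);      // 100 x 16 coefficient matrix
cv::Mat recon  = pca.backProject(coeffs);   // 100 x 4096 rank-16 approximation
double relErr  = cv::norm(samples, recon, cv::NORM_L2) / cv::norm(samples, cv::NORM_L2);
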
void cv::PCACompute(InputArray data, InputOutputArray mean,
                    OutputArray eigenvectors, int maxComponents)
{
    PCA pca;
    pca(data, mean, 0, maxComponents);
    pca.mean.copyTo(mean);
    pca.eigenvectors.copyTo(eigenvectors);
}

void cv::PCACompute(InputArray data, InputOutputArray mean,
                    OutputArray eigenvectors, double retainedVariance)
{
    PCA pca;
    pca(data, mean, 0, retainedVariance);
    pca.mean.copyTo(mean);
    pca.eigenvectors.copyTo(eigenvectors);
}

void cv::PCAProject(InputArray data, InputArray mean,
                    InputArray eigenvectors, OutputArray result)
{
    PCA pca;
    pca.mean = mean.getMat();
    pca.eigenvectors = eigenvectors.getMat();
    pca.project(data, result);
}

void cv::PCABackProject(InputArray data, InputArray mean,
                    InputArray eigenvectors, OutputArray result)
{
    PCA pca;
    pca.mean = mean.getMat();
    pca.eigenvectors = eigenvectors.getMat();
    pca.backProject(data, result);
}

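The cv::PCACompute / cv::PCAProject / cv::PCABackProject wrappers above run the same machinery without keeping a PCA object around, which suits one-shot use. A brief sketch with the same illustrative data:

cv::Mat mean, evecs;
cv::PCACompute(samples, mean, evecs, 16);     // the double overload takes a retained-variance fraction instead
cv::Mat coeffs, recon;
cv::PCAProject(samples, mean, evecs, coeffs);
cv::PCABackProject(coeffs, mean, evecs, recon);
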
@ -99,8 +99,6 @@ extern const float g_8x32fTab[];
extern const ushort g_8x16uSqrTab[];
#define CV_SQR_8U(x) cv::g_8x16uSqrTab[(x)+255]

extern const char* g_HersheyGlyphs[];

extern const uchar g_Saturate8u[];
#define CV_FAST_CAST_8U(t) (assert(-256 <= (t) && (t) <= 512), cv::g_Saturate8u[(t)+256])
#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))

@ -511,8 +511,8 @@ void RNG::fill( InputOutputArray _mat, int disttype,
{
_parambuf.allocate(cn*8 + n1 + n2);
double* parambuf = _parambuf;
double* p1 = (double*)_param1.data;
double* p2 = (double*)_param2.data;
double* p1 = _param1.ptr<double>();
double* p2 = _param2.ptr<double>();

if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn )
{
@ -625,7 +625,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
int esz = (int)CV_ELEM_SIZE(ptype);

if( _param1.isContinuous() && _param1.type() == ptype )
mean = _param1.data;
mean = _param1.ptr();
else
{
Mat tmp(_param1.size(), ptype, parambuf);
@ -638,7 +638,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
mean[j] = mean[j - n1*esz];

if( _param2.isContinuous() && _param2.type() == ptype )
stddev = _param2.data;
stddev = _param2.ptr();
else
{
Mat tmp(_param2.size(), ptype, parambuf + cn);
@ -753,7 +753,7 @@ randShuffle_( Mat& _arr, RNG& rng, double iterFactor )
int sz = _arr.rows*_arr.cols, iters = cvRound(iterFactor*sz);
if( _arr.isContinuous() )
{
T* arr = (T*)_arr.data;
T* arr = _arr.ptr<T>();
for( int i = 0; i < iters; i++ )
{
int j = (unsigned)rng % sz, k = (unsigned)rng % sz;
@ -762,7 +762,7 @@ randShuffle_( Mat& _arr, RNG& rng, double iterFactor )
}
else
{
uchar* data = _arr.data;
uchar* data = _arr.ptr();
size_t step = _arr.step;
int cols = _arr.cols;
for( int i = 0; i < iters; i++ )

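The recurring change in these hunks, and in most of this commit, is replacing pointer arithmetic on the raw Mat::data member (plus C-style casts) with the typed Mat::ptr() accessors, which also bounds-check the row index in debug builds. A minimal sketch of the idiom with illustrative names:

cv::Mat m(4, 5, CV_32F);
for (int y = 0; y < m.rows; y++)
{
    float* row = m.ptr<float>(y);   // typed pointer to row y, instead of (float*)(m.data + y*m.step)
    for (int x = 0; x < m.cols; x++)
        row[x] = float(x + y);
}
const uchar* raw = m.ptr();         // untyped pointer to row 0
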
@ -605,8 +605,8 @@ cv::Scalar cv::sum( InputArray _src )
|
||||
if( ippFuncHint || ippFuncNoHint )
|
||||
{
|
||||
Ipp64f res[4];
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src.data, (int)src.step[0], sz, res, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src.data, (int)src.step[0], sz, res);
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, res, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src.ptr(), (int)src.step[0], sz, res);
|
||||
if( ret >= 0 )
|
||||
{
|
||||
Scalar sc;
|
||||
@ -791,7 +791,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
|
||||
if( ippFuncC1 )
|
||||
{
|
||||
Ipp64f res;
|
||||
if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, &res) >= 0 )
|
||||
if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &res) >= 0 )
|
||||
return Scalar(res);
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -804,9 +804,9 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
|
||||
if( ippFuncC3 )
|
||||
{
|
||||
Ipp64f res1, res2, res3;
|
||||
if( ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 1, &res1) >= 0 &&
|
||||
ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 2, &res2) >= 0 &&
|
||||
ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 3, &res3) >= 0 )
|
||||
if( ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 1, &res1) >= 0 &&
|
||||
ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 2, &res2) >= 0 &&
|
||||
ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 3, &res3) >= 0 )
|
||||
{
|
||||
return Scalar(res1, res2, res3);
|
||||
}
|
||||
@ -838,8 +838,8 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
|
||||
if( ippFuncHint || ippFuncNoHint )
|
||||
{
|
||||
Ipp64f res[4];
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src.data, (int)src.step[0], sz, res, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src.data, (int)src.step[0], sz, res);
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, res, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src.ptr(), (int)src.step[0], sz, res);
|
||||
if( ret >= 0 )
|
||||
{
|
||||
Scalar sc;
|
||||
@ -981,11 +981,11 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv
|
||||
part_sum funcs[3] = { ocl_part_sum<int>, ocl_part_sum<float>, ocl_part_sum<double> };
|
||||
Mat dbm = db.getMat(ACCESS_READ);
|
||||
|
||||
mean = funcs[ddepth - CV_32S](Mat(1, groups, dtype, dbm.data));
|
||||
stddev = funcs[sqddepth - CV_32S](Mat(1, groups, sqdtype, dbm.data + groups * CV_ELEM_SIZE(dtype)));
|
||||
mean = funcs[ddepth - CV_32S](Mat(1, groups, dtype, dbm.ptr()));
|
||||
stddev = funcs[sqddepth - CV_32S](Mat(1, groups, sqdtype, dbm.ptr() + groups * CV_ELEM_SIZE(dtype)));
|
||||
|
||||
if (haveMask)
|
||||
nz = saturate_cast<int>(funcs[0](Mat(1, groups, CV_32SC1, dbm.data +
|
||||
nz = saturate_cast<int>(funcs[0](Mat(1, groups, CV_32SC1, dbm.ptr() +
|
||||
groups * (CV_ELEM_SIZE(dtype) +
|
||||
CV_ELEM_SIZE(sqdtype))))[0]);
|
||||
}
|
||||
@ -1052,7 +1052,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
|
||||
_mean.create(cn, 1, CV_64F, -1, true);
|
||||
mean = _mean.getMat();
|
||||
dcn_mean = (int)mean.total();
|
||||
pmean = (Ipp64f *)mean.data;
|
||||
pmean = mean.ptr<Ipp64f>();
|
||||
}
|
||||
int dcn_stddev = -1;
|
||||
if( _sdv.needed() )
|
||||
@ -1061,7 +1061,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
|
||||
_sdv.create(cn, 1, CV_64F, -1, true);
|
||||
stddev = _sdv.getMat();
|
||||
dcn_stddev = (int)stddev.total();
|
||||
pstddev = (Ipp64f *)stddev.data;
|
||||
pstddev = stddev.ptr<Ipp64f>();
|
||||
}
|
||||
for( int c = cn; c < dcn_mean; c++ )
|
||||
pmean[c] = 0;
|
||||
@ -1079,7 +1079,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
|
||||
0;
|
||||
if( ippFuncC1 )
|
||||
{
|
||||
if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, pmean, pstddev) >= 0 )
|
||||
if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, pmean, pstddev) >= 0 )
|
||||
return;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -1091,9 +1091,9 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
|
||||
0;
|
||||
if( ippFuncC3 )
|
||||
{
|
||||
if( ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
|
||||
ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
|
||||
ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
|
||||
if( ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
|
||||
ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
|
||||
ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
|
||||
return;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -1110,7 +1110,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
|
||||
0;
|
||||
if( ippFuncC1 )
|
||||
{
|
||||
if( ippFuncC1(src.data, (int)src.step[0], sz, pmean, pstddev) >= 0 )
|
||||
if( ippFuncC1(src.ptr(), (int)src.step[0], sz, pmean, pstddev) >= 0 )
|
||||
return;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -1122,9 +1122,9 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
|
||||
0;
|
||||
if( ippFuncC3 )
|
||||
{
|
||||
if( ippFuncC3(src.data, (int)src.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
|
||||
ippFuncC3(src.data, (int)src.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
|
||||
ippFuncC3(src.data, (int)src.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
|
||||
if( ippFuncC3(src.ptr(), (int)src.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 &&
|
||||
ippFuncC3(src.ptr(), (int)src.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 &&
|
||||
ippFuncC3(src.ptr(), (int)src.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 )
|
||||
return;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -1358,26 +1358,26 @@ void getMinMaxRes(const Mat & db, double * minVal, double * maxVal,
|
||||
const uint * minlocptr = NULL, * maxlocptr = NULL;
|
||||
if (minVal || minLoc)
|
||||
{
|
||||
minptr = (const T *)db.data;
|
||||
minptr = db.ptr<T>();
|
||||
index += sizeof(T) * groupnum;
|
||||
}
|
||||
if (maxVal || maxLoc)
|
||||
{
|
||||
maxptr = (const T *)(db.data + index);
|
||||
maxptr = (const T *)(db.ptr() + index);
|
||||
index += sizeof(T) * groupnum;
|
||||
}
|
||||
if (minLoc)
|
||||
{
|
||||
minlocptr = (uint *)(db.data + index);
|
||||
minlocptr = (const uint *)(db.ptr() + index);
|
||||
index += sizeof(uint) * groupnum;
|
||||
}
|
||||
if (maxLoc)
|
||||
{
|
||||
maxlocptr = (uint *)(db.data + index);
|
||||
maxlocptr = (const uint *)(db.ptr() + index);
|
||||
index += sizeof(uint) * groupnum;
|
||||
}
|
||||
if (maxVal2)
|
||||
maxptr2 = (const T *)(db.data + index);
|
||||
maxptr2 = (const T *)(db.ptr() + index);
|
||||
|
||||
for (int i = 0; i < groupnum; i++)
|
||||
{
|
||||
@ -1444,7 +1444,10 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int*
|
||||
bool doubleSupport = dev.doubleFPConfig() > 0, haveMask = !_mask.empty(),
|
||||
haveSrc2 = _src2.kind() != _InputArray::NONE;
|
||||
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
|
||||
kercn = haveMask ? cn : std::min(4, ocl::predictOptimalVectorWidth(_src));
|
||||
kercn = haveMask ? cn : std::min(4, ocl::predictOptimalVectorWidth(_src, _src2));
|
||||
|
||||
if (haveMask && dev.isAMD())
|
||||
return false;
|
||||
|
||||
CV_Assert( (cn == 1 && (!haveMask || _mask.type() == CV_8U)) ||
|
||||
(cn >= 1 && !minLoc && !maxLoc) );
|
||||
@ -1536,7 +1539,7 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int*
|
||||
}
|
||||
|
||||
size_t globalsize = groupnum * wgs;
|
||||
if (!k.run(1, &globalsize, &wgs, false))
|
||||
if (!k.run(1, &globalsize, &wgs, true))
|
||||
return false;
|
||||
|
||||
static const getMinMaxResFunc functab[7] =
|
||||
@ -1602,13 +1605,13 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
|
||||
{
|
||||
Ipp32f min, max;
|
||||
IppiPoint minp, maxp;
|
||||
if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
|
||||
if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
|
||||
{
|
||||
if( minVal )
|
||||
*minVal = (double)min;
|
||||
if( maxVal )
|
||||
*maxVal = (double)max;
|
||||
if( !minp.x && !minp.y && !maxp.x && !maxp.y && !mask.data[0] )
|
||||
if( !minp.x && !minp.y && !maxp.x && !maxp.y && !mask.ptr()[0] )
|
||||
minp.x = maxp.x = -1;
|
||||
if( minIdx )
|
||||
{
|
||||
@ -1641,7 +1644,7 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
|
||||
{
|
||||
Ipp32f min, max;
|
||||
IppiPoint minp, maxp;
|
||||
if( ippFuncC1(src.data, (int)src.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
|
||||
if( ippFuncC1(src.ptr(), (int)src.step[0], sz, &min, &max, &minp, &maxp) >= 0 )
|
||||
{
|
||||
if( minVal )
|
||||
*minVal = (double)min;
|
||||
@ -2190,9 +2193,6 @@ static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double &
|
||||
(!doubleSupport && depth == CV_64F))
|
||||
return false;
|
||||
|
||||
if( depth == CV_32F && (!_mask.empty() || normType == NORM_INF) )
|
||||
return false;
|
||||
|
||||
UMat src = _src.getUMat();
|
||||
|
||||
if (normType == NORM_INF)
|
||||
@ -2280,7 +2280,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
|
||||
if( ippFuncC1 )
|
||||
{
|
||||
Ipp64f norm;
|
||||
if( ippFuncC1(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, &norm) >= 0 )
|
||||
if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 )
|
||||
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
|
||||
|
||||
setIppErrorStatus();
|
||||
@ -2381,8 +2381,8 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
|
||||
if( ippFuncHint || ippFuncNoHint )
|
||||
{
|
||||
Ipp64f norm_array[4];
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src.data, (int)src.step[0], sz, norm_array, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src.data, (int)src.step[0], sz, norm_array);
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, norm_array, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src.ptr(), (int)src.step[0], sz, norm_array);
|
||||
if( ret >= 0 )
|
||||
{
|
||||
Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0];
|
||||
@ -2548,9 +2548,6 @@ static bool ocl_norm( InputArray _src1, InputArray _src2, int normType, InputArr
|
||||
normType &= ~NORM_RELATIVE;
|
||||
bool normsum = normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR;
|
||||
|
||||
if ( !normsum || !_mask.empty() )
|
||||
return false;
|
||||
|
||||
if (normsum)
|
||||
{
|
||||
if (!ocl_sum(_src1, sc1, normType == NORM_L2 || normType == NORM_L2SQR ?
|
||||
@ -2643,7 +2640,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
|
||||
if( ippFuncC1 )
|
||||
{
|
||||
Ipp64f norm;
|
||||
if( ippFuncC1(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, &norm) >= 0 )
|
||||
if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 )
|
||||
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -2679,14 +2676,14 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
|
||||
if (ippFuncNoHint)
|
||||
{
|
||||
Ipp64f norm;
|
||||
if( ippFuncNoHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, &norm) >= 0 )
|
||||
if( ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm) >= 0 )
|
||||
return (double)norm;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
if (ippFuncHint)
|
||||
{
|
||||
Ipp64f norm;
|
||||
if( ippFuncHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, &norm, ippAlgHintAccurate) >= 0 )
|
||||
if( ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm, ippAlgHintAccurate) >= 0 )
|
||||
return (double)norm;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -2739,7 +2736,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
|
||||
if( ippFuncC1 )
|
||||
{
|
||||
Ipp64f norm;
|
||||
if( ippFuncC1(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, &norm) >= 0 )
|
||||
if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 )
|
||||
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
|
||||
setIppErrorStatus();
|
||||
}
|
||||
@ -2839,8 +2836,8 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
|
||||
if( ippFuncHint || ippFuncNoHint )
|
||||
{
|
||||
Ipp64f norm_array[4];
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, norm_array, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], sz, norm_array);
|
||||
IppStatus ret = ippFuncHint ? ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array, ippAlgHintAccurate) :
|
||||
ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array);
|
||||
if( ret >= 0 )
|
||||
{
|
||||
Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0];
|
||||
@ -3322,7 +3319,7 @@ void cv::findNonZero( InputArray _src, OutputArray _idx )
|
||||
_idx.create(n, 1, CV_32SC2);
|
||||
Mat idx = _idx.getMat();
|
||||
CV_Assert(idx.isContinuous());
|
||||
Point* idx_ptr = (Point*)idx.data;
|
||||
Point* idx_ptr = idx.ptr<Point>();
|
||||
|
||||
for( int i = 0; i < src.rows; i++ )
|
||||
{
|
||||
|
File diff suppressed because it is too large
@ -658,7 +658,7 @@ void UMat::copyTo(OutputArray _dst) const
}

Mat dst = _dst.getMat();
u->currAllocator->download(u, dst.data, dims, sz, srcofs, step.p, dst.step.p);
u->currAllocator->download(u, dst.ptr(), dims, sz, srcofs, step.p, dst.step.p);
}

void UMat::copyTo(OutputArray _dst, InputArray _mask) const

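Here the non-masked UMat::copyTo() downloads the device buffer straight into the destination Mat through the allocator; only the destination pointer expression changes to dst.ptr(). A short round-trip sketch:

cv::Mat host(480, 640, CV_8UC3, cv::Scalar::all(127));
cv::UMat device;
host.copyTo(device);        // upload
cv::Mat back;
device.copyTo(back);        // exercises the download path patched above
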
@ -1303,7 +1303,7 @@ OCL_TEST_P(Norm, NORM_INF_2args)
|
||||
OCL_OFF(const double cpuRes = cv::norm(src1_roi, src2_roi, type));
|
||||
OCL_ON(const double gpuRes = cv::norm(usrc1_roi, usrc2_roi, type));
|
||||
|
||||
EXPECT_NEAR(cpuRes, gpuRes, 0.2);
|
||||
EXPECT_PRED3(relativeError, cpuRes, gpuRes, 1e-6);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1323,7 +1323,7 @@ OCL_TEST_P(Norm, NORM_INF_2args_mask)
|
||||
OCL_OFF(const double cpuRes = cv::norm(src1_roi, src2_roi, type, mask_roi));
|
||||
OCL_ON(const double gpuRes = cv::norm(usrc1_roi, usrc2_roi, type, umask_roi));
|
||||
|
||||
EXPECT_NEAR(cpuRes, gpuRes, 0.1);
|
||||
EXPECT_PRED3(relativeError, cpuRes, gpuRes, 1e-6);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1547,7 +1547,49 @@ OCL_TEST_P(InRange, Scalar)
|
||||
|
||||
//////////////////////////////// ConvertScaleAbs ////////////////////////////////////////////////
|
||||
|
||||
typedef ArithmTestBase ConvertScaleAbs;
|
||||
PARAM_TEST_CASE(ConvertScaleAbs, MatDepth, Channels, bool)
|
||||
{
|
||||
int depth;
|
||||
int cn;
|
||||
bool use_roi;
|
||||
cv::Scalar val;
|
||||
|
||||
TEST_DECLARE_INPUT_PARAMETER(src);
|
||||
TEST_DECLARE_OUTPUT_PARAMETER(dst);
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
depth = GET_PARAM(0);
|
||||
cn = GET_PARAM(1);
|
||||
use_roi = GET_PARAM(2);
|
||||
}
|
||||
|
||||
virtual void generateTestData()
|
||||
{
|
||||
const int stype = CV_MAKE_TYPE(depth, cn);
|
||||
const int dtype = CV_MAKE_TYPE(CV_8U, cn);
|
||||
|
||||
Size roiSize = randomSize(1, MAX_VALUE);
|
||||
Border srcBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
|
||||
randomSubMat(src, src_roi, roiSize, srcBorder, stype, 2, 11); // FIXIT: Test with minV, maxV
|
||||
|
||||
Border dstBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
|
||||
randomSubMat(dst, dst_roi, roiSize, dstBorder, dtype, 5, 16);
|
||||
|
||||
val = cv::Scalar(rng.uniform(-100.0, 100.0), rng.uniform(-100.0, 100.0),
|
||||
rng.uniform(-100.0, 100.0), rng.uniform(-100.0, 100.0));
|
||||
|
||||
UMAT_UPLOAD_INPUT_PARAMETER(src);
|
||||
UMAT_UPLOAD_OUTPUT_PARAMETER(dst);
|
||||
}
|
||||
|
||||
void Near(double threshold = 0.)
|
||||
{
|
||||
OCL_EXPECT_MATS_NEAR(dst, threshold);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
OCL_TEST_P(ConvertScaleAbs, Mat)
|
||||
{
|
||||
@ -1555,10 +1597,10 @@ OCL_TEST_P(ConvertScaleAbs, Mat)
|
||||
{
|
||||
generateTestData();
|
||||
|
||||
OCL_OFF(cv::convertScaleAbs(src1_roi, dst1_roi, val[0], val[1]));
|
||||
OCL_ON(cv::convertScaleAbs(usrc1_roi, udst1_roi, val[0], val[1]));
|
||||
OCL_OFF(cv::convertScaleAbs(src_roi, dst_roi, val[0], val[1]));
|
||||
OCL_ON(cv::convertScaleAbs(usrc_roi, udst_roi, val[0], val[1]));
|
||||
|
||||
Near(depth <= CV_32S ? 1 : 1e-6);
|
||||
Near(1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1704,7 +1746,7 @@ OCL_TEST_P(ReduceSum, Mat)
|
||||
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, CV_REDUCE_SUM, dtype));
|
||||
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, CV_REDUCE_SUM, dtype));
|
||||
|
||||
double eps = ddepth <= CV_32S ? 1 : 1e-4;
|
||||
double eps = ddepth <= CV_32S ? 1 : 7e-4;
|
||||
OCL_EXPECT_MATS_NEAR(dst, eps);
|
||||
}
|
||||
}
|
||||
|
@ -90,14 +90,15 @@ PARAM_TEST_CASE(Gemm,
|
||||
|
||||
void generateTestData()
|
||||
{
|
||||
Size ARoiSize = randomSize(1, MAX_VALUE);
|
||||
// set minimum size to 20, since testing less sizes doesn't make sense
|
||||
Size ARoiSize = randomSize(20, MAX_VALUE);
|
||||
Border ABorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
|
||||
randomSubMat(A, A_roi, ARoiSize, ABorder, type, -11, 11);
|
||||
|
||||
if (atrans)
|
||||
ARoiSize = Size(ARoiSize.height, ARoiSize.width);
|
||||
|
||||
Size BRoiSize = randomSize(1, MAX_VALUE);
|
||||
Size BRoiSize = randomSize(20, MAX_VALUE);
|
||||
if (btrans)
|
||||
BRoiSize.width = ARoiSize.width;
|
||||
else
|
||||
|
@ -562,10 +562,10 @@ static void inRange(const Mat& src, const Mat& lb, const Mat& rb, Mat& dst)
|
||||
|
||||
for( i = 0; i < nplanes; i++, ++it )
|
||||
{
|
||||
const uchar* sptr = planes[0].data;
|
||||
const uchar* aptr = planes[1].data;
|
||||
const uchar* bptr = planes[2].data;
|
||||
uchar* dptr = planes[3].data;
|
||||
const uchar* sptr = planes[0].ptr();
|
||||
const uchar* aptr = planes[1].ptr();
|
||||
const uchar* bptr = planes[2].ptr();
|
||||
uchar* dptr = planes[3].ptr();
|
||||
|
||||
switch( depth )
|
||||
{
|
||||
@ -614,8 +614,8 @@ static void inRangeS(const Mat& src, const Scalar& lb, const Scalar& rb, Mat& ds
|
||||
|
||||
for( i = 0; i < nplanes; i++, ++it )
|
||||
{
|
||||
const uchar* sptr = planes[0].data;
|
||||
uchar* dptr = planes[1].data;
|
||||
const uchar* sptr = planes[0].ptr();
|
||||
uchar* dptr = planes[1].ptr();
|
||||
|
||||
switch( depth )
|
||||
{
|
||||
@ -905,8 +905,8 @@ static void exp(const Mat& src, Mat& dst)
|
||||
|
||||
for( i = 0; i < nplanes; i++, ++it )
|
||||
{
|
||||
const uchar* sptr = planes[0].data;
|
||||
uchar* dptr = planes[1].data;
|
||||
const uchar* sptr = planes[0].ptr();
|
||||
uchar* dptr = planes[1].ptr();
|
||||
|
||||
if( depth == CV_32F )
|
||||
{
|
||||
@ -934,8 +934,8 @@ static void log(const Mat& src, Mat& dst)
|
||||
|
||||
for( i = 0; i < nplanes; i++, ++it )
|
||||
{
|
||||
const uchar* sptr = planes[0].data;
|
||||
uchar* dptr = planes[1].data;
|
||||
const uchar* sptr = planes[0].ptr();
|
||||
uchar* dptr = planes[1].ptr();
|
||||
|
||||
if( depth == CV_32F )
|
||||
{
|
||||
@ -1027,10 +1027,10 @@ static void cartToPolar(const Mat& mx, const Mat& my, Mat& mmag, Mat& mangle, bo
|
||||
{
|
||||
if( depth == CV_32F )
|
||||
{
|
||||
const float* xptr = (const float*)planes[0].data;
|
||||
const float* yptr = (const float*)planes[1].data;
|
||||
float* mptr = (float*)planes[2].data;
|
||||
float* aptr = (float*)planes[3].data;
|
||||
const float* xptr = planes[0].ptr<float>();
|
||||
const float* yptr = planes[1].ptr<float>();
|
||||
float* mptr = planes[2].ptr<float>();
|
||||
float* aptr = planes[3].ptr<float>();
|
||||
|
||||
for( j = 0; j < total; j++ )
|
||||
{
|
||||
@ -1042,10 +1042,10 @@ static void cartToPolar(const Mat& mx, const Mat& my, Mat& mmag, Mat& mangle, bo
|
||||
}
|
||||
else
|
||||
{
|
||||
const double* xptr = (const double*)planes[0].data;
|
||||
const double* yptr = (const double*)planes[1].data;
|
||||
double* mptr = (double*)planes[2].data;
|
||||
double* aptr = (double*)planes[3].data;
|
||||
const double* xptr = planes[0].ptr<double>();
|
||||
const double* yptr = planes[1].ptr<double>();
|
||||
double* mptr = planes[2].ptr<double>();
|
||||
double* aptr = planes[3].ptr<double>();
|
||||
|
||||
for( j = 0; j < total; j++ )
|
||||
{
|
||||
|
@ -41,7 +41,7 @@
|
||||
#include "test_precomp.hpp"
|
||||
#include <cstdlib>
|
||||
|
||||
static void mytest(cv::Ptr<cv::optim::ConjGradSolver> solver,cv::Ptr<cv::optim::Solver::Function> ptr_F,cv::Mat& x,
|
||||
static void mytest(cv::Ptr<cv::ConjGradSolver> solver,cv::Ptr<cv::MinProblemSolver::Function> ptr_F,cv::Mat& x,
|
||||
cv::Mat& etalon_x,double etalon_res){
|
||||
solver->setFunction(ptr_F);
|
||||
//int ndim=MAX(step.cols,step.rows);
|
||||
@ -50,7 +50,7 @@ static void mytest(cv::Ptr<cv::optim::ConjGradSolver> solver,cv::Ptr<cv::optim::
|
||||
std::cout<<"x:\n\t"<<x<<std::endl;
|
||||
std::cout<<"etalon_res:\n\t"<<etalon_res<<std::endl;
|
||||
std::cout<<"etalon_x:\n\t"<<etalon_x<<std::endl;
|
||||
double tol=solver->getTermCriteria().epsilon;
|
||||
double tol = 1e-2;
|
||||
ASSERT_TRUE(std::abs(res-etalon_res)<tol);
|
||||
/*for(cv::Mat_<double>::iterator it1=x.begin<double>(),it2=etalon_x.begin<double>();it1!=x.end<double>();it1++,it2++){
|
||||
ASSERT_TRUE(std::abs((*it1)-(*it2))<tol);
|
||||
@ -58,7 +58,7 @@ static void mytest(cv::Ptr<cv::optim::ConjGradSolver> solver,cv::Ptr<cv::optim::
|
||||
std::cout<<"--------------------------\n";
|
||||
}
|
||||
|
||||
class SphereF:public cv::optim::Solver::Function{
|
||||
class SphereF:public cv::MinProblemSolver::Function{
|
||||
public:
|
||||
double calc(const double* x)const{
|
||||
return x[0]*x[0]+x[1]*x[1]+x[2]*x[2]+x[3]*x[3];
|
||||
@ -69,7 +69,7 @@ public:
|
||||
}
|
||||
}
|
||||
};
|
||||
class RosenbrockF:public cv::optim::Solver::Function{
|
||||
class RosenbrockF:public cv::MinProblemSolver::Function{
|
||||
double calc(const double* x)const{
|
||||
return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0])+(1-x[0])*(1-x[0]);
|
||||
}
|
||||
@ -79,11 +79,11 @@ class RosenbrockF:public cv::optim::Solver::Function{
|
||||
}
|
||||
};
|
||||
|
||||
TEST(Optim_ConjGrad, regression_basic){
|
||||
cv::Ptr<cv::optim::ConjGradSolver> solver=cv::optim::createConjGradSolver();
|
||||
TEST(DISABLED_Core_ConjGradSolver, regression_basic){
|
||||
cv::Ptr<cv::ConjGradSolver> solver=cv::ConjGradSolver::create();
|
||||
#if 1
|
||||
{
|
||||
cv::Ptr<cv::optim::Solver::Function> ptr_F(new SphereF());
|
||||
cv::Ptr<cv::MinProblemSolver::Function> ptr_F(new SphereF());
|
||||
cv::Mat x=(cv::Mat_<double>(4,1)<<50.0,10.0,1.0,-10.0),
|
||||
etalon_x=(cv::Mat_<double>(1,4)<<0.0,0.0,0.0,0.0);
|
||||
double etalon_res=0.0;
|
||||
@ -92,7 +92,7 @@ TEST(Optim_ConjGrad, regression_basic){
|
||||
#endif
|
||||
#if 1
|
||||
{
|
||||
cv::Ptr<cv::optim::Solver::Function> ptr_F(new RosenbrockF());
|
||||
cv::Ptr<cv::MinProblemSolver::Function> ptr_F(new RosenbrockF());
|
||||
cv::Mat x=(cv::Mat_<double>(2,1)<<0.0,0.0),
|
||||
etalon_x=(cv::Mat_<double>(2,1)<<1.0,1.0);
|
||||
double etalon_res=0.0;
|
@ -43,7 +43,7 @@
|
||||
#include <cmath>
|
||||
#include <algorithm>
|
||||
|
||||
static void mytest(cv::Ptr<cv::optim::DownhillSolver> solver,cv::Ptr<cv::optim::Solver::Function> ptr_F,cv::Mat& x,cv::Mat& step,
|
||||
static void mytest(cv::Ptr<cv::DownhillSolver> solver,cv::Ptr<cv::MinProblemSolver::Function> ptr_F,cv::Mat& x,cv::Mat& step,
|
||||
cv::Mat& etalon_x,double etalon_res){
|
||||
solver->setFunction(ptr_F);
|
||||
int ndim=MAX(step.cols,step.rows);
|
||||
@ -58,7 +58,7 @@ static void mytest(cv::Ptr<cv::optim::DownhillSolver> solver,cv::Ptr<cv::optim::
|
||||
std::cout<<"x:\n\t"<<x<<std::endl;
|
||||
std::cout<<"etalon_res:\n\t"<<etalon_res<<std::endl;
|
||||
std::cout<<"etalon_x:\n\t"<<etalon_x<<std::endl;
|
||||
double tol=solver->getTermCriteria().epsilon;
|
||||
double tol=1e-2;//solver->getTermCriteria().epsilon;
|
||||
ASSERT_TRUE(std::abs(res-etalon_res)<tol);
|
||||
/*for(cv::Mat_<double>::iterator it1=x.begin<double>(),it2=etalon_x.begin<double>();it1!=x.end<double>();it1++,it2++){
|
||||
ASSERT_TRUE(std::abs((*it1)-(*it2))<tol);
|
||||
@ -66,23 +66,23 @@ static void mytest(cv::Ptr<cv::optim::DownhillSolver> solver,cv::Ptr<cv::optim::
|
||||
std::cout<<"--------------------------\n";
|
||||
}
|
||||
|
||||
class SphereF:public cv::optim::Solver::Function{
|
||||
class SphereF:public cv::MinProblemSolver::Function{
|
||||
public:
|
||||
double calc(const double* x)const{
|
||||
return x[0]*x[0]+x[1]*x[1];
|
||||
}
|
||||
};
|
||||
class RosenbrockF:public cv::optim::Solver::Function{
|
||||
class RosenbrockF:public cv::MinProblemSolver::Function{
|
||||
double calc(const double* x)const{
|
||||
return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0])+(1-x[0])*(1-x[0]);
|
||||
}
|
||||
};
|
||||
|
||||
TEST(Optim_Downhill, regression_basic){
|
||||
cv::Ptr<cv::optim::DownhillSolver> solver=cv::optim::createDownhillSolver();
|
||||
TEST(DISABLED_Core_DownhillSolver, regression_basic){
|
||||
cv::Ptr<cv::DownhillSolver> solver=cv::DownhillSolver::create();
|
||||
#if 1
|
||||
{
|
||||
cv::Ptr<cv::optim::Solver::Function> ptr_F(new SphereF());
|
||||
cv::Ptr<cv::MinProblemSolver::Function> ptr_F = cv::makePtr<SphereF>();
|
||||
cv::Mat x=(cv::Mat_<double>(1,2)<<1.0,1.0),
|
||||
step=(cv::Mat_<double>(2,1)<<-0.5,-0.5),
|
||||
etalon_x=(cv::Mat_<double>(1,2)<<-0.0,0.0);
|
||||
@ -92,7 +92,7 @@ TEST(Optim_Downhill, regression_basic){
|
||||
#endif
|
||||
#if 1
|
||||
{
|
||||
cv::Ptr<cv::optim::Solver::Function> ptr_F(new RosenbrockF());
|
||||
cv::Ptr<cv::MinProblemSolver::Function> ptr_F = cv::makePtr<RosenbrockF>();
|
||||
cv::Mat x=(cv::Mat_<double>(2,1)<<0.0,0.0),
|
||||
step=(cv::Mat_<double>(2,1)<<0.5,+0.5),
|
||||
etalon_x=(cv::Mat_<double>(2,1)<<1.0,1.0);
|
@ -39,8 +39,8 @@ static void DFT_1D( const Mat& _src, Mat& _dst, int flags, const Mat& _wave=Mat(
|
||||
double scale = (flags & DFT_SCALE) ? 1./n : 1.;
|
||||
size_t esz = _src.elemSize();
|
||||
size_t srcstep = esz, dststep = esz;
|
||||
const uchar* src0 = _src.data;
|
||||
uchar* dst0 = _dst.data;
|
||||
const uchar* src0 = _src.ptr();
|
||||
uchar* dst0 = _dst.ptr();
|
||||
|
||||
CV_Assert( _src.cols + _src.rows - 1 == n );
|
||||
|
||||
|
@ -164,7 +164,7 @@ void Core_EigenTest_32::run(int) { check_full(CV_32FC1); }
|
||||
void Core_EigenTest_64::run(int) { check_full(CV_64FC1); }
|
||||
|
||||
Core_EigenTest::Core_EigenTest()
|
||||
: eps_val_32(1e-3f), eps_vec_32(1e-2f),
|
||||
: eps_val_32(1e-3f), eps_vec_32(12e-3f),
|
||||
eps_val_64(1e-4f), eps_vec_64(1e-3f), ntests(100) {}
|
||||
Core_EigenTest::~Core_EigenTest() {}
|
||||
|
||||
|
@ -126,7 +126,7 @@ protected:
|
||||
|
||||
CvSeq* seq = cvCreateSeq(test_mat.type(), (int)sizeof(CvSeq),
|
||||
(int)test_mat.elemSize(), storage);
|
||||
cvSeqPushMulti(seq, test_mat.data, test_mat.cols*test_mat.rows);
|
||||
cvSeqPushMulti(seq, test_mat.ptr(), test_mat.cols*test_mat.rows);
|
||||
|
||||
CvGraph* graph = cvCreateGraph( CV_ORIENTED_GRAPH,
|
||||
sizeof(CvGraph), sizeof(CvGraphVtx),
|
||||
|
@ -41,7 +41,7 @@
|
||||
#include "test_precomp.hpp"
|
||||
#include <iostream>
|
||||
|
||||
TEST(Optim_LpSolver, regression_basic){
|
||||
TEST(Core_LPSolver, regression_basic){
|
||||
cv::Mat A,B,z,etalon_z;
|
||||
|
||||
#if 1
|
||||
@ -49,7 +49,7 @@ TEST(Optim_LpSolver, regression_basic){
|
||||
A=(cv::Mat_<double>(3,1)<<3,1,2);
|
||||
B=(cv::Mat_<double>(3,4)<<1,1,3,30,2,2,5,24,4,1,2,36);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
cv::optim::solveLP(A,B,z);
|
||||
cv::solveLP(A,B,z);
|
||||
std::cout<<"here z goes\n"<<z<<"\n";
|
||||
etalon_z=(cv::Mat_<double>(3,1)<<8,4,0);
|
||||
ASSERT_EQ(cv::countNonZero(z!=etalon_z),0);
|
||||
@ -60,7 +60,7 @@ TEST(Optim_LpSolver, regression_basic){
|
||||
A=(cv::Mat_<double>(1,2)<<18,12.5);
|
||||
B=(cv::Mat_<double>(3,3)<<1,1,20,1,0,20,0,1,16);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
cv::optim::solveLP(A,B,z);
|
||||
cv::solveLP(A,B,z);
|
||||
std::cout<<"here z goes\n"<<z<<"\n";
|
||||
etalon_z=(cv::Mat_<double>(2,1)<<20,0);
|
||||
ASSERT_EQ(cv::countNonZero(z!=etalon_z),0);
|
||||
@ -71,14 +71,14 @@ TEST(Optim_LpSolver, regression_basic){
|
||||
A=(cv::Mat_<double>(1,2)<<5,-3);
|
||||
B=(cv::Mat_<double>(2,3)<<1,-1,1,2,1,2);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
cv::optim::solveLP(A,B,z);
|
||||
cv::solveLP(A,B,z);
|
||||
std::cout<<"here z goes\n"<<z<<"\n";
|
||||
etalon_z=(cv::Mat_<double>(2,1)<<1,0);
|
||||
ASSERT_EQ(cv::countNonZero(z!=etalon_z),0);
|
||||
#endif
|
||||
}
|
||||
|
||||
TEST(Optim_LpSolver, regression_init_unfeasible){
|
||||
TEST(Core_LPSolver, regression_init_unfeasible){
|
||||
cv::Mat A,B,z,etalon_z;
|
||||
|
||||
#if 1
|
||||
@ -86,14 +86,14 @@ TEST(Optim_LpSolver, regression_init_unfeasible){
|
||||
A=(cv::Mat_<double>(1,3)<<-1,-1,-1);
|
||||
B=(cv::Mat_<double>(2,4)<<-2,-7.5,-3,-10000,-20,-5,-10,-30000);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
cv::optim::solveLP(A,B,z);
|
||||
cv::solveLP(A,B,z);
|
||||
std::cout<<"here z goes\n"<<z<<"\n";
|
||||
etalon_z=(cv::Mat_<double>(3,1)<<1250,1000,0);
|
||||
ASSERT_EQ(cv::countNonZero(z!=etalon_z),0);
|
||||
#endif
|
||||
}
|
||||
|
||||
TEST(Optim_LpSolver, regression_absolutely_unfeasible){
|
||||
TEST(DISABLED_Core_LPSolver, regression_absolutely_unfeasible){
|
||||
cv::Mat A,B,z,etalon_z;
|
||||
|
||||
#if 1
|
||||
@ -101,12 +101,12 @@ TEST(Optim_LpSolver, regression_absolutely_unfeasible){
|
||||
A=(cv::Mat_<double>(1,1)<<1);
|
||||
B=(cv::Mat_<double>(2,2)<<1,-1);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
int res=cv::optim::solveLP(A,B,z);
|
||||
int res=cv::solveLP(A,B,z);
|
||||
ASSERT_EQ(res,-1);
|
||||
#endif
|
||||
}
|
||||
|
||||
TEST(Optim_LpSolver, regression_multiple_solutions){
|
||||
TEST(Core_LPSolver, regression_multiple_solutions){
|
||||
cv::Mat A,B,z,etalon_z;
|
||||
|
||||
#if 1
|
||||
@ -114,7 +114,7 @@ TEST(Optim_LpSolver, regression_multiple_solutions){
|
||||
A=(cv::Mat_<double>(2,1)<<1,1);
|
||||
B=(cv::Mat_<double>(1,3)<<1,1,1);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
int res=cv::optim::solveLP(A,B,z);
|
||||
int res=cv::solveLP(A,B,z);
|
||||
printf("res=%d\n",res);
|
||||
printf("scalar %g\n",z.dot(A));
|
||||
std::cout<<"here z goes\n"<<z<<"\n";
|
||||
@ -123,7 +123,7 @@ TEST(Optim_LpSolver, regression_multiple_solutions){
|
||||
#endif
|
||||
}
|
||||
|
||||
TEST(Optim_LpSolver, regression_cycling){
|
||||
TEST(Core_LPSolver, regression_cycling){
|
||||
cv::Mat A,B,z,etalon_z;
|
||||
|
||||
#if 1
|
||||
@ -131,7 +131,7 @@ TEST(Optim_LpSolver, regression_cycling){
|
||||
A=(cv::Mat_<double>(4,1)<<10,-57,-9,-24);
|
||||
B=(cv::Mat_<double>(3,5)<<0.5,-5.5,-2.5,9,0,0.5,-1.5,-0.5,1,0,1,0,0,0,1);
|
||||
std::cout<<"here A goes\n"<<A<<"\n";
|
||||
int res=cv::optim::solveLP(A,B,z);
|
||||
int res=cv::solveLP(A,B,z);
|
||||
printf("res=%d\n",res);
|
||||
printf("scalar %g\n",z.dot(A));
|
||||
std::cout<<"here z goes\n"<<z<<"\n";
|
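These tests exercise cv::solveLP after its move out of the cv::optim namespace. A sketch of the first regression case (maximize 3*x1 + x2 + 2*x3; each row of the constraint matrix carries the coefficients with the right-hand side in the last column, following the solver's constraint layout):

cv::Mat A = (cv::Mat_<double>(3, 1) << 3, 1, 2);            // objective coefficients
cv::Mat B = (cv::Mat_<double>(3, 4) << 1, 1, 3, 30,
                                       2, 2, 5, 24,
                                       4, 1, 2, 36);        // constraints, bounds in the last column
cv::Mat z;
int res = cv::solveLP(A, B, z);   // the test expects z = (8, 4, 0)
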
@ -323,8 +323,8 @@ protected:
|
||||
evec = svd.vt;
|
||||
eval = svd.w;*/
|
||||
|
||||
Mat subEval( maxComponents, 1, eval.type(), eval.data ),
|
||||
subEvec( maxComponents, evec.cols, evec.type(), evec.data );
|
||||
Mat subEval( maxComponents, 1, eval.type(), eval.ptr() ),
|
||||
subEvec( maxComponents, evec.cols, evec.type(), evec.ptr() );
|
||||
|
||||
#ifdef CHECK_C
|
||||
Mat prjTestPoints, backPrjTestPoints, cPoints = rPoints.t(), cTestPoints = rTestPoints.t();
|
||||
|
@ -458,7 +458,7 @@ void Core_TraceTest::prepare_to_validation( int )
|
||||
{
|
||||
Mat& mat = test_mat[INPUT][0];
|
||||
int count = MIN( mat.rows, mat.cols );
|
||||
Mat diag(count, 1, mat.type(), mat.data, mat.step + mat.elemSize());
|
||||
Mat diag(count, 1, mat.type(), mat.ptr(), mat.step + mat.elemSize());
|
||||
Scalar r = cvtest::mean(diag);
|
||||
r *= (double)count;
|
||||
|
||||
@ -2698,7 +2698,7 @@ protected:
|
||||
case MAT_1_N_CDIM:
|
||||
data.create(1, N, CV_32FC(dims));
|
||||
for( i = 0; i < N; i++ )
|
||||
memcpy(data.data + i * dims * sizeof(float), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
|
||||
memcpy(data.ptr() + i * dims * sizeof(float), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
|
||||
break;
|
||||
|
||||
case MAT_N_DIM_C1_NONCONT:
|
||||
|
@ -3,28 +3,6 @@
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
TEST(Core_Drawing, _914)
|
||||
{
|
||||
const int rows = 256;
|
||||
const int cols = 256;
|
||||
|
||||
Mat img(rows, cols, CV_8UC1, Scalar(255));
|
||||
|
||||
line(img, Point(0, 10), Point(255, 10), Scalar(0), 2, 4);
|
||||
line(img, Point(-5, 20), Point(260, 20), Scalar(0), 2, 4);
|
||||
line(img, Point(10, 0), Point(10, 255), Scalar(0), 2, 4);
|
||||
|
||||
double x0 = 0.0/pow(2.0, -2.0);
|
||||
double x1 = 255.0/pow(2.0, -2.0);
|
||||
double y = 30.5/pow(2.0, -2.0);
|
||||
|
||||
line(img, Point(int(x0), int(y)), Point(int(x1), int(y)), Scalar(0), 2, 4, 2);
|
||||
|
||||
int pixelsDrawn = rows*cols - countNonZero(img);
|
||||
ASSERT_EQ( (3*rows + cols)*3 - 3*9, pixelsDrawn);
|
||||
}
|
||||
|
||||
|
||||
TEST(Core_OutputArrayCreate, _1997)
|
||||
{
|
||||
struct local {
|
||||
|
@ -37,8 +37,8 @@ bool Core_RandTest::check_pdf(const Mat& hist, double scale,
|
||||
int dist_type, double& refval, double& realval)
|
||||
{
|
||||
Mat hist0(hist.size(), CV_32F);
|
||||
const int* H = (const int*)hist.data;
|
||||
float* H0 = ((float*)hist0.data);
|
||||
const int* H = hist.ptr<int>();
|
||||
float* H0 = hist0.ptr<float>();
|
||||
int i, hsz = hist.cols;
|
||||
|
||||
double sum = 0;
|
||||
@ -183,7 +183,7 @@ void Core_RandTest::run( int )
|
||||
|
||||
for( c = 0; c < cn; c++ )
|
||||
{
|
||||
const uchar* data = arr[0].data;
|
||||
const uchar* data = arr[0].ptr();
|
||||
int* H = hist[c].ptr<int>();
|
||||
int HSZ = hist[c].cols;
|
||||
double minVal = dist_type == CV_RAND_UNI ? A[c] : A[c] - B[c]*4;
|
||||
@ -255,7 +255,7 @@ void Core_RandTest::run( int )
|
||||
int SDIM = cvtest::randInt(rng) % (MAX_SDIM-1) + 2;
|
||||
int N0 = (SZ*cn/SDIM), n = 0;
|
||||
double r2 = 0;
|
||||
const uchar* data = arr[0].data;
|
||||
const uchar* data = arr[0].ptr();
|
||||
double scale[4], delta[4];
|
||||
for( c = 0; c < cn; c++ )
|
||||
{
|
||||
|
@ -55,15 +55,27 @@ namespace cv { namespace cuda {
|
||||
////////////////////////////////////////////////////
|
||||
// MOG
|
||||
|
||||
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractorMOG
|
||||
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
|
||||
{
|
||||
public:
|
||||
using cv::BackgroundSubtractorMOG::apply;
|
||||
using cv::BackgroundSubtractorMOG::getBackgroundImage;
|
||||
|
||||
using cv::BackgroundSubtractor::apply;
|
||||
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
|
||||
|
||||
using cv::BackgroundSubtractor::getBackgroundImage;
|
||||
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
|
||||
|
||||
virtual int getHistory() const = 0;
|
||||
virtual void setHistory(int nframes) = 0;
|
||||
|
||||
virtual int getNMixtures() const = 0;
|
||||
virtual void setNMixtures(int nmix) = 0;
|
||||
|
||||
virtual double getBackgroundRatio() const = 0;
|
||||
virtual void setBackgroundRatio(double backgroundRatio) = 0;
|
||||
|
||||
virtual double getNoiseSigma() const = 0;
|
||||
virtual void setNoiseSigma(double noiseSigma) = 0;
|
||||
};
|
||||
|
||||
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
|
||||
@ -91,12 +103,41 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
|
||||
////////////////////////////////////////////////////
|
||||
// GMG
|
||||
|
||||
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractorGMG
|
||||
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
|
||||
{
|
||||
public:
|
||||
using cv::BackgroundSubtractorGMG::apply;
|
||||
|
||||
using cv::BackgroundSubtractor::apply;
|
||||
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
|
||||
|
||||
virtual int getMaxFeatures() const = 0;
|
||||
virtual void setMaxFeatures(int maxFeatures) = 0;
|
||||
|
||||
virtual double getDefaultLearningRate() const = 0;
|
||||
virtual void setDefaultLearningRate(double lr) = 0;
|
||||
|
||||
virtual int getNumFrames() const = 0;
|
||||
virtual void setNumFrames(int nframes) = 0;
|
||||
|
||||
virtual int getQuantizationLevels() const = 0;
|
||||
virtual void setQuantizationLevels(int nlevels) = 0;
|
||||
|
||||
virtual double getBackgroundPrior() const = 0;
|
||||
virtual void setBackgroundPrior(double bgprior) = 0;
|
||||
|
||||
virtual int getSmoothingRadius() const = 0;
|
||||
virtual void setSmoothingRadius(int radius) = 0;
|
||||
|
||||
virtual double getDecisionThreshold() const = 0;
|
||||
virtual void setDecisionThreshold(double thresh) = 0;
|
||||
|
||||
virtual bool getUpdateBackgroundModel() const = 0;
|
||||
virtual void setUpdateBackgroundModel(bool update) = 0;
|
||||
|
||||
virtual double getMinVal() const = 0;
|
||||
virtual void setMinVal(double val) = 0;
|
||||
|
||||
virtual double getMaxVal() const = 0;
|
||||
virtual void setMaxVal(double val) = 0;
|
||||
};
|
||||
|
||||
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
|
||||
|
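The header rewrite above makes the CUDA background subtractors derive directly from cv::BackgroundSubtractor (instead of the cv::BackgroundSubtractorMOG / cv::BackgroundSubtractorGMG classes) and exposes their tuning parameters as virtual getters and setters. A brief usage sketch of the MOG variant; the frame contents and the header name are assumptions, not taken from this diff:

#include <opencv2/cudabgsegm.hpp>   // assumed module header

cv::Ptr<cv::cuda::BackgroundSubtractorMOG> mog = cv::cuda::createBackgroundSubtractorMOG();
mog->setNMixtures(5);

cv::Mat frame(480, 640, CV_8UC3, cv::Scalar::all(127));   // stand-in for a video frame
cv::cuda::GpuMat d_frame, d_fgmask;
d_frame.upload(frame);
mog->apply(d_frame, d_fgmask, 0.01);   // apply(image, fgmask, learningRate) from the cv::BackgroundSubtractor base
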
@ -239,58 +239,7 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
|
||||
cv::Mat foreground;
|
||||
|
||||
mog->apply(frame, foreground, learningRate);
|
||||
|
||||
int i = 0;
|
||||
|
||||
// collect performance data
|
||||
for (; i < numIters; ++i)
|
||||
{
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
if (cn != 3)
|
||||
{
|
||||
cv::Mat temp;
|
||||
if (cn == 1)
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
|
||||
else
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
startTimer();
|
||||
if(!next())
|
||||
break;
|
||||
|
||||
mog->apply(frame, foreground, learningRate);
|
||||
|
||||
stopTimer();
|
||||
}
|
||||
|
||||
// process last frame in sequence to get data for sanity test
|
||||
for (; i < numIters; ++i)
|
||||
{
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
if (cn != 3)
|
||||
{
|
||||
cv::Mat temp;
|
||||
if (cn == 1)
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
|
||||
else
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
mog->apply(frame, foreground, learningRate);
|
||||
}
|
||||
|
||||
CPU_SANITY_CHECK(foreground);
|
||||
FAIL_NO_CPU();
|
||||
}
|
||||
}
|
||||
|
||||
@ -576,7 +525,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
|
||||
cv::cuda::GpuMat d_frame(frame);
|
||||
cv::cuda::GpuMat foreground;
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
|
||||
cv::Ptr<cv::cuda::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
|
||||
d_gmg->setMaxFeatures(maxFeatures);
|
||||
|
||||
d_gmg->apply(d_frame, foreground);
|
||||
@ -645,71 +594,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::Mat foreground;
|
||||
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::createBackgroundSubtractorGMG();
|
||||
gmg->setMaxFeatures(maxFeatures);
|
||||
|
||||
gmg->apply(frame, foreground);
|
||||
|
||||
int i = 0;
|
||||
|
||||
// collect performance data
|
||||
for (; i < numIters; ++i)
|
||||
{
|
||||
cap >> frame;
|
||||
if (frame.empty())
|
||||
{
|
||||
cap.release();
|
||||
cap.open(inputFile);
|
||||
cap >> frame;
|
||||
}
|
||||
|
||||
if (cn != 3)
|
||||
{
|
||||
cv::Mat temp;
|
||||
if (cn == 1)
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
|
||||
else
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
startTimer();
|
||||
if(!next())
|
||||
break;
|
||||
|
||||
gmg->apply(frame, foreground);
|
||||
|
||||
stopTimer();
|
||||
}
|
||||
|
||||
// process last frame in sequence to get data for sanity test
|
||||
for (; i < numIters; ++i)
|
||||
{
|
||||
cap >> frame;
|
||||
if (frame.empty())
|
||||
{
|
||||
cap.release();
|
||||
cap.open(inputFile);
|
||||
cap >> frame;
|
||||
}
|
||||
|
||||
if (cn != 3)
|
||||
{
|
||||
cv::Mat temp;
|
||||
if (cn == 1)
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
|
||||
else
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
gmg->apply(frame, foreground);
|
||||
}
|
||||
|
||||
CPU_SANITY_CHECK(foreground);
|
||||
FAIL_NO_CPU();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -59,91 +59,14 @@ using namespace cvtest;
|
||||
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// MOG
|
||||
|
||||
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
|
||||
|
||||
namespace
|
||||
{
|
||||
IMPLEMENT_PARAM_CLASS(UseGray, bool)
|
||||
IMPLEMENT_PARAM_CLASS(LearningRate, double)
|
||||
}
|
||||
|
||||
PARAM_TEST_CASE(MOG, cv::cuda::DeviceInfo, std::string, UseGray, LearningRate, UseRoi)
|
||||
{
|
||||
cv::cuda::DeviceInfo devInfo;
|
||||
std::string inputFile;
|
||||
bool useGray;
|
||||
double learningRate;
|
||||
bool useRoi;
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
devInfo = GET_PARAM(0);
|
||||
cv::cuda::setDevice(devInfo.deviceID());
|
||||
|
||||
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
|
||||
|
||||
useGray = GET_PARAM(2);
|
||||
|
||||
learningRate = GET_PARAM(3);
|
||||
|
||||
useRoi = GET_PARAM(4);
|
||||
}
|
||||
};
|
||||
|
||||
CUDA_TEST_P(MOG, Update)
|
||||
{
|
||||
cv::VideoCapture cap(inputFile);
|
||||
ASSERT_TRUE(cap.isOpened());
|
||||
|
||||
cv::Mat frame;
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG> mog = cv::cuda::createBackgroundSubtractorMOG();
|
||||
cv::cuda::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorMOG> mog_gold = cv::createBackgroundSubtractorMOG();
|
||||
cv::Mat foreground_gold;
|
||||
|
||||
for (int i = 0; i < 10; ++i)
|
||||
{
|
||||
cap >> frame;
|
||||
ASSERT_FALSE(frame.empty());
|
||||
|
||||
if (useGray)
|
||||
{
|
||||
cv::Mat temp;
|
||||
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
|
||||
cv::swap(temp, frame);
|
||||
}
|
||||
|
||||
mog->apply(loadMat(frame, useRoi), foreground, learningRate);
|
||||
|
||||
mog_gold->apply(frame, foreground_gold, learningRate);
|
||||
|
||||
ASSERT_MAT_NEAR(foreground_gold, foreground, 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, MOG, testing::Combine(
|
||||
ALL_DEVICES,
|
||||
testing::Values(std::string("768x576.avi")),
|
||||
testing::Values(UseGray(true), UseGray(false)),
|
||||
testing::Values(LearningRate(0.0), LearningRate(0.01)),
|
||||
WHOLE_SUBMAT));
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// MOG2
|
||||
|
||||
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
|
||||
|
||||
namespace
|
||||
{
|
||||
{
|
||||
IMPLEMENT_PARAM_CLASS(UseGray, bool)
|
||||
IMPLEMENT_PARAM_CLASS(DetectShadow, bool)
|
||||
}
|
||||
|
||||
@ -257,57 +180,4 @@ INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, MOG2, testing::Combine(
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// GMG
|
||||
|
||||
PARAM_TEST_CASE(GMG, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
|
||||
{
|
||||
};
|
||||
|
||||
CUDA_TEST_P(GMG, Accuracy)
|
||||
{
|
||||
const cv::cuda::DeviceInfo devInfo = GET_PARAM(0);
|
||||
cv::cuda::setDevice(devInfo.deviceID());
|
||||
const cv::Size size = GET_PARAM(1);
|
||||
const int depth = GET_PARAM(2);
|
||||
const int channels = GET_PARAM(3);
|
||||
const bool useRoi = GET_PARAM(4);
|
||||
|
||||
const int type = CV_MAKE_TYPE(depth, channels);
|
||||
|
||||
const cv::Mat zeros(size, CV_8UC1, cv::Scalar::all(0));
|
||||
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
|
||||
|
||||
cv::Mat frame = randomMat(size, type, 0, 100);
|
||||
cv::cuda::GpuMat d_frame = loadMat(frame, useRoi);
|
||||
|
||||
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::cuda::createBackgroundSubtractorGMG();
|
||||
gmg->setNumFrames(5);
|
||||
gmg->setSmoothingRadius(0);
|
||||
|
||||
cv::cuda::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
|
||||
|
||||
for (int i = 0; i < gmg->getNumFrames(); ++i)
|
||||
{
|
||||
gmg->apply(d_frame, d_fgmask);
|
||||
|
||||
// fgmask should be entirely background during training
|
||||
ASSERT_MAT_NEAR(zeros, d_fgmask, 0);
|
||||
}
|
||||
|
||||
frame = randomMat(size, type, 160, 255);
|
||||
d_frame = loadMat(frame, useRoi);
|
||||
gmg->apply(d_frame, d_fgmask);
|
||||
|
||||
// now fgmask should be entirely foreground
|
||||
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, GMG, testing::Combine(
|
||||
ALL_DEVICES,
|
||||
DIFFERENT_SIZES,
|
||||
testing::Values(MatType(CV_8U), MatType(CV_16U), MatType(CV_32F)),
|
||||
testing::Values(Channels(1), Channels(3), Channels(4)),
|
||||
WHOLE_SUBMAT));
|
||||
|
||||
#endif // HAVE_CUDA
|
||||
|
@ -192,7 +192,7 @@ void BOWImgDescriptorExtractor::compute( InputArray keypointDescriptors, OutputA
|
||||
|
||||
Mat imgDescriptor = _imgDescriptor.getMat();
|
||||
|
||||
float *dptr = (float*)imgDescriptor.data;
|
||||
float *dptr = imgDescriptor.ptr<float>();
|
||||
for( size_t i = 0; i < matches.size(); i++ )
|
||||
{
|
||||
int queryIdx = matches[i].queryIdx;
|
||||
|
@ -427,7 +427,7 @@ BRISK::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const fl
if (dx + dy > 2)
{
// now the calculation:
const uchar* ptr = image.data + x_left + imagecols * y_top;
const uchar* ptr = image.ptr() + x_left + imagecols * y_top;
// first the corners:
ret_val = A * int(*ptr);
ptr += dx + 1;
@ -438,7 +438,7 @@ BRISK::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const fl
ret_val += D * int(*ptr);

// next the edges:
int* ptr_integral = (int*) integral.data + x_left + integralcols * y_top + 1;
const int* ptr_integral = integral.ptr<int>() + x_left + integralcols * y_top + 1;
// find a simple path through the different surface corners
const int tmp1 = (*ptr_integral);
ptr_integral += dx;
@ -475,7 +475,7 @@ BRISK::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const fl
}

// now the calculation:
const uchar* ptr = image.data + x_left + imagecols * y_top;
const uchar* ptr = image.ptr() + x_left + imagecols * y_top;
// first row:
ret_val = A * int(*ptr);
ptr++;
@ -607,7 +607,7 @@ BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, s
int t2;

// the feature orientation
const uchar* ptr = descriptors.data;
const uchar* ptr = descriptors.ptr();
for (size_t k = 0; k < ksize; k++)
{
cv::KeyPoint& kp = keypoints[k];
@ -1070,7 +1070,7 @@ BriskScaleSpace::isMax2D(const int layer, const int x_layer, const int y_layer)
{
const cv::Mat& scores = pyramid_[layer].scores();
const int scorescols = scores.cols;
const uchar* data = scores.data + y_layer * scorescols + x_layer;
const uchar* data = scores.ptr() + y_layer * scorescols + x_layer;
// decision tree:
const uchar center = (*data);
data--;
@ -1154,11 +1154,11 @@ BriskScaleSpace::isMax2D(const int layer, const int x_layer, const int y_layer)
{
// in this case, we have to analyze the situation more carefully:
// the values are gaussian blurred and then we really decide
data = scores.data + y_layer * scorescols + x_layer;
data = scores.ptr() + y_layer * scorescols + x_layer;
int smoothedcenter = 4 * center + 2 * (s_10 + s10 + s0_1 + s01) + s_1_1 + s1_1 + s_11 + s11;
for (unsigned int i = 0; i < deltasize; i += 2)
{
data = scores.data + (y_layer - 1 + delta[i + 1]) * scorescols + x_layer + delta[i] - 1;
data = scores.ptr() + (y_layer - 1 + delta[i + 1]) * scorescols + x_layer + delta[i] - 1;
int othercenter = *data;
data++;
othercenter += 2 * (*data);
@ -2140,7 +2140,7 @@ BriskLayer::value(const cv::Mat& mat, float xf, float yf, float scale_in) const
const int r_y = (int)((yf - y) * 1024);
const int r_x_1 = (1024 - r_x);
const int r_y_1 = (1024 - r_y);
const uchar* ptr = image.data + x + y * imagecols;
const uchar* ptr = image.ptr() + x + y * imagecols;
// just interpolate:
ret_val = (r_x_1 * r_y_1 * int(*ptr));
ptr++;
@ -2186,7 +2186,7 @@ BriskLayer::value(const cv::Mat& mat, float xf, float yf, float scale_in) const
const int r_y1_i = (int)(r_y1 * scaling);

// now the calculation:
const uchar* ptr = image.data + x_left + imagecols * y_top;
const uchar* ptr = image.ptr() + x_left + imagecols * y_top;
// first row:
ret_val = A * int(*ptr);
ptr++;
@ -62,7 +62,7 @@ static void writeMatInBin( const Mat& mat, const string& filename )
fwrite( (void*)&type, sizeof(int), 1, f );
int dataSize = (int)(mat.step * mat.rows * mat.channels());
fwrite( (void*)&dataSize, sizeof(int), 1, f );
fwrite( (void*)mat.data, 1, dataSize, f );
fwrite( (void*)mat.ptr(), 1, dataSize, f );
fclose(f);
}
}
@ -470,8 +470,8 @@ void runKnnSearch_(void* index, const Mat& query, Mat& indices, Mat& dists,
CV_Assert(query.isContinuous() && indices.isContinuous() && dists.isContinuous());

::cvflann::Matrix<ElementType> _query((ElementType*)query.data, query.rows, query.cols);
::cvflann::Matrix<int> _indices((int*)indices.data, indices.rows, indices.cols);
::cvflann::Matrix<DistanceType> _dists((DistanceType*)dists.data, dists.rows, dists.cols);
::cvflann::Matrix<int> _indices(indices.ptr<int>(), indices.rows, indices.cols);
::cvflann::Matrix<DistanceType> _dists(dists.ptr<DistanceType>(), dists.rows, dists.cols);

((IndexType*)index)->knnSearch(_query, _indices, _dists, knn,
(const ::cvflann::SearchParams&)get_params(params));
@ -496,8 +496,8 @@ int runRadiusSearch_(void* index, const Mat& query, Mat& indices, Mat& dists,
CV_Assert(query.isContinuous() && indices.isContinuous() && dists.isContinuous());

::cvflann::Matrix<ElementType> _query((ElementType*)query.data, query.rows, query.cols);
::cvflann::Matrix<int> _indices((int*)indices.data, indices.rows, indices.cols);
::cvflann::Matrix<DistanceType> _dists((DistanceType*)dists.data, dists.rows, dists.cols);
::cvflann::Matrix<int> _indices(indices.ptr<int>(), indices.rows, indices.cols);
::cvflann::Matrix<DistanceType> _dists(dists.ptr<DistanceType>(), dists.rows, dists.cols);

return ((IndexType*)index)->radiusSearch(_query, _indices, _dists,
saturate_cast<float>(radius),
@ -43,6 +43,7 @@
#define __OPENCV_HIGHGUI_H__

#include "opencv2/core/core_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgcodecs/imgcodecs_c.h"
#include "opencv2/videoio/videoio_c.h"
@ -50,6 +50,11 @@ if(HAVE_OPENEXR)
list(APPEND GRFMT_LIBS ${OPENEXR_LIBRARIES})
endif()

if(HAVE_GDAL)
include_directories(SYSTEM ${GDAL_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${GDAL_LIBRARY})
endif()

file(GLOB grfmt_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/grfmt*.hpp)
file(GLOB grfmt_srcs ${CMAKE_CURRENT_LIST_DIR}/src/grfmt*.cpp)
list(APPEND grfmt_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/bitstrm.hpp)
@ -53,7 +53,8 @@ enum { IMREAD_UNCHANGED = -1, // 8bit, color or not
IMREAD_GRAYSCALE = 0, // 8bit, gray
IMREAD_COLOR = 1, // ?, color
IMREAD_ANYDEPTH = 2, // any depth, ?
IMREAD_ANYCOLOR = 4 // ?, any color
IMREAD_ANYCOLOR = 4, // ?, any color
IMREAD_LOAD_GDAL = 8 // Use gdal driver
};

enum { IMWRITE_JPEG_QUALITY = 1,
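The new IMREAD_LOAD_GDAL flag routes imread() through the GDAL decoder added below. A hedged usage sketch (the file name is hypothetical; any GDAL-readable raster would do):

#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    // IMREAD_LOAD_GDAL selects the GDAL driver; IMREAD_ANYDEPTH keeps the
    // native depth (e.g. 16-bit DTED elevations) instead of forcing 8-bit.
    cv::Mat dem = cv::imread("elevation.dt2",
                             cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH);
    if( dem.empty() )
        std::cerr << "GDAL could not open the file" << std::endl;
    return 0;
}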
@ -183,7 +183,7 @@ bool BmpDecoder::readHeader()

bool BmpDecoder::readData( Mat& img )
{
uchar* data = img.data;
uchar* data = img.ptr();
int step = (int)img.step;
bool color = img.channels() > 1;
uchar gray_palette[256];
@ -553,7 +553,7 @@ bool BmpEncoder::write( const Mat& img, const std::vector<int>& )
width *= channels;
for( int y = height - 1; y >= 0; y-- )
{
strm.putBytes( img.data + img.step*y, width );
strm.putBytes( img.ptr(y), width );
if( fileStep > width )
strm.putBytes( zeropad, fileStep - width );
}
@ -187,7 +187,7 @@ bool ExrDecoder::readData( Mat& img )
m_native_depth = CV_MAT_DEPTH(type()) == img.depth();
bool color = img.channels() > 1;

uchar* data = img.data;
uchar* data = img.ptr();
int step = img.step;
bool justcopy = m_native_depth;
bool chromatorgb = false;
@ -583,8 +583,7 @@ bool ExrEncoder::write( const Mat& img, const std::vector<int>& )
bool issigned = depth == CV_8S || depth == CV_16S || depth == CV_32S;
bool isfloat = depth == CV_32F || depth == CV_64F;
depth = CV_ELEM_SIZE1(depth)*8;
uchar* data = img.data;
int step = img.step;
const int step = img.step;

Header header( width, height );
Imf::PixelType type;
@ -618,7 +617,7 @@ bool ExrEncoder::write( const Mat& img, const std::vector<int>& )
int size;
if( type == FLOAT && depth == 32 )
{
buffer = (char *)const_cast<uchar *>(data);
buffer = (char *)const_cast<uchar *>(img.ptr());
bufferstep = step;
size = 4;
}
@ -674,18 +673,19 @@ bool ExrEncoder::write( const Mat& img, const std::vector<int>& )

if( depth <= 8 )
{
const uchar* sd = img.ptr(line);
for(int i = 0; i < width * channels; i++)
buf[i] = data[i] + offset;
buf[i] = sd[i] + offset;
}
else if( depth <= 16 )
{
unsigned short *sd = (unsigned short *)data;
const unsigned short *sd = img.ptr<unsigned short>(line);
for(int i = 0; i < width * channels; i++)
buf[i] = sd[i] + offset;
}
else
{
int *sd = (int *)data; // FIXME 64-bit problems
const int *sd = img.ptr<int>(line); // FIXME 64-bit problems
for(int i = 0; i < width * channels; i++)
buf[i] = (unsigned) sd[i] + offset;
}
@ -696,12 +696,13 @@ bool ExrEncoder::write( const Mat& img, const std::vector<int>& )

if( depth <= 8 )
{
const uchar* sd = img.ptr(line);
for(int i = 0; i < width * channels; i++)
buf[i] = data[i];
buf[i] = sd[i];
}
else if( depth <= 16 )
{
unsigned short *sd = (unsigned short *)data;
const unsigned short *sd = img.ptr<unsigned short>(line);
for(int i = 0; i < width * channels; i++)
buf[i] = sd[i];
}
@ -715,7 +716,6 @@ bool ExrEncoder::write( const Mat& img, const std::vector<int>& )
result = false;
break;
}
data += step;
}
delete[] buffer;
}
560 modules/imgcodecs/src/grfmt_gdal.cpp Normal file
@ -0,0 +1,560 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "grfmt_gdal.hpp"

#ifdef HAVE_GDAL

/// C++ Standard Libraries
#include <iostream>
#include <stdexcept>
#include <string>

namespace cv{

/**
* Convert GDAL Palette Interpretation to OpenCV Pixel Type
*/
int gdalPaletteInterpretation2OpenCV( GDALPaletteInterp const& paletteInterp, GDALDataType const& gdalType ){

switch( paletteInterp ){

/// GRAYSCALE
case GPI_Gray:
if( gdalType == GDT_Byte ){ return CV_8UC1; }
if( gdalType == GDT_UInt16 ){ return CV_16UC1; }
if( gdalType == GDT_Int16 ){ return CV_16SC1; }
if( gdalType == GDT_UInt32 ){ return CV_32SC1; }
if( gdalType == GDT_Int32 ){ return CV_32SC1; }
if( gdalType == GDT_Float32 ){ return CV_32FC1; }
if( gdalType == GDT_Float64 ){ return CV_64FC1; }
return -1;

/// RGB
case GPI_RGB:
if( gdalType == GDT_Byte ){ return CV_8UC3; }
if( gdalType == GDT_UInt16 ){ return CV_16UC3; }
if( gdalType == GDT_Int16 ){ return CV_16SC3; }
if( gdalType == GDT_UInt32 ){ return CV_32SC3; }
if( gdalType == GDT_Int32 ){ return CV_32SC3; }
if( gdalType == GDT_Float32 ){ return CV_32FC3; }
if( gdalType == GDT_Float64 ){ return CV_64FC3; }
return -1;

/// otherwise
default:
return -1;

}
}

/**
* Convert gdal type to opencv type
*/
int gdal2opencv( const GDALDataType& gdalType, const int& channels ){

switch( gdalType ){

/// UInt8
case GDT_Byte:
if( channels == 1 ){ return CV_8UC1; }
if( channels == 3 ){ return CV_8UC3; }
if( channels == 4 ){ return CV_8UC4; }
return -1;

/// UInt16
case GDT_UInt16:
if( channels == 1 ){ return CV_16UC1; }
if( channels == 3 ){ return CV_16UC3; }
if( channels == 4 ){ return CV_16UC4; }
return -1;

/// Int16
case GDT_Int16:
if( channels == 1 ){ return CV_16SC1; }
if( channels == 3 ){ return CV_16SC3; }
if( channels == 4 ){ return CV_16SC4; }
return -1;

/// UInt32
case GDT_UInt32:
case GDT_Int32:
if( channels == 1 ){ return CV_32SC1; }
if( channels == 3 ){ return CV_32SC3; }
if( channels == 4 ){ return CV_32SC4; }
return -1;

default:
std::cout << "Unknown GDAL Data Type" << std::endl;
std::cout << "Type: " << GDALGetDataTypeName(gdalType) << std::endl;
return -1;
}

return -1;
}

std::string GetOpenCVTypeName( const int& type ){

switch(type){
case CV_8UC1:
return "CV_8UC1";
case CV_8UC3:
return "CV_8UC3";
case CV_8UC4:
return "CV_8UC4";
case CV_16UC1:
return "CV_16UC1";
case CV_16UC3:
return "CV_16UC3";
case CV_16UC4:
return "CV_16UC4";
case CV_16SC1:
return "CV_16SC1";
case CV_16SC3:
return "CV_16SC3";
case CV_16SC4:
return "CV_16SC4";
default:
return "Unknown";
}
return "Unknown";
}

/**
* GDAL Decoder Constructor
*/
GdalDecoder::GdalDecoder(){

// set a dummy signature
m_signature="0";
for( size_t i=0; i<160; i++ ){
m_signature += "0";
}

/// Register the driver
GDALAllRegister();

m_driver = NULL;
m_dataset = NULL;
}

/**
* GDAL Decoder Destructor
*/
GdalDecoder::~GdalDecoder(){

if( m_dataset != NULL ){
close();
}
}

/**
* Convert data range
*/
double range_cast( const GDALDataType& gdalType, const int& cvDepth, const double& value ){

// uint8 -> uint8
if( gdalType == GDT_Byte && cvDepth == CV_8U ){
return value;
}
// uint8 -> uint16
if( gdalType == GDT_Byte && (cvDepth == CV_16U || cvDepth == CV_16S)){
return (value*256);
}

// uint8 -> uint32
if( gdalType == GDT_Byte && (cvDepth == CV_32F || cvDepth == CV_32S)){
return (value*16777216);
}

// int16 -> uint8
if( (gdalType == GDT_UInt16 || gdalType == GDT_Int16) && cvDepth == CV_8U ){
return std::floor(value/256.0);
}

// int16 -> int16
if( (gdalType == GDT_UInt16 || gdalType == GDT_Int16) &&
( cvDepth == CV_16U || cvDepth == CV_16S )){
return value;
}

std::cout << GDALGetDataTypeName( gdalType ) << std::endl;
std::cout << "warning: unknown range cast requested." << std::endl;
return (value);
}
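A quick sanity check of the scale factors used by range_cast() above (an aside, not part of the new file): an 8-bit value promoted to a 16-bit depth is multiplied by 256, and a 16-bit value demoted to 8 bits is divided by 256 and floored.

#include <cassert>
#include <cmath>

int main()
{
    // 8-bit 255 promoted to 16 bits: 255 * 256 = 65280
    assert( 255.0 * 256.0 == 65280.0 );
    // 16-bit 65535 demoted to 8 bits: floor(65535 / 256) = 255
    assert( std::floor(65535.0 / 256.0) == 255.0 );
    return 0;
}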
/**
* There are some better mpl techniques for doing this.
*/
void write_pixel( const double& pixelValue,
const GDALDataType& gdalType,
const int& gdalChannels,
Mat& image,
const int& row,
const int& col,
const int& channel ){

// convert the pixel
double newValue = range_cast(gdalType, image.depth(), pixelValue );

// input: 1 channel, output: 1 channel
if( gdalChannels == 1 && image.channels() == 1 ){
if( image.depth() == CV_8U ){ image.at<uchar>(row,col) = newValue; }
else if( image.depth() == CV_16U ){ image.at<unsigned short>(row,col) = newValue; }
else if( image.depth() == CV_16S ){ image.at<short>(row,col) = newValue; }
else if( image.depth() == CV_32S ){ image.at<int>(row,col) = newValue; }
else if( image.depth() == CV_32F ){ image.at<float>(row,col) = newValue; }
else if( image.depth() == CV_64F ){ image.at<double>(row,col) = newValue; }
else{ throw std::runtime_error("Unknown image depth, gdal: 1, img: 1"); }
}

// input: 1 channel, output: 3 channel
else if( gdalChannels == 1 && image.channels() == 3 ){
if( image.depth() == CV_8U ){ image.at<Vec3b>(row,col) = Vec3b(newValue,newValue,newValue); }
else if( image.depth() == CV_16U ){ image.at<Vec3s>(row,col) = Vec3s(newValue,newValue,newValue); }
else if( image.depth() == CV_16S ){ image.at<Vec3s>(row,col) = Vec3s(newValue,newValue,newValue); }
else if( image.depth() == CV_32S ){ image.at<Vec3i>(row,col) = Vec3i(newValue,newValue,newValue); }
else if( image.depth() == CV_32F ){ image.at<Vec3f>(row,col) = Vec3f(newValue,newValue,newValue); }
else if( image.depth() == CV_64F ){ image.at<Vec3d>(row,col) = Vec3d(newValue,newValue,newValue); }
else{ throw std::runtime_error("Unknown image depth, gdal:1, img: 3"); }
}

// input: 3 channel, output: 1 channel
else if( gdalChannels == 3 && image.channels() == 1 ){
if( image.depth() == CV_8U ){ image.at<uchar>(row,col) += (newValue/3.0); }
else{ throw std::runtime_error("Unknown image depth, gdal:3, img: 1"); }
}

// input: 4 channel, output: 1 channel
else if( gdalChannels == 4 && image.channels() == 1 ){
if( image.depth() == CV_8U ){ image.at<uchar>(row,col) = newValue; }
else{ throw std::runtime_error("Unknown image depth, gdal: 4, image: 1"); }
}

// input: 3 channel, output: 3 channel
else if( gdalChannels == 3 && image.channels() == 3 ){
if( image.depth() == CV_8U ){ image.at<Vec3b>(row,col)[channel] = newValue; }
else if( image.depth() == CV_16U ){ image.at<Vec3s>(row,col)[channel] = newValue; }
else if( image.depth() == CV_16S ){ image.at<Vec3s>(row,col)[channel] = newValue; }
else if( image.depth() == CV_32S ){ image.at<Vec3i>(row,col)[channel] = newValue; }
else if( image.depth() == CV_32F ){ image.at<Vec3f>(row,col)[channel] = newValue; }
else if( image.depth() == CV_64F ){ image.at<Vec3d>(row,col)[channel] = newValue; }
else{ throw std::runtime_error("Unknown image depth, gdal: 3, image: 3"); }
}

// input: 4 channel, output: 3 channel
else if( gdalChannels == 4 && image.channels() == 3 ){
if( channel >= 4 ){ return; }
else if( image.depth() == CV_8U && channel < 4 ){ image.at<Vec3b>(row,col)[channel] = newValue; }
else if( image.depth() == CV_16U && channel < 4 ){ image.at<Vec3s>(row,col)[channel] = newValue; }
else if( image.depth() == CV_16S && channel < 4 ){ image.at<Vec3s>(row,col)[channel] = newValue; }
else if( image.depth() == CV_32S && channel < 4 ){ image.at<Vec3i>(row,col)[channel] = newValue; }
else if( image.depth() == CV_32F && channel < 4 ){ image.at<Vec3f>(row,col)[channel] = newValue; }
else if( image.depth() == CV_64F && channel < 4 ){ image.at<Vec3d>(row,col)[channel] = newValue; }
else{ throw std::runtime_error("Unknown image depth, gdal: 4, image: 3"); }
}

// input: 4 channel, output: 4 channel
else if( gdalChannels == 4 && image.channels() == 4 ){
if( image.depth() == CV_8U ){ image.at<Vec4b>(row,col)[channel] = newValue; }
else{ throw std::runtime_error("Unknown image depth, gdal: 4, image: 4"); }
}

// otherwise, throw an error
else{
throw std::runtime_error("error: can't convert types.");
}

}

void write_ctable_pixel( const double& pixelValue,
const GDALDataType& gdalType,
GDALColorTable const* gdalColorTable,
Mat& image,
const int& y,
const int& x,
const int& c ){

if( gdalColorTable == NULL ){
write_pixel( pixelValue, gdalType, 1, image, y, x, c );
return;
}

// if we are Grayscale, then do a straight conversion
if( gdalColorTable->GetPaletteInterpretation() == GPI_Gray ){
write_pixel( pixelValue, gdalType, 1, image, y, x, c );
}

// if we are rgb, then convert here
else if( gdalColorTable->GetPaletteInterpretation() == GPI_RGB ){

// get the pixel
short r = gdalColorTable->GetColorEntry( (int)pixelValue )->c1;
short g = gdalColorTable->GetColorEntry( (int)pixelValue )->c2;
short b = gdalColorTable->GetColorEntry( (int)pixelValue )->c3;
short a = gdalColorTable->GetColorEntry( (int)pixelValue )->c4;

write_pixel( r, gdalType, 4, image, y, x, 2 );
write_pixel( g, gdalType, 4, image, y, x, 1 );
write_pixel( b, gdalType, 4, image, y, x, 0 );
if( image.channels() > 3 ){
write_pixel( a, gdalType, 4, image, y, x, 3 );
}
}

// otherwise, set zeros
else{
write_pixel( pixelValue, gdalType, 1, image, y, x, c );
}
}

/**
* read data
*/
bool GdalDecoder::readData( Mat& img ){

// make sure the image is the proper size
if( img.size().height != m_height ){
return false;
}
if( img.size().width != m_width ){
return false;
}

// make sure the raster is alive
if( m_dataset == NULL || m_driver == NULL ){
return false;
}

// set the image to zero
img = 0;

// iterate over each raster band
// note that OpenCV does bgr rather than rgb
int nChannels = m_dataset->GetRasterCount();
GDALColorTable* gdalColorTable = NULL;
if( m_dataset->GetRasterBand(1)->GetColorTable() != NULL ){
gdalColorTable = m_dataset->GetRasterBand(1)->GetColorTable();
}

const GDALDataType gdalType = m_dataset->GetRasterBand(1)->GetRasterDataType();
int nRows, nCols;

if( nChannels > img.channels() ){
nChannels = img.channels();
}

for( int c = 0; c<nChannels; c++ ){

// get the GDAL Band
GDALRasterBand* band = m_dataset->GetRasterBand(c+1);

// make sure the image band has the same dimensions as the image
if( band->GetXSize() != m_width || band->GetYSize() != m_height ){ return false; }

// grab the raster size
nRows = band->GetYSize();
nCols = band->GetXSize();

// create a temporary scanline pointer to store data
double* scanline = new double[nCols];

// iterate over each row and column
for( int y=0; y<nRows; y++ ){

// get the entire row
band->RasterIO( GF_Read, 0, y, nCols, 1, scanline, nCols, 1, GDT_Float64, 0, 0);

// set inside the image
for( int x=0; x<nCols; x++ ){

// set depending on image types
// given boost, I would use enable_if to speed up. Avoid for now.
if( hasColorTable == false ){
write_pixel( scanline[x], gdalType, nChannels, img, y, x, c );
}
else{
write_ctable_pixel( scanline[x], gdalType, gdalColorTable, img, y, x, c );
}
}
}

// delete our temp pointer
delete [] scanline;

}

return true;
}

/**
* Read image header
*/
bool GdalDecoder::readHeader(){

// load the dataset
m_dataset = (GDALDataset*) GDALOpen( m_filename.c_str(), GA_ReadOnly);

// if dataset is null, then there was a problem
if( m_dataset == NULL ){
return false;
}

// make sure we have pixel data inside the raster
if( m_dataset->GetRasterCount() <= 0 ){
return false;
}

// extract the driver information
m_driver = m_dataset->GetDriver();

// if the driver failed, then exit
if( m_driver == NULL ){
return false;
}

// get the image dimensions
m_width = m_dataset->GetRasterXSize();
m_height= m_dataset->GetRasterYSize();

// make sure we have at least one band/channel
if( m_dataset->GetRasterCount() <= 0 ){
return false;
}

// check if we have a color palette
int tempType;
if( m_dataset->GetRasterBand(1)->GetColorInterpretation() == GCI_PaletteIndex ){

// remember that we have a color palette
hasColorTable = true;

// if the color table does not exist, then we failed
if( m_dataset->GetRasterBand(1)->GetColorTable() == NULL ){
return false;
}

// otherwise, get the pixeltype
else{
// convert the palette interpretation to opencv type
tempType = gdalPaletteInterpretation2OpenCV( m_dataset->GetRasterBand(1)->GetColorTable()->GetPaletteInterpretation(),
m_dataset->GetRasterBand(1)->GetRasterDataType() );

if( tempType == -1 ){
return false;
}
m_type = tempType;
}

}

// otherwise, we have standard channels
else{

// remember that we don't have a color table
hasColorTable = false;

// convert the datatype to opencv
tempType = gdal2opencv( m_dataset->GetRasterBand(1)->GetRasterDataType(), m_dataset->GetRasterCount() );
if( tempType == -1 ){
return false;
}
m_type = tempType;
}

return true;
}

/**
* Close the module
*/
void GdalDecoder::close(){

GDALClose((GDALDatasetH)m_dataset);
m_dataset = NULL;
m_driver = NULL;
}

/**
* Create a new decoder
*/
ImageDecoder GdalDecoder::newDecoder()const{
return makePtr<GdalDecoder>();
}

/**
* Test the file signature
*/
bool GdalDecoder::checkSignature( const String& signature )const{

// look for NITF
std::string str = signature.c_str();
if( str.substr(0,4).find("NITF") != std::string::npos ){
return true;
}

// look for DTED
if( str.substr(140,4) == "DTED" ){
return true;
}

return false;
}

} /// End of cv Namespace

#endif /**< End of HAVE_GDAL Definition */
160 modules/imgcodecs/src/grfmt_gdal.hpp Normal file
@ -0,0 +1,160 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __GRFMT_GDAL_HPP__
#define __GRFMT_GDAL_HPP__

/// Macro to make sure we specified GDAL in CMake
#ifdef HAVE_GDAL

/// C++ Libraries
#include <iostream>

/// OpenCV Libraries
#include "grfmt_base.hpp"
#include "precomp.hpp"

/// Geospatial Data Abstraction Library
#include <gdal/cpl_conv.h>
#include <gdal/gdal_priv.h>
#include <gdal/gdal.h>

/// Start of CV Namespace
namespace cv {

/**
* Convert GDAL Palette Interpretation to OpenCV Pixel Type
*/
int gdalPaletteInterpretation2OpenCV( GDALPaletteInterp const& paletteInterp,
GDALDataType const& gdalType );

/**
* Convert a GDAL Raster Type to OpenCV Type
*/
int gdal2opencv( const GDALDataType& gdalType, const int& channels );

/**
* Write a pixel to the image
*/
void write_pixel( const double& pixelValue,
GDALDataType const& gdalType,
const int& gdalChannels,
Mat& image,
const int& row,
const int& col,
const int& channel );

/**
* Write a color table pixel to the image
*/
void write_ctable_pixel( const double& pixelValue,
const GDALDataType& gdalType,
const GDALColorTable* gdalColorTable,
Mat& image,
const int& y,
const int& x,
const int& c );

/**
* Loader for GDAL
*/
class GdalDecoder : public BaseImageDecoder{

public:

/**
* Default Constructor
*/
GdalDecoder();

/**
* Destructor
*/
~GdalDecoder();

/**
* Read image data
*/
bool readData( Mat& img );

/**
* Read the image header
*/
bool readHeader();

/**
* Close the module
*/
void close();

/**
* Create a new decoder
*/
ImageDecoder newDecoder() const;

/**
* Test the file signature
*
* In general, this should be avoided as the user should specifically request GDAL.
* The reason is that GDAL tends to overlap with other image formats and it is probably
* safer to use other formats first.
*/
virtual bool checkSignature( const String& signature ) const;

protected:

/// GDAL Dataset
GDALDataset* m_dataset;

/// GDAL Driver
GDALDriver* m_driver;

/// Check if we are reading from a color table
bool hasColorTable;

}; /// End of GdalDecoder Class

} /// End of Namespace cv

#endif/*HAVE_GDAL*/

#endif/*__GRFMT_GDAL_HPP__*/
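For orientation, a simplified sketch (an assumption, not the actual loadsave.cpp code) of how the imread() machinery drives a BaseImageDecoder implementation such as GdalDecoder from inside the imgcodecs module:

#include "grfmt_gdal.hpp"   // internal header, only visible inside imgcodecs

#ifdef HAVE_GDAL
static cv::Mat decodeWithGdal( const cv::String& filename )
{
    cv::GdalDecoder decoder;
    decoder.setSource( filename );            // BaseImageDecoder API
    if( !decoder.readHeader() )
        return cv::Mat();
    // readHeader() fills m_width/m_height/m_type, exposed via accessors
    cv::Mat img( decoder.height(), decoder.width(), decoder.type() );
    if( !decoder.readData( img ) )            // copies the raster band by band
        return cv::Mat();
    return img;
}
#endif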
@ -228,7 +228,7 @@ bool JpegDecoder::readHeader()
if( !m_buf.empty() )
{
jpeg_buffer_src(&state->cinfo, &state->source);
state->source.pub.next_input_byte = m_buf.data;
state->source.pub.next_input_byte = m_buf.ptr();
state->source.pub.bytes_in_buffer = m_buf.cols*m_buf.rows*m_buf.elemSize();
}
else
@ -449,7 +449,7 @@ bool JpegDecoder::readData( Mat& img )
buffer = (*cinfo->mem->alloc_sarray)((j_common_ptr)cinfo,
JPOOL_IMAGE, m_width*4, 1 );

uchar* data = img.data;
uchar* data = img.ptr();
for( ; m_height--; data += step )
{
jpeg_read_scanlines( cinfo, buffer, 1 );
@ -134,7 +134,7 @@ void PngDecoder::readDataFromBuf( void* _png_ptr, uchar* dst, size_t size )
png_error(png_ptr, "PNG input buffer is incomplete");
return;
}
memcpy( dst, &decoder->m_buf.data[decoder->m_buf_pos], size );
memcpy( dst, decoder->m_buf.ptr() + decoder->m_buf_pos, size );
decoder->m_buf_pos += size;
}

@ -228,7 +228,7 @@ bool PngDecoder::readData( Mat& img )
AutoBuffer<uchar*> _buffer(m_height);
uchar** buffer = _buffer;
int color = img.channels() > 1;
uchar* data = img.data;
uchar* data = img.ptr();
int step = (int)img.step;

if( m_png_ptr && m_info_ptr && m_end_info && m_width && m_height )
@ -189,7 +189,7 @@ bool PxMDecoder::readHeader()
bool PxMDecoder::readData( Mat& img )
{
int color = img.channels() > 1;
uchar* data = img.data;
uchar* data = img.ptr();
int step = (int)img.step;
PaletteEntry palette[256];
bool result = false;
@ -418,16 +418,16 @@ bool PxMEncoder::write( const Mat& img, const std::vector<int>& params )

for( y = 0; y < height; y++ )
{
uchar* data = img.data + img.step*y;
const uchar* const data = img.ptr(y);
if( isBinary )
{
if( _channels == 3 )
{
if( depth == 8 )
icvCvt_BGR2RGB_8u_C3R( (uchar*)data, 0,
icvCvt_BGR2RGB_8u_C3R( (const uchar*)data, 0,
(uchar*)buffer, 0, cvSize(width,1) );
else
icvCvt_BGR2RGB_16u_C3R( (ushort*)data, 0,
icvCvt_BGR2RGB_16u_C3R( (const ushort*)data, 0,
(ushort*)buffer, 0, cvSize(width,1) );
}

@ -443,7 +443,7 @@ bool PxMEncoder::write( const Mat& img, const std::vector<int>& params )
buffer[x + 1] = v;
}
}
strm.putBytes( (channels > 1 || depth > 8) ? buffer : (char*)data, fileStep );
strm.putBytes( (channels > 1 || depth > 8) ? buffer : (const char*)data, fileStep );
}
else
{
@ -469,11 +469,11 @@ bool PxMEncoder::write( const Mat& img, const std::vector<int>& params )
{
for( x = 0; x < width*channels; x += channels )
{
sprintf( ptr, "% 6d", ((ushort *)data)[x + 2] );
sprintf( ptr, "% 6d", ((const ushort *)data)[x + 2] );
ptr += 6;
sprintf( ptr, "% 6d", ((ushort *)data)[x + 1] );
sprintf( ptr, "% 6d", ((const ushort *)data)[x + 1] );
ptr += 6;
sprintf( ptr, "% 6d", ((ushort *)data)[x] );
sprintf( ptr, "% 6d", ((const ushort *)data)[x] );
ptr += 6;
*ptr++ = ' ';
*ptr++ = ' ';
@ -494,7 +494,7 @@ bool PxMEncoder::write( const Mat& img, const std::vector<int>& params )
{
for( x = 0; x < width; x++ )
{
sprintf( ptr, "% 6d", ((ushort *)data)[x] );
sprintf( ptr, "% 6d", ((const ushort *)data)[x] );
ptr += 6;
}
}
@ -155,7 +155,7 @@ bool SunRasterDecoder::readHeader()
bool SunRasterDecoder::readData( Mat& img )
{
int color = img.channels() > 1;
uchar* data = img.data;
uchar* data = img.ptr();
int step = (int)img.step;
uchar gray_palette[256];
bool result = false;
@ -414,7 +414,7 @@ bool SunRasterEncoder::write( const Mat& img, const std::vector<int>& )
strm.putDWord( 0 );

for( y = 0; y < height; y++ )
strm.putBytes( img.data + img.step*y, fileStep );
strm.putBytes( img.ptr(y), fileStep );

strm.close();
result = true;
@ -190,7 +190,7 @@ bool TiffDecoder::readData( Mat& img )
}
bool result = false;
bool color = img.channels() > 1;
uchar* data = img.data;
uchar* data = img.ptr();

if( img.depth() != CV_8U && img.depth() != CV_16U && img.depth() != CV_32F && img.depth() != CV_64F )
return false;
@ -587,25 +587,25 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const std::vector<int>& params)
{
case 1:
{
memcpy(buffer, img.data + img.step * y, scanlineSize);
memcpy(buffer, img.ptr(y), scanlineSize);
break;
}

case 3:
{
if (depth == CV_8U)
icvCvt_BGR2RGB_8u_C3R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
icvCvt_BGR2RGB_8u_C3R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
icvCvt_BGR2RGB_16u_C3R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
icvCvt_BGR2RGB_16u_C3R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
break;
}

case 4:
{
if (depth == CV_8U)
icvCvt_BGRA2RGBA_8u_C4R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
icvCvt_BGRA2RGBA_8u_C4R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
icvCvt_BGRA2RGBA_16u_C4R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
icvCvt_BGRA2RGBA_16u_C4R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
break;
}

@ -742,22 +742,22 @@ bool TiffEncoder::write( const Mat& img, const std::vector<int>& /*params*/)
if( channels == 3 )
{
if (depth == CV_8U)
icvCvt_BGR2RGB_8u_C3R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
icvCvt_BGR2RGB_8u_C3R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
icvCvt_BGR2RGB_16u_C3R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
icvCvt_BGR2RGB_16u_C3R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
}
else
{
if( channels == 4 )
{
if (depth == CV_8U)
icvCvt_BGRA2RGBA_8u_C4R( img.data + img.step*y, 0, buffer, 0, cvSize(width,1) );
icvCvt_BGRA2RGBA_8u_C4R( img.ptr(y), 0, buffer, 0, cvSize(width,1) );
else
icvCvt_BGRA2RGBA_16u_C4R( (const ushort*)(img.data + img.step*y), 0, (ushort*)buffer, 0, cvSize(width,1) );
icvCvt_BGRA2RGBA_16u_C4R( img.ptr<ushort>(y), 0, (ushort*)buffer, 0, cvSize(width,1) );
}
}

strm.putBytes( channels > 1 ? buffer : img.data + img.step*y, fileStep );
strm.putBytes( channels > 1 ? buffer : img.ptr(y), fileStep );
}

stripCounts[i] = (short)(strm.getPos() - stripOffsets[i]);
@ -118,7 +118,7 @@ bool WebPDecoder::readHeader()

data.create(1, wfile_size, CV_8U);

size_t data_size = fread(data.data, 1, wfile_size, wfile);
size_t data_size = fread(data.ptr(), 1, wfile_size, wfile);

if(wfile)
{
@ -136,7 +136,7 @@ bool WebPDecoder::readHeader()
}

WebPBitstreamFeatures features;
if(VP8_STATUS_OK == WebPGetFeatures(data.data, WEBP_HEADER_SIZE, &features))
if(VP8_STATUS_OK == WebPGetFeatures(data.ptr(), WEBP_HEADER_SIZE, &features))
{
m_width = features.width;
m_height = features.height;
@ -167,18 +167,18 @@ bool WebPDecoder::readData(Mat &img)
img.create(m_height, m_width, m_type);
}

uchar* out_data = img.data;
uchar* out_data = img.ptr();
size_t out_data_size = img.cols * img.rows * img.elemSize();

uchar *res_ptr = 0;
if (channels == 3)
{
res_ptr = WebPDecodeBGRInto(data.data, data.total(), out_data,
res_ptr = WebPDecodeBGRInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)img.step);
}
else if (channels == 4)
{
res_ptr = WebPDecodeBGRAInto(data.data, data.total(), out_data,
res_ptr = WebPDecodeBGRAInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)img.step);
}

@ -255,22 +255,22 @@ bool WebPEncoder::write(const Mat& img, const std::vector<int>& params)
{
if(channels == 3)
{
size = WebPEncodeLosslessBGR(image->data, width, height, (int)image->step, &out);
size = WebPEncodeLosslessBGR(image->ptr(), width, height, (int)image->step, &out);
}
else if(channels == 4)
{
size = WebPEncodeLosslessBGRA(image->data, width, height, (int)image->step, &out);
size = WebPEncodeLosslessBGRA(image->ptr(), width, height, (int)image->step, &out);
}
}
else
{
if(channels == 3)
{
size = WebPEncodeBGR(image->data, width, height, (int)image->step, quality, &out);
size = WebPEncodeBGR(image->ptr(), width, height, (int)image->step, quality, &out);
}
else if(channels == 4)
{
size = WebPEncodeBGRA(image->data, width, height, (int)image->step, quality, &out);
size = WebPEncodeBGRA(image->ptr(), width, height, (int)image->step, quality, &out);
}
}
Some files were not shown because too many files have changed in this diff.