Merge remote-tracking branch 'upstream/master'

commit 572e4f5255
3rdparty/jinja2/markupsafe/__init__.py (vendored) | 4 changed lines
@@ -9,7 +9,7 @@
     :license: BSD, see LICENSE for more details.
 """
 import re
-from _compat import text_type, string_types, int_types, \
+from ._compat import text_type, string_types, int_types, \
      unichr, PY2


@@ -227,7 +227,7 @@ class _MarkupEscapeHelper(object):
 try:
     from _speedups import escape, escape_silent, soft_unicode
 except ImportError:
-    from _native import escape, escape_silent, soft_unicode
+    from ._native import escape, escape_silent, soft_unicode

 if not PY2:
     soft_str = soft_unicode
3rdparty/jinja2/markupsafe/_native.py (vendored) | 2 changed lines
@@ -8,7 +8,7 @@
     :copyright: (c) 2010 by Armin Ronacher.
     :license: BSD, see LICENSE for more details.
 """
-from _compat import text_type
+from ._compat import text_type


 def escape(s):
3rdparty/jinja2/utils.py (vendored) | 2 changed lines
@@ -517,4 +517,4 @@ class Joiner(object):


 # Imported here because that's where it was in the past
-from markupsafe import Markup, escape, soft_unicode
+from .markupsafe import Markup, escape, soft_unicode
@@ -213,7 +213,7 @@ foreach(__opttype OPT DBG)
   SET(OpenCV_EXTRA_LIBS_${__opttype} "")

   # CUDA
-  if(OpenCV_CUDA_VERSION AND (CMAKE_CROSSCOMPILING OR (WIN32 AND NOT OpenCV_SHARED)))
+  if(OpenCV_CUDA_VERSION)
     if(NOT CUDA_FOUND)
       find_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
     else()
@@ -222,32 +222,41 @@ foreach(__opttype OPT DBG)
       endif()
     endif()

-    list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_LIBRARIES})
+    set(OpenCV_CUDA_LIBS_ABSPATH ${CUDA_LIBRARIES})

     if(${CUDA_VERSION} VERSION_LESS "5.5")
-      list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_npp_LIBRARY})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_npp_LIBRARY})
     else()
       find_cuda_helper_libs(nppc)
       find_cuda_helper_libs(nppi)
       find_cuda_helper_libs(npps)
-      list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
     endif()

     if(OpenCV_USE_CUBLAS)
-      list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUBLAS_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUBLAS_LIBRARIES})
     endif()

     if(OpenCV_USE_CUFFT)
-      list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUFFT_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUFFT_LIBRARIES})
     endif()

     if(OpenCV_USE_NVCUVID)
-      list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvid_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvid_LIBRARIES})
     endif()

     if(WIN32)
-      list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvenc_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvenc_LIBRARIES})
     endif()

+    set(OpenCV_CUDA_LIBS_RELPATH "")
+    foreach(l ${OpenCV_CUDA_LIBS_ABSPATH})
+      get_filename_component(_tmp ${l} PATH)
+      list(APPEND OpenCV_CUDA_LIBS_RELPATH ${_tmp})
+    endforeach()
+
+    list(REMOVE_DUPLICATES OpenCV_CUDA_LIBS_RELPATH)
+    link_directories(${OpenCV_CUDA_LIBS_RELPATH})
   endif()
 endforeach()
doc/conf.py | 94 changed lines
@@ -304,11 +304,11 @@ extlinks = {
|
||||
'oldbasicstructures' : ('http://docs.opencv.org/modules/core/doc/old_basic_structures.html#%s', None),
|
||||
'readwriteimagevideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
|
||||
'operationsonarrays' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#%s', None),
|
||||
'utilitysystemfunctions':('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None),
|
||||
'imgprocfilter':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
|
||||
'svms':('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None),
|
||||
'drawingfunc':('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None),
|
||||
'xmlymlpers':('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None),
|
||||
'utilitysystemfunctions' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None),
|
||||
'imgprocfilter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
|
||||
'svms' : ('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None),
|
||||
'drawingfunc' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None),
|
||||
'xmlymlpers' : ('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None),
|
||||
'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
|
||||
'gpuinit' : ('http://docs.opencv.org/modules/gpu/doc/initalization_and_information.html#%s', None),
|
||||
'gpudatastructure' : ('http://docs.opencv.org/modules/gpu/doc/data_structures.html#%s', None),
|
||||
@@ -316,56 +316,58 @@ extlinks = {
|
||||
'gpuperelement' : ('http://docs.opencv.org/modules/gpu/doc/per_element_operations.html#%s', None),
|
||||
'gpuimgproc' : ('http://docs.opencv.org/modules/gpu/doc/image_processing.html#%s', None),
|
||||
'gpumatrixreduct' : ('http://docs.opencv.org/modules/gpu/doc/matrix_reductions.html#%s', None),
|
||||
'filtering':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
|
||||
'filtering' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
|
||||
'flann' : ('http://docs.opencv.org/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html#%s', None ),
|
||||
'calib3d' : ('http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#%s', None ),
|
||||
'feature2d' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#%s', None ),
|
||||
'imgproc_geometric' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#%s', None ),
|
||||
'miscellaneous_transformations' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#%s', None),
|
||||
'user_interface' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html#%s', None),
|
||||
|
||||
# 'opencv_group' : ('http://answers.opencv.org/%s', None),
|
||||
'opencv_qa' : ('http://answers.opencv.org/%s', None),
|
||||
'how_to_contribute' : ('http://code.opencv.org/projects/opencv/wiki/How_to_contribute/%s', None),
|
||||
|
||||
'cvt_color': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),
|
||||
'imread': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None),
|
||||
'imwrite': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None),
|
||||
'imshow': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None),
|
||||
'named_window': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None),
|
||||
'wait_key': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None),
|
||||
'add_weighted': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None),
|
||||
'saturate_cast': ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None),
|
||||
'mat_zeros': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None),
|
||||
'convert_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None),
|
||||
'create_trackbar': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None),
|
||||
'point': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None),
|
||||
'scalar': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None),
|
||||
'line': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None),
|
||||
'ellipse': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None),
|
||||
'rectangle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None),
|
||||
'circle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None),
|
||||
'fill_poly': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None),
|
||||
'rng': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None),
|
||||
'put_text': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None),
|
||||
'gaussian_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None),
|
||||
'blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None),
|
||||
'median_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None),
|
||||
'bilateral_filter': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None),
|
||||
'erode': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None),
|
||||
'dilate': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None),
|
||||
'get_structuring_element': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None),
|
||||
'flood_fill': ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None),
|
||||
'morphology_ex': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None),
|
||||
'pyr_down': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None),
|
||||
'pyr_up': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None),
|
||||
'resize': ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None),
|
||||
'threshold': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None),
|
||||
'filter2d': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None),
|
||||
'copy_make_border': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None),
|
||||
'sobel': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None),
|
||||
'scharr': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None),
|
||||
'laplacian': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None),
|
||||
'canny': ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None),
|
||||
'copy_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None),
|
||||
'cvt_color' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),
|
||||
'imread' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None),
|
||||
'imwrite' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None),
|
||||
'imshow' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None),
|
||||
'named_window' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None),
|
||||
'wait_key' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None),
|
||||
'add_weighted' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None),
|
||||
'saturate_cast' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None),
|
||||
'mat_zeros' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None),
|
||||
'convert_to' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None),
|
||||
'create_trackbar' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None),
|
||||
'point' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None),
|
||||
'scalar' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None),
|
||||
'line' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None),
|
||||
'ellipse' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None),
|
||||
'rectangle' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None),
|
||||
'circle' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None),
|
||||
'fill_poly' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None),
|
||||
'rng' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None),
|
||||
'put_text' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None),
|
||||
'gaussian_blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None),
|
||||
'blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None),
|
||||
'median_blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None),
|
||||
'bilateral_filter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None),
|
||||
'erode' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None),
|
||||
'dilate' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None),
|
||||
'get_structuring_element' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None),
|
||||
'flood_fill' : ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None),
|
||||
'morphology_ex' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None),
|
||||
'pyr_down' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None),
|
||||
'pyr_up' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None),
|
||||
'resize' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None),
|
||||
'threshold' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None),
|
||||
'filter2d' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None),
|
||||
'copy_make_border' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None),
|
||||
'sobel' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None),
|
||||
'scharr' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None),
|
||||
'laplacian' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None),
|
||||
'canny' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None),
|
||||
'copy_to' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None),
|
||||
'hough_lines' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlines#houghlines%s', None),
|
||||
'hough_lines_p' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlinesp#houghlinesp%s', None),
|
||||
'hough_circles' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghcircles#houghcircles%s', None),
|
||||
|
@@ -7,45 +7,41 @@ Introduction to OpenCV-Python Tutorials
|
||||
OpenCV
|
||||
===============
|
||||
|
||||
OpenCV was started at Intel in 1999 by **Gary Bradsky** and the first release came out in 2000. **Vadim Pisarevsky** joined Gary Bradsky to manage Intel's Russian software OpenCV team. In 2005, OpenCV was used on Stanley, the vehicle who won 2005 DARPA Grand Challenge. Later its active development continued under the support of Willow Garage, with Gary Bradsky and Vadim Pisarevsky leading the project. Right now, OpenCV supports a lot of algorithms related to Computer Vision and Machine Learning and it is expanding day-by-day.
|
||||
OpenCV was started at Intel in 1999 by **Gary Bradsky**, and the first release came out in 2000. **Vadim Pisarevsky** joined Gary Bradsky to manage Intel's Russian software OpenCV team. In 2005, OpenCV was used on Stanley, the vehicle that won the 2005 DARPA Grand Challenge. Later, its active development continued under the support of Willow Garage with Gary Bradsky and Vadim Pisarevsky leading the project. OpenCV now supports a multitude of algorithms related to Computer Vision and Machine Learning and is expanding day by day.
|
||||
|
||||
Currently OpenCV supports a wide variety of programming languages like C++, Python, Java etc and is available on different platforms including Windows, Linux, OS X, Android, iOS etc. Also, interfaces based on CUDA and OpenCL are also under active development for high-speed GPU operations.
|
||||
OpenCV supports a wide variety of programming languages such as C++, Python, Java, etc., and is available on different platforms including Windows, Linux, OS X, Android, and iOS. Interfaces for high-speed GPU operations based on CUDA and OpenCL are also under active development.
|
||||
|
||||
OpenCV-Python is the Python API of OpenCV. It combines the best qualities of OpenCV C++ API and Python language.
|
||||
OpenCV-Python is the Python API for OpenCV, combining the best qualities of the OpenCV C++ API and the Python language.
|
||||
|
||||
|
||||
OpenCV-Python
|
||||
===============
|
||||
|
||||
Python is a general purpose programming language started by **Guido van Rossum**, which became very popular in short time mainly because of its simplicity and code readability. It enables the programmer to express his ideas in fewer lines of code without reducing any readability.
|
||||
OpenCV-Python is a library of Python bindings designed to solve computer vision problems.
|
||||
|
||||
Compared to other languages like C/C++, Python is slower. But another important feature of Python is that it can be easily extended with C/C++. This feature helps us to write computationally intensive codes in C/C++ and create a Python wrapper for it so that we can use these wrappers as Python modules. This gives us two advantages: first, our code is as fast as original C/C++ code (since it is the actual C++ code working in background) and second, it is very easy to code in Python. This is how OpenCV-Python works, it is a Python wrapper around original C++ implementation.
|
||||
Python is a general purpose programming language started by **Guido van Rossum** that became very popular very quickly, mainly because of its simplicity and code readability. It enables the programmer to express ideas in fewer lines of code without reducing readability.
|
||||
|
||||
And the support of Numpy makes the task more easier. **Numpy** is a highly optimized library for numerical operations. It gives a MATLAB-style syntax. All the OpenCV array structures are converted to-and-from Numpy arrays. So whatever operations you can do in Numpy, you can combine it with OpenCV, which increases number of weapons in your arsenal. Besides that, several other libraries like SciPy, Matplotlib which supports Numpy can be used with this.
|
||||
Compared to languages like C/C++, Python is slower. That said, Python can be easily extended with C/C++, which allows us to write computationally intensive code in C/C++ and create Python wrappers that can be used as Python modules. This gives us two advantages: first, the code is as fast as the original C/C++ code (since it is the actual C++ code working in the background) and second, it is easier to code in Python than in C/C++. OpenCV-Python is a Python wrapper around the original OpenCV C++ implementation.
|
||||
|
||||
So OpenCV-Python is an appropriate tool for fast prototyping of computer vision problems.
|
||||
OpenCV-Python makes use of **Numpy**, which is a highly optimized library for numerical operations with a MATLAB-style syntax. All the OpenCV array structures are converted to and from Numpy arrays. This also makes it easier to integrate with other libraries that use Numpy such as SciPy and Matplotlib.
|
||||
|
||||
|
||||
OpenCV-Python Tutorials
|
||||
=============================
|
||||
|
||||
OpenCV introduces a new set of tutorials which will guide you through various functions available in OpenCV-Python. **This guide is mainly focused on OpenCV 3.x version** (although most of the tutorials will work with OpenCV 2.x also).
|
||||
OpenCV introduces a new set of tutorials which will guide you through various functions available in OpenCV-Python. **This guide is mainly focused on OpenCV 3.x version** (although most of the tutorials will also work with OpenCV 2.x).
|
||||
|
||||
A prior knowledge on Python and Numpy is required before starting because they won't be covered in this guide. **Especially, a good knowledge on Numpy is must to write optimized codes in OpenCV-Python.**
|
||||
Prior knowledge of Python and Numpy is recommended as they won't be covered in this guide. **Proficiency with Numpy is a must in order to write optimized code using OpenCV-Python.**
|
||||
|
||||
This tutorial has been started by *Abid Rahman K.* as part of Google Summer of Code 2013 program, under the guidance of *Alexander Mordvintsev*.
|
||||
This tutorial was originally started by *Abid Rahman K.* as part of the Google Summer of Code 2013 program under the guidance of *Alexander Mordvintsev*.
|
||||
|
||||
|
||||
OpenCV Needs You !!!
|
||||
==========================
|
||||
|
||||
Since OpenCV is an open source initiative, all are welcome to make contributions to this library. And it is same for this tutorial also.
|
||||
Since OpenCV is an open source initiative, all are welcome to make contributions to the library, documentation, and tutorials. If you find any mistake in this tutorial (from a small spelling mistake to an egregious error in code or concept), feel free to correct it by cloning OpenCV in `GitHub <https://github.com/Itseez/opencv>`_ and submitting a pull request. OpenCV developers will check your pull request, give you important feedback and (once it passes the approval of the reviewer) it will be merged into OpenCV. You will then become an open source contributor :-)
|
||||
|
||||
So, if you find any mistake in this tutorial (whether it be a small spelling mistake or a big error in code or concepts, whatever), feel free to correct it.
|
||||
|
||||
And that will be a good task for freshers who begin to contribute to open source projects. Just fork the OpenCV in github, make necessary corrections and send a pull request to OpenCV. OpenCV developers will check your pull request, give you important feedback and once it passes the approval of the reviewer, it will be merged to OpenCV. Then you become a open source contributor. Similar is the case with other tutorials, documentation etc.
|
||||
|
||||
As new modules are added to OpenCV-Python, this tutorial will have to be expanded. So those who knows about particular algorithm can write up a tutorial which includes a basic theory of the algorithm and a code showing basic usage of the algorithm and submit it to OpenCV.
|
||||
As new modules are added to OpenCV-Python, this tutorial will have to be expanded. If you are familiar with a particular algorithm and can write up a tutorial including basic theory of the algorithm and code showing example usage, please do so.
|
||||
|
||||
Remember, we **together** can make this project a great success !!!
|
||||
|
||||
|
@@ -32,14 +32,14 @@ Here's a function that will do this:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
void Sharpen(const Mat& myImage,Mat& Result)
|
||||
void Sharpen(const Mat& myImage, Mat& Result)
|
||||
{
|
||||
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
|
||||
|
||||
Result.create(myImage.size(),myImage.type());
|
||||
Result.create(myImage.size(), myImage.type());
|
||||
const int nChannels = myImage.channels();
|
||||
|
||||
for(int j = 1 ; j < myImage.rows-1; ++j)
|
||||
for(int j = 1; j < myImage.rows - 1; ++j)
|
||||
{
|
||||
const uchar* previous = myImage.ptr<uchar>(j - 1);
|
||||
const uchar* current = myImage.ptr<uchar>(j );
|
||||
@@ -47,17 +47,17 @@ Here's a function that will do this:
|
||||
|
||||
uchar* output = Result.ptr<uchar>(j);
|
||||
|
||||
for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
|
||||
for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
|
||||
{
|
||||
*output++ = saturate_cast<uchar>(5*current[i]
|
||||
-current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
|
||||
*output++ = saturate_cast<uchar>(5 * current[i]
|
||||
-current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
|
||||
}
|
||||
}
|
||||
|
||||
Result.row(0).setTo(Scalar(0));
|
||||
Result.row(Result.rows-1).setTo(Scalar(0));
|
||||
Result.row(Result.rows - 1).setTo(Scalar(0));
|
||||
Result.col(0).setTo(Scalar(0));
|
||||
Result.col(Result.cols-1).setTo(Scalar(0));
|
||||
Result.col(Result.cols - 1).setTo(Scalar(0));
|
||||
}
|
||||
|
||||
First we make sure that the input image's data is in unsigned char format. For this we use the :utilitysystemfunctions:`CV_Assert <cv-assert>` function, which throws an error when the expression inside it is false.
|
||||
@@ -70,14 +70,14 @@ We create an output image with the same size and the same type as our input. As
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
Result.create(myImage.size(),myImage.type());
|
||||
Result.create(myImage.size(), myImage.type());
|
||||
const int nChannels = myImage.channels();
|
||||
|
||||
We'll use the plain C [] operator to access pixels. Because we need to access multiple rows at the same time, we'll acquire the pointers for each of them (a previous, a current and a next line). We need another pointer to where we're going to save the calculation. Then we simply access the right items with the [] operator. To move the output pointer ahead, we simply increment it (by one byte) after each operation:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
for(int j = 1 ; j < myImage.rows-1; ++j)
|
||||
for(int j = 1; j < myImage.rows - 1; ++j)
|
||||
{
|
||||
const uchar* previous = myImage.ptr<uchar>(j - 1);
|
||||
const uchar* current = myImage.ptr<uchar>(j );
|
||||
@@ -85,21 +85,21 @@ We'll use the plain C [] operator to access pixels. Because we need to access mu
|
||||
|
||||
uchar* output = Result.ptr<uchar>(j);
|
||||
|
||||
for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
|
||||
for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
|
||||
{
|
||||
*output++ = saturate_cast<uchar>(5*current[i]
|
||||
-current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
|
||||
*output++ = saturate_cast<uchar>(5 * current[i]
|
||||
-current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
|
||||
}
|
||||
}
|
||||
|
||||
On the borders of the image the upper notation results inexistent pixel locations (like minus one - minus one). In these points our formula is undefined. A simple solution is to not apply the mask in these points and, for example, set the pixels on the borders to zeros:
|
||||
On the borders of the image the notation above results in nonexistent pixel locations (like minus one, minus one). At these points our formula is undefined. A simple solution is to not apply the kernel at these points and, for example, set the pixels on the borders to zeros:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
Result.row(0).setTo(Scalar(0)); // The top row
|
||||
Result.row(Result.rows-1).setTo(Scalar(0)); // The bottom row
|
||||
Result.col(0).setTo(Scalar(0)); // The left column
|
||||
Result.col(Result.cols-1).setTo(Scalar(0)); // The right column
|
||||
Result.row(0).setTo(Scalar(0)); // The top row
|
||||
Result.row(Result.rows - 1).setTo(Scalar(0)); // The bottom row
|
||||
Result.col(0).setTo(Scalar(0)); // The left column
|
||||
Result.col(Result.cols - 1).setTo(Scalar(0)); // The right column
|
||||
|
||||
The filter2D function
|
||||
=====================
|
||||
@@ -116,7 +116,7 @@ Then call the :filtering:`filter2D <filter2d>` function specifying the input, th
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
filter2D(I, K, I.depth(), kern );
|
||||
filter2D(I, K, I.depth(), kern);
|
||||
|
||||
The function even has a fifth optional argument to specify the center of the kernel, and a sixth one for determining what to do in the regions where the operation is undefined (the borders). Using this function has the advantage that it's shorter and less verbose, and because some optimization techniques are implemented it is usually faster than the *hand-coded method*. For example, in my test the second one took only 13 milliseconds while the first took around 31 milliseconds. Quite some difference.
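For illustration, a minimal timing sketch for such a comparison could look like the following (hypothetical snippet; it assumes an input image ``I``, an output ``J``, and the kernel ``kern`` / result ``K`` from above, and uses OpenCV's ``getTickCount``/``getTickFrequency`` helpers):

.. code-block:: cpp

    // Hypothetical timing sketch, not part of the original tutorial code
    double t = (double)getTickCount();
    Sharpen(I, J);                            // hand-coded mask operation
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << "Hand-written function time passed in seconds: " << t << endl;

    t = (double)getTickCount();
    filter2D(I, K, I.depth(), kern);          // built-in filter2D
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << "Built-in filter2D time passed in seconds:     " << t << endl;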
|
||||
|
||||
|
@@ -45,7 +45,7 @@ All the above objects, in the end, point to the same single data matrix. Their h
|
||||
:linenos:
|
||||
|
||||
Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
|
||||
Mat E = A(Range:all(), Range(1,3)); // using row and column boundaries
|
||||
Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
|
||||
|
||||
Now you may ask: if the matrix itself may belong to multiple *Mat* objects, who takes responsibility for cleaning it up when it's no longer needed? The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned up, this counter is decreased. When the counter reaches zero the matrix is freed. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() <mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.
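To make the header-copy versus full-copy distinction concrete, a minimal sketch (hypothetical; it assumes a matrix ``A`` already exists) could look like this:

.. code-block:: cpp

    // Hypothetical sketch: header copies share data, clone()/copyTo() duplicate it
    Mat B(A);                 // header copy: B and A point to the same data matrix
    Mat C = A.clone();        // full copy: C gets its own data
    Mat D;
    A.copyTo(D);              // full copy as well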
|
||||
|
||||
@@ -86,7 +86,7 @@ Each of the building components has their own valid domains. This leads to the d
|
||||
Creating a *Mat* object explicitly
|
||||
==================================
|
||||
|
||||
In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
|
||||
In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readwriteimagevideo:`imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
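As a minimal sketch (hypothetical; it assumes the usual ``cv`` and ``std`` namespaces are in use), printing a small matrix for debugging could look like this:

.. code-block:: cpp

    // Hypothetical sketch: dump a small matrix with the << operator
    Mat M = Mat::eye(3, 3, CV_8UC1);
    cout << "M = " << endl << " " << M << endl;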
|
||||
|
||||
Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways:
|
||||
|
||||
|
@@ -5,7 +5,7 @@ Load, Modify, and Save an Image
|
||||
|
||||
.. note::
|
||||
|
||||
We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise.
|
||||
We assume that by now you know how to load an image using :readwriteimagevideo:`imread <imread>` and to display it in a window (using :user_interface:`imshow <imshow>`). Read the :ref:`Display_Image` tutorial otherwise.
|
||||
|
||||
Goals
|
||||
======
|
||||
@@ -14,9 +14,9 @@ In this tutorial you will learn how to:
|
||||
|
||||
.. container:: enumeratevisibleitemswithsquare
|
||||
|
||||
* Load an image using :imread:`imread <>`
|
||||
* Transform an image from BGR to Grayscale format by using :cvt_color:`cvtColor <>`
|
||||
* Save your transformed image in a file on disk (using :imwrite:`imwrite <>`)
|
||||
* Load an image using :readwriteimagevideo:`imread <imread>`
|
||||
* Transform an image from BGR to Grayscale format by using :miscellaneous_transformations:`cvtColor <cvtcolor>`
|
||||
* Save your transformed image in a file on disk (using :readwriteimagevideo:`imwrite <imwrite>`)
|
||||
|
||||
Code
|
||||
======
|
||||
@@ -62,10 +62,7 @@ Here it is:
|
||||
Explanation
|
||||
============
|
||||
|
||||
#. We begin by:
|
||||
|
||||
* Creating a Mat object to store the image information
|
||||
* Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image.
|
||||
#. We begin by loading an image using :readwriteimagevideo:`imread <imread>`, located in the path given by *imageName*. For this example, assume you are loading an RGB image.
|
||||
|
||||
#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations:
|
||||
|
||||
@@ -73,15 +70,15 @@ Explanation
|
||||
|
||||
cvtColor( image, gray_image, CV_BGR2GRAY );
|
||||
|
||||
As you can see, :cvt_color:`cvtColor <>` takes as arguments:
|
||||
As you can see, :miscellaneous_transformations:`cvtColor <cvtcolor>` takes as arguments:
|
||||
|
||||
.. container:: enumeratevisibleitemswithsquare
|
||||
|
||||
* a source image (*image*)
|
||||
* a destination image (*gray_image*), in which we will save the converted image.
|
||||
* an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images).
|
||||
* an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because :readwriteimagevideo:`imread <imread>` has a BGR default channel order in the case of color images).
|
||||
|
||||
#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :imread:`imread <>`: :imwrite:`imwrite <>`
|
||||
#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analogous to :readwriteimagevideo:`imread <imread>`: :readwriteimagevideo:`imwrite <imwrite>`
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
|
@@ -1387,7 +1387,7 @@ description rewritten using
|
||||
|
||||
IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);
|
||||
IplImage gray_img_hdr, *gray_img;
|
||||
gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
|
||||
gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0);
|
||||
|
||||
...
|
||||
|
||||
@@ -1395,6 +1395,18 @@ description rewritten using
|
||||
int size[] = { 2, 2, 2 };
|
||||
CvMatND* mat = cvCreateMatND(3, size, CV_32F);
|
||||
CvMat row_header, *row;
|
||||
row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0);
|
||||
|
||||
..
|
||||
|
||||
In C, the header file for this function includes a convenient macro ``cvReshapeND`` that does away with the ``sizeof_header`` parameter. So, the lines containing the call to ``cvReshapeMatND`` in the examples may be replaced as follows:
|
||||
|
||||
::
|
||||
|
||||
gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
|
||||
|
||||
...
|
||||
|
||||
row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0);
|
||||
|
||||
..
|
||||
|
@ -2618,53 +2618,37 @@ static bool ocl_compare(InputArray _src1, InputArray _src2, OutputArray _dst, in
|
||||
{
|
||||
const ocl::Device& dev = ocl::Device::getDefault();
|
||||
bool doubleSupport = dev.doubleFPConfig() > 0;
|
||||
int type1 = _src1.type(), depth1 = CV_MAT_DEPTH(type1), cn = CV_MAT_CN(type1);
|
||||
int type2 = _src2.type();
|
||||
|
||||
if (!haveScalar)
|
||||
{
|
||||
if ( (!doubleSupport && (depth1 == CV_64F || _src2.depth() == CV_64F)) ||
|
||||
!_src1.sameSize(_src2) || type1 != type2)
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (cn > 1 || depth1 <= CV_32S) // FIXIT: if (cn > 4): Need to clear CPU-based compare behavior
|
||||
return false;
|
||||
}
|
||||
int type1 = _src1.type(), depth1 = CV_MAT_DEPTH(type1), cn = CV_MAT_CN(type1),
|
||||
type2 = _src2.type(), depth2 = CV_MAT_DEPTH(type2);
|
||||
|
||||
if (!doubleSupport && depth1 == CV_64F)
|
||||
return false;
|
||||
|
||||
if (!haveScalar && (!_src1.sameSize(_src2) || type1 != type2))
|
||||
return false;
|
||||
|
||||
int kercn = haveScalar ? cn : ocl::predictOptimalVectorWidth(_src1, _src2, _dst);
|
||||
// Workaround for bug with "?:" operator in AMD OpenCL compiler
|
||||
bool workaroundForAMD = /*dev.isAMD() &&*/
|
||||
(
|
||||
(depth1 != CV_8U && depth1 != CV_8S)
|
||||
);
|
||||
if (workaroundForAMD)
|
||||
if (depth1 >= CV_16U)
|
||||
kercn = 1;
|
||||
|
||||
int scalarcn = kercn == 3 ? 4 : kercn;
|
||||
|
||||
const char * const operationMap[] = { "==", ">", ">=", "<", "<=", "!=" };
|
||||
char cvt[40];
|
||||
|
||||
String buildOptions = format(
|
||||
"-D %s -D srcT1=%s -D dstT=%s -D workT=srcT1 -D cn=%d"
|
||||
" -D convertToDT=%s -D OP_CMP -D CMP_OPERATOR=%s -D srcT1_C1=%s"
|
||||
" -D srcT2_C1=%s -D dstT_C1=%s -D workST=%s%s",
|
||||
(haveScalar ? "UNARY_OP" : "BINARY_OP"),
|
||||
ocl::typeToStr(CV_MAKE_TYPE(depth1, kercn)),
|
||||
ocl::typeToStr(CV_8UC(kercn)), kercn,
|
||||
ocl::convertTypeStr(depth1, CV_8U, kercn, cvt),
|
||||
operationMap[op],
|
||||
ocl::typeToStr(depth1), ocl::typeToStr(depth1), ocl::typeToStr(CV_8U),
|
||||
ocl::typeToStr(CV_MAKE_TYPE(depth1, scalarcn)),
|
||||
doubleSupport ? " -D DOUBLE_SUPPORT" : ""
|
||||
);
|
||||
String opts = format("-D %s -D srcT1=%s -D dstT=%s -D workT=srcT1 -D cn=%d"
|
||||
" -D convertToDT=%s -D OP_CMP -D CMP_OPERATOR=%s -D srcT1_C1=%s"
|
||||
" -D srcT2_C1=%s -D dstT_C1=%s -D workST=%s%s",
|
||||
haveScalar ? "UNARY_OP" : "BINARY_OP",
|
||||
ocl::typeToStr(CV_MAKE_TYPE(depth1, kercn)),
|
||||
ocl::typeToStr(CV_8UC(kercn)), kercn,
|
||||
ocl::convertTypeStr(depth1, CV_8U, kercn, cvt),
|
||||
operationMap[op], ocl::typeToStr(depth1),
|
||||
ocl::typeToStr(depth1), ocl::typeToStr(CV_8U),
|
||||
ocl::typeToStr(CV_MAKE_TYPE(depth1, scalarcn)),
|
||||
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
|
||||
|
||||
ocl::Kernel k("KF", ocl::core::arithm_oclsrc, buildOptions);
|
||||
ocl::Kernel k("KF", ocl::core::arithm_oclsrc, opts);
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
@ -2675,24 +2659,43 @@ static bool ocl_compare(InputArray _src1, InputArray _src2, OutputArray _dst, in
|
||||
|
||||
if (haveScalar)
|
||||
{
|
||||
size_t esz = CV_ELEM_SIZE1(type1)*scalarcn;
|
||||
double buf[4]={0,0,0,0};
|
||||
Mat src2sc = _src2.getMat();
|
||||
size_t esz = CV_ELEM_SIZE1(type1) * scalarcn;
|
||||
double buf[4] = { 0, 0, 0, 0 };
|
||||
Mat src2 = _src2.getMat();
|
||||
|
||||
if (!src2sc.empty())
|
||||
convertAndUnrollScalar(src2sc, type1, (uchar*)buf, 1);
|
||||
if( depth1 > CV_32S )
|
||||
convertAndUnrollScalar( src2, depth1, (uchar *)buf, kercn );
|
||||
else
|
||||
{
|
||||
double fval = 0;
|
||||
getConvertFunc(depth2, CV_64F)(src2.data, 0, 0, 0, (uchar *)&fval, 0, Size(1, 1), 0);
|
||||
if( fval < getMinVal(depth1) )
|
||||
return dst.setTo(Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0)), true;
|
||||
|
||||
if( fval > getMaxVal(depth1) )
|
||||
return dst.setTo(Scalar::all(op == CMP_LT || op == CMP_LE || op == CMP_NE ? 255 : 0)), true;
|
||||
|
||||
int ival = cvRound(fval);
|
||||
if( fval != ival )
|
||||
{
|
||||
if( op == CMP_LT || op == CMP_GE )
|
||||
ival = cvCeil(fval);
|
||||
else if( op == CMP_LE || op == CMP_GT )
|
||||
ival = cvFloor(fval);
|
||||
else
|
||||
return dst.setTo(Scalar::all(op == CMP_NE ? 255 : 0)), true;
|
||||
}
|
||||
convertAndUnrollScalar(Mat(1, 1, CV_32S, &ival), depth1, (uchar *)buf, kercn);
|
||||
}
|
||||
|
||||
ocl::KernelArg scalararg = ocl::KernelArg(0, 0, 0, 0, buf, esz);
|
||||
|
||||
k.args(ocl::KernelArg::ReadOnlyNoSize(src1, cn, kercn),
|
||||
ocl::KernelArg::WriteOnly(dst, cn, kercn),
|
||||
scalararg);
|
||||
ocl::KernelArg::WriteOnly(dst, cn, kercn), scalararg);
|
||||
}
|
||||
else
|
||||
{
|
||||
CV_DbgAssert(type1 == type2);
|
||||
UMat src2 = _src2.getUMat();
|
||||
CV_DbgAssert(size == src2.size());
|
||||
|
||||
k.args(ocl::KernelArg::ReadOnlyNoSize(src1),
|
||||
ocl::KernelArg::ReadOnlyNoSize(src2),
|
||||
|
@ -5486,11 +5486,27 @@ internal::WriteStructContext::WriteStructContext(FileStorage& _fs,
|
||||
{
|
||||
cvStartWriteStruct(**fs, !name.empty() ? name.c_str() : 0, flags,
|
||||
!typeName.empty() ? typeName.c_str() : 0);
|
||||
fs->elname = String();
|
||||
if ((flags & FileNode::TYPE_MASK) == FileNode::SEQ)
|
||||
{
|
||||
fs->state = FileStorage::VALUE_EXPECTED;
|
||||
fs->structs.push_back('[');
|
||||
}
|
||||
else
|
||||
{
|
||||
fs->state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP;
|
||||
fs->structs.push_back('{');
|
||||
}
|
||||
}
|
||||
|
||||
internal::WriteStructContext::~WriteStructContext()
|
||||
{
|
||||
cvEndWriteStruct(**fs);
|
||||
fs->structs.pop_back();
|
||||
fs->state = fs->structs.empty() || fs->structs.back() == '{' ?
|
||||
FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP :
|
||||
FileStorage::VALUE_EXPECTED;
|
||||
fs->elname = String();
|
||||
}
|
||||
|
||||
|
||||
|
@ -88,8 +88,10 @@ void UMatData::unlock()
|
||||
|
||||
MatAllocator* UMat::getStdAllocator()
|
||||
{
|
||||
#ifdef HAVE_OPENCL
|
||||
if( ocl::haveOpenCL() && ocl::useOpenCL() )
|
||||
return ocl::getOpenCLAllocator();
|
||||
#endif
|
||||
return Mat::getStdAllocator();
|
||||
}
|
||||
|
||||
@ -665,7 +667,7 @@ void UMat::copyTo(OutputArray _dst, InputArray _mask) const
|
||||
copyTo(_dst);
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
int cn = channels(), mtype = _mask.type(), mdepth = CV_MAT_DEPTH(mtype), mcn = CV_MAT_CN(mtype);
|
||||
CV_Assert( mdepth == CV_8U && (mcn == 1 || mcn == cn) );
|
||||
|
||||
@ -692,7 +694,7 @@ void UMat::copyTo(OutputArray _dst, InputArray _mask) const
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
Mat src = getMat(ACCESS_READ);
|
||||
src.copyTo(_dst, _mask);
|
||||
}
|
||||
@ -713,7 +715,7 @@ void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) con
|
||||
copyTo(_dst);
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
|
||||
bool needDouble = sdepth == CV_64F || ddepth == CV_64F;
|
||||
if( dims <= 2 && cn && _dst.isUMat() && ocl::useOpenCL() &&
|
||||
@ -748,7 +750,7 @@ void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) con
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
Mat m = getMat(ACCESS_READ);
|
||||
m.convertTo(_dst, _type, alpha, beta);
|
||||
}
|
||||
@ -756,7 +758,9 @@ void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) con
|
||||
UMat& UMat::setTo(InputArray _value, InputArray _mask)
|
||||
{
|
||||
bool haveMask = !_mask.empty();
|
||||
#ifdef HAVE_OPENCL
|
||||
int tp = type(), cn = CV_MAT_CN(tp);
|
||||
|
||||
if( dims <= 2 && cn <= 4 && CV_MAT_DEPTH(tp) < CV_64F && ocl::useOpenCL() )
|
||||
{
|
||||
Mat value = _value.getMat();
|
||||
@ -795,6 +799,7 @@ UMat& UMat::setTo(InputArray _value, InputArray _mask)
|
||||
return *this;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
Mat m = getMat(haveMask ? ACCESS_RW : ACCESS_WRITE);
|
||||
m.setTo(_value, _mask);
|
||||
return *this;
|
||||
|
@ -380,6 +380,40 @@ TEST(Core_InputOutput, write_read_consistency) { Core_IOTest test; test.safe_run
|
||||
|
||||
extern void testFormatter();
|
||||
|
||||
|
||||
struct UserDefinedType
|
||||
{
|
||||
int a;
|
||||
float b;
|
||||
};
|
||||
|
||||
static inline bool operator==(const UserDefinedType &x,
|
||||
const UserDefinedType &y) {
|
||||
return (x.a == y.a) && (x.b == y.b);
|
||||
}
|
||||
|
||||
static inline void write(FileStorage &fs,
|
||||
const String&,
|
||||
const UserDefinedType &value)
|
||||
{
|
||||
fs << "{:" << "a" << value.a << "b" << value.b << "}";
|
||||
}
|
||||
|
||||
static inline void read(const FileNode& node,
|
||||
UserDefinedType& value,
|
||||
const UserDefinedType& default_value
|
||||
= UserDefinedType()) {
|
||||
if(node.empty())
|
||||
{
|
||||
value = default_value;
|
||||
}
|
||||
else
|
||||
{
|
||||
node["a"] >> value.a;
|
||||
node["b"] >> value.b;
|
||||
}
|
||||
}
|
||||
|
||||
class CV_MiscIOTest : public cvtest::BaseTest
|
||||
{
|
||||
public:
|
||||
@ -393,11 +427,14 @@ protected:
|
||||
string fname = cv::tempfile(".xml");
|
||||
vector<int> mi, mi2, mi3, mi4;
|
||||
vector<Mat> mv, mv2, mv3, mv4;
|
||||
vector<UserDefinedType> vudt, vudt2, vudt3, vudt4;
|
||||
Mat m(10, 9, CV_32F);
|
||||
Mat empty;
|
||||
UserDefinedType udt = { 8, 3.3f };
|
||||
randu(m, 0, 1);
|
||||
mi3.push_back(5);
|
||||
mv3.push_back(m);
|
||||
vudt3.push_back(udt);
|
||||
Point_<float> p1(1.1f, 2.2f), op1;
|
||||
Point3i p2(3, 4, 5), op2;
|
||||
Size s1(6, 7), os1;
|
||||
@ -412,6 +449,8 @@ protected:
|
||||
fs << "mv" << mv;
|
||||
fs << "mi3" << mi3;
|
||||
fs << "mv3" << mv3;
|
||||
fs << "vudt" << vudt;
|
||||
fs << "vudt3" << vudt3;
|
||||
fs << "empty" << empty;
|
||||
fs << "p1" << p1;
|
||||
fs << "p2" << p2;
|
||||
@ -428,6 +467,8 @@ protected:
|
||||
fs["mv"] >> mv2;
|
||||
fs["mi3"] >> mi4;
|
||||
fs["mv3"] >> mv4;
|
||||
fs["vudt"] >> vudt2;
|
||||
fs["vudt3"] >> vudt4;
|
||||
fs["empty"] >> empty;
|
||||
fs["p1"] >> op1;
|
||||
fs["p2"] >> op2;
|
||||
@ -442,6 +483,8 @@ protected:
|
||||
CV_Assert( norm(mi3, mi4, CV_C) == 0 );
|
||||
CV_Assert( mv4.size() == 1 );
|
||||
double n = norm(mv3[0], mv4[0], CV_C);
|
||||
CV_Assert( vudt2.empty() );
|
||||
CV_Assert( vudt3 == vudt4 );
|
||||
CV_Assert( n == 0 );
|
||||
CV_Assert( op1 == p1 );
|
||||
CV_Assert( op2 == p2 );
|
||||
|
@@ -69,7 +69,7 @@ Computes the descriptors for a set of keypoints detected in an image (first vari
|
||||
|
||||
:param keypoints: Input collection of keypoints. Keypoints for which a descriptor cannot be computed are removed. Sometimes new keypoints can be added, for example: ``SIFT`` duplicates keypoint with several dominant orientations (for each orientation).
|
||||
|
||||
:param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are descriptors computed for a ``keypoints[i]`. Row ``j`` is the ``keypoints`` (or ``keypoints[i]``) is the descriptor for keypoint ``j``-th keypoint.
|
||||
:param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are the descriptors computed for ``keypoints[i]``. Row ``j`` of ``descriptors`` (or ``descriptors[i]``) is the descriptor for the ``j``-th keypoint.
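For orientation only, a minimal sketch of the first variant (hypothetical snippet; the image ``img`` and the use of the ``"ORB"`` algorithm name are assumptions):

.. code-block:: cpp

    // Hypothetical sketch: detect keypoints, then compute their descriptors
    Ptr<FeatureDetector> detector = FeatureDetector::create("ORB");
    Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");

    vector<KeyPoint> keypoints;
    detector->detect(img, keypoints);

    Mat descriptors;
    extractor->compute(img, keypoints, descriptors);   // first variant: a single image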
|
||||
|
||||
|
||||
DescriptorExtractor::create
|
||||
|
@@ -249,7 +249,7 @@ Brute-force matcher constructor.
|
||||
|
||||
:param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB, BRISK and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
|
||||
|
||||
:param crossCheck: If it is false, this is will be default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMathcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
|
||||
:param crossCheck: If it is false, this will be the default ``BFMatcher`` behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for the ``i``-th query descriptor the ``j``-th descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. This technique usually produces the best results with a minimal number of outliers when there are enough matches. This is an alternative to the ratio test used by D. Lowe in the SIFT paper.
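As a rough sketch of the cross-check setup described above (hypothetical snippet; ``descriptors1`` and ``descriptors2`` are assumed to be binary descriptors such as ORB):

.. code-block:: cpp

    // Hypothetical sketch: cross-checked brute-force matching with k = 1
    BFMatcher matcher(NORM_HAMMING, true);      // crossCheck == true
    vector<vector<DMatch> > knnMatches;
    matcher.knnMatch(descriptors1, descriptors2, knnMatches, 1);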
|
||||
|
||||
|
||||
FlannBasedMatcher
|
||||
|
@@ -123,7 +123,7 @@ OCL_PERF_TEST_P(BruteForceMatcherFixture, RadiusMatch, ::testing::Combine(OCL_PE
|
||||
SANITY_CHECK_MATCHES(matches1, 1e-3);
|
||||
}
|
||||
|
||||
}//ocl
|
||||
}//cvtest
|
||||
} // ocl
|
||||
} // cvtest
|
||||
|
||||
#endif //HAVE_OPENCL
|
||||
#endif // HAVE_OPENCL
|
||||
|
modules/features2d/perf/opencl/perf_fast.cpp (new file) | 50 lines
@@ -0,0 +1,50 @@
|
||||
#include "perf_precomp.hpp"
|
||||
#include "opencv2/ts/ocl_perf.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
|
||||
namespace cvtest {
|
||||
namespace ocl {
|
||||
|
||||
enum { TYPE_5_8 =FastFeatureDetector::TYPE_5_8, TYPE_7_12 = FastFeatureDetector::TYPE_7_12, TYPE_9_16 = FastFeatureDetector::TYPE_9_16 };
|
||||
CV_ENUM(FastType, TYPE_5_8, TYPE_7_12)
|
||||
|
||||
typedef std::tr1::tuple<string, FastType> File_Type_t;
|
||||
typedef TestBaseWithParam<File_Type_t> FASTFixture;
|
||||
|
||||
#define FAST_IMAGES \
|
||||
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
|
||||
"stitching/a3.png"
|
||||
|
||||
OCL_PERF_TEST_P(FASTFixture, FastDetect, testing::Combine(
|
||||
testing::Values(FAST_IMAGES),
|
||||
FastType::all()
|
||||
))
|
||||
{
|
||||
string filename = getDataPath(get<0>(GetParam()));
|
||||
int type = get<1>(GetParam());
|
||||
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (mframe.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
|
||||
UMat frame;
|
||||
mframe.copyTo(frame);
|
||||
declare.in(frame);
|
||||
|
||||
Ptr<FeatureDetector> fd = Algorithm::create<FeatureDetector>("Feature2D.FAST");
|
||||
ASSERT_FALSE( fd.empty() );
|
||||
fd->set("threshold", 20);
|
||||
fd->set("nonmaxSuppression", true);
|
||||
fd->set("type", type);
|
||||
vector<KeyPoint> points;
|
||||
|
||||
OCL_TEST_CYCLE() fd->detect(frame, points);
|
||||
|
||||
SANITY_CHECK_KEYPOINTS(points);
|
||||
}
|
||||
|
||||
} // ocl
|
||||
} // cvtest
|
||||
|
||||
#endif // HAVE_OPENCL
|
modules/features2d/perf/opencl/perf_orb.cpp (new file) | 86 lines
@@ -0,0 +1,86 @@
|
||||
#include "perf_precomp.hpp"
|
||||
#include "opencv2/ts/ocl_perf.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
|
||||
namespace cvtest {
|
||||
namespace ocl {
|
||||
|
||||
typedef ::perf::TestBaseWithParam<std::string> ORBFixture;
|
||||
|
||||
#define ORB_IMAGES OCL_PERF_ENUM("cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png", "stitching/a3.png")
|
||||
|
||||
OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES)
|
||||
{
|
||||
string filename = getDataPath(GetParam());
|
||||
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (mframe.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
|
||||
UMat frame, mask;
|
||||
mframe.copyTo(frame);
|
||||
|
||||
declare.in(frame);
|
||||
ORB detector(1500, 1.3f, 1);
|
||||
vector<KeyPoint> points;
|
||||
|
||||
OCL_TEST_CYCLE() detector(frame, mask, points);
|
||||
|
||||
std::sort(points.begin(), points.end(), comparators::KeypointGreater());
|
||||
SANITY_CHECK_KEYPOINTS(points, 1e-5);
|
||||
}
|
||||
|
||||
OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES)
|
||||
{
|
||||
string filename = getDataPath(GetParam());
|
||||
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (mframe.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
|
||||
UMat mask, frame;
|
||||
mframe.copyTo(frame);
|
||||
|
||||
declare.in(frame);
|
||||
|
||||
ORB detector(1500, 1.3f, 1);
|
||||
vector<KeyPoint> points;
|
||||
detector(frame, mask, points);
|
||||
std::sort(points.begin(), points.end(), comparators::KeypointGreater());
|
||||
|
||||
UMat descriptors;
|
||||
|
||||
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, true);
|
||||
|
||||
SANITY_CHECK(descriptors);
|
||||
}
|
||||
|
||||
OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
|
||||
{
|
||||
string filename = getDataPath(GetParam());
|
||||
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (mframe.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
|
||||
UMat mask, frame;
|
||||
mframe.copyTo(frame);
|
||||
|
||||
declare.in(frame);
|
||||
ORB detector(1500, 1.3f, 1);
|
||||
|
||||
vector<KeyPoint> points;
|
||||
UMat descriptors;
|
||||
|
||||
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, false);
|
||||
|
||||
::perf::sort(points, descriptors);
|
||||
SANITY_CHECK_KEYPOINTS(points, 1e-5);
|
||||
SANITY_CHECK(descriptors);
|
||||
}
|
||||
|
||||
} // ocl
|
||||
} // cvtest
|
||||
|
||||
#endif // HAVE_OPENCL
|
@ -217,6 +217,7 @@ enum { IMREAD_UNCHANGED = -1, // 8bit, color or not
|
||||
|
||||
enum { IMWRITE_JPEG_QUALITY = 1,
|
||||
IMWRITE_JPEG_PROGRESSIVE = 2,
|
||||
IMWRITE_JPEG_OPTIMIZE = 3,
|
||||
IMWRITE_PNG_COMPRESSION = 16,
|
||||
IMWRITE_PNG_STRATEGY = 17,
|
||||
IMWRITE_PNG_BILEVEL = 18,
|
||||
|
@ -221,6 +221,7 @@ enum
|
||||
{
|
||||
CV_IMWRITE_JPEG_QUALITY =1,
|
||||
CV_IMWRITE_JPEG_PROGRESSIVE =2,
|
||||
CV_IMWRITE_JPEG_OPTIMIZE =3,
|
||||
CV_IMWRITE_PNG_COMPRESSION =16,
|
||||
CV_IMWRITE_PNG_STRATEGY =17,
|
||||
CV_IMWRITE_PNG_BILEVEL =18,
|
||||
|
@ -1309,6 +1309,8 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
    }

    //cleanup
    CFRelease(cfData);
    CVPixelBufferRelease(pixelBuffer);
    CGImageRelease(cgImage);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);

@ -599,6 +599,7 @@ bool JpegEncoder::write( const Mat& img, const std::vector<int>& params )

    int quality = 95;
    int progressive = 0;
    int optimize = 0;

    for( size_t i = 0; i < params.size(); i += 2 )
    {
@ -612,6 +613,11 @@ bool JpegEncoder::write( const Mat& img, const std::vector<int>& params )
        {
            progressive = params[i+1];
        }

        if( params[i] == CV_IMWRITE_JPEG_OPTIMIZE )
        {
            optimize = params[i+1];
        }
    }

    jpeg_set_defaults( &cinfo );
@ -619,6 +625,8 @@ bool JpegEncoder::write( const Mat& img, const std::vector<int>& params )
                      TRUE /* limit to baseline-JPEG values */ );
    if( progressive )
        jpeg_simple_progression( &cinfo );
    if( optimize )
        cinfo.optimize_coding = TRUE;
    jpeg_start_compress( &cinfo, TRUE );

    if( channels > 1 )

@ -410,6 +410,30 @@ TEST(Highgui_Jpeg, encode_decode_progressive_jpeg)

    remove(output_progressive.c_str());
}

TEST(Highgui_Jpeg, encode_decode_optimize_jpeg)
{
    cvtest::TS& ts = *cvtest::TS::ptr();
    string input = string(ts.get_data_path()) + "../cv/shared/lena.png";
    cv::Mat img = cv::imread(input);
    ASSERT_FALSE(img.empty());

    std::vector<int> params;
    params.push_back(IMWRITE_JPEG_OPTIMIZE);
    params.push_back(1);

    string output_optimized = cv::tempfile(".jpg");
    EXPECT_NO_THROW(cv::imwrite(output_optimized, img, params));
    cv::Mat img_jpg_optimized = cv::imread(output_optimized);

    string output_normal = cv::tempfile(".jpg");
    EXPECT_NO_THROW(cv::imwrite(output_normal, img));
    cv::Mat img_jpg_normal = cv::imread(output_normal);

    EXPECT_EQ(0, cv::norm(img_jpg_optimized, img_jpg_normal, NORM_INF));

    remove(output_optimized.c_str());
}
#endif

@ -11,6 +11,7 @@
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -40,6 +41,8 @@
//M*/

#include "precomp.hpp"
#include "opencl_kernels.hpp"

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
static IppStatus sts = ippInit();
#endif
@ -495,6 +498,58 @@ void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_Laplacian5(InputArray _src, OutputArray _dst,
                           const Mat & kd, const Mat & ks, double scale, double delta,
                           int borderType, int depth, int ddepth)
{
    int iscale = cvRound(scale), idelta = cvRound(delta);
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
        floatCoeff = std::fabs(delta - idelta) > DBL_EPSILON || std::fabs(scale - iscale) > DBL_EPSILON;
    int cn = _src.channels(), wdepth = std::max(depth, floatCoeff ? CV_32F : CV_32S), kercn = 1;

    if (!doubleSupport && wdepth == CV_64F)
        return false;

    char cvt[2][40];
    ocl::Kernel k("sumConvert", ocl::imgproc::laplacian5_oclsrc,
                  format("-D srcT=%s -D WT=%s -D dstT=%s -D coeffT=%s -D wdepth=%d "
                         "-D convertToWT=%s -D convertToDT=%s%s",
                         ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(ddepth, kercn)),
                         ocl::typeToStr(wdepth), wdepth,
                         ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
                         ocl::convertTypeStr(wdepth, ddepth, kercn, cvt[1]),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (k.empty())
        return false;

    UMat d2x, d2y;
    sepFilter2D(_src, d2x, depth, kd, ks, Point(-1, -1), 0, borderType);
    sepFilter2D(_src, d2y, depth, ks, kd, Point(-1, -1), 0, borderType);

    UMat dst = _dst.getUMat();

    ocl::KernelArg d2xarg = ocl::KernelArg::ReadOnlyNoSize(d2x),
            d2yarg = ocl::KernelArg::ReadOnlyNoSize(d2y),
            dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);

    if (wdepth >= CV_32F)
        k.args(d2xarg, d2yarg, dstarg, (float)scale, (float)delta);
    else
        k.args(d2xarg, d2yarg, dstarg, iscale, idelta);

    size_t globalsize[] = { dst.cols * cn / kercn, dst.rows };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
                    double scale, double delta, int borderType )
@ -531,27 +586,28 @@ void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
    }
    else
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        const size_t STRIPE_SIZE = 1 << 14;

        int depth = src.depth();
        int ktype = std::max(CV_32F, std::max(ddepth, depth));
        int wdepth = depth == CV_8U && ksize <= 5 ? CV_16S : depth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, src.channels());
        int ktype = std::max(CV_32F, std::max(ddepth, sdepth));
        int wdepth = sdepth == CV_8U && ksize <= 5 ? CV_16S : sdepth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, cn);
        Mat kd, ks;
        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
        int dtype = CV_MAKETYPE(ddepth, src.channels());

        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(getElemSize(src.type())*src.cols)), 1), src.rows);
        Ptr<FilterEngine> fx = createSeparableLinearFilter(src.type(),
        CV_OCL_RUN(_dst.isUMat(),
                   ocl_Laplacian5(_src, _dst, kd, ks, scale,
                                  delta, borderType, wdepth, ddepth))

        const size_t STRIPE_SIZE = 1 << 14;
        Ptr<FilterEngine> fx = createSeparableLinearFilter(stype,
            wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
        Ptr<FilterEngine> fy = createSeparableLinearFilter(src.type(),
        Ptr<FilterEngine> fy = createSeparableLinearFilter(stype,
            wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );

        Mat src = _src.getMat(), dst = _dst.getMat();
        int y = fx->start(src), dsty = 0, dy = 0;
        fy->start(src);
        const uchar* sptr = src.data + y*src.step;

        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(CV_ELEM_SIZE(stype)*src.cols)), 1), src.rows);
        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );

@ -564,7 +620,7 @@ void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
            Mat dstripe = dst.rowRange(dsty, dsty + dy);
            d2x.rows = d2y.rows = dy; // modify the headers, which should work
            d2x += d2y;
            d2x.convertTo( dstripe, dtype, scale, delta );
            d2x.convertTo( dstripe, ddepth, scale, delta );
        }
    }
}
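The CV_OCL_RUN line above is what routes cv::Laplacian into the new ocl_Laplacian5 helper: it only fires when the destination is a UMat and the OpenCL kernel builds successfully; otherwise the existing CPU stripe loop still runs. A rough caller-side sketch under those assumptions (the image path and parameter values are arbitrary, not part of the patch):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::UMat src, dst;
    cv::imread("input.png", cv::IMREAD_GRAYSCALE).copyTo(src);  // placeholder image
    if (src.empty())
        return 1;

    // ksize = 5 takes the separable second-derivative branch shown above, so with a
    // UMat destination this should dispatch to ocl_Laplacian5 when OpenCL is available.
    cv::Laplacian(src, dst, CV_16S, 5, /*scale=*/1, /*delta=*/0, cv::BORDER_DEFAULT);

    return 0;
}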
@ -164,6 +164,12 @@ static bool ocl_goodFeaturesToTrack( InputArray _image, OutputArray _corners,
        return false;

    total = std::min<size_t>(counter.getMat(ACCESS_READ).at<int>(0, 0), possibleCornersCount);
    if (total == 0)
    {
        _corners.release();
        return true;
    }

    tmpCorners.resize(total);

    Mat mcorners(1, (int)total, CV_32FC2, &tmpCorners[0]);
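The goodFeaturesToTrack hunk above clamps the corner count by reading a device-side counter back on the host before shrinking tmpCorners. Reduced to its essentials, the read-back looks like the sketch below; the helper name is hypothetical and only illustrates the getMat(ACCESS_READ) pattern used here.

#include <opencv2/core/core.hpp>

// Minimal sketch: read a single int that an OpenCL kernel wrote into a 1x1 CV_32SC1 UMat.
static int readCounter(const cv::UMat& counter)
{
    // getMat(ACCESS_READ) maps the buffer for host access; at<int>(0, 0) reads the value.
    return counter.getMat(cv::ACCESS_READ).at<int>(0, 0);
}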
@ -1917,71 +1917,73 @@ class IPPresizeInvoker :
|
||||
public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
IPPresizeInvoker(Mat &_src, Mat &_dst, double _inv_scale_x, double _inv_scale_y, int _mode, bool *_ok) :
|
||||
ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), ok(_ok)
|
||||
{
|
||||
*ok = true;
|
||||
IppiSize srcSize, dstSize;
|
||||
int type = src.type();
|
||||
int specSize = 0, initSize = 0;
|
||||
srcSize.width = src.cols;
|
||||
srcSize.height = src.rows;
|
||||
dstSize.width = dst.cols;
|
||||
dstSize.height = dst.rows;
|
||||
IPPresizeInvoker(const Mat & _src, Mat & _dst, double _inv_scale_x, double _inv_scale_y, int _mode, bool *_ok) :
|
||||
ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), ok(_ok)
|
||||
{
|
||||
*ok = true;
|
||||
IppiSize srcSize, dstSize;
|
||||
int type = src.type();
|
||||
int specSize = 0, initSize = 0;
|
||||
srcSize.width = src.cols;
|
||||
srcSize.height = src.rows;
|
||||
dstSize.width = dst.cols;
|
||||
dstSize.height = dst.rows;
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case CV_8UC1: SET_IPP_RESIZE_PTR(8u,C1); break;
|
||||
case CV_8UC3: SET_IPP_RESIZE_PTR(8u,C3); break;
|
||||
case CV_8UC4: SET_IPP_RESIZE_PTR(8u,C4); break;
|
||||
case CV_16UC1: SET_IPP_RESIZE_PTR(16u,C1); break;
|
||||
case CV_16UC3: SET_IPP_RESIZE_PTR(16u,C3); break;
|
||||
case CV_16UC4: SET_IPP_RESIZE_PTR(16u,C4); break;
|
||||
case CV_16SC1: SET_IPP_RESIZE_PTR(16s,C1); break;
|
||||
case CV_16SC3: SET_IPP_RESIZE_PTR(16s,C3); break;
|
||||
case CV_16SC4: SET_IPP_RESIZE_PTR(16s,C4); break;
|
||||
case CV_32FC1: SET_IPP_RESIZE_PTR(32f,C1); break;
|
||||
case CV_32FC3: SET_IPP_RESIZE_PTR(32f,C3); break;
|
||||
case CV_32FC4: SET_IPP_RESIZE_PTR(32f,C4); break;
|
||||
case CV_64FC1: SET_IPP_RESIZE_LINEAR_FUNC_64_PTR(64f,C1); break;
|
||||
case CV_64FC3: SET_IPP_RESIZE_LINEAR_FUNC_64_PTR(64f,C3); break;
|
||||
case CV_64FC4: SET_IPP_RESIZE_LINEAR_FUNC_64_PTR(64f,C4); break;
|
||||
default: { *ok = false; return;} break;
|
||||
}
|
||||
}
|
||||
switch (type)
|
||||
{
|
||||
case CV_8UC1: SET_IPP_RESIZE_PTR(8u,C1); break;
|
||||
case CV_8UC3: SET_IPP_RESIZE_PTR(8u,C3); break;
|
||||
case CV_8UC4: SET_IPP_RESIZE_PTR(8u,C4); break;
|
||||
case CV_16UC1: SET_IPP_RESIZE_PTR(16u,C1); break;
|
||||
case CV_16UC3: SET_IPP_RESIZE_PTR(16u,C3); break;
|
||||
case CV_16UC4: SET_IPP_RESIZE_PTR(16u,C4); break;
|
||||
case CV_16SC1: SET_IPP_RESIZE_PTR(16s,C1); break;
|
||||
case CV_16SC3: SET_IPP_RESIZE_PTR(16s,C3); break;
|
||||
case CV_16SC4: SET_IPP_RESIZE_PTR(16s,C4); break;
|
||||
case CV_32FC1: SET_IPP_RESIZE_PTR(32f,C1); break;
|
||||
case CV_32FC3: SET_IPP_RESIZE_PTR(32f,C3); break;
|
||||
case CV_32FC4: SET_IPP_RESIZE_PTR(32f,C4); break;
|
||||
case CV_64FC1: SET_IPP_RESIZE_LINEAR_FUNC_64_PTR(64f,C1); break;
|
||||
case CV_64FC3: SET_IPP_RESIZE_LINEAR_FUNC_64_PTR(64f,C3); break;
|
||||
case CV_64FC4: SET_IPP_RESIZE_LINEAR_FUNC_64_PTR(64f,C4); break;
|
||||
default: { *ok = false; return; } break;
|
||||
}
|
||||
}
|
||||
|
||||
~IPPresizeInvoker()
|
||||
{
|
||||
}
|
||||
~IPPresizeInvoker()
|
||||
{
|
||||
}
|
||||
|
||||
virtual void operator() (const Range& range) const
|
||||
{
|
||||
if (*ok == false) return;
|
||||
virtual void operator() (const Range& range) const
|
||||
{
|
||||
if (*ok == false)
|
||||
return;
|
||||
|
||||
int cn = src.channels();
|
||||
int dsty = min(cvRound(range.start * inv_scale_y), dst.rows);
|
||||
int dstwidth = min(cvRound(src.cols * inv_scale_x), dst.cols);
|
||||
int dstheight = min(cvRound(range.end * inv_scale_y), dst.rows);
|
||||
int cn = src.channels();
|
||||
int dsty = min(cvRound(range.start * inv_scale_y), dst.rows);
|
||||
int dstwidth = min(cvRound(src.cols * inv_scale_x), dst.cols);
|
||||
int dstheight = min(cvRound(range.end * inv_scale_y), dst.rows);
|
||||
|
||||
IppiPoint dstOffset = { 0, dsty }, srcOffset = {0, 0};
|
||||
IppiSize dstSize = { dstwidth, dstheight - dsty };
|
||||
int bufsize = 0, itemSize = (int)src.elemSize1();
|
||||
IppiPoint dstOffset = { 0, dsty }, srcOffset = {0, 0};
|
||||
IppiSize dstSize = { dstwidth, dstheight - dsty };
|
||||
int bufsize = 0, itemSize = (int)src.elemSize1();
|
||||
|
||||
CHECK_IPP_STATUS(getBufferSizeFunc(pSpec, dstSize, cn, &bufsize));
|
||||
CHECK_IPP_STATUS(getSrcOffsetFunc(pSpec, dstOffset, &srcOffset));
|
||||
CHECK_IPP_STATUS(getBufferSizeFunc(pSpec, dstSize, cn, &bufsize));
|
||||
CHECK_IPP_STATUS(getSrcOffsetFunc(pSpec, dstOffset, &srcOffset));
|
||||
|
||||
Ipp8u* pSrc = (Ipp8u*)src.data + (int)src.step[0] * srcOffset.y + srcOffset.x * cn * itemSize;
|
||||
Ipp8u* pDst = (Ipp8u*)dst.data + (int)dst.step[0] * dstOffset.y + dstOffset.x * cn * itemSize;
|
||||
Ipp8u* pSrc = (Ipp8u*)src.data + (int)src.step[0] * srcOffset.y + srcOffset.x * cn * itemSize;
|
||||
Ipp8u* pDst = (Ipp8u*)dst.data + (int)dst.step[0] * dstOffset.y + dstOffset.x * cn * itemSize;
|
||||
|
||||
AutoBuffer<uchar> buf(bufsize + 64);
|
||||
uchar* bufptr = alignPtr((uchar*)buf, 32);
|
||||
AutoBuffer<uchar> buf(bufsize + 64);
|
||||
uchar* bufptr = alignPtr((uchar*)buf, 32);
|
||||
|
||||
if( func( pSrc, (int)src.step[0], pDst, (int)dst.step[0], dstOffset, dstSize, ippBorderRepl, 0, pSpec, bufptr ) < 0 )
|
||||
*ok = false;
|
||||
}
|
||||
|
||||
if( func( pSrc, (int)src.step[0], pDst, (int)dst.step[0], dstOffset, dstSize, ippBorderRepl, 0, pSpec, bufptr ) < 0 )
|
||||
*ok = false;
|
||||
}
|
||||
private:
|
||||
Mat &src;
|
||||
Mat &dst;
|
||||
Mat & src;
|
||||
Mat & dst;
|
||||
double inv_scale_x;
|
||||
double inv_scale_y;
|
||||
void *pSpec;
|
||||
@ -1993,12 +1995,13 @@ private:
|
||||
bool *ok;
|
||||
const IPPresizeInvoker& operator= (const IPPresizeInvoker&);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
|
||||
static void ocl_computeResizeAreaTabs(int ssize, int dsize, double scale, int * const map_tab,
|
||||
float * const alpha_tab, int * const ofs_tab)
|
||||
float * const alpha_tab, int * const ofs_tab)
|
||||
{
|
||||
int k = 0, dx = 0;
|
||||
for ( ; dx < dsize; dx++)
|
||||
@ -2049,8 +2052,16 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
|
||||
{
|
||||
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
|
||||
|
||||
double inv_fx = 1. / fx, inv_fy = 1. / fy;
|
||||
double inv_fx = 1.0 / fx, inv_fy = 1.0 / fy;
|
||||
float inv_fxf = (float)inv_fx, inv_fyf = (float)inv_fy;
|
||||
int iscale_x = saturate_cast<int>(inv_fx), iscale_y = saturate_cast<int>(inv_fx);
|
||||
bool is_area_fast = std::abs(inv_fx - iscale_x) < DBL_EPSILON &&
|
||||
std::abs(inv_fy - iscale_y) < DBL_EPSILON;
|
||||
|
||||
// in case of scale_x && scale_y is equal to 2
|
||||
// INTER_AREA (fast) also is equal to INTER_LINEAR
|
||||
if( interpolation == INTER_LINEAR && is_area_fast && iscale_x == 2 && iscale_y == 2 )
|
||||
/*interpolation = INTER_AREA*/(void)0; // INTER_AREA is slower
|
||||
|
||||
if( !(cn <= 4 &&
|
||||
(interpolation == INTER_NEAREST || interpolation == INTER_LINEAR ||
|
||||
@ -2061,39 +2072,105 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
|
||||
_dst.create(dsize, type);
|
||||
UMat dst = _dst.getUMat();
|
||||
|
||||
Size ssize = src.size();
|
||||
ocl::Kernel k;
|
||||
size_t globalsize[] = { dst.cols, dst.rows };
|
||||
|
||||
if (interpolation == INTER_LINEAR)
|
||||
{
|
||||
int wdepth = std::max(depth, CV_32S);
|
||||
int wtype = CV_MAKETYPE(wdepth, cn);
|
||||
char buf[2][32];
|
||||
k.create("resizeLN", ocl::imgproc::resize_oclsrc,
|
||||
format("-D INTER_LINEAR -D depth=%d -D PIXTYPE=%s -D PIXTYPE1=%s "
|
||||
"-D WORKTYPE=%s -D convertToWT=%s -D convertToDT=%s -D cn=%d",
|
||||
depth, ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype),
|
||||
ocl::convertTypeStr(depth, wdepth, cn, buf[0]),
|
||||
ocl::convertTypeStr(wdepth, depth, cn, buf[1]),
|
||||
cn));
|
||||
|
||||
// integer path is slower because of CPU part, so it's disabled
|
||||
if (depth == CV_8U && ((void)0, 0))
|
||||
{
|
||||
AutoBuffer<uchar> _buffer((dsize.width + dsize.height)*(sizeof(int) + sizeof(short)*2));
|
||||
int* xofs = (int*)(uchar*)_buffer, * yofs = xofs + dsize.width;
|
||||
short* ialpha = (short*)(yofs + dsize.height), * ibeta = ialpha + dsize.width*2;
|
||||
float fxx, fyy;
|
||||
int sx, sy;
|
||||
|
||||
for (int dx = 0; dx < dsize.width; dx++)
|
||||
{
|
||||
fxx = (float)((dx+0.5)*inv_fx - 0.5);
|
||||
sx = cvFloor(fxx);
|
||||
fxx -= sx;
|
||||
|
||||
if (sx < 0)
|
||||
fxx = 0, sx = 0;
|
||||
|
||||
if (sx >= ssize.width-1)
|
||||
fxx = 0, sx = ssize.width-1;
|
||||
|
||||
xofs[dx] = sx;
|
||||
ialpha[dx*2 + 0] = saturate_cast<short>((1.f - fxx) * INTER_RESIZE_COEF_SCALE);
|
||||
ialpha[dx*2 + 1] = saturate_cast<short>(fxx * INTER_RESIZE_COEF_SCALE);
|
||||
}
|
||||
|
||||
for (int dy = 0; dy < dsize.height; dy++)
|
||||
{
|
||||
fyy = (float)((dy+0.5)*inv_fy - 0.5);
|
||||
sy = cvFloor(fyy);
|
||||
fyy -= sy;
|
||||
|
||||
yofs[dy] = sy;
|
||||
ibeta[dy*2 + 0] = saturate_cast<short>((1.f - fyy) * INTER_RESIZE_COEF_SCALE);
|
||||
ibeta[dy*2 + 1] = saturate_cast<short>(fyy * INTER_RESIZE_COEF_SCALE);
|
||||
}
|
||||
|
||||
int wdepth = std::max(depth, CV_32S), wtype = CV_MAKETYPE(wdepth, cn);
|
||||
UMat coeffs;
|
||||
Mat(1, static_cast<int>(_buffer.size()), CV_8UC1, (uchar *)_buffer).copyTo(coeffs);
|
||||
|
||||
k.create("resizeLN", ocl::imgproc::resize_oclsrc,
|
||||
format("-D INTER_LINEAR_INTEGER -D depth=%d -D T=%s -D T1=%s "
|
||||
"-D WT=%s -D convertToWT=%s -D convertToDT=%s -D cn=%d "
|
||||
"-D INTER_RESIZE_COEF_BITS=%d",
|
||||
depth, ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype),
|
||||
ocl::convertTypeStr(depth, wdepth, cn, buf[0]),
|
||||
ocl::convertTypeStr(wdepth, depth, cn, buf[1]),
|
||||
cn, INTER_RESIZE_COEF_BITS));
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
|
||||
ocl::KernelArg::PtrReadOnly(coeffs));
|
||||
}
|
||||
else
|
||||
{
|
||||
int wdepth = std::max(depth, CV_32S), wtype = CV_MAKETYPE(wdepth, cn);
|
||||
k.create("resizeLN", ocl::imgproc::resize_oclsrc,
|
||||
format("-D INTER_LINEAR -D depth=%d -D T=%s -D T1=%s "
|
||||
"-D WT=%s -D convertToWT=%s -D convertToDT=%s -D cn=%d "
|
||||
"-D INTER_RESIZE_COEF_BITS=%d",
|
||||
depth, ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype),
|
||||
ocl::convertTypeStr(depth, wdepth, cn, buf[0]),
|
||||
ocl::convertTypeStr(wdepth, depth, cn, buf[1]),
|
||||
cn, INTER_RESIZE_COEF_BITS));
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
|
||||
(float)inv_fx, (float)inv_fy);
|
||||
}
|
||||
}
|
||||
else if (interpolation == INTER_NEAREST)
|
||||
{
|
||||
k.create("resizeNN", ocl::imgproc::resize_oclsrc,
|
||||
format("-D INTER_NEAREST -D PIXTYPE=%s -D PIXTYPE1=%s -D cn=%d",
|
||||
format("-D INTER_NEAREST -D T=%s -D T1=%s -D cn=%d",
|
||||
ocl::memopTypeToStr(type), ocl::memopTypeToStr(depth), cn));
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
|
||||
(float)inv_fx, (float)inv_fy);
|
||||
}
|
||||
else if (interpolation == INTER_AREA)
|
||||
{
|
||||
int iscale_x = saturate_cast<int>(inv_fx);
|
||||
int iscale_y = saturate_cast<int>(inv_fy);
|
||||
bool is_area_fast = std::abs(inv_fx - iscale_x) < DBL_EPSILON &&
|
||||
std::abs(inv_fy - iscale_y) < DBL_EPSILON;
|
||||
int wdepth = std::max(depth, is_area_fast ? CV_32S : CV_32F);
|
||||
int wtype = CV_MAKE_TYPE(wdepth, cn);
|
||||
|
||||
char cvt[2][40];
|
||||
String buildOption = format("-D INTER_AREA -D PIXTYPE=%s -D PIXTYPE1=%s -D WTV=%s -D convertToWTV=%s -D cn=%d",
|
||||
String buildOption = format("-D INTER_AREA -D T=%s -D T1=%s -D WTV=%s -D convertToWTV=%s -D cn=%d",
|
||||
ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype),
|
||||
ocl::convertTypeStr(depth, wdepth, cn, cvt[0]), cn);
|
||||
|
||||
@ -2103,7 +2180,7 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
|
||||
if (is_area_fast)
|
||||
{
|
||||
int wdepth2 = std::max(CV_32F, depth), wtype2 = CV_MAKE_TYPE(wdepth2, cn);
|
||||
buildOption = buildOption + format(" -D convertToPIXTYPE=%s -D WT2V=%s -D convertToWT2V=%s -D INTER_AREA_FAST"
|
||||
buildOption = buildOption + format(" -D convertToT=%s -D WT2V=%s -D convertToWT2V=%s -D INTER_AREA_FAST"
|
||||
" -D XSCALE=%d -D YSCALE=%d -D SCALE=%ff",
|
||||
ocl::convertTypeStr(wdepth2, depth, cn, cvt[0]),
|
||||
ocl::typeToStr(wtype2), ocl::convertTypeStr(wdepth, wdepth2, cn, cvt[1]),
|
||||
@ -2126,12 +2203,11 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
|
||||
}
|
||||
else
|
||||
{
|
||||
buildOption = buildOption + format(" -D convertToPIXTYPE=%s", ocl::convertTypeStr(wdepth, depth, cn, cvt[0]));
|
||||
buildOption = buildOption + format(" -D convertToT=%s", ocl::convertTypeStr(wdepth, depth, cn, cvt[0]));
|
||||
k.create("resizeAREA", ocl::imgproc::resize_oclsrc, buildOption);
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
Size ssize = src.size();
|
||||
int xytab_size = (ssize.width + ssize.height) << 1;
|
||||
int tabofs_size = dsize.height + dsize.width + 2;
|
||||
|
||||
@ -2161,11 +2237,6 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize,
|
||||
return k.run(2, globalsize, NULL, false);
|
||||
}
|
||||
|
||||
if( k.empty() )
|
||||
return false;
|
||||
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
|
||||
(float)inv_fx, (float)inv_fy);
|
||||
|
||||
return k.run(2, globalsize, 0, false);
|
||||
}
|
||||
|
||||
|
34
modules/imgproc/src/opencl/laplacian5.cl
Normal file
@ -0,0 +1,34 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2014, Itseez, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.

#define noconvert

__kernel void sumConvert(__global const uchar * src1ptr, int src1_step, int src1_offset,
                         __global const uchar * src2ptr, int src2_step, int src2_offset,
                         __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,
                         coeffT scale, coeffT delta)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (y < dst_rows && x < dst_cols)
    {
        int src1_index = mad24(y, src1_step, mad24(x, (int)sizeof(srcT), src1_offset));
        int src2_index = mad24(y, src2_step, mad24(x, (int)sizeof(srcT), src2_offset));
        int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT), dst_offset));

        __global const srcT * src1 = (__global const srcT *)(src1ptr + src1_index);
        __global const srcT * src2 = (__global const srcT *)(src2ptr + src2_index);
        __global dstT * dst = (__global dstT *)(dstptr + dst_index);

#if wdepth <= 4
        dst[0] = convertToDT( mad24((WT)(scale), convertToWT(src1[0]) + convertToWT(src2[0]), (WT)(delta)) );
#else
        dst[0] = convertToDT( mad((WT)(scale), convertToWT(src1[0]) + convertToWT(src2[0]), (WT)(delta)) );
#endif
    }
}
@ -43,110 +43,140 @@
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if defined DOUBLE_SUPPORT
|
||||
#ifdef DOUBLE_SUPPORT
|
||||
#ifdef cl_amd_fp64
|
||||
#pragma OPENCL EXTENSION cl_amd_fp64:enable
|
||||
#elif defined (cl_khr_fp64)
|
||||
#pragma OPENCL EXTENSION cl_khr_fp64:enable
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define INTER_RESIZE_COEF_BITS 11
|
||||
#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS)
|
||||
#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1)
|
||||
#define INC(x,l) min(x+1,l-1)
|
||||
|
||||
|
||||
#define noconvert(x) (x)
|
||||
#define noconvert
|
||||
|
||||
#if cn != 3
|
||||
#define loadpix(addr) *(__global const PIXTYPE*)(addr)
|
||||
#define storepix(val, addr) *(__global PIXTYPE*)(addr) = val
|
||||
#define PIXSIZE ((int)sizeof(PIXTYPE))
|
||||
#define loadpix(addr) *(__global const T *)(addr)
|
||||
#define storepix(val, addr) *(__global T *)(addr) = val
|
||||
#define TSIZE (int)sizeof(T)
|
||||
#else
|
||||
#define loadpix(addr) vload3(0, (__global const PIXTYPE1*)(addr))
|
||||
#define storepix(val, addr) vstore3(val, 0, (__global PIXTYPE1*)(addr))
|
||||
#define PIXSIZE ((int)sizeof(PIXTYPE1)*3)
|
||||
#define loadpix(addr) vload3(0, (__global const T1 *)(addr))
|
||||
#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))
|
||||
#define TSIZE (int)sizeof(T1)*cn
|
||||
#endif
|
||||
|
||||
#if defined INTER_LINEAR
|
||||
#ifdef INTER_LINEAR_INTEGER
|
||||
|
||||
__kernel void resizeLN(__global const uchar* srcptr, int srcstep, int srcoffset,
|
||||
int srcrows, int srccols,
|
||||
__global uchar* dstptr, int dststep, int dstoffset,
|
||||
int dstrows, int dstcols,
|
||||
__kernel void resizeLN(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,
|
||||
__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,
|
||||
__global const uchar * buffer)
|
||||
{
|
||||
int dx = get_global_id(0);
|
||||
int dy = get_global_id(1);
|
||||
|
||||
if (dx < dst_cols && dy < dst_rows)
|
||||
{
|
||||
__global const int * xofs = (__global const int *)(buffer), * yofs = xofs + dst_cols;
|
||||
__global const short * ialpha = (__global const short *)(yofs + dst_rows);
|
||||
__global const short * ibeta = ialpha + ((dst_cols + dy) << 1);
|
||||
ialpha += dx << 1;
|
||||
|
||||
int sx0 = xofs[dx], sy0 = clamp(yofs[dy], 0, src_rows - 1),
|
||||
sy1 = clamp(yofs[dy] + 1, 0, src_rows - 1);
|
||||
short a0 = ialpha[0], a1 = ialpha[1];
|
||||
short b0 = ibeta[0], b1 = ibeta[1];
|
||||
|
||||
int src_index0 = mad24(sy0, src_step, mad24(sx0, TSIZE, src_offset)),
|
||||
src_index1 = mad24(sy1, src_step, mad24(sx0, TSIZE, src_offset));
|
||||
WT data0 = convertToWT(loadpix(srcptr + src_index0));
|
||||
WT data1 = convertToWT(loadpix(srcptr + src_index0 + TSIZE));
|
||||
WT data2 = convertToWT(loadpix(srcptr + src_index1));
|
||||
WT data3 = convertToWT(loadpix(srcptr + src_index1 + TSIZE));
|
||||
|
||||
WT val = ( (((data0 * a0 + data1 * a1) >> 4) * b0) >> 16) +
|
||||
( (((data2 * a0 + data3 * a1) >> 4) * b1) >> 16);
|
||||
|
||||
storepix(convertToDT((val + 2) >> 2),
|
||||
dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset)));
|
||||
}
|
||||
}
|
||||
|
||||
#elif defined INTER_LINEAR
|
||||
|
||||
__kernel void resizeLN(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,
|
||||
__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,
|
||||
float ifx, float ify)
|
||||
{
|
||||
int dx = get_global_id(0);
|
||||
int dy = get_global_id(1);
|
||||
|
||||
float sx = ((dx+0.5f) * ifx - 0.5f), sy = ((dy+0.5f) * ify - 0.5f);
|
||||
int x = floor(sx), y = floor(sy);
|
||||
if (dx < dst_cols && dy < dst_rows)
|
||||
{
|
||||
float sx = ((dx+0.5f) * ifx - 0.5f), sy = ((dy+0.5f) * ify - 0.5f);
|
||||
int x = floor(sx), y = floor(sy);
|
||||
|
||||
float u = sx - x, v = sy - y;
|
||||
float u = sx - x, v = sy - y;
|
||||
|
||||
if ( x<0 ) x=0,u=0;
|
||||
if ( x>=srccols ) x=srccols-1,u=0;
|
||||
if ( y<0 ) y=0,v=0;
|
||||
if ( y>=srcrows ) y=srcrows-1,v=0;
|
||||
if ( x<0 ) x=0,u=0;
|
||||
if ( x>=src_cols ) x=src_cols-1,u=0;
|
||||
if ( y<0 ) y=0,v=0;
|
||||
if ( y>=src_rows ) y=src_rows-1,v=0;
|
||||
|
||||
int y_ = INC(y,srcrows);
|
||||
int x_ = INC(x,srccols);
|
||||
int y_ = INC(y, src_rows);
|
||||
int x_ = INC(x, src_cols);
|
||||
|
||||
#if depth <= 4
|
||||
u = u * INTER_RESIZE_COEF_SCALE;
|
||||
v = v * INTER_RESIZE_COEF_SCALE;
|
||||
|
||||
u = u * INTER_RESIZE_COEF_SCALE;
|
||||
v = v * INTER_RESIZE_COEF_SCALE;
|
||||
int U = rint(u);
|
||||
int V = rint(v);
|
||||
int U1 = rint(INTER_RESIZE_COEF_SCALE - u);
|
||||
int V1 = rint(INTER_RESIZE_COEF_SCALE - v);
|
||||
|
||||
int U = rint(u);
|
||||
int V = rint(v);
|
||||
int U1 = rint(INTER_RESIZE_COEF_SCALE - u);
|
||||
int V1 = rint(INTER_RESIZE_COEF_SCALE - v);
|
||||
WT data0 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x, TSIZE, src_offset))));
|
||||
WT data1 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x_, TSIZE, src_offset))));
|
||||
WT data2 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x, TSIZE, src_offset))));
|
||||
WT data3 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x_, TSIZE, src_offset))));
|
||||
|
||||
WORKTYPE data0 = convertToWT(loadpix(srcptr + mad24(y, srcstep, srcoffset + x*PIXSIZE)));
|
||||
WORKTYPE data1 = convertToWT(loadpix(srcptr + mad24(y, srcstep, srcoffset + x_*PIXSIZE)));
|
||||
WORKTYPE data2 = convertToWT(loadpix(srcptr + mad24(y_, srcstep, srcoffset + x*PIXSIZE)));
|
||||
WORKTYPE data3 = convertToWT(loadpix(srcptr + mad24(y_, srcstep, srcoffset + x_*PIXSIZE)));
|
||||
|
||||
WORKTYPE val = mul24((WORKTYPE)mul24(U1, V1), data0) + mul24((WORKTYPE)mul24(U, V1), data1) +
|
||||
mul24((WORKTYPE)mul24(U1, V), data2) + mul24((WORKTYPE)mul24(U, V), data3);
|
||||
|
||||
PIXTYPE uval = convertToDT((val + (1<<(CAST_BITS-1)))>>CAST_BITS);
|
||||
WT val = mul24((WT)mul24(U1, V1), data0) + mul24((WT)mul24(U, V1), data1) +
|
||||
mul24((WT)mul24(U1, V), data2) + mul24((WT)mul24(U, V), data3);
|
||||
|
||||
T uval = convertToDT((val + (1<<(CAST_BITS-1)))>>CAST_BITS);
|
||||
#else
|
||||
float u1 = 1.f - u;
|
||||
float v1 = 1.f - v;
|
||||
WORKTYPE data0 = convertToWT(loadpix(srcptr + mad24(y, srcstep, srcoffset + x*PIXSIZE)));
|
||||
WORKTYPE data1 = convertToWT(loadpix(srcptr + mad24(y, srcstep, srcoffset + x_*PIXSIZE)));
|
||||
WORKTYPE data2 = convertToWT(loadpix(srcptr + mad24(y_, srcstep, srcoffset + x*PIXSIZE)));
|
||||
WORKTYPE data3 = convertToWT(loadpix(srcptr + mad24(y_, srcstep, srcoffset + x_*PIXSIZE)));
|
||||
|
||||
PIXTYPE uval = u1 * v1 * data0 + u * v1 * data1 + u1 * v *data2 + u * v *data3;
|
||||
float u1 = 1.f - u;
|
||||
float v1 = 1.f - v;
|
||||
WT data0 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x, TSIZE, src_offset))));
|
||||
WT data1 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x_, TSIZE, src_offset))));
|
||||
WT data2 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x, TSIZE, src_offset))));
|
||||
WT data3 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x_, TSIZE, src_offset))));
|
||||
|
||||
T uval = u1 * v1 * data0 + u * v1 * data1 + u1 * v *data2 + u * v *data3;
|
||||
#endif
|
||||
|
||||
if(dx < dstcols && dy < dstrows)
|
||||
{
|
||||
storepix(uval, dstptr + mad24(dy, dststep, dstoffset + dx*PIXSIZE));
|
||||
storepix(uval, dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset)));
|
||||
}
|
||||
}
|
||||
|
||||
#elif defined INTER_NEAREST
|
||||
|
||||
__kernel void resizeNN(__global const uchar* srcptr, int srcstep, int srcoffset,
|
||||
int srcrows, int srccols,
|
||||
__global uchar* dstptr, int dststep, int dstoffset,
|
||||
int dstrows, int dstcols,
|
||||
__kernel void resizeNN(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,
|
||||
__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,
|
||||
float ifx, float ify)
|
||||
{
|
||||
int dx = get_global_id(0);
|
||||
int dy = get_global_id(1);
|
||||
|
||||
if( dx < dstcols && dy < dstrows )
|
||||
if (dx < dst_cols && dy < dst_rows)
|
||||
{
|
||||
float s1 = dx*ifx;
|
||||
float s2 = dy*ify;
|
||||
int sx = min(convert_int_rtz(s1), srccols-1);
|
||||
int sy = min(convert_int_rtz(s2), srcrows-1);
|
||||
float s1 = dx * ifx;
|
||||
float s2 = dy * ify;
|
||||
int sx = min(convert_int_rtz(s1), src_cols - 1);
|
||||
int sy = min(convert_int_rtz(s2), src_rows - 1);
|
||||
|
||||
storepix(loadpix(srcptr + mad24(sy, srcstep, srcoffset + sx*PIXSIZE)),
|
||||
dstptr + mad24(dy, dststep, dstoffset + dx*PIXSIZE));
|
||||
storepix(loadpix(srcptr + mad24(sy, src_step, mad24(sx, TSIZE, src_offset))),
|
||||
dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset)));
|
||||
}
|
||||
}
|
||||
|
||||
@ -179,10 +209,10 @@ __kernel void resizeAREA_FAST(__global const uchar * src, int src_step, int src_
|
||||
int src_index = mad24(symap_tab[y + sy], src_step, src_offset);
|
||||
#pragma unroll
|
||||
for (int x = 0; x < XSCALE; ++x)
|
||||
sum += convertToWTV(loadpix(src + src_index + sxmap_tab[sx + x]*PIXSIZE));
|
||||
sum += convertToWTV(loadpix(src + mad24(sxmap_tab[sx + x], TSIZE, src_index)));
|
||||
}
|
||||
|
||||
storepix(convertToPIXTYPE(convertToWT2V(sum) * (WT2V)(SCALE)), dst + dst_index + dx*PIXSIZE);
|
||||
storepix(convertToT(convertToWT2V(sum) * (WT2V)(SCALE)), dst + mad24(dx, TSIZE, dst_index));
|
||||
}
|
||||
}
|
||||
|
||||
@ -224,12 +254,12 @@ __kernel void resizeAREA(__global const uchar * src, int src_step, int src_offse
|
||||
for (int sx = sx0, xk = xk0; sx <= sx1; ++sx, ++xk)
|
||||
{
|
||||
WTV alpha = (WTV)(xalpha_tab[xk]);
|
||||
buf += convertToWTV(loadpix(src + src_index + sx*PIXSIZE)) * alpha;
|
||||
buf += convertToWTV(loadpix(src + mad24(sx, TSIZE, src_index))) * alpha;
|
||||
}
|
||||
sum += buf * beta;
|
||||
}
|
||||
|
||||
storepix(convertToPIXTYPE(sum), dst + dst_index + dx*PIXSIZE);
|
||||
storepix(convertToT(sum), dst + mad24(dx, TSIZE, dst_index));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -316,7 +316,7 @@ OCL_INSTANTIATE_TEST_CASE_P(Filter, Bilateral, Combine(
|
||||
|
||||
OCL_INSTANTIATE_TEST_CASE_P(Filter, LaplacianTest, Combine(
|
||||
FILTER_TYPES,
|
||||
Values(1, 3), // kernel size
|
||||
Values(1, 3, 5), // kernel size
|
||||
Values(Size(0, 0)), // not used
|
||||
FILTER_BORDER_SET_NO_WRAP_NO_ISOLATED,
|
||||
Values(1.0, 0.2, 3.0), // kernel scale
|
||||
|
@ -210,12 +210,15 @@ OCL_TEST_P(Resize, Mat)
|
||||
{
|
||||
for (int j = 0; j < test_loop_times; j++)
|
||||
{
|
||||
int depth = CV_MAT_DEPTH(type);
|
||||
double eps = depth <= CV_32S ? 1 : 1e-2;
|
||||
|
||||
random_roi();
|
||||
|
||||
OCL_OFF(cv::resize(src_roi, dst_roi, Size(), fx, fy, interpolation));
|
||||
OCL_ON(cv::resize(usrc_roi, udst_roi, Size(), fx, fy, interpolation));
|
||||
|
||||
Near(1.0);
|
||||
Near(eps);
|
||||
}
|
||||
}
|
||||
|
||||
@ -328,8 +331,8 @@ OCL_INSTANTIATE_TEST_CASE_P(ImgprocWarp, WarpPerspective, Combine(
|
||||
|
||||
OCL_INSTANTIATE_TEST_CASE_P(ImgprocWarp, Resize, Combine(
|
||||
Values(CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, CV_32FC4),
|
||||
Values(0.5, 1.5, 2.0),
|
||||
Values(0.5, 1.5, 2.0),
|
||||
Values(0.5, 1.5, 2.0, 0.2),
|
||||
Values(0.5, 1.5, 2.0, 0.2),
|
||||
Values((Interpolation)INTER_NEAREST, (Interpolation)INTER_LINEAR),
|
||||
Bool()));
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import print_function
|
||||
import os, sys, re, string, fnmatch
|
||||
allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
|
||||
verbose = False
|
||||
@ -141,10 +142,10 @@ class RstParser(object):
|
||||
def parse_section_safe(self, module_name, section_name, file_name, lineno, lines):
|
||||
try:
|
||||
self.parse_section(module_name, section_name, file_name, lineno, lines)
|
||||
except AssertionError, args:
|
||||
except AssertionError as args:
|
||||
if show_errors:
|
||||
print >> sys.stderr, "RST parser error E%03d: assertion in \"%s\" at %s:%s" % (ERROR_001_SECTIONFAILURE, section_name, file_name, lineno)
|
||||
print >> sys.stderr, " Details: %s" % args
|
||||
print("RST parser error E%03d: assertion in \"%s\" at %s:%s" % (ERROR_001_SECTIONFAILURE, section_name, file_name, lineno), file=sys.stderr)
|
||||
print(" Details: %s" % args, file=sys.stderr)
|
||||
|
||||
def parse_section(self, module_name, section_name, file_name, lineno, lines):
|
||||
self.sections_total += 1
|
||||
@ -152,7 +153,7 @@ class RstParser(object):
|
||||
#if section_name.find(" ") >= 0 and section_name.find("::operator") < 0:
|
||||
if (section_name.find(" ") >= 0 and not bool(re.match(r"(\w+::)*operator\s*(\w+|>>|<<|\(\)|->|\+\+|--|=|==|\+=|-=)", section_name)) ) or section_name.endswith(":"):
|
||||
if show_errors:
|
||||
print >> sys.stderr, "RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno)
|
||||
print("RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno), file=sys.stderr)
|
||||
self.sections_skipped += 1
|
||||
return
|
||||
|
||||
@ -311,7 +312,7 @@ class RstParser(object):
|
||||
|
||||
if fdecl.balance != 0:
|
||||
if show_critical_errors:
|
||||
print >> sys.stderr, "RST parser error E%03d: invalid parentheses balance in \"%s\" at %s:%s" % (ERROR_003_PARENTHESES, section_name, file_name, lineno)
|
||||
print("RST parser error E%03d: invalid parentheses balance in \"%s\" at %s:%s" % (ERROR_003_PARENTHESES, section_name, file_name, lineno), file=sys.stderr)
|
||||
return
|
||||
|
||||
# save last parameter if needed
|
||||
@ -328,7 +329,7 @@ class RstParser(object):
|
||||
elif func:
|
||||
if func["name"] in known_text_sections_names:
|
||||
if show_errors:
|
||||
print >> sys.stderr, "RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno)
|
||||
print("RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno), file=sys.stderr)
|
||||
self.sections_skipped += 1
|
||||
elif show_errors:
|
||||
self.print_info(func, True, sys.stderr)
|
||||
@ -351,7 +352,7 @@ class RstParser(object):
|
||||
if l.find("\t") >= 0:
|
||||
whitespace_warnings += 1
|
||||
if whitespace_warnings <= max_whitespace_warnings and show_warnings:
|
||||
print >> sys.stderr, "RST parser warning W%03d: tab symbol instead of space is used at %s:%s" % (WARNING_004_TABS, doc, lineno)
|
||||
print("RST parser warning W%03d: tab symbol instead of space is used at %s:%s" % (WARNING_004_TABS, doc, lineno), file=sys.stderr)
|
||||
l = l.replace("\t", " ")
|
||||
|
||||
# handle first line
|
||||
@ -388,8 +389,8 @@ class RstParser(object):
|
||||
|
||||
def add_new_fdecl(self, func, decl):
|
||||
if decl.fdecl.endswith(";"):
|
||||
print >> sys.stderr, "RST parser error E%03d: unexpected semicolon at the end of declaration in \"%s\" at %s:%s" \
|
||||
% (ERROR_011_EOLEXPECTED, func["name"], func["file"], func["line"])
|
||||
print("RST parser error E%03d: unexpected semicolon at the end of declaration in \"%s\" at %s:%s" \
|
||||
% (ERROR_011_EOLEXPECTED, func["name"], func["file"], func["line"]), file=sys.stderr)
|
||||
decls = func.get("decls", [])
|
||||
if (decl.lang == "C++" or decl.lang == "C"):
|
||||
rst_decl = self.cpp_parser.parse_func_decl_no_wrap(decl.fdecl)
|
||||
@ -405,37 +406,37 @@ class RstParser(object):
|
||||
if show_errors:
|
||||
#check black_list
|
||||
if decl.name not in params_blacklist.get(func["name"], []):
|
||||
print >> sys.stderr, "RST parser error E%03d: redefinition of parameter \"%s\" in \"%s\" at %s:%s" \
|
||||
% (ERROR_005_REDEFENITIONPARAM, decl.name, func["name"], func["file"], func["line"])
|
||||
print("RST parser error E%03d: redefinition of parameter \"%s\" in \"%s\" at %s:%s" \
|
||||
% (ERROR_005_REDEFENITIONPARAM, decl.name, func["name"], func["file"], func["line"]), file=sys.stderr)
|
||||
else:
|
||||
params[decl.name] = decl.comment
|
||||
func["params"] = params
|
||||
|
||||
def print_info(self, func, skipped=False, out = sys.stdout):
|
||||
print >> out
|
||||
print(file=out)
|
||||
if skipped:
|
||||
print >> out, "SKIPPED DEFINITION:"
|
||||
print >> out, "name: %s" % (func.get("name","~empty~"))
|
||||
print >> out, "file: %s:%s" % (func.get("file","~empty~"), func.get("line","~empty~"))
|
||||
print >> out, "is class: %s" % func.get("isclass", False)
|
||||
print >> out, "is struct: %s" % func.get("isstruct", False)
|
||||
print >> out, "module: %s" % func.get("module","~unknown~")
|
||||
print >> out, "namespace: %s" % func.get("namespace", "~empty~")
|
||||
print >> out, "class: %s" % (func.get("class","~empty~"))
|
||||
print >> out, "method: %s" % (func.get("method","~empty~"))
|
||||
print >> out, "brief: %s" % (func.get("brief","~empty~"))
|
||||
print("SKIPPED DEFINITION:", file=out)
|
||||
print("name: %s" % (func.get("name","~empty~")), file=out)
|
||||
print("file: %s:%s" % (func.get("file","~empty~"), func.get("line","~empty~")), file=out)
|
||||
print("is class: %s" % func.get("isclass", False), file=out)
|
||||
print("is struct: %s" % func.get("isstruct", False), file=out)
|
||||
print("module: %s" % func.get("module","~unknown~"), file=out)
|
||||
print("namespace: %s" % func.get("namespace", "~empty~"), file=out)
|
||||
print("class: %s" % (func.get("class","~empty~")), file=out)
|
||||
print("method: %s" % (func.get("method","~empty~")), file=out)
|
||||
print("brief: %s" % (func.get("brief","~empty~")), file=out)
|
||||
if "decls" in func:
|
||||
print >> out, "declarations:"
|
||||
print("declarations:", file=out)
|
||||
for d in func["decls"]:
|
||||
print >> out, " %7s: %s" % (d[0], re.sub(r"[ ]+", " ", d[1]))
|
||||
print(" %7s: %s" % (d[0], re.sub(r"[ ]+", " ", d[1])), file=out)
|
||||
if "seealso" in func:
|
||||
print >> out, "seealso: ", func["seealso"]
|
||||
print("seealso: ", func["seealso"], file=out)
|
||||
if "params" in func:
|
||||
print >> out, "parameters:"
|
||||
print("parameters:", file=out)
|
||||
for name, comment in func["params"].items():
|
||||
print >> out, "%23s: %s" % (name, comment)
|
||||
print >> out, "long: %s" % (func.get("long","~empty~"))
|
||||
print >> out
|
||||
print("%23s: %s" % (name, comment), file=out)
|
||||
print("long: %s" % (func.get("long","~empty~")), file=out)
|
||||
print(file=out)
|
||||
|
||||
def validate(self, func):
|
||||
if func.get("decls", None) is None:
|
||||
@ -443,13 +444,13 @@ class RstParser(object):
|
||||
return False
|
||||
if func["name"] in self.definitions:
|
||||
if show_errors:
|
||||
print >> sys.stderr, "RST parser error E%03d: \"%s\" from: %s:%s is already documented at %s:%s" \
|
||||
% (ERROR_006_REDEFENITIONFUNC, func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"])
|
||||
print("RST parser error E%03d: \"%s\" from: %s:%s is already documented at %s:%s" \
|
||||
% (ERROR_006_REDEFENITIONFUNC, func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"]), file=sys.stderr)
|
||||
return False
|
||||
return self.validateParams(func)
|
||||
|
||||
def validateParams(self, func):
|
||||
documentedParams = func.get("params", {}).keys()
|
||||
documentedParams = list(func.get("params", {}).keys())
|
||||
params = []
|
||||
|
||||
for decl in func.get("decls", []):
|
||||
@ -464,13 +465,13 @@ class RstParser(object):
|
||||
# 1. all params are documented
|
||||
for p in params:
|
||||
if p not in documentedParams and show_warnings:
|
||||
print >> sys.stderr, "RST parser warning W%03d: parameter \"%s\" of \"%s\" is undocumented. %s:%s" % (WARNING_007_UNDOCUMENTEDPARAM, p, func["name"], func["file"], func["line"])
|
||||
print("RST parser warning W%03d: parameter \"%s\" of \"%s\" is undocumented. %s:%s" % (WARNING_007_UNDOCUMENTEDPARAM, p, func["name"], func["file"], func["line"]), file=sys.stderr)
|
||||
|
||||
# 2. only real params are documented
|
||||
for p in documentedParams:
|
||||
if p not in params and show_warnings:
|
||||
if p not in params_blacklist.get(func["name"], []):
|
||||
print >> sys.stderr, "RST parser warning W%03d: unexisting parameter \"%s\" of \"%s\" is documented at %s:%s" % (WARNING_008_MISSINGPARAM, p, func["name"], func["file"], func["line"])
|
||||
print("RST parser warning W%03d: unexisting parameter \"%s\" of \"%s\" is documented at %s:%s" % (WARNING_008_MISSINGPARAM, p, func["name"], func["file"], func["line"]), file=sys.stderr)
|
||||
return True
|
||||
|
||||
def normalize(self, func):
|
||||
@ -541,7 +542,7 @@ class RstParser(object):
|
||||
func["name"] = fname[4:]
|
||||
func["method"] = fname[4:]
|
||||
elif show_warnings:
|
||||
print >> sys.stderr, "RST parser warning W%03d: \"%s\" - section name is \"%s\" instead of \"%s\" at %s:%s" % (WARNING_009_HDRMISMATCH, fname, func["name"], fname[6:], func["file"], func["line"])
|
||||
print("RST parser warning W%03d: \"%s\" - section name is \"%s\" instead of \"%s\" at %s:%s" % (WARNING_009_HDRMISMATCH, fname, func["name"], fname[6:], func["file"], func["line"]), file=sys.stderr)
|
||||
#self.print_info(func)
|
||||
|
||||
def normalizeText(self, s):
|
||||
@ -632,11 +633,11 @@ class RstParser(object):
|
||||
return s
|
||||
|
||||
def printSummary(self):
|
||||
print "RST Parser Summary:"
|
||||
print " Total sections: %s" % self.sections_total
|
||||
print " Skipped sections: %s" % self.sections_skipped
|
||||
print " Parsed sections: %s" % self.sections_parsed
|
||||
print " Invalid sections: %s" % (self.sections_total - self.sections_parsed - self.sections_skipped)
|
||||
print("RST Parser Summary:")
|
||||
print(" Total sections: %s" % self.sections_total)
|
||||
print(" Skipped sections: %s" % self.sections_skipped)
|
||||
print(" Parsed sections: %s" % self.sections_parsed)
|
||||
print(" Invalid sections: %s" % (self.sections_total - self.sections_parsed - self.sections_skipped))
|
||||
|
||||
# statistic by language
|
||||
stat = {}
|
||||
@ -651,12 +652,12 @@ class RstParser(object):
|
||||
for decl in d.get("decls", []):
|
||||
stat[decl[0]] = stat.get(decl[0], 0) + 1
|
||||
|
||||
print
|
||||
print " classes documented: %s" % classes
|
||||
print " structs documented: %s" % structs
|
||||
print()
|
||||
print(" classes documented: %s" % classes)
|
||||
print(" structs documented: %s" % structs)
|
||||
for lang in sorted(stat.items()):
|
||||
print " %7s functions documented: %s" % lang
|
||||
print
|
||||
print(" %7s functions documented: %s" % lang)
|
||||
print()
|
||||
|
||||
def mathReplace2(match):
|
||||
m = mathReplace(match)
|
||||
@ -743,7 +744,7 @@ def mathReplace(match):
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print "Usage:\n", os.path.basename(sys.argv[0]), " <module path>"
|
||||
print("Usage:\n", os.path.basename(sys.argv[0]), " <module path>")
|
||||
exit(0)
|
||||
|
||||
if len(sys.argv) >= 3:
|
||||
@ -759,7 +760,7 @@ if __name__ == "__main__":
|
||||
module = sys.argv[1]
|
||||
|
||||
if module != "all" and not os.path.isdir(os.path.join(rst_parser_dir, "../../" + module)):
|
||||
print "RST parser error E%03d: module \"%s\" could not be found." % (ERROR_010_NOMODULE, module)
|
||||
print("RST parser error E%03d: module \"%s\" could not be found." % (ERROR_010_NOMODULE, module))
|
||||
exit(1)
|
||||
|
||||
parser = RstParser(hdr_parser.CppHeaderParser())
|
||||
|
@ -288,7 +288,7 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
|
||||
}
|
||||
|
||||
public Mat rgba() {
|
||||
Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2BGR_NV12, 4);
|
||||
Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2RGBA_NV21, 4);
|
||||
return mRgba;
|
||||
}
|
||||
|
||||
|
@ -21,7 +21,7 @@ def substitute(build, output_dir):
|
||||
# populate template
|
||||
populated = template.render(build=build, time=time)
|
||||
with open(os.path.join(output_dir, 'buildInformation.m'), 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
|
@ -22,7 +22,7 @@ def substitute(cv, output_dir):
|
||||
# populate template
|
||||
populated = template.render(cv=cv, time=time)
|
||||
with open(os.path.join(output_dir, 'mex.m'), 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
|
@ -1,5 +1,4 @@
|
||||
from textwrap import TextWrapper
|
||||
from string import split, join
|
||||
import re, os
|
||||
# precompile a URL matching regular expression
|
||||
urlexpr = re.compile(r"((https?):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.MULTILINE|re.UNICODE)
|
||||
@ -177,4 +176,4 @@ def comment(text, wrap=80, escape='% ', escape_first='', escape_last=''):
|
||||
escapn = '\n'+escape
|
||||
lines = text.split('\n')
|
||||
wlines = (tw.wrap(line) for line in lines)
|
||||
return escape_first+escape+join((join(line, escapn) for line in wlines), escapn)+escape_last
|
||||
return escape_first+escape+escapn.join(escapn.join(line) for line in wlines)+escape_last
|
||||
|
@ -1,4 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
import sys, re, os, time
|
||||
from string import Template
|
||||
from parse_tree import ParseTree, todict, constants
|
||||
from filters import *
|
||||
|
||||
class MatlabWrapperGenerator(object):
|
||||
"""
|
||||
@ -22,9 +26,14 @@ class MatlabWrapperGenerator(object):
|
||||
The output_dir specifies the directory to write the generated sources
|
||||
to.
|
||||
"""
|
||||
# dynamically import the parsers
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
import hdr_parser
|
||||
import rst_parser
|
||||
|
||||
# parse each of the files and store in a dictionary
|
||||
# as a separate "namespace"
|
||||
parser = CppHeaderParser()
|
||||
parser = hdr_parser.CppHeaderParser()
|
||||
rst = rst_parser.RstParser(parser)
|
||||
rst_parser.verbose = False
|
||||
rst_parser.show_warnings = False
|
||||
@ -91,13 +100,13 @@ class MatlabWrapperGenerator(object):
|
||||
output_class_dir = output_dir+'/+cv'
|
||||
output_map_dir = output_dir+'/map'
|
||||
if not os.path.isdir(output_source_dir):
|
||||
os.mkdir(output_source_dir)
|
||||
os.makedirs(output_source_dir)
|
||||
if not os.path.isdir(output_private_dir):
|
||||
os.mkdir(output_private_dir)
|
||||
os.makedirs(output_private_dir)
|
||||
if not os.path.isdir(output_class_dir):
|
||||
os.mkdir(output_class_dir)
|
||||
os.makedirs(output_class_dir)
|
||||
if not os.path.isdir(output_map_dir):
|
||||
os.mkdir(output_map_dir)
|
||||
os.makedirs(output_map_dir)
|
||||
|
||||
# populate templates
|
||||
for namespace in parse_tree.namespaces:
|
||||
@ -105,27 +114,27 @@ class MatlabWrapperGenerator(object):
|
||||
for method in namespace.methods:
|
||||
populated = tfunction.render(fun=method, time=time, includes=namespace.name)
|
||||
with open(output_source_dir+'/'+method.name+'.cpp', 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
if namespace.name in doc and method.name in doc[namespace.name]:
|
||||
populated = tdoc.render(fun=method, doc=doc[namespace.name][method.name], time=time)
|
||||
with open(output_class_dir+'/'+method.name+'.m', 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
# classes
|
||||
for clss in namespace.classes:
|
||||
# cpp converter
|
||||
populated = tclassc.render(clss=clss, time=time)
|
||||
with open(output_private_dir+'/'+clss.name+'Bridge.cpp', 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
# matlab classdef
|
||||
populated = tclassm.render(clss=clss, time=time)
|
||||
with open(output_class_dir+'/'+clss.name+'.m', 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
|
||||
# create a global constants lookup table
|
||||
const = dict(constants(todict(parse_tree.namespaces)))
|
||||
populated = tconst.render(constants=const, time=time)
|
||||
with open(output_dir+'/cv.m', 'wb') as f:
|
||||
f.write(populated)
|
||||
f.write(populated.encode('utf-8'))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
@ -168,7 +177,6 @@ if __name__ == "__main__":
|
||||
"""
|
||||
|
||||
# parse the input options
|
||||
import sys, re, os, time
|
||||
from argparse import ArgumentParser
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('--jinja2')
|
||||
@ -185,13 +193,6 @@ if __name__ == "__main__":
|
||||
sys.path.append(args.hdrparser)
|
||||
sys.path.append(args.rstparser)
|
||||
|
||||
from string import Template
|
||||
from hdr_parser import CppHeaderParser
|
||||
import rst_parser
|
||||
from parse_tree import ParseTree, todict, constants
|
||||
from filters import *
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
# create the generator
|
||||
mwg = MatlabWrapperGenerator()
|
||||
mwg.gen(args.moduleroot, args.modules, args.extra, args.outdir)
|
||||
|
@ -1,6 +1,12 @@
|
||||
from string import join
|
||||
import collections
|
||||
from textwrap import fill
|
||||
from filters import *
|
||||
try:
|
||||
# Python 2.7+
|
||||
basestring
|
||||
except NameError:
|
||||
# Python 3.3+
|
||||
basestring = str
|
||||
|
||||
class ParseTree(object):
|
||||
"""
|
||||
@ -74,7 +80,7 @@ class ParseTree(object):
|
||||
self.namespaces = namespaces if namespaces else []
|
||||
|
||||
def __str__(self):
|
||||
return join((ns.__str__() for ns in self.namespaces), '\n\n\n')
|
||||
return '\n\n\n'.join(ns.__str__() for ns in self.namespaces)
|
||||
|
||||
def build(self, namespaces):
|
||||
babel = Translator()
|
||||
@ -94,7 +100,7 @@ class ParseTree(object):
|
||||
constants.append(obj)
|
||||
else:
|
||||
raise TypeError('Unexpected object type: '+str(type(obj)))
|
||||
self.namespaces.append(Namespace(name, constants, class_tree.values(), methods))
|
||||
self.namespaces.append(Namespace(name, constants, list(class_tree.values()), methods))
|
||||
|
||||
def insertIntoClassTree(self, obj, class_tree):
|
||||
cname = obj.name if type(obj) is Class else obj.clss
|
||||
@ -208,9 +214,9 @@ class Namespace(object):
|
||||
|
||||
def __str__(self):
|
||||
return 'namespace '+self.name+' {\n\n'+\
|
||||
(join((c.__str__() for c in self.constants), '\n')+'\n\n' if self.constants else '')+\
|
||||
(join((f.__str__() for f in self.methods), '\n')+'\n\n' if self.methods else '')+\
|
||||
(join((o.__str__() for o in self.classes), '\n\n') if self.classes else '')+'\n};'
|
||||
('\n'.join(c.__str__() for c in self.constants)+'\n\n' if self.constants else '')+\
|
||||
('\n'.join(f.__str__() for f in self.methods)+'\n\n' if self.methods else '')+\
|
||||
('\n\n'.join(o.__str__() for o in self.classes) if self.classes else '')+'\n};'
|
||||
|
||||
class Class(object):
|
||||
"""
|
||||
@ -228,8 +234,8 @@ class Class(object):
|
||||
|
||||
def __str__(self):
|
||||
return 'class '+self.name+' {\n\t'+\
|
||||
(join((c.__str__() for c in self.constants), '\n\t')+'\n\n\t' if self.constants else '')+\
|
||||
(join((f.__str__() for f in self.methods), '\n\t') if self.methods else '')+'\n};'
|
||||
('\n\t'.join(c.__str__() for c in self.constants)+'\n\n\t' if self.constants else '')+\
|
||||
('\n\t'.join(f.__str__() for f in self.methods) if self.methods else '')+'\n};'
|
||||
|
||||
class Method(object):
|
||||
"""
|
||||
@ -260,7 +266,7 @@ class Method(object):
|
||||
|
||||
def __str__(self):
|
||||
return (self.rtp+' ' if self.rtp else '')+self.name+'('+\
|
||||
join((arg.__str__() for arg in self.req+self.opt), ', ')+\
|
||||
', '.join(arg.__str__() for arg in self.req+self.opt)+\
|
||||
')'+(' const' if self.const else '')+';'
|
||||
|
||||
class Argument(object):
|
||||
@ -334,23 +340,20 @@ def constants(tree):
|
||||
for gen in constants(val):
|
||||
yield gen
|
||||
|
||||
def todict(obj, classkey=None):
|
||||
|
||||
def todict(obj):
|
||||
"""
|
||||
Convert the ParseTree to a dictionary, stripping all objects of their
|
||||
methods and converting class names to strings
|
||||
Recursively convert a Python object graph to sequences (lists)
|
||||
and mappings (dicts) of primitives (bool, int, float, string, ...)
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
for k in obj.keys():
|
||||
obj[k] = todict(obj[k], classkey)
|
||||
return obj
|
||||
elif hasattr(obj, "__iter__"):
|
||||
return [todict(v, classkey) for v in obj]
|
||||
elif hasattr(obj, "__dict__"):
|
||||
data = dict([(key, todict(value, classkey))
|
||||
for key, value in obj.__dict__.iteritems()
|
||||
if not callable(value) and not key.startswith('_')])
|
||||
if classkey is not None and hasattr(obj, "__class__"):
|
||||
data[classkey] = obj.__class__.__name__
|
||||
return data
|
||||
else:
|
||||
if isinstance(obj, basestring):
|
||||
return obj
|
||||
elif isinstance(obj, dict):
|
||||
return dict((key, todict(val)) for key, val in obj.items())
|
||||
elif isinstance(obj, collections.Iterable):
|
||||
return [todict(val) for val in obj]
|
||||
elif hasattr(obj, '__dict__'):
|
||||
return todict(vars(obj))
|
||||
elif hasattr(obj, '__slots__'):
|
||||
return todict(dict((name, getattr(obj, name)) for name in getattr(obj, '__slots__')))
|
||||
return obj
|
||||
|
@ -16,9 +16,7 @@ PERF_TEST_P(surf, detect, testing::Values(SURF_IMAGES))
|
||||
{
|
||||
string filename = getDataPath(GetParam());
|
||||
Mat frame = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (frame.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
|
||||
|
||||
Mat mask;
|
||||
declare.in(frame).time(90);
|
||||
@ -34,9 +32,7 @@ PERF_TEST_P(surf, extract, testing::Values(SURF_IMAGES))
|
||||
{
|
||||
string filename = getDataPath(GetParam());
|
||||
Mat frame = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (frame.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
|
||||
|
||||
Mat mask;
|
||||
declare.in(frame).time(90);
|
||||
@ -55,9 +51,7 @@ PERF_TEST_P(surf, full, testing::Values(SURF_IMAGES))
|
||||
{
|
||||
string filename = getDataPath(GetParam());
|
||||
Mat frame = imread(filename, IMREAD_GRAYSCALE);
|
||||
|
||||
if (frame.empty())
|
||||
FAIL() << "Unable to load source image " << filename;
|
||||
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
|
||||
|
||||
Mat mask;
|
||||
declare.in(frame).time(90);
|
||||
|
@ -72,7 +72,7 @@ static bool ocl_fastNlMeansDenoising(InputArray _src, OutputArray _dst, float h,
    int type = _src.type(), cn = CV_MAT_CN(type);
    Size size = _src.size();

    if ( type != CV_8UC1 || type != CV_8UC2 || type != CV_8UC4 )
    if ( type != CV_8UC1 && type != CV_8UC2 && type != CV_8UC4 )
        return false;

    int templateWindowHalfWize = templateWindowSize / 2;
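The one-character-class fix above matters: with the original ||, the guard was true for every input (no value can equal all three constants at once), so the OpenCL denoising branch always bailed out. The corrected && rejects only formats that match none of the supported ones. A condensed restatement of the corrected logic, with a hypothetical helper name:

#include <opencv2/core/core.hpp>

// Returns true only for the formats the OpenCL denoising kernel supports.
static bool isSupportedType(int type)
{
    // Equivalent to the fixed guard: reject a type only when it matches
    // none of CV_8UC1, CV_8UC2, CV_8UC4.
    return type == CV_8UC1 || type == CV_8UC2 || type == CV_8UC4;
}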
@ -4,6 +4,8 @@
|
||||
#include "opencv2/core.hpp"
|
||||
#include "ts_gtest.h"
|
||||
|
||||
#include <functional>
|
||||
|
||||
#if !(defined(LOGD) || defined(LOGI) || defined(LOGW) || defined(LOGE))
|
||||
# if defined(ANDROID) && defined(USE_ANDROID_LOGGING)
|
||||
# include <android/log.h>
|
||||
@ -555,31 +557,33 @@ namespace comparators
|
||||
{
|
||||
|
||||
template<typename T>
|
||||
struct CV_EXPORTS RectLess_
|
||||
struct CV_EXPORTS RectLess_ :
|
||||
public std::binary_function<cv::Rect_<T>, cv::Rect_<T>, bool>
|
||||
{
|
||||
bool operator()(const cv::Rect_<T>& r1, const cv::Rect_<T>& r2) const
|
||||
{
|
||||
return r1.x < r2.x
|
||||
|| (r1.x == r2.x && r1.y < r2.y)
|
||||
|| (r1.x == r2.x && r1.y == r2.y && r1.width < r2.width)
|
||||
|| (r1.x == r2.x && r1.y == r2.y && r1.width == r2.width && r1.height < r2.height);
|
||||
return r1.x < r2.x ||
|
||||
(r1.x == r2.x && r1.y < r2.y) ||
|
||||
(r1.x == r2.x && r1.y == r2.y && r1.width < r2.width) ||
|
||||
(r1.x == r2.x && r1.y == r2.y && r1.width == r2.width && r1.height < r2.height);
|
||||
}
|
||||
};
|
||||
|
||||
typedef RectLess_<int> RectLess;
|
||||
|
||||
struct CV_EXPORTS KeypointGreater
|
||||
struct CV_EXPORTS KeypointGreater :
|
||||
public std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
|
||||
{
|
||||
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
|
||||
{
|
||||
if(kp1.response > kp2.response) return true;
|
||||
if(kp1.response < kp2.response) return false;
|
||||
if(kp1.size > kp2.size) return true;
|
||||
if(kp1.size < kp2.size) return false;
|
||||
if(kp1.octave > kp2.octave) return true;
|
||||
if(kp1.octave < kp2.octave) return false;
|
||||
if(kp1.pt.y < kp2.pt.y) return false;
|
||||
if(kp1.pt.y > kp2.pt.y) return true;
|
||||
if (kp1.response > kp2.response) return true;
|
||||
if (kp1.response < kp2.response) return false;
|
||||
if (kp1.size > kp2.size) return true;
|
||||
if (kp1.size < kp2.size) return false;
|
||||
if (kp1.octave > kp2.octave) return true;
|
||||
if (kp1.octave < kp2.octave) return false;
|
||||
if (kp1.pt.y < kp2.pt.y) return false;
|
||||
if (kp1.pt.y > kp2.pt.y) return true;
|
||||
return kp1.pt.x < kp2.pt.x;
|
||||
}
|
||||
};
|
||||
|
@ -426,7 +426,7 @@ class table(object):
|
||||
if r == 0:
|
||||
css = css[:-1] + "border-top:2px solid #6678B1;\""
|
||||
out.write(" <td%s%s>\n" % (attr, css))
|
||||
if th is not None:
|
||||
if td is not None:
|
||||
out.write(" %s\n" % htmlEncode(td.text))
|
||||
out.write(" </td>\n")
|
||||
i += colspan
|
||||
|
@ -16,7 +16,7 @@ USAGE
--feature - Feature to use. Can be sift, surf, orb or brisk. Append '-flann'
            to feature name to use Flann-based matcher instead bruteforce.

Press left mouse button on a feature point to see its mathcing point.
Press left mouse button on a feature point to see its matching point.
'''

import numpy as np