commit 9eb0e7d99b
Merge branch '2.4' of git://github.com/Itseez/opencv into mog2_bgimg_gray
Binary files changed (vendored; contents not shown):

    3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so
    3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so
    3rdparty/lib/armeabi/libnative_camera_r2.2.0.so
    3rdparty/lib/armeabi/libnative_camera_r2.3.3.so
    3rdparty/lib/armeabi/libnative_camera_r3.0.1.so
    3rdparty/lib/armeabi/libnative_camera_r4.0.0.so
    3rdparty/lib/armeabi/libnative_camera_r4.0.3.so
    3rdparty/lib/armeabi/libnative_camera_r4.1.1.so
    3rdparty/lib/armeabi/libnative_camera_r4.2.0.so
    3rdparty/lib/armeabi/libnative_camera_r4.3.0.so
    3rdparty/lib/armeabi/libnative_camera_r4.4.0.so
    3rdparty/lib/mips/libnative_camera_r4.0.3.so
    3rdparty/lib/mips/libnative_camera_r4.1.1.so
    3rdparty/lib/mips/libnative_camera_r4.2.0.so
    3rdparty/lib/mips/libnative_camera_r4.3.0.so
    3rdparty/lib/mips/libnative_camera_r4.4.0.so
    3rdparty/lib/x86/libnative_camera_r2.3.3.so
    3rdparty/lib/x86/libnative_camera_r3.0.1.so
    3rdparty/lib/x86/libnative_camera_r4.0.3.so
    3rdparty/lib/x86/libnative_camera_r4.1.1.so
    3rdparty/lib/x86/libnative_camera_r4.2.0.so
    3rdparty/lib/x86/libnative_camera_r4.3.0.so
    3rdparty/lib/x86/libnative_camera_r4.4.0.so
@@ -512,6 +512,16 @@ if(NOT HAVE_CUDA)
   set(ENABLE_DYNAMIC_CUDA OFF)
 endif()
 
+if(HAVE_CUDA AND NOT ENABLE_DYNAMIC_CUDA)
+  set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
+  if(HAVE_CUBLAS)
+    set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_cublas_LIBRARY})
+  endif()
+  if(HAVE_CUFFT)
+    set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_cufft_LIBRARY})
+  endif()
+endif()
+
 # ----------------------------------------------------------------------------
 # Solution folders:
 # ----------------------------------------------------------------------------
@@ -56,8 +56,11 @@ if(ANDROID)
 
   # remove CUDA runtime and NPP from regular deps
   # it can be added separately if needed.
-  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "libcu")
-  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "libnpp")
+  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cusparse")
+  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cufft")
+  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cublas")
+  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "npp")
+  ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cudart")
 
   if(HAVE_CUDA)
     # CUDA runtime libraries and are required always
@@ -152,11 +152,14 @@ ifeq ($(OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_ALREADY_INCLUDED),)
     OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_ALREADY_INCLUDED:=on
 endif
 
-ifeq ($(OPENCV_USE_GPU_MODULE),on)
-    include $(CLEAR_VARS)
-    LOCAL_MODULE:=opencv_gpu
-    LOCAL_SRC_FILES:=$(OPENCV_LIBS_DIR)/libopencv_gpu.a
-    include $(PREBUILT_STATIC_LIBRARY)
-endif
+ifeq ($(OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_GPU_ALREADY_INCLUDED),)
+    ifeq ($(OPENCV_USE_GPU_MODULE),on)
+        include $(CLEAR_VARS)
+        LOCAL_MODULE:=opencv_gpu
+        LOCAL_SRC_FILES:=$(OPENCV_LIBS_DIR)/libopencv_gpu.a
+        include $(PREBUILT_STATIC_LIBRARY)
+    endif
+    OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_GPU_ALREADY_INCLUDED:=on
+endif
 
 ifeq ($(OPENCV_LOCAL_CFLAGS),)
@@ -210,7 +210,7 @@ foreach(__opttype OPT DBG)
   SET(OpenCV_EXTRA_LIBS_${__opttype} "")
 
   # CUDA
-  if(OpenCV_CUDA_VERSION AND (CMAKE_CROSSCOMPILING OR (WIN32 AND NOT OpenCV_SHARED)))
+  if(OpenCV_CUDA_VERSION)
     if(NOT CUDA_FOUND)
       find_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
     else()
@@ -219,32 +219,41 @@ foreach(__opttype OPT DBG)
       endif()
     endif()
 
     list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_LIBRARIES})
+    set(OpenCV_CUDA_LIBS_ABSPATH ${CUDA_LIBRARIES})
 
     if(${CUDA_VERSION} VERSION_LESS "5.5")
       list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_npp_LIBRARY})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_npp_LIBRARY})
     else()
       find_cuda_helper_libs(nppc)
       find_cuda_helper_libs(nppi)
       find_cuda_helper_libs(npps)
       list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
     endif()
 
     if(OpenCV_USE_CUBLAS)
       list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUBLAS_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUBLAS_LIBRARIES})
     endif()
 
     if(OpenCV_USE_CUFFT)
       list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUFFT_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUFFT_LIBRARIES})
     endif()
 
     if(OpenCV_USE_NVCUVID)
       list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvid_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvid_LIBRARIES})
     endif()
 
     if(WIN32)
       list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvenc_LIBRARIES})
+      list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvenc_LIBRARIES})
     endif()
 
+    set(OpenCV_CUDA_LIBS_RELPATH "")
+    foreach(l ${OpenCV_CUDA_LIBS_ABSPATH})
+      get_filename_component(_tmp ${l} PATH)
+      list(APPEND OpenCV_CUDA_LIBS_RELPATH ${_tmp})
+    endforeach()
+
+    list(REMOVE_DUPLICATES OpenCV_CUDA_LIBS_RELPATH)
+    link_directories(${OpenCV_CUDA_LIBS_RELPATH})
   endif()
 endforeach()
doc/conf.py (94 changes)
@@ -293,11 +293,11 @@ extlinks = {
     'oldbasicstructures' : ('http://docs.opencv.org/modules/core/doc/old_basic_structures.html#%s', None),
     'readwriteimagevideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
     'operationsonarrays' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#%s', None),
-    'utilitysystemfunctions':('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None),
-    'imgprocfilter':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
-    'svms':('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None),
-    'drawingfunc':('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None),
-    'xmlymlpers':('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None),
+    'utilitysystemfunctions' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None),
+    'imgprocfilter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
+    'svms' : ('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None),
+    'drawingfunc' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None),
+    'xmlymlpers' : ('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None),
     'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
     'gpuinit' : ('http://docs.opencv.org/modules/gpu/doc/initalization_and_information.html#%s', None),
     'gpudatastructure' : ('http://docs.opencv.org/modules/gpu/doc/data_structures.html#%s', None),
@@ -305,56 +305,58 @@ extlinks = {
     'gpuperelement' : ('http://docs.opencv.org/modules/gpu/doc/per_element_operations.html#%s', None),
     'gpuimgproc' : ('http://docs.opencv.org/modules/gpu/doc/image_processing.html#%s', None),
     'gpumatrixreduct' : ('http://docs.opencv.org/modules/gpu/doc/matrix_reductions.html#%s', None),
-    'filtering':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
+    'filtering' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
     'flann' : ('http://docs.opencv.org/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html#%s', None ),
     'calib3d' : ('http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#%s', None ),
     'feature2d' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#%s', None ),
     'imgproc_geometric' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#%s', None ),
     'miscellaneous_transformations' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#%s', None),
     'user_interface' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html#%s', None),
 
     # 'opencv_group' : ('http://answers.opencv.org/%s', None),
     'opencv_qa' : ('http://answers.opencv.org/%s', None),
     'how_to_contribute' : ('http://code.opencv.org/projects/opencv/wiki/How_to_contribute/%s', None),
 
-    'cvt_color': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),
-    'imread': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None),
-    'imwrite': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None),
-    'imshow': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None),
-    'named_window': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None),
-    'wait_key': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None),
-    'add_weighted': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None),
-    'saturate_cast': ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None),
-    'mat_zeros': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None),
-    'convert_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None),
-    'create_trackbar': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None),
-    'point': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None),
-    'scalar': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None),
-    'line': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None),
-    'ellipse': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None),
-    'rectangle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None),
-    'circle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None),
-    'fill_poly': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None),
-    'rng': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None),
-    'put_text': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None),
-    'gaussian_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None),
-    'blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None),
-    'median_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None),
-    'bilateral_filter': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None),
-    'erode': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None),
-    'dilate': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None),
-    'get_structuring_element': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None),
-    'flood_fill': ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None),
-    'morphology_ex': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None),
-    'pyr_down': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None),
-    'pyr_up': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None),
-    'resize': ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None),
-    'threshold': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None),
-    'filter2d': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None),
-    'copy_make_border': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None),
-    'sobel': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None),
-    'scharr': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None),
-    'laplacian': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None),
-    'canny': ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None),
-    'copy_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None),
+    'cvt_color' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),
+    'imread' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None),
+    'imwrite' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None),
+    'imshow' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None),
+    'named_window' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None),
+    'wait_key' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None),
+    'add_weighted' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None),
+    'saturate_cast' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None),
+    'mat_zeros' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None),
+    'convert_to' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None),
+    'create_trackbar' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None),
+    'point' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None),
+    'scalar' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None),
+    'line' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None),
+    'ellipse' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None),
+    'rectangle' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None),
+    'circle' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None),
+    'fill_poly' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None),
+    'rng' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None),
+    'put_text' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None),
+    'gaussian_blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None),
+    'blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None),
+    'median_blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None),
+    'bilateral_filter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None),
+    'erode' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None),
+    'dilate' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None),
+    'get_structuring_element' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None),
+    'flood_fill' : ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None),
+    'morphology_ex' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None),
+    'pyr_down' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None),
+    'pyr_up' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None),
+    'resize' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None),
+    'threshold' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None),
+    'filter2d' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None),
+    'copy_make_border' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None),
+    'sobel' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None),
+    'scharr' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None),
+    'laplacian' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None),
+    'canny' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None),
+    'copy_to' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None),
+    'hough_lines' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlines#houghlines%s', None),
+    'hough_lines_p' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlinesp#houghlinesp%s', None),
+    'hough_circles' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghcircles#houghcircles%s', None),
@@ -6,12 +6,12 @@ Adding (blending) two images using OpenCV
 Goal
 =====
 
-In this tutorial you will learn how to:
+In this tutorial you will learn:
 
 .. container:: enumeratevisibleitemswithsquare
 
-   * What is *linear blending* and why it is useful.
-   * Add two images using :add_weighted:`addWeighted <>`
+   * what is *linear blending* and why it is useful;
+   * how to add two images using :add_weighted:`addWeighted <>`
 
 Theory
 =======
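For reference, linear blending computes dst(x) = (1 - alpha)*src1(x) + alpha*src2(x). A minimal, self-contained sketch of the addWeighted call this tutorial introduces (file names are placeholders, not part of the patch):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        double alpha = 0.5;                   // blend weight in [0, 1]
        Mat src1 = imread("LinuxLogo.jpg");   // the two inputs must share
        Mat src2 = imread("WindowsLogo.jpg"); // the same size and type
        if (src1.empty() || src2.empty()) return -1;

        Mat dst;
        addWeighted(src1, 1.0 - alpha, src2, alpha, 0.0, dst); // dst = (1-a)*src1 + a*src2

        imshow("Linear Blend", dst);
        waitKey(0);
        return 0;
    }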
@@ -18,7 +18,7 @@ We'll seek answers for the following questions:
 Our test case
 =============
 
-Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storing a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result.
+Let us consider a simple color reduction method. By using the unsigned char C and C++ type for matrix item storing, a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result.
 
 In this cases it's common that we make a *color space reduction*. This means that we divide the color space current value with a new input value to end up with fewer colors. For instance every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten and so on.
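The reduction rule in that last paragraph is plain integer division and multiplication; a small sketch, with an illustrative helper name that is not part of the patch:

    #include <opencv2/core/core.hpp>
    using namespace cv;

    // Maps every value in [0,9] to 0, [10,19] to 10, and so on,
    // shrinking 256 shades per channel down to 26.
    static inline uchar reduceShade(uchar value, int divideWith = 10)
    {
        return (uchar)(value / divideWith * divideWith);
    }

    int main()
    {
        Mat img(2, 2, CV_8UC1, Scalar(137));
        for (int r = 0; r < img.rows; ++r)
            for (int c = 0; c < img.cols; ++c)
                img.at<uchar>(r, c) = reduceShade(img.at<uchar>(r, c)); // 137 -> 130
        return 0;
    }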
@@ -32,14 +32,14 @@ Here's a function that will do this:
 
 .. code-block:: cpp
 
-   void Sharpen(const Mat& myImage,Mat& Result)
+   void Sharpen(const Mat& myImage, Mat& Result)
    {
        CV_Assert(myImage.depth() == CV_8U);  // accept only uchar images
 
-       Result.create(myImage.size(),myImage.type());
+       Result.create(myImage.size(), myImage.type());
        const int nChannels = myImage.channels();
 
-       for(int j = 1 ; j < myImage.rows-1; ++j)
+       for(int j = 1; j < myImage.rows - 1; ++j)
        {
            const uchar* previous = myImage.ptr<uchar>(j - 1);
            const uchar* current  = myImage.ptr<uchar>(j    );
@@ -47,17 +47,17 @@ Here's a function that will do this:
 
            uchar* output = Result.ptr<uchar>(j);
 
-           for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+           for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
            {
-               *output++ = saturate_cast<uchar>(5*current[i]
-                            -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+               *output++ = saturate_cast<uchar>(5 * current[i]
+                            -current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
            }
        }
 
        Result.row(0).setTo(Scalar(0));
-       Result.row(Result.rows-1).setTo(Scalar(0));
+       Result.row(Result.rows - 1).setTo(Scalar(0));
        Result.col(0).setTo(Scalar(0));
-       Result.col(Result.cols-1).setTo(Scalar(0));
+       Result.col(Result.cols - 1).setTo(Scalar(0));
    }
 
 At first we make sure that the input images data is in unsigned char format. For this we use the :utilitysystemfunctions:`CV_Assert <cv-assert>` function that throws an error when the expression inside it is false.
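A hypothetical driver for the Sharpen() routine above, in case you want to try this hunk as-is (the image paths are placeholders):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    void Sharpen(const Mat& myImage, Mat& Result); // the function shown above

    int main()
    {
        Mat src = imread("input.png"); // placeholder path; loads an 8-bit BGR image
        if (src.empty()) return -1;
        Mat dst;
        Sharpen(src, dst);             // hand-coded 3x3 sharpening mask
        imwrite("sharpened.png", dst);
        return 0;
    }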
@@ -70,14 +70,14 @@ We create an output image with the same size and the same type as our input. As
 
 .. code-block:: cpp
 
-   Result.create(myImage.size(),myImage.type());
+   Result.create(myImage.size(), myImage.type());
    const int nChannels = myImage.channels();
 
 We'll use the plain C [] operator to access pixels. Because we need to access multiple rows at the same time we'll acquire the pointers for each of them (a previous, a current and a next line). We need another pointer to where we're going to save the calculation. Then simply access the right items with the [] operator. For moving the output pointer ahead we simply increase this (with one byte) after each operation:
 
 .. code-block:: cpp
 
-   for(int j = 1 ; j < myImage.rows-1; ++j)
+   for(int j = 1; j < myImage.rows - 1; ++j)
    {
        const uchar* previous = myImage.ptr<uchar>(j - 1);
        const uchar* current  = myImage.ptr<uchar>(j    );
@@ -85,21 +85,21 @@ We'll use the plain C [] operator to access pixels. Because we need to access mu
 
            uchar* output = Result.ptr<uchar>(j);
 
-           for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+           for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
            {
-               *output++ = saturate_cast<uchar>(5*current[i]
-                            -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+               *output++ = saturate_cast<uchar>(5 * current[i]
+                            -current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
            }
        }
 
-On the borders of the image the upper notation results inexistent pixel locations (like minus one - minus one). In these points our formula is undefined. A simple solution is to not apply the mask in these points and, for example, set the pixels on the borders to zeros:
+On the borders of the image the upper notation results inexistent pixel locations (like minus one - minus one). In these points our formula is undefined. A simple solution is to not apply the kernel in these points and, for example, set the pixels on the borders to zeros:
 
 .. code-block:: cpp
 
-   Result.row(0).setTo(Scalar(0));             // The top row
-   Result.row(Result.rows-1).setTo(Scalar(0)); // The bottom row
-   Result.col(0).setTo(Scalar(0));             // The left column
-   Result.col(Result.cols-1).setTo(Scalar(0)); // The right column
+   Result.row(0).setTo(Scalar(0));               // The top row
+   Result.row(Result.rows - 1).setTo(Scalar(0)); // The bottom row
+   Result.col(0).setTo(Scalar(0));               // The left column
+   Result.col(Result.cols - 1).setTo(Scalar(0)); // The right column
 
 The filter2D function
 =====================
@@ -116,7 +116,7 @@ Then call the :filtering:`filter2D <filter2d>` function specifying the input, th
 
 .. code-block:: cpp
 
-   filter2D(I, K, I.depth(), kern );
+   filter2D(I, K, I.depth(), kern);
 
 The function even has a fifth optional argument to specify the center of the kernel, and a sixth one for determining what to do in the regions where the operation is undefined (borders). Using this function has the advantage that it's shorter, less verbose and because there are some optimization techniques implemented it is usually faster than the *hand-coded method*. For example in my test while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some difference.
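A self-contained sketch of the built-in path compared in that paragraph, using the same sharpening kernel the hand-coded Sharpen() applies (the file names are placeholders):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    using namespace cv;

    int main()
    {
        Mat I = imread("input.png"); // placeholder path
        if (I.empty()) return -1;

        // result = 5*current - left - right - above - below
        Mat kern = (Mat_<char>(3, 3) <<  0, -1,  0,
                                        -1,  5, -1,
                                         0, -1,  0);
        Mat K;
        filter2D(I, K, I.depth(), kern); // usually the faster, optimized path
        imwrite("sharpened.png", K);
        return 0;
    }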
@@ -45,7 +45,7 @@ All the above objects, in the end, point to the same single data matrix. Their h
    :linenos:
 
    Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
-   Mat E = A(Range:all(), Range(1,3)); // using row and column boundaries
+   Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
 
 Now you may ask if the matrix itself may belong to multiple *Mat* objects who takes responsibility for cleaning it up when it's no longer needed. The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix too is freed. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() <mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.
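A small sketch of the reference-counting behaviour that paragraph describes (variable names are illustrative):

    #include <opencv2/core/core.hpp>
    using namespace cv;

    int main()
    {
        Mat A = Mat::zeros(4, 4, CV_8UC1);
        Mat B = A;               // header copy: refcount++, same pixel data
        B.at<uchar>(0, 0) = 255; // visible through A as well

        Mat F = A.clone();       // deep copy: fresh data block
        Mat G;
        A.copyTo(G);             // deep copy too
        F.at<uchar>(0, 0) = 0;   // does not affect A or B

        return 0;                // data freed when the last header (A or B) dies
    }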
@@ -86,7 +86,7 @@ Each of the building components has their own valid domains. This leads to the d
 Creating a *Mat* object explicitly
 ==================================
 
-In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
+In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readwriteimagevideo:`imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
 
 Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways:
@@ -48,10 +48,10 @@ The structure of package contents looks as follows:
 
 ::
 
-   OpenCV-2.4.8-android-sdk
+   OpenCV-2.4.9-android-sdk
    |_ apk
-   |  |_ OpenCV_2.4.8_binary_pack_armv7a.apk
-   |  |_ OpenCV_2.4.8_Manager_2.16_XXX.apk
+   |  |_ OpenCV_2.4.9_binary_pack_armv7a.apk
+   |  |_ OpenCV_2.4.9_Manager_2.18_XXX.apk
    |
    |_ doc
    |_ samples
@@ -157,10 +157,10 @@ Get the OpenCV4Android SDK
 
 .. code-block:: bash
 
-   unzip ~/Downloads/OpenCV-2.4.8-android-sdk.zip
+   unzip ~/Downloads/OpenCV-2.4.9-android-sdk.zip
 
-.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.8-android-sdk.zip`
-.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.8/OpenCV-2.4.8-android-sdk.zip/download
+.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.9-android-sdk.zip`
+.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.9/OpenCV-2.4.9-android-sdk.zip/download
 .. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack|
 .. |seven_zip| replace:: 7-Zip
 .. _seven_zip: http://www.7-zip.org/
@@ -295,7 +295,7 @@ Well, running samples from Eclipse is very simple:
 .. code-block:: sh
    :linenos:
 
-   <Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.8_Manager_2.16_armv7a-neon.apk
+   <Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.9_Manager_2.18_armv7a-neon.apk
 
 .. note:: ``armeabi``, ``armv7a-neon``, ``arm7a-neon-android8``, ``mips`` and ``x86`` stand for
    platform targets:
@@ -55,14 +55,14 @@ Manager to access OpenCV libraries externally installed in the target system.
    :guilabel:`File -> Import -> Existing project in your workspace`.
 
    Press :guilabel:`Browse` button and locate OpenCV4Android SDK
-   (:file:`OpenCV-2.4.8-android-sdk/sdk`).
+   (:file:`OpenCV-2.4.9-android-sdk/sdk`).
 
    .. image:: images/eclipse_opencv_dependency0.png
       :alt: Add dependency from OpenCV library
      :align: center
 
 #. In application project add a reference to the OpenCV Java SDK in
-   :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.8``.
+   :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.9``.
 
    .. image:: images/eclipse_opencv_dependency1.png
      :alt: Add dependency from OpenCV library
@@ -128,27 +128,27 @@ described above.
 #. Add the OpenCV library project to your workspace the same way as for the async initialization
    above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
    press :guilabel:`Browse` button and select OpenCV SDK path
-   (:file:`OpenCV-2.4.8-android-sdk/sdk`).
+   (:file:`OpenCV-2.4.9-android-sdk/sdk`).
 
    .. image:: images/eclipse_opencv_dependency0.png
       :alt: Add dependency from OpenCV library
      :align: center
 
 #. In the application project add a reference to the OpenCV4Android SDK in
-   :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.8``;
+   :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.9``;
 
    .. image:: images/eclipse_opencv_dependency1.png
       :alt: Add dependency from OpenCV library
      :align: center
 
 #. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV
-   native libs from :file:`<OpenCV-2.4.8-android-sdk>/sdk/native/libs/<target_arch>` to your
+   native libs from :file:`<OpenCV-2.4.9-android-sdk>/sdk/native/libs/<target_arch>` to your
    project directory to folder :file:`libs/<target_arch>`.
 
    In case of the application project **with a JNI part**, instead of manual libraries copying you
    need to modify your ``Android.mk`` file:
    add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before
-   ``"include path_to_OpenCV-2.4.8-android-sdk/sdk/native/jni/OpenCV.mk"``
+   ``"include path_to_OpenCV-2.4.9-android-sdk/sdk/native/jni/OpenCV.mk"``
 
    .. code-block:: make
      :linenos:
@@ -221,7 +221,7 @@ taken:
 
 .. code-block:: make
 
-   include C:\Work\OpenCV4Android\OpenCV-2.4.8-android-sdk\sdk\native\jni\OpenCV.mk
+   include C:\Work\OpenCV4Android\OpenCV-2.4.9-android-sdk\sdk\native\jni\OpenCV.mk
 
 Should be inserted into the :file:`jni/Android.mk` file **after** this line:
@@ -5,7 +5,7 @@ Load, Modify, and Save an Image
 
 .. note::
 
-   We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise.
+   We assume that by now you know how to load an image using :readwriteimagevideo:`imread <imread>` and to display it in a window (using :user_interface:`imshow <imshow>`). Read the :ref:`Display_Image` tutorial otherwise.
 
 Goals
 ======
@@ -14,9 +14,9 @@ In this tutorial you will learn how to:
 
 .. container:: enumeratevisibleitemswithsquare
 
-   * Load an image using :imread:`imread <>`
-   * Transform an image from BGR to Grayscale format by using :cvt_color:`cvtColor <>`
-   * Save your transformed image in a file on disk (using :imwrite:`imwrite <>`)
+   * Load an image using :readwriteimagevideo:`imread <imread>`
+   * Transform an image from BGR to Grayscale format by using :miscellaneous_transformations:`cvtColor <cvtcolor>`
+   * Save your transformed image in a file on disk (using :readwriteimagevideo:`imwrite <imwrite>`)
 
 Code
 ======
@@ -63,10 +63,7 @@ Here it is:
 Explanation
 ============
 
-#. We begin by:
-
-   * Creating a Mat object to store the image information
-   * Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image.
+#. We begin by loading an image using :readwriteimagevideo:`imread <imread>`, located in the path given by *imageName*. For this example, assume you are loading a RGB image.
 
 #. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations:
@@ -74,15 +71,15 @@ Explanation
 
       cvtColor( image, gray_image, CV_BGR2GRAY );
 
-   As you can see, :cvt_color:`cvtColor <>` takes as arguments:
+   As you can see, :miscellaneous_transformations:`cvtColor <cvtcolor>` takes as arguments:
 
    .. container:: enumeratevisibleitemswithsquare
 
    * a source image (*image*)
    * a destination image (*gray_image*), in which we will save the converted image.
-   * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images).
+   * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :readwriteimagevideo:`imread <imread>` has BGR default channel order in case of color images).
 
-#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :imread:`imread <>`: :imwrite:`imwrite <>`
+#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :readwriteimagevideo:`imread <imread>`: :readwriteimagevideo:`imwrite <imwrite>`
 
    .. code-block:: cpp
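The whole load-convert-save flow discussed in this hunk fits in a few lines; a compact, self-contained version (the paths are placeholders):

    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    using namespace cv;

    int main()
    {
        Mat image = imread("imageName.png");      // BGR channel order by default
        if (image.empty()) return -1;

        Mat gray_image;
        cvtColor(image, gray_image, CV_BGR2GRAY); // BGR -> single-channel gray

        imwrite("gray_image.png", gray_image);    // persist the result
        return 0;
    }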
@@ -102,7 +102,7 @@ As always, we would be happy to hear your comments and receive your contribution
 .. cssclass:: toctableopencv
 
 =========== =======================================================
-|Video|     Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions.
+|Video|     Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions.
 
 =========== =======================================================
@@ -3,7 +3,7 @@
 *video* module. Video analysis
 -----------------------------------------------------------
 
-Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions.
+Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions.
 
 .. include:: ../../definitions/noContent.rst
@@ -58,7 +58,7 @@ SET_TARGET_PROPERTIES(${the_target} PROPERTIES
                       RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
                       )
 
-if (NOT (CMAKE_BUILD_TYPE MATCHES "debug"))
+if (NOT (CMAKE_BUILD_TYPE MATCHES "Debug"))
     ADD_CUSTOM_COMMAND( TARGET ${the_target} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "${LIBRARY_OUTPUT_PATH}/lib${the_target}.so" )
 endif()
@@ -61,6 +61,12 @@
 
 using namespace android;
 
+// non-public camera related classes are not binary compatible
+// objects of these classes have different sizeof on different platforms
+// additional memory tail to all system objects to overcome sizeof issue
+#define MAGIC_TAIL 4096
+
+
 void debugShowFPS();
 
 #if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
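The MAGIC_TAIL idea introduced here pairs over-allocation with placement new; a self-contained sketch of the pattern (the struct is a simplified stand-in for the real Android class):

    #include <new> // placement new

    #define MAGIC_TAIL 4096 // slack for sizeof differences across Android builds

    struct CameraParameters { /* layout varies between platform versions */ };

    int main()
    {
        // Allocate sizeof(T) plus a safety tail, then construct T in place.
        // If the platform's real object is larger than the headers we
        // compiled against, its fields still land inside our allocation.
        void* buffer = operator new(sizeof(CameraParameters) + MAGIC_TAIL);
        CameraParameters* params = new(buffer) CameraParameters();

        // Mirror of the teardown added in this patch: destroy explicitly,
        // then release the raw buffer.
        params->~CameraParameters();
        operator delete(params);
        return 0;
    }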
@@ -90,6 +96,7 @@ public:
 };
 #endif
 
+
 std::string getProcessName()
 {
     std::string result;
@@ -142,12 +149,22 @@ class CameraHandler: public CameraListener
 protected:
     int cameraId;
     sp<Camera> camera;
-    CameraParameters params;
+#if defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
+    sp<SurfaceTexture> surface;
+#endif
+#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
+    sp<BufferQueue> queue;
+    sp<ConsumerListenerStub> listener;
+#endif
+    CameraParameters* params;
     CameraCallback cameraCallback;
     void* userData;
 
     int emptyCameraCallbackReported;
 
+    int width;
+    int height;
+
     static const char* flashModesNames[ANDROID_CAMERA_FLASH_MODES_NUM];
     static const char* focusModesNames[ANDROID_CAMERA_FOCUS_MODES_NUM];
    static const char* whiteBalanceModesNames[ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM];
@@ -258,7 +275,7 @@ protected:
 
     int is_supported(const char* supp_modes_key, const char* mode)
     {
-        const char* supported_modes = params.get(supp_modes_key);
+        const char* supported_modes = params->get(supp_modes_key);
         return (supported_modes && mode && (strstr(supported_modes, mode) > 0));
     }
@@ -268,7 +285,7 @@ protected:
         if (focus_distance_type >= 0 && focus_distance_type < 3)
         {
             float focus_distances[3];
-            const char* output = params.get(CameraParameters::KEY_FOCUS_DISTANCES);
+            const char* output = params->get(CameraParameters::KEY_FOCUS_DISTANCES);
             int val_num = CameraHandler::split_float(output, focus_distances, ',', 3);
             if(val_num == 3)
             {
@@ -300,10 +317,15 @@ public:
         emptyCameraCallbackReported(0)
     {
         LOGD("Instantiated new CameraHandler (%p, %p)", callback, _userData);
+        void* params_buffer = operator new(sizeof(CameraParameters) + MAGIC_TAIL);
+        params = new(params_buffer) CameraParameters();
     }
 
     virtual ~CameraHandler()
     {
+        if (params)
+            params->~CameraParameters();
+        operator delete(params);
         LOGD("CameraHandler destructor is called");
     }
@@ -371,10 +393,18 @@ const char* CameraHandler::focusModesNames[ANDROID_CAMERA_FOCUS_MODES_NUM] =
     CameraParameters::FOCUS_MODE_AUTO,
 #if !defined(ANDROID_r2_2_0)
     CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO,
 #else
     CameraParameters::FOCUS_MODE_AUTO,
 #endif
     CameraParameters::FOCUS_MODE_EDOF,
     CameraParameters::FOCUS_MODE_FIXED,
-    CameraParameters::FOCUS_MODE_INFINITY
+    CameraParameters::FOCUS_MODE_INFINITY,
+    CameraParameters::FOCUS_MODE_MACRO,
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+    CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE
+#else
+    CameraParameters::FOCUS_MODE_AUTO
+#endif
 };
 
 const char* CameraHandler::whiteBalanceModesNames[ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM] =
@@ -534,39 +564,39 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
     {
         LOGI("initCameraConnect: Setting paramers from previous camera handler");
         camera->setParameters(prevCameraParameters->flatten());
-        handler->params.unflatten(prevCameraParameters->flatten());
+        handler->params->unflatten(prevCameraParameters->flatten());
     }
     else
     {
         android::String8 params_str = camera->getParameters();
         LOGI("initCameraConnect: [%s]", params_str.string());
 
-        handler->params.unflatten(params_str);
+        handler->params->unflatten(params_str);
 
-        LOGD("Supported Cameras: %s", handler->params.get("camera-indexes"));
-        LOGD("Supported Picture Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES));
-        LOGD("Supported Picture Formats: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
-        LOGD("Supported Preview Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES));
-        LOGD("Supported Preview Formats: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS));
-        LOGD("Supported Preview Frame Rates: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
-        LOGD("Supported Thumbnail Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES));
-        LOGD("Supported Whitebalance Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE));
-        LOGD("Supported Effects: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_EFFECTS));
-        LOGD("Supported Scene Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_SCENE_MODES));
-        LOGD("Supported Focus Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
-        LOGD("Supported Antibanding Options: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_ANTIBANDING));
-        LOGD("Supported Flash Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES));
+        LOGD("Supported Cameras: %s", handler->params->get("camera-indexes"));
+        LOGD("Supported Picture Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES));
+        LOGD("Supported Picture Formats: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
+        LOGD("Supported Preview Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES));
+        LOGD("Supported Preview Formats: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS));
+        LOGD("Supported Preview Frame Rates: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
+        LOGD("Supported Thumbnail Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES));
+        LOGD("Supported Whitebalance Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE));
+        LOGD("Supported Effects: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_EFFECTS));
+        LOGD("Supported Scene Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_SCENE_MODES));
+        LOGD("Supported Focus Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+        LOGD("Supported Antibanding Options: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_ANTIBANDING));
+        LOGD("Supported Flash Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_FLASH_MODES));
 
 #if !defined(ANDROID_r2_2_0)
         // Set focus mode to continuous-video if supported
-        const char* available_focus_modes = handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
+        const char* available_focus_modes = handler->params->get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
         if (available_focus_modes != 0)
         {
             if (strstr(available_focus_modes, "continuous-video") != NULL)
             {
-                handler->params.set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO);
+                handler->params->set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO);
 
-                status_t resParams = handler->camera->setParameters(handler->params.flatten());
+                status_t resParams = handler->camera->setParameters(handler->params->flatten());
 
                 if (resParams != 0)
                 {
@@ -581,7 +611,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
 #endif
 
     //check if yuv420sp format available. Set this format as preview format.
-    const char* available_formats = handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
+    const char* available_formats = handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
     if (available_formats != 0)
     {
         const char* format_to_set = 0;
@@ -607,9 +637,9 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
 
         if (0 != format_to_set)
         {
-            handler->params.setPreviewFormat(format_to_set);
+            handler->params->setPreviewFormat(format_to_set);
 
-            status_t resParams = handler->camera->setParameters(handler->params.flatten());
+            status_t resParams = handler->camera->setParameters(handler->params->flatten());
 
             if (resParams != 0)
                 LOGE("initCameraConnect: failed to set preview format to %s", format_to_set);
@@ -617,6 +647,13 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
                 LOGD("initCameraConnect: preview format is set to %s", format_to_set);
         }
     }
+
+    handler->params->setPreviewSize(640, 480);
+    status_t resParams = handler->camera->setParameters(handler->params->flatten());
+    if (resParams != 0)
+        LOGE("initCameraConnect: failed to set preview resolution to 640x480");
+    else
+        LOGD("initCameraConnect: preview format is set to 640x480");
     }
 
     status_t bufferStatus;
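Every mutation in this file goes through the same flatten()/unflatten() round-trip; a runnable sketch with mocked stand-ins for the Android types (the real CameraParameters serializes to a "key=value;..." string):

    #include <cstdio>
    #include <string>

    typedef int status_t;

    struct CameraParameters // mocked: stores the flattened state directly
    {
        std::string state;
        void unflatten(const std::string& s) { state = s; }
        void setPreviewSize(int w, int h)
        {
            state += ";preview-size=" + std::to_string(w) + "x" + std::to_string(h);
        }
        std::string flatten() const { return state; }
    };

    struct Camera // mocked camera service proxy
    {
        std::string getParameters() { return "preview-format=yuv420sp"; }
        status_t setParameters(const std::string& s)
        {
            std::printf("apply: %s\n", s.c_str());
            return 0;
        }
    };

    int main()
    {
        Camera camera;
        CameraParameters p;
        p.unflatten(camera.getParameters());      // parse current state
        p.setPreviewSize(640, 480);               // mutate the local copy
        return camera.setParameters(p.flatten()); // serialize and apply
    }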
@@ -627,22 +664,27 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
 #elif defined(ANDROID_r2_3_3)
     /* Do nothing in case of 2.3 for now */
 #elif defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
-    sp<SurfaceTexture> surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
-    bufferStatus = camera->setPreviewTexture(surfaceTexture);
+    void* surface_texture_obj = operator new(sizeof(SurfaceTexture) + MAGIC_TAIL);
+    handler->surface = new(surface_texture_obj) SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
+    bufferStatus = camera->setPreviewTexture(handler->surface);
     if (bufferStatus != 0)
         LOGE("initCameraConnect: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus);
 #elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
-    sp<BufferQueue> bufferQueue = new BufferQueue();
-    sp<BufferQueue::ConsumerListener> queueListener = new ConsumerListenerStub();
-    bufferQueue->consumerConnect(queueListener);
-    bufferStatus = camera->setPreviewTexture(bufferQueue);
+    void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+    handler->queue = new(buffer_queue_obj) BufferQueue();
+    void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
+    handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
+    handler->queue->consumerConnect(handler->listener);
+    bufferStatus = camera->setPreviewTexture(handler->queue);
     if (bufferStatus != 0)
         LOGE("initCameraConnect: failed setPreviewTexture call; camera might not work correctly");
 # elif defined(ANDROID_r4_4_0)
-    sp<BufferQueue> bufferQueue = new BufferQueue();
-    sp<IConsumerListener> queueListener = new ConsumerListenerStub();
-    bufferQueue->consumerConnect(queueListener, true);
-    bufferStatus = handler->camera->setPreviewTarget(bufferQueue);
+    void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+    handler->queue = new(buffer_queue_obj) BufferQueue();
+    void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
+    handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
+    handler->queue->consumerConnect(handler->listener, true);
+    bufferStatus = handler->camera->setPreviewTarget(handler->queue);
     if (bufferStatus != 0)
         LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
 # endif
@@ -723,18 +765,18 @@ double CameraHandler::getProperty(int propIdx)
     case ANDROID_CAMERA_PROPERTY_FRAMEWIDTH:
     {
         int w,h;
-        params.getPreviewSize(&w, &h);
+        params->getPreviewSize(&w, &h);
         return w;
     }
     case ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT:
     {
         int w,h;
-        params.getPreviewSize(&w, &h);
+        params->getPreviewSize(&w, &h);
         return h;
     }
     case ANDROID_CAMERA_PROPERTY_SUPPORTED_PREVIEW_SIZES_STRING:
     {
-        cameraPropertySupportedPreviewSizesString = params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
+        cameraPropertySupportedPreviewSizesString = params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
         union {const char* str;double res;} u;
         memset(&u.res, 0, sizeof(u.res));
         u.str = cameraPropertySupportedPreviewSizesString.c_str();
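The union in that case label deserves a note: getProperty() can only return a double, so the code bit-copies a C-string pointer into the double's storage and the caller recovers it the same way. A self-contained sketch (it assumes, as the original does, that sizeof(double) >= sizeof(const char*)):

    #include <cstdio>
    #include <cstring>

    int main()
    {
        const char* sizes = "640x480,320x240";

        // Pack: zero the double so unused bytes are deterministic,
        // then overlay the pointer.
        union {const char* str; double res;} u;
        std::memset(&u.res, 0, sizeof(u.res));
        u.str = sizes;
        double packed = u.res; // what getProperty() hands back

        // Unpack on the caller's side with the mirrored union.
        union {double res; const char* str;} v;
        v.res = packed;
        std::printf("%s\n", v.str); // prints the original string
        return 0;
    }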
@@ -742,7 +784,7 @@ double CameraHandler::getProperty(int propIdx)
     }
     case ANDROID_CAMERA_PROPERTY_PREVIEW_FORMAT_STRING:
     {
-        const char* fmt = params.get(CameraParameters::KEY_PREVIEW_FORMAT);
+        const char* fmt = params->get(CameraParameters::KEY_PREVIEW_FORMAT);
         if (fmt == CameraParameters::PIXEL_FORMAT_YUV422SP)
             fmt = "yuv422sp";
         else if (fmt == CameraParameters::PIXEL_FORMAT_YUV420SP)
@@ -762,44 +804,44 @@ double CameraHandler::getProperty(int propIdx)
     }
     case ANDROID_CAMERA_PROPERTY_EXPOSURE:
     {
-        int exposure = params.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
+        int exposure = params->getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
         return exposure;
     }
     case ANDROID_CAMERA_PROPERTY_FPS:
     {
-        return params.getPreviewFrameRate();
+        return params->getPreviewFrameRate();
     }
     case ANDROID_CAMERA_PROPERTY_FLASH_MODE:
     {
         int flash_mode = getModeNum(CameraHandler::flashModesNames,
                                     ANDROID_CAMERA_FLASH_MODES_NUM,
-                                    params.get(CameraParameters::KEY_FLASH_MODE));
+                                    params->get(CameraParameters::KEY_FLASH_MODE));
         return flash_mode;
     }
     case ANDROID_CAMERA_PROPERTY_FOCUS_MODE:
     {
         int focus_mode = getModeNum(CameraHandler::focusModesNames,
                                     ANDROID_CAMERA_FOCUS_MODES_NUM,
-                                    params.get(CameraParameters::KEY_FOCUS_MODE));
+                                    params->get(CameraParameters::KEY_FOCUS_MODE));
         return focus_mode;
     }
     case ANDROID_CAMERA_PROPERTY_WHITE_BALANCE:
     {
         int white_balance = getModeNum(CameraHandler::whiteBalanceModesNames,
                                        ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM,
-                                       params.get(CameraParameters::KEY_WHITE_BALANCE));
+                                       params->get(CameraParameters::KEY_WHITE_BALANCE));
         return white_balance;
     }
     case ANDROID_CAMERA_PROPERTY_ANTIBANDING:
     {
         int antibanding = getModeNum(CameraHandler::antibandingModesNames,
                                      ANDROID_CAMERA_ANTIBANDING_MODES_NUM,
-                                     params.get(CameraParameters::KEY_ANTIBANDING));
+                                     params->get(CameraParameters::KEY_ANTIBANDING));
         return antibanding;
     }
     case ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH:
     {
-        float focal_length = params.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
+        float focal_length = params->getFloat(CameraParameters::KEY_FOCAL_LENGTH);
         return focal_length;
     }
     case ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR:
@ -814,6 +856,24 @@ double CameraHandler::getProperty(int propIdx)
|
||||
{
|
||||
return getFocusDistance(ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX);
|
||||
}
|
||||
#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
|
||||
case ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK:
|
||||
{
|
||||
const char* status = params->get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK);
|
||||
if (status == CameraParameters::TRUE)
|
||||
return 1.;
|
||||
else
|
||||
return 0.;
|
||||
}
|
||||
case ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK:
|
||||
{
|
||||
const char* status = params->get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK);
|
||||
if (status == CameraParameters::TRUE)
|
||||
return 1.;
|
||||
else
|
||||
return 0.;
|
||||
}
|
||||
#endif
|
||||
default:
|
||||
LOGW("CameraHandler::getProperty - Unsupported property.");
|
||||
};
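These getters map onto the capture properties exposed through highgui; a minimal sketch of reading them from application code, assuming an Android build and the CV_CAP_PROP_ANDROID_* ids wired up in cap_android.cpp below: ::

    // Hypothetical usage, not part of this patch:
    #include "opencv2/highgui/highgui.hpp"

    cv::VideoCapture cap(CV_CAP_ANDROID);  // first Android camera
    double wbLocked = cap.get(CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK); // 1.0 when locked
    double flash    = cap.get(CV_CAP_PROP_ANDROID_FLASH_MODE);        // index into flashModesNames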
@@ -824,99 +884,151 @@ void CameraHandler::setProperty(int propIdx, double value)
{
LOGD("CameraHandler::setProperty(%d, %f)", propIdx, value);

android::String8 params_str;
params_str = camera->getParameters();
LOGI("Params before set: [%s]", params_str.string());

switch (propIdx)
{
case ANDROID_CAMERA_PROPERTY_FRAMEWIDTH:
{
int w,h;
params.getPreviewSize(&w, &h);
w = (int)value;
params.setPreviewSize(w, h);
params->getPreviewSize(&w, &h);
width = (int)value;
}
break;
case ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT:
{
int w,h;
params.getPreviewSize(&w, &h);
h = (int)value;
params.setPreviewSize(w, h);
params->getPreviewSize(&w, &h);
height = (int)value;
}
break;
case ANDROID_CAMERA_PROPERTY_EXPOSURE:
{
int max_exposure = params.getInt("max-exposure-compensation");
int min_exposure = params.getInt("min-exposure-compensation");
if(max_exposure && min_exposure){
int max_exposure = params->getInt("max-exposure-compensation");
int min_exposure = params->getInt("min-exposure-compensation");
if(max_exposure && min_exposure)
{
int exposure = (int)value;
if(exposure >= min_exposure && exposure <= max_exposure){
params.set("exposure-compensation", exposure);
} else {
if(exposure >= min_exposure && exposure <= max_exposure)
params->set("exposure-compensation", exposure);
else
LOGE("Exposure compensation not in valid range (%i,%i).", min_exposure, max_exposure);
}
} else {
} else
LOGE("Exposure compensation adjust is not supported.");
}

camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_FLASH_MODE:
{
int new_val = (int)value;
if(new_val >= 0 && new_val < ANDROID_CAMERA_FLASH_MODES_NUM){
if(new_val >= 0 && new_val < ANDROID_CAMERA_FLASH_MODES_NUM)
{
const char* mode_name = flashModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_FLASH_MODES, mode_name))
params.set(CameraParameters::KEY_FLASH_MODE, mode_name);
params->set(CameraParameters::KEY_FLASH_MODE, mode_name);
else
LOGE("Flash mode %s is not supported.", mode_name);
} else {
LOGE("Flash mode value not in valid range.");
}
else
LOGE("Flash mode value not in valid range.");

camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_FOCUS_MODE:
{
int new_val = (int)value;
if(new_val >= 0 && new_val < ANDROID_CAMERA_FOCUS_MODES_NUM){
if(new_val >= 0 && new_val < ANDROID_CAMERA_FOCUS_MODES_NUM)
{
const char* mode_name = focusModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_FOCUS_MODES, mode_name))
params.set(CameraParameters::KEY_FOCUS_MODE, mode_name);
params->set(CameraParameters::KEY_FOCUS_MODE, mode_name);
else
LOGE("Focus mode %s is not supported.", mode_name);
} else {
LOGE("Focus mode value not in valid range.");
}
else
LOGE("Focus mode value not in valid range.");

camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_WHITE_BALANCE:
{
int new_val = (int)value;
if(new_val >= 0 && new_val < ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM){
if(new_val >= 0 && new_val < ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM)
{
const char* mode_name = whiteBalanceModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE, mode_name))
params.set(CameraParameters::KEY_WHITE_BALANCE, mode_name);
params->set(CameraParameters::KEY_WHITE_BALANCE, mode_name);
else
LOGE("White balance mode %s is not supported.", mode_name);
} else {
LOGE("White balance mode value not in valid range.");
}
else
LOGE("White balance mode value not in valid range.");

camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_ANTIBANDING:
{
int new_val = (int)value;
if(new_val >= 0 && new_val < ANDROID_CAMERA_ANTIBANDING_MODES_NUM){
if(new_val >= 0 && new_val < ANDROID_CAMERA_ANTIBANDING_MODES_NUM)
{
const char* mode_name = antibandingModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_ANTIBANDING, mode_name))
params.set(CameraParameters::KEY_ANTIBANDING, mode_name);
params->set(CameraParameters::KEY_ANTIBANDING, mode_name);
else
LOGE("Antibanding mode %s is not supported.", mode_name);
} else {
LOGE("Antibanding mode value not in valid range.");
}
else
LOGE("Antibanding mode value not in valid range.");

camera->setParameters(params->flatten());
}
break;
#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
case ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK:
{
if (is_supported(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED, "true"))
{
if (value != 0)
params->set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, CameraParameters::TRUE);
else
params->set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, CameraParameters::FALSE);
LOGE("Expose lock is set");
}
else
LOGE("Expose lock is not supported");

camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK:
{
if (is_supported(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED, "true"))
{
if (value != 0)
params->set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, CameraParameters::TRUE);
else
params->set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, CameraParameters::FALSE);
LOGE("White balance lock is set");
}
else
LOGE("White balance lock is not supported");

camera->setParameters(params->flatten());
}
break;
#endif
default:
LOGW("CameraHandler::setProperty - Unsupported property.");
};

params_str = camera->getParameters();
LOGI("Params after set: [%s]", params_str.string());
}

void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
@@ -935,7 +1047,10 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}

CameraParameters curCameraParameters((*ppcameraHandler)->params.flatten());
// delayed resolution setup to exclude errors during other parameres setup on the fly
// without camera restart
if (((*ppcameraHandler)->width != 0) && ((*ppcameraHandler)->height != 0))
(*ppcameraHandler)->params->setPreviewSize((*ppcameraHandler)->width, (*ppcameraHandler)->height);

#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) \
|| defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
@@ -951,27 +1066,27 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}

handler->camera->setParameters(curCameraParameters.flatten());
handler->params.unflatten(curCameraParameters.flatten());
handler->camera->setParameters((*ppcameraHandler)->params->flatten());

status_t bufferStatus;
# if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
sp<SurfaceTexture> surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
bufferStatus = handler->camera->setPreviewTexture(surfaceTexture);
void* surface_texture_obj = operator new(sizeof(SurfaceTexture) + MAGIC_TAIL);
handler->surface = new(surface_texture_obj) SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
bufferStatus = handler->camera->setPreviewTexture(handler->surface);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus);
# elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
sp<BufferQueue> bufferQueue = new BufferQueue();
sp<BufferQueue::ConsumerListener> queueListener = new ConsumerListenerStub();
bufferQueue->consumerConnect(queueListener);
bufferStatus = handler->camera->setPreviewTexture(bufferQueue);
void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
handler->queue = new(buffer_queue_obj) BufferQueue();
handler->queue->consumerConnect(handler->listener);
bufferStatus = handler->camera->setPreviewTexture(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# elif defined(ANDROID_r4_4_0)
sp<BufferQueue> bufferQueue = new BufferQueue();
sp<IConsumerListener> queueListener = new ConsumerListenerStub();
bufferQueue->consumerConnect(queueListener, true);
bufferStatus = handler->camera->setPreviewTarget(bufferQueue);
void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
handler->queue = new(buffer_queue_obj) BufferQueue();
handler->queue->consumerConnect(handler->listener, true);
bufferStatus = handler->camera->setPreviewTarget(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# endif
@@ -1002,7 +1117,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
LOGD("CameraHandler::applyProperties(): after previousCameraHandler->closeCameraConnect");

LOGD("CameraHandler::applyProperties(): before initCameraConnect");
CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, &curCameraParameters);
CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, (*ppcameraHandler)->params);
LOGD("CameraHandler::applyProperties(): after initCameraConnect, handler=0x%x", (int)handler);
if (handler == NULL) {
LOGE("ERROR in applyProperties --- cannot reinit camera");

@@ -15,7 +15,9 @@ enum {
ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH = 105,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR = 106,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_OPTIMAL = 107,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR = 108
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR = 108,
ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK = 109,
ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK = 110
};

@@ -30,12 +32,12 @@ enum {

enum {
ANDROID_CAMERA_FOCUS_MODE_AUTO = 0,
ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_PICTURE,
ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_VIDEO,
ANDROID_CAMERA_FOCUS_MODE_EDOF,
ANDROID_CAMERA_FOCUS_MODE_FIXED,
ANDROID_CAMERA_FOCUS_MODE_INFINITY,
ANDROID_CAMERA_FOCUS_MODE_MACRO,
ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_PICTURE,
ANDROID_CAMERA_FOCUS_MODES_NUM
};

@@ -75,7 +75,7 @@ Moreover every :ocv:class:`FaceRecognizer` supports the:
Setting the Thresholds
+++++++++++++++++++++++

Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common scenario in face recognition is to tell, wether a face belongs to the training dataset or if it is unknown. You might wonder, why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: It's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer` and since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!
Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common scenario in face recognition is to tell, whether a face belongs to the training dataset or if it is unknown. You might wonder, why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: It's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer` and since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!

Here is an example of setting a threshold for the Eigenfaces method, when creating the model:
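A minimal sketch of such a call with the 2.4 contrib API (the component count and threshold values are illustrative): ::

    // Keep 10 Eigenfaces and reject any prediction whose distance exceeds 10.0:
    int num_components = 10;
    double threshold = 10.0;
    cv::Ptr<cv::FaceRecognizer> model =
        cv::createEigenFaceRecognizer(num_components, threshold);
    // Because FaceRecognizer derives from Algorithm, the threshold can also
    // be changed at runtime:
    model->set("threshold", 0.0); // every prediction is now rejected as unknown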

@@ -71,7 +71,7 @@ You really don't want to create the CSV file by hand. And you really don't want
Fisherfaces for Gender Classification
--------------------------------------

If you want to decide wether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on the Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task. Please see the Face Recognition tutorial for insights into the algorithms. The Fisherfaces instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification <http://www.bytefish.de/blog/gender_classification>`_ shows the recognition rate of the Fisherfaces method for gender classification.
If you want to decide whether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on the Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task. Please see the Face Recognition tutorial for insights into the algorithms. The Fisherfaces instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification <http://www.bytefish.de/blog/gender_classification>`_ shows the recognition rate of the Fisherfaces method for gender classification.

The Fisherfaces method achieves a 98% recognition rate in a subject-independent cross-validation. A subject-independent cross-validation means *images of the person under test are never used for learning the model*. And could you believe it: you can simply use the facerec_fisherfaces demo, that's included in OpenCV.
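A minimal training sketch for this task, assuming the images and labels vectors were already filled from the CSV file (the label encoding is an assumption): ::

    std::vector<cv::Mat> images; // one grayscale face crop per entry
    std::vector<int> labels;     // 0 = male, 1 = female (assumed encoding)
    cv::Ptr<cv::FaceRecognizer> model = cv::createFisherFaceRecognizer();
    model->train(images, labels);
    int predicted = model->predict(testSample); // testSample: cv::Mat of the same size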

@@ -1436,7 +1436,7 @@ description rewritten using

IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);
IplImage gray_img_hdr, *gray_img;
gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0);

...

@@ -1444,6 +1444,18 @@ description rewritten using
int size[] = { 2, 2, 2 };
CvMatND* mat = cvCreateMatND(3, size, CV_32F);
CvMat row_header, *row;
row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0);

..

In C, the header file for this function includes a convenient macro ``cvReshapeND`` that does away with the ``sizeof_header`` parameter. So, the lines containing the call to ``cvReshapeMatND`` in the examples may be replaced as follow:

::

gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);

...

row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0);

..

@@ -49,7 +49,7 @@

#define CV_VERSION_EPOCH 2
#define CV_VERSION_MAJOR 4
#define CV_VERSION_MINOR 8
#define CV_VERSION_MINOR 9
#define CV_VERSION_REVISION 0

#define CVAUX_STR_EXP(__A) #__A

@@ -148,7 +148,7 @@ public:
void write(std::ostream& out, const Mat& m, const int*, int) const
{
out << "[";
writeMat(out, m, ';', ' ', m.cols == 1);
writeMat(out, m, ';', ' ', m.rows == 1);
out << "]";
}

@@ -165,7 +165,7 @@ public:
void write(std::ostream& out, const Mat& m, const int*, int) const
{
out << "[";
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.rows*m.channels() == 1);
out << "]";
}

@@ -187,7 +187,7 @@ public:
"uint8", "int8", "uint16", "int16", "int32", "float32", "float64", "uint64"
};
out << "array([";
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.rows*m.channels() == 1);
out << "], type='" << numpyTypes[m.depth()] << "')";
}

@@ -204,7 +204,7 @@ public:
virtual ~CSVFormatter() {}
void write(std::ostream& out, const Mat& m, const int*, int) const
{
writeMat(out, m, ' ', ' ', m.cols*m.channels() == 1);
writeMat(out, m, ' ', ' ', m.rows*m.channels() == 1);
if(m.rows > 1)
out << "\n";
}
@@ -223,7 +223,7 @@ public:
void write(std::ostream& out, const Mat& m, const int*, int) const
{
out << "{";
writeMat(out, m, ',', ' ', m.cols==1);
writeMat(out, m, ',', ' ', m.rows==1);
out << "}";
}

@@ -66,7 +66,7 @@ Computes the descriptors for a set of keypoints detected in an image (first variant

:param keypoints: Input collection of keypoints. Keypoints for which a descriptor cannot be computed are removed. Sometimes new keypoints can be added, for example: ``SIFT`` duplicates keypoint with several dominant orientations (for each orientation).

:param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are descriptors computed for a ``keypoints[i]`. Row ``j`` is the ``keypoints`` (or ``keypoints[i]``) is the descriptor for keypoint ``j``-th keypoint.
:param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are descriptors computed for a ``keypoints[i]``. Row ``j`` is the ``keypoints`` (or ``keypoints[i]``) is the descriptor for keypoint ``j``-th keypoint.

DescriptorExtractor::create

@@ -277,7 +277,7 @@ Brute-force matcher constructor.

:param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB, BRISK and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).

:param crossCheck: If it is false, this is will be default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMathcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
:param crossCheck: If it is false, this is will be default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
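A short sketch of the cross-check behaviour described above; the descriptor matrices are assumed to come from a binary descriptor such as ORB: ::

    cv::BFMatcher matcher(cv::NORM_HAMMING, true); // crossCheck == true
    std::vector<cv::DMatch> matches;
    matcher.match(queryDescriptors, trainDescriptors, matches);
    // only mutually-nearest pairs (i,j) survive, so no ratio test is needed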

FlannBasedMatcher

@@ -43,7 +43,7 @@ Utilizing Multiple GPUs
-----------------------

In the current version, each of the OpenCV GPU algorithms can use only a single GPU. So, to utilize multiple GPUs, you have to manually distribute the work between GPUs.
Switching active devie can be done using :ocv:func:`gpu::setDevice()` function. For more details please read Cuda C Programing Guide.
Switching active devie can be done using :ocv:func:`gpu::setDevice()` function. For more details please read Cuda C Programming Guide.

While developing algorithms for multiple GPUs, note a data passing overhead. For primitive functions and small images, it can be significant, which may eliminate all the advantages of having multiple GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo Block Matching algorithm has been successfully parallelized using the following algorithm:
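A sketch of the per-thread device selection this implies; the device ids, the filter call and the split into halves are illustrative: ::

    void processHalf(int deviceId, const cv::Mat& half, cv::Mat& result)
    {
        cv::gpu::setDevice(deviceId);        // binds this GPU to the calling thread
        cv::gpu::GpuMat d_src(half), d_dst;  // the upload is part of the overhead noted above
        cv::gpu::GaussianBlur(d_src, d_dst, cv::Size(5, 5), 1.5);
        d_dst.download(result);
    }
    // run processHalf(0, top, outTop) and processHalf(1, bottom, outBottom)
    // from two host threads, then stitch the two results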

@@ -1832,6 +1832,8 @@ PERF_TEST_P(Sz_Dp_MinDist, ImgProc_HoughCircles,
//////////////////////////////////////////////////////////////////////
// GeneralizedHough

#if !defined(__GNUC__) || (__GNUC__ * 10 + __GNUC_MINOR__ != 47)

CV_FLAGS(GHMethod, GHT_POSITION, GHT_SCALE, GHT_ROTATION)

DEF_PARAM_TEST(Method_Sz, GHMethod, cv::Size);
@@ -1918,3 +1920,5 @@ PERF_TEST_P(Method_Sz, DISABLED_ImgProc_GeneralizedHough,
CPU_SANITY_CHECK(positions);
}
}

#endif

@@ -40,6 +40,10 @@
//
//M*/

#if defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ == 47)
# define CUDA_DISABLER
#endif

#if !defined CUDA_DISABLER

#include <thrust/device_ptr.h>

@@ -46,6 +46,10 @@ using namespace std;
using namespace cv;
using namespace cv::gpu;

#if defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ == 47)
# define CUDA_DISABLER
#endif

#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

Ptr<GeneralizedHough_GPU> cv::gpu::GeneralizedHough_GPU::create(int) { throw_nogpu(); return Ptr<GeneralizedHough_GPU>(); }

@@ -336,7 +336,7 @@ GPU_TEST_P(MOG2, getBackgroundImage)
cv::Mat background_gold;
mog2_gold.getBackgroundImage(background_gold);

ASSERT_MAT_NEAR(background_gold, background, 0);
ASSERT_MAT_NEAR(background_gold, background, 1);
}

INSTANTIATE_TEST_CASE_P(GPU_Video, MOG2, testing::Combine(

@@ -185,6 +185,8 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HoughCircles, testing::Combine(
///////////////////////////////////////////////////////////////////////////////////////////////////////
// GeneralizedHough

#if !defined(__GNUC__) || (__GNUC__ * 10 + __GNUC_MINOR__ != 47)

PARAM_TEST_CASE(GeneralizedHough, cv::gpu::DeviceInfo, UseRoi)
{
};
@@ -252,4 +254,6 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, GeneralizedHough, testing::Combine(
ALL_DEVICES,
WHOLE_SUBMAT));

#endif

#endif // HAVE_CUDA

@@ -496,7 +496,7 @@ VideoWriter constructors

:param filename: Name of the output video file.

:param fourcc: 4-character code of codec used to compress the frames. For example, ``CV_FOURCC('P','I','M,'1')`` is a MPEG-1 codec, ``CV_FOURCC('M','J','P','G')`` is a motion-jpeg codec etc. List of codes can be obtained at `Video Codecs by FOURCC <http://www.fourcc.org/codecs.php>`_ page.
:param fourcc: 4-character code of codec used to compress the frames. For example, ``CV_FOURCC('P','I','M','1')`` is a MPEG-1 codec, ``CV_FOURCC('M','J','P','G')`` is a motion-jpeg codec etc. List of codes can be obtained at `Video Codecs by FOURCC <http://www.fourcc.org/codecs.php>`_ page.

:param fps: Framerate of the created video stream.
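A usage sketch with the corrected FOURCC spelling (file name, frame rate and size are illustrative): ::

    cv::VideoWriter writer("out.avi", CV_FOURCC('M','J','P','G'),
                           30.0, cv::Size(640, 480));
    if (writer.isOpened())
        writer << frame; // cv::Mat frame matching the size given above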

@@ -454,6 +454,8 @@ enum
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,

// Properties of cameras available through AVFOUNDATION interface
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
@@ -534,6 +536,7 @@ enum
enum
{
CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
CV_CAP_ANDROID_FOCUS_MODE_EDOF,
CV_CAP_ANDROID_FOCUS_MODE_FIXED,

@@ -289,6 +289,10 @@ double CvCapture_Android::getProperty( int propIdx )
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_OPTIMAL);
case CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR);
case CV_CAP_PROP_ANDROID_EXPOSE_LOCK:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK);
case CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK);
default:
CV_Error( CV_StsOutOfRange, "Failed attempt to GET unsupported camera property." );
break;
@@ -327,14 +331,23 @@ bool CvCapture_Android::setProperty( int propIdx, double propValue )
case CV_CAP_PROP_ANDROID_ANTIBANDING:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_ANTIBANDING, propValue);
break;
case CV_CAP_PROP_ANDROID_EXPOSE_LOCK:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK, propValue);
break;
case CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK, propValue);
break;
default:
CV_Error( CV_StsOutOfRange, "Failed attempt to SET unsupported camera property." );
return false;
}

if (propIdx != CV_CAP_PROP_AUTOGRAB) {// property for highgui class CvCapture_Android only
// Only changes in frame size require camera restart
if ((propIdx == CV_CAP_PROP_FRAME_WIDTH) || (propIdx == CV_CAP_PROP_FRAME_HEIGHT))
{ // property for highgui class CvCapture_Android only
m_CameraParamsChanged = true;
}

res = true;
}
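The same switch is reachable from the C capture API; a sketch, assuming an opened Android capture: ::

    CvCapture* capture = cvCreateCameraCapture(CV_CAP_ANDROID);
    cvSetCaptureProperty(capture, CV_CAP_PROP_ANDROID_EXPOSE_LOCK, 1);
    // only a frame-size change sets m_CameraParamsChanged and restarts the camera:
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 1280);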

@@ -1313,6 +1313,8 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
}

//cleanup
CFRelease(cfData);
CVPixelBufferRelease(pixelBuffer);
CGImageRelease(cgImage);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);

@@ -288,7 +288,7 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
}

public Mat rgba() {
Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2BGR_NV12, 4);
Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2RGBA_NV21, 4);
return mRgba;
}

@@ -175,7 +175,6 @@ public class NativeCameraView extends CameraBridgeViewBase {
}

deliverAndDrawFrame(mFrame);

} while (!mStopThread);
}
}

@@ -42,6 +42,11 @@ public class OpenCVLoader
*/
public static final String OPENCV_VERSION_2_4_8 = "2.4.8";

/**
* OpenCV Library version 2.4.9.
*/
public static final String OPENCV_VERSION_2_4_9 = "2.4.9";

/**
* Loads and initializes OpenCV library from current application package. Roughly, it's an analog of system.loadLibrary("opencv_java").
* @return Returns true is initialization of OpenCV was successful.

@@ -730,12 +730,12 @@ private:
m_pFViVarRes = (int*)cvAlloc(sizeof(int)*m_Dim);
m_Sizes = (int*)cvAlloc(sizeof(int)*m_Dim);

{ /* Create init sparce matrix: */
{ /* Create init sparse matrix: */
int i;
for(i=0;i<m_Dim;++i)m_Sizes[i] = m_BinNum;
m_HistMat.Realloc(m_Dim,m_Sizes,SPARSE);
m_HistVolumeSaved = 0;
} /* Create init sparce matrix. */
} /* Create init sparse matrix. */
} /* AllocData. */

void FreeData()

@@ -58,7 +58,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
CvMat *X0,CvMat *observRes,CvMat *resultX,
int maxIter,double epsilon)
{
/* This is not sparce method */
/* This is not sparse method */
/* Make optimization using */
/* func - function to compute */
/* uses function to compute jacobian */

@@ -240,7 +240,7 @@ The class ``SURF_GPU`` uses some buffers and provides access to it. All buffers

ocl::SURF_OCL
-------------
.. ocv:class:: ocl::SURF_OCL
.. ocv:class:: ocl::SURF_OCL : public Feature2D

Class used for extracting Speeded Up Robust Features (SURF) from an image. ::

@@ -53,7 +53,7 @@ namespace cv
//! Speeded up robust features, port from GPU module.
////////////////////////////////// SURF //////////////////////////////////////////

class CV_EXPORTS SURF_OCL
class CV_EXPORTS SURF_OCL : public cv::Feature2D
{
public:
enum KeypointLayout
@@ -72,10 +72,13 @@ namespace cv
SURF_OCL();
//! the full constructor taking all the necessary parameters
explicit SURF_OCL(double _hessianThreshold, int _nOctaves = 4,
int _nOctaveLayers = 2, bool _extended = false, float _keypointsRatio = 0.01f, bool _upright = false);
int _nOctaveLayers = 2, bool _extended = true, float _keypointsRatio = 0.01f, bool _upright = false);

//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;

int descriptorType() const;

//! upload host keypoints to device memory
void uploadKeypoints(const vector<cv::KeyPoint> &keypoints, oclMat &keypointsocl);
//! download keypoints from device to host memory
@@ -103,6 +106,17 @@ namespace cv
void operator()(const oclMat &img, const oclMat &mask, std::vector<KeyPoint> &keypoints, std::vector<float> &descriptors,
bool useProvidedKeypoints = false);

//! finds the keypoints using fast hessian detector used in SURF
void operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false) const;

AlgorithmInfo* info() const;

void releaseMemory();

// SURF parameters
@@ -116,7 +130,9 @@ namespace cv
oclMat sum, mask1, maskSum, intBuffer;
oclMat det, trace;
oclMat maxPosBuffer;

protected:
void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const;
void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const;
};
}
}
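A usage sketch built on the oclMat overload declared above (image path and hessian threshold are illustrative): ::

    cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
    cv::ocl::oclMat d_img(img), d_mask; // empty mask = whole image
    cv::ocl::SURF_OCL surf(500.0);
    std::vector<cv::KeyPoint> keypoints;
    std::vector<float> descriptors;
    surf(d_img, d_mask, keypoints, descriptors); // detect and extract on the device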

@@ -11,4 +11,33 @@ static const char * impls[] = {
"plain"
};

CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo())
#ifdef HAVE_OPENCL
#define DUMP_PROPERTY_XML(propertyName, propertyValue) \
do { \
std::stringstream ssName, ssValue;\
ssName << propertyName;\
ssValue << propertyValue; \
::testing::Test::RecordProperty(ssName.str(), ssValue.str()); \
} while (false)

#define DUMP_MESSAGE_STDOUT(msg) \
do { \
std::cout << msg << std::endl; \
} while (false)

#include "opencv2/ocl/private/opencl_dumpinfo.hpp"
#endif

int main(int argc, char **argv)
{
::perf::TestBase::setPerformanceStrategy(::perf::PERF_STRATEGY_SIMPLE);
#if defined(HAVE_CUDA) && defined(HAVE_OPENCL)
CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls, perf::printCudaInfo(), dumpOpenCLDevice());
#elif defined(HAVE_CUDA)
CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls, perf::printCudaInfo());
#elif defined(HAVE_OPENCL)
CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls, dumpOpenCLDevice());
#else
CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls)
#endif
}

@@ -9,6 +9,8 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include "cvconfig.h"

#include "opencv2/ts/ts.hpp"

#include "opencv2/nonfree/nonfree.hpp"

@@ -16,9 +16,7 @@ PERF_TEST_P(surf, detect, testing::Values(SURF_IMAGES))
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);

if (frame.empty())
FAIL() << "Unable to load source image " << filename;
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;

Mat mask;
declare.in(frame).time(90);
@@ -34,9 +32,7 @@ PERF_TEST_P(surf, extract, testing::Values(SURF_IMAGES))
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);

if (frame.empty())
FAIL() << "Unable to load source image " << filename;
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;

Mat mask;
declare.in(frame).time(90);
@@ -55,9 +51,7 @@ PERF_TEST_P(surf, full, testing::Values(SURF_IMAGES))
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);

if (frame.empty())
FAIL() << "Unable to load source image " << filename;
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;

Mat mask;
declare.in(frame).time(90);

@@ -121,4 +121,93 @@ PERF_TEST_P(OCL_SURF, DISABLED_without_data_transfer, testing::Values(SURF_IMAGE
SANITY_CHECK_NOTHING();
}

PERF_TEST_P(OCL_SURF, DISABLED_detect, testing::Values(SURF_IMAGES))
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;

declare.in(frame);

Mat mask;
vector<KeyPoint> points;
Ptr<Feature2D> detector;

if (getSelectedImpl() == "plain")
{
detector = new SURF;
TEST_CYCLE() detector->operator()(frame, mask, points, noArray());
}
else if (getSelectedImpl() == "ocl")
{
detector = new ocl::SURF_OCL;
OCL_TEST_CYCLE() detector->operator()(frame, mask, points, noArray());
}
else CV_TEST_FAIL_NO_IMPL();

SANITY_CHECK_KEYPOINTS(points, 1e-3);
}

PERF_TEST_P(OCL_SURF, DISABLED_extract, testing::Values(SURF_IMAGES))
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;

declare.in(frame);

Mat mask;
Ptr<Feature2D> detector;
vector<KeyPoint> points;
vector<float> descriptors;

if (getSelectedImpl() == "plain")
{
detector = new SURF;
detector->operator()(frame, mask, points, noArray());
TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, true);
}
else if (getSelectedImpl() == "ocl")
{
detector = new ocl::SURF_OCL;
detector->operator()(frame, mask, points, noArray());
OCL_TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, true);
}
else CV_TEST_FAIL_NO_IMPL();

SANITY_CHECK(descriptors, 1e-4);
}

PERF_TEST_P(OCL_SURF, DISABLED_full, testing::Values(SURF_IMAGES))
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;

declare.in(frame).time(90);

Mat mask;
Ptr<Feature2D> detector;
vector<KeyPoint> points;
vector<float> descriptors;

if (getSelectedImpl() == "plain")
{
detector = new SURF;
TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, false);
}
else if (getSelectedImpl() == "ocl")
{
detector = new ocl::SURF_OCL;
detector->operator()(frame, mask, points, noArray());
OCL_TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, false);
}
else CV_TEST_FAIL_NO_IMPL();

SANITY_CHECK_KEYPOINTS(points, 1e-3);
SANITY_CHECK(descriptors, 1e-4);
}

#endif // HAVE_OPENCV_OCL

@@ -63,6 +63,20 @@ CV_INIT_ALGORITHM(SIFT, "Feature2D.SIFT",
obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold);
obj.info()->addParam(obj, "sigma", obj.sigma))

#ifdef HAVE_OPENCV_OCL

namespace ocl {
CV_INIT_ALGORITHM(SURF_OCL, "Feature2D.SURF_OCL",
obj.info()->addParam(obj, "hessianThreshold", obj.hessianThreshold);
obj.info()->addParam(obj, "nOctaves", obj.nOctaves);
obj.info()->addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
obj.info()->addParam(obj, "extended", obj.extended);
obj.info()->addParam(obj, "upright", obj.upright))
}

#endif

///////////////////////////////////////////////////////////////////////////////////////////////////////////

bool initModule_nonfree(void)

@@ -283,9 +283,9 @@ private:
cv::ocl::SURF_OCL::SURF_OCL()
{
hessianThreshold = 100.0f;
extended = true;
extended = false;
nOctaves = 4;
nOctaveLayers = 2;
nOctaveLayers = 3;
keypointsRatio = 0.01f;
upright = false;
}
@@ -305,6 +305,11 @@ int cv::ocl::SURF_OCL::descriptorSize() const
return extended ? 128 : 64;
}

int cv::ocl::SURF_OCL::descriptorType() const
{
return CV_32F;
}

void cv::ocl::SURF_OCL::uploadKeypoints(const vector<KeyPoint> &keypoints, oclMat &keypointsGPU)
{
if (keypoints.empty())
@@ -447,6 +452,81 @@ void cv::ocl::SURF_OCL::operator()(const oclMat &img, const oclMat &mask, vector
downloadDescriptors(descriptorsGPU, descriptors);
}

void cv::ocl::SURF_OCL::operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints) const
{
this->operator()(img, mask, keypoints, noArray(), false);
}

void cv::ocl::SURF_OCL::operator()(InputArray img, InputArray mask, vector<KeyPoint> &keypoints,
OutputArray descriptors, bool useProvidedKeypoints) const
{
oclMat _img, _mask;
if(img.kind() == _InputArray::OCL_MAT)
_img = *(oclMat*)img.obj;
else
_img.upload(img.getMat());
if(_img.channels() != 1)
{
oclMat temp;
cvtColor(_img, temp, COLOR_BGR2GRAY);
_img = temp;
}

if( !mask.empty() )
{
if(mask.kind() == _InputArray::OCL_MAT)
_mask = *(oclMat*)mask.obj;
else
_mask.upload(mask.getMat());
}

SURF_OCL_Invoker surf((SURF_OCL&)*this, _img, _mask);
oclMat keypointsGPU;

if (!useProvidedKeypoints || !upright)
((SURF_OCL*)this)->uploadKeypoints(keypoints, keypointsGPU);

if (!useProvidedKeypoints)
surf.detectKeypoints(keypointsGPU);
else if (!upright)
surf.findOrientation(keypointsGPU);
if(keypointsGPU.cols*keypointsGPU.rows != 0)
((SURF_OCL*)this)->downloadKeypoints(keypointsGPU, keypoints);

if( descriptors.needed() )
{
oclMat descriptorsGPU;
surf.computeDescriptors(keypointsGPU, descriptorsGPU, descriptorSize());
Size sz = descriptorsGPU.size();
if( descriptors.kind() == _InputArray::STD_VECTOR )
{
CV_Assert(descriptors.type() == CV_32F);
std::vector<float>* v = (std::vector<float>*)descriptors.obj;
v->resize(sz.width*sz.height);
Mat m(sz, CV_32F, &v->at(0));
descriptorsGPU.download(m);
}
else
{
descriptors.create(sz, CV_32F);
Mat m = descriptors.getMat();
descriptorsGPU.download(m);
}
}
}

void cv::ocl::SURF_OCL::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
{
(*this)(image, mask, keypoints, noArray(), false);
}

void cv::ocl::SURF_OCL::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}

void cv::ocl::SURF_OCL::releaseMemory()
{
sum.release();

@@ -50,6 +50,22 @@ const string DETECTOR_DIR = FEATURES2D_DIR + "/feature_detectors";
const string DESCRIPTOR_DIR = FEATURES2D_DIR + "/descriptor_extractors";
const string IMAGE_FILENAME = "tsukuba.png";

#if defined(HAVE_OPENCV_OCL) && 0 // unblock this to see SURF_OCL tests failures
static Ptr<Feature2D> getSURF()
{
ocl::PlatformsInfo p;
if(ocl::getOpenCLPlatforms(p) > 0)
return new ocl::SURF_OCL;
else
return new SURF;
}
#else
static Ptr<Feature2D> getSURF()
{
return new SURF;
}
#endif

/****************************************************************************************\
*                     Regression tests for feature detectors comparing keypoints.       *
\****************************************************************************************/
@@ -978,7 +994,7 @@ TEST( Features2d_Detector_SIFT, regression )

TEST( Features2d_Detector_SURF, regression )
{
CV_FeatureDetectorTest test( "detector-surf", FeatureDetector::create("SURF") );
CV_FeatureDetectorTest test( "detector-surf", Ptr<FeatureDetector>(getSURF()) );
test.safe_run();
}

@@ -995,7 +1011,7 @@ TEST( Features2d_DescriptorExtractor_SIFT, regression )
TEST( Features2d_DescriptorExtractor_SURF, regression )
{
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-surf", 0.05f,
DescriptorExtractor::create("SURF") );
Ptr<DescriptorExtractor>(getSURF()) );
test.safe_run();
}

@@ -1036,10 +1052,10 @@ TEST(Features2d_BruteForceDescriptorMatcher_knnMatch, regression)
const int sz = 100;
const int k = 3;

Ptr<DescriptorExtractor> ext = DescriptorExtractor::create("SURF");
Ptr<DescriptorExtractor> ext = Ptr<DescriptorExtractor>(getSURF());
ASSERT_TRUE(ext != NULL);

Ptr<FeatureDetector> det = FeatureDetector::create("SURF");
Ptr<FeatureDetector> det = Ptr<FeatureDetector>(getSURF());
//"%YAML:1.0\nhessianThreshold: 8000.\noctaves: 3\noctaveLayers: 4\nupright: 0\n"
ASSERT_TRUE(det != NULL);

@@ -1096,7 +1112,11 @@ public:
protected:
void run(int)
{
Ptr<Feature2D> f = Algorithm::create<Feature2D>("Feature2D." + fname);
Ptr<Feature2D> f;
if(fname == "SURF")
f = getSURF();
else
f = Algorithm::create<Feature2D>("Feature2D." + fname);
if(f.empty())
return;
string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/";

@@ -48,6 +48,12 @@ using namespace cv;
const string IMAGE_TSUKUBA = "/features2d/tsukuba.png";
const string IMAGE_BIKES = "/detectors_descriptors_evaluation/images_datasets/bikes/img1.png";

#if defined(HAVE_OPENCV_OCL) && 0 // unblock this to see SURF_OCL tests failures
#define SURF_NAME "Feature2D.SURF_OCL"
#else
#define SURF_NAME "Feature2D.SURF"
#endif

#define SHOW_DEBUG_LOG 0

static
@@ -615,7 +621,7 @@ protected:
*/
TEST(Features2d_RotationInvariance_Detector_SURF, regression)
{
DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
0.44f,
0.76f);
test.safe_run();
@@ -634,8 +640,8 @@ TEST(Features2d_RotationInvariance_Detector_SIFT, DISABLED_regression)
*/
TEST(Features2d_RotationInvariance_Descriptor_SURF, regression)
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
Algorithm::create<DescriptorExtractor>(SURF_NAME),
NORM_L1,
0.83f);
test.safe_run();
@@ -655,7 +661,7 @@ TEST(Features2d_RotationInvariance_Descriptor_SIFT, regression)
*/
TEST(Features2d_ScaleInvariance_Detector_SURF, regression)
{
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
0.64f,
0.84f);
test.safe_run();
@@ -674,8 +680,8 @@ TEST(Features2d_ScaleInvariance_Detector_SIFT, regression)
*/
TEST(Features2d_ScaleInvariance_Descriptor_SURF, regression)
{
DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
Algorithm::create<DescriptorExtractor>(SURF_NAME),
NORM_L1,
0.61f);
test.safe_run();

@@ -65,15 +65,15 @@ ocl::integral
-----------------
Computes an integral image.

.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth=-1)
.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, oclMat &sqsum)

.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, int sdepth=-1)
.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum)

:param src: Source image. Only ``CV_8UC1`` images are supported for now.

:param sum: Integral image containing 32-bit unsigned integer or 32-bit floating-point .
:param sum: Integral image containing 32-bit unsigned integer values packed into ``CV_32SC1`` .

:param sqsum: Sqsum values is ``CV_32FC1`` or ``CV_64FC1`` type.
:param sqsum: Sqsum values is ``CV_32FC1`` type.

.. seealso:: :ocv:func:`integral`
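A sketch of the two overloads as now documented; the input must be ``CV_8UC1``: ::

    cv::ocl::oclMat d_src(src8u); // src8u: a CV_8UC1 cv::Mat
    cv::ocl::oclMat d_sum, d_sqsum;
    cv::ocl::integral(d_src, d_sum);          // sum only, CV_32SC1
    cv::ocl::integral(d_src, d_sum, d_sqsum); // sum plus squared sum (CV_32FC1)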

@@ -859,10 +859,10 @@ namespace cv
CV_EXPORTS void warpPerspective(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);

//! computes the integral image and integral for the squared image
// sum will support CV_32S, CV_32F, sqsum - support CV32F, CV_64F
// sum will have CV_32S type, sqsum - CV32F type
// supports only CV_8UC1 source type
CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth=-1 );
CV_EXPORTS void integral(const oclMat &src, oclMat &sum, int sdepth=-1 );
CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum);
CV_EXPORTS void integral(const oclMat &src, oclMat &sum);
CV_EXPORTS void cornerHarris(const oclMat &src, oclMat &dst, int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
CV_EXPORTS void cornerHarris_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
@@ -936,7 +936,7 @@ namespace cv
Size m_maxSize;
vector<CvSize> sizev;
vector<float> scalev;
oclMat gimg1, gsum, gsqsum, gsqsum_t;
oclMat gimg1, gsum, gsqsum;
void * buffers;
};

@@ -92,12 +92,12 @@ PERF_TEST(HaarFixture, Haar)
typedef std::tr1::tuple<std::string, std::string, int> Cascade_Image_MinSize_t;
typedef perf::TestBaseWithParam<Cascade_Image_MinSize_t> Cascade_Image_MinSize;

OCL_PERF_TEST_P(Cascade_Image_MinSize, DISABLED_CascadeClassifier,
OCL_PERF_TEST_P(Cascade_Image_MinSize, CascadeClassifier,
testing::Combine(testing::Values( string("cv/cascadeandhog/cascades/haarcascade_frontalface_alt.xml"),
string("cv/cascadeandhog/cascades/haarcascade_frontalface_alt2.xml") ),
testing::Values(string("cv/shared/lena.png"),
string("cv/cascadeandhog/images/bttf301.png")/*,
string("cv/cascadeandhog/images/class57.png")*/ ),
testing::Values( string("cv/shared/lena.png"),
string("cv/cascadeandhog/images/bttf301.png"),
string("cv/cascadeandhog/images/class57.png") ),
testing::Values(30, 64, 90)))
{
const string cascasePath = get<0>(GetParam());
@@ -121,7 +121,7 @@ OCL_PERF_TEST_P(Cascade_Image_MinSize, DISABLED_CascadeClassifier,
faces.clear();

startTimer();
cc.detectMultiScale(img, faces, 1.1, 3, 0, minSize);
cc.detectMultiScale(img, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, minSize);
stopTimer();
}
}
@@ -137,7 +137,7 @@ OCL_PERF_TEST_P(Cascade_Image_MinSize, DISABLED_CascadeClassifier,
ocl::finish();

startTimer();
cc.detectMultiScale(uimg, faces, 1.1, 3, 0, minSize);
cc.detectMultiScale(uimg, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, minSize);
stopTimer();
}
}

@@ -237,7 +237,7 @@ OCL_PERF_TEST_P(CornerHarrisFixture, CornerHarris,
typedef tuple<Size, MatDepth> IntegralParams;
typedef TestBaseWithParam<IntegralParams> IntegralFixture;

OCL_PERF_TEST_P(IntegralFixture, Integral1, ::testing::Combine(OCL_TEST_SIZES, OCL_PERF_ENUM(CV_32S, CV_32F)))
OCL_PERF_TEST_P(IntegralFixture, DISABLED_Integral1, ::testing::Combine(OCL_TEST_SIZES, OCL_PERF_ENUM(CV_32S, CV_32F)))
{
const IntegralParams params = GetParam();
const Size srcSize = get<0>(params);
@@ -250,7 +250,7 @@ OCL_PERF_TEST_P(IntegralFixture, Integral1, ::testing::Combine(OCL_TEST_SIZES, O
{
ocl::oclMat oclSrc(src), oclDst;

OCL_TEST_CYCLE() cv::ocl::integral(oclSrc, oclDst, sdepth);
// OCL_TEST_CYCLE() cv::ocl::integral(oclSrc, oclDst, sdepth);

oclDst.download(dst);

@@ -109,13 +109,13 @@ OCL_PERF_TEST_P(CV_TM_CCORR_NORMEDFixture, matchTemplate,

oclDst.download(dst);

SANITY_CHECK(dst, 3e-2);
SANITY_CHECK(dst, 2e-2);
}
else if (RUN_PLAIN_IMPL)
{
TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED);

SANITY_CHECK(dst, 3e-2);
SANITY_CHECK(dst, 2e-2);
}
else
OCL_PERF_ELSE

@@ -747,15 +747,6 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
oclMat gsum(totalheight + 4, gimg.cols + 1, CV_32SC1);
oclMat gsqsum(totalheight + 4, gimg.cols + 1, CV_32FC1);

int sdepth = 0;
if(Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE))
sdepth = CV_64FC1;
else
sdepth = CV_32FC1;
sdepth = CV_MAT_DEPTH(sdepth);
int type = CV_MAKE_TYPE(sdepth, 1);
oclMat gsqsum_t(totalheight + 4, gimg.cols + 1, type);

cl_mem stagebuffer;
cl_mem nodebuffer;
cl_mem candidatebuffer;
@@ -763,7 +754,6 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
cv::Rect roi, roi2;
cv::Mat imgroi, imgroisq;
cv::ocl::oclMat resizeroi, gimgroi, gimgroisq;

int grp_per_CU = 12;

size_t blocksize = 8;
@@ -783,7 +773,7 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
roi2 = Rect(0, 0, sz.width - 1, sz.height - 1);
resizeroi = gimg1(roi2);
gimgroi = gsum(roi);
gimgroisq = gsqsum_t(roi);
gimgroisq = gsqsum(roi);
int width = gimgroi.cols - 1 - cascade->orig_window_size.width;
int height = gimgroi.rows - 1 - cascade->orig_window_size.height;
scaleinfo[i].width_height = (width << 16) | height;
@@ -797,13 +787,8 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
scaleinfo[i].factor = factor;
cv::ocl::resize(gimg, resizeroi, Size(sz.width - 1, sz.height - 1), 0, 0, INTER_LINEAR);
cv::ocl::integral(resizeroi, gimgroi, gimgroisq);

indexy += sz.height;
}
if(gsqsum_t.depth() == CV_64F)
gsqsum_t.convertTo(gsqsum, CV_32FC1);
else
gsqsum = gsqsum_t;

gcascade = (GpuHidHaarClassifierCascade *)cascade->hid_cascade;
stage = (GpuHidHaarStageClassifier *)(gcascade + 1);
@@ -1040,12 +1025,7 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
int n_factors = 0;
oclMat gsum;
oclMat gsqsum;
oclMat gsqsum_t;
cv::ocl::integral(gimg, gsum, gsqsum_t);
if(gsqsum_t.depth() == CV_64F)
gsqsum_t.convertTo(gsqsum, CV_32FC1);
else
gsqsum = gsqsum_t;
cv::ocl::integral(gimg, gsum, gsqsum);
CvSize sz;
vector<CvSize> sizev;
vector<float> scalev;
@@ -1320,16 +1300,12 @@ void cv::ocl::OclCascadeClassifierBuf::detectMultiScale(oclMat &gimg, CV_OUT std
roi2 = Rect(0, 0, sz.width - 1, sz.height - 1);
resizeroi = gimg1(roi2);
gimgroi = gsum(roi);
gimgroisq = gsqsum_t(roi);
gimgroisq = gsqsum(roi);

cv::ocl::resize(gimg, resizeroi, Size(sz.width - 1, sz.height - 1), 0, 0, INTER_LINEAR);
cv::ocl::integral(resizeroi, gimgroi, gimgroisq);
indexy += sz.height;
}
if(gsqsum_t.depth() == CV_64F)
gsqsum_t.convertTo(gsqsum, CV_32FC1);
else
gsqsum = gsqsum_t;

gcascade = (GpuHidHaarClassifierCascade *)(cascade->hid_cascade);
stage = (GpuHidHaarStageClassifier *)(gcascade + 1);
@@ -1391,11 +1367,7 @@ void cv::ocl::OclCascadeClassifierBuf::detectMultiScale(oclMat &gimg, CV_OUT std
}
else
{
cv::ocl::integral(gimg, gsum, gsqsum_t);
if(gsqsum_t.depth() == CV_64F)
gsqsum_t.convertTo(gsqsum, CV_32FC1);
else
gsqsum = gsqsum_t;
cv::ocl::integral(gimg, gsum, gsqsum);

gcascade = (GpuHidHaarClassifierCascade *)cascade->hid_cascade;

@@ -1621,7 +1593,6 @@ void cv::ocl::OclCascadeClassifierBuf::CreateFactorRelatedBufs(
gimg1.release();
gsum.release();
gsqsum.release();
gsqsum_t.release();
}
else if (!(m_flags & CV_HAAR_SCALE_IMAGE) && (flags & CV_HAAR_SCALE_IMAGE))
{
@@ -1696,16 +1667,6 @@ void cv::ocl::OclCascadeClassifierBuf::CreateFactorRelatedBufs(
gsum.create(totalheight + 4, cols + 1, CV_32SC1);
gsqsum.create(totalheight + 4, cols + 1, CV_32FC1);

int sdepth = 0;
if(Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE))
sdepth = CV_64FC1;
else
sdepth = CV_32FC1;
sdepth = CV_MAT_DEPTH(sdepth);
int type = CV_MAKE_TYPE(sdepth, 1);

gsqsum_t.create(totalheight + 4, cols + 1, type);

scaleinfo = (detect_piramid_info *)malloc(sizeof(detect_piramid_info) * loopcount);
for( int i = 0; i < loopcount; i++ )
{

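The hunks above all make the same edit: the Haar path used to ask cv::ocl::integral for a squared-sum buffer whose depth depended on FEATURE_CL_DOUBLE and then normalized it to CV_32FC1 by hand; with integral now producing CV_32F directly, the gsqsum_t staging buffer and the convert-or-copy branch disappear. A minimal standalone sketch of the pattern being removed (hypothetical driver code, not a function from the tree):

    // Old caller-side fallback, as deleted above: compute the squared integral
    // at whatever depth the device supports, then force CV_32FC1 for the kernels.
    cv::ocl::oclMat gsum, gsqsum, gsqsum_t;
    cv::ocl::integral(gimg, gsum, gsqsum_t);      // depth varied with FEATURE_CL_DOUBLE
    if (gsqsum_t.depth() == CV_64F)
        gsqsum_t.convertTo(gsqsum, CV_32FC1);     // double-capable device: downconvert
    else
        gsqsum = gsqsum_t;                        // float-only device: shallow copy
    // New form, per this commit: cv::ocl::integral(gimg, gsum, gsqsum);
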
@@ -898,7 +898,7 @@ namespace cv
////////////////////////////////////////////////////////////////////////
// integral

void integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth)
void integral(const oclMat &src, oclMat &sum, oclMat &sqsum)
{
CV_Assert(src.type() == CV_8UC1);
if (!src.clCxt->supportsFeature(ocl::FEATURE_CL_DOUBLE) && src.depth() == CV_64F)
@@ -907,11 +907,6 @@ namespace cv
return;
}

if( sdepth <= 0 )
sdepth = CV_32S;
sdepth = CV_MAT_DEPTH(sdepth);
int type = CV_MAKE_TYPE(sdepth, 1);

int vlen = 4;
int offset = src.offset / vlen;
int pre_invalid = src.offset % vlen;
@@ -919,26 +914,17 @@ namespace cv

oclMat t_sum , t_sqsum;
int w = src.cols + 1, h = src.rows + 1;

char build_option[250];
if(Context::getContext()->supportsFeature(ocl::FEATURE_CL_DOUBLE))
{
t_sqsum.create(src.cols, src.rows, CV_64FC1);
sqsum.create(h, w, CV_64FC1);
sprintf(build_option, "-D TYPE=double -D TYPE4=double4 -D convert_TYPE4=convert_double4");
}
else
{
t_sqsum.create(src.cols, src.rows, CV_32FC1);
sqsum.create(h, w, CV_32FC1);
sprintf(build_option, "-D TYPE=float -D TYPE4=float4 -D convert_TYPE4=convert_float4");
}
int depth = src.depth() == CV_8U ? CV_32S : CV_64F;
int type = CV_MAKE_TYPE(depth, 1);

t_sum.create(src.cols, src.rows, type);
sum.create(h, w, type);

int sum_offset = sum.offset / sum.elemSize();
int sqsum_offset = sqsum.offset / sqsum.elemSize();
t_sqsum.create(src.cols, src.rows, CV_32FC1);
sqsum.create(h, w, CV_32FC1);

int sum_offset = sum.offset / vlen;
int sqsum_offset = sqsum.offset / vlen;

vector<pair<size_t , const void *> > args;
args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data ));
@@ -950,9 +936,8 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.cols ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sqsum.step));
size_t gt[3] = {((vcols + 1) / 2) * 256, 1, 1}, lt[3] = {256, 1, 1};
openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_cols", gt, lt, args, -1, sdepth, build_option);
openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_cols", gt, lt, args, -1, depth);

args.clear();
args.push_back( make_pair( sizeof(cl_mem) , (void *)&t_sum.data ));
@@ -962,16 +947,15 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.rows ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.cols ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sqsum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sqsum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sqsum_offset));
size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1};
openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_rows", gt2, lt2, args, -1, sdepth, build_option);
openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_rows", gt2, lt2, args, -1, depth);
}

void integral(const oclMat &src, oclMat &sum, int sdepth)
void integral(const oclMat &src, oclMat &sum)
{
CV_Assert(src.type() == CV_8UC1);
int vlen = 4;
@@ -979,13 +963,10 @@ namespace cv
int pre_invalid = src.offset % vlen;
int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;

if( sdepth <= 0 )
sdepth = CV_32S;
sdepth = CV_MAT_DEPTH(sdepth);
int type = CV_MAKE_TYPE(sdepth, 1);

oclMat t_sum;
int w = src.cols + 1, h = src.rows + 1;
int depth = src.depth() == CV_8U ? CV_32S : CV_32F;
int type = CV_MAKE_TYPE(depth, 1);

t_sum.create(src.cols, src.rows, type);
sum.create(h, w, type);
@@ -1001,7 +982,7 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step));
size_t gt[3] = {((vcols + 1) / 2) * 256, 1, 1}, lt[3] = {256, 1, 1};
openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_cols", gt, lt, args, -1, sdepth);
openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_cols", gt, lt, args, -1, depth);

args.clear();
args.push_back( make_pair( sizeof(cl_mem) , (void *)&t_sum.data ));
@@ -1012,7 +993,7 @@ namespace cv
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset));
size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1};
openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, sdepth);
openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, depth);
}

/////////////////////// corner //////////////////////////////

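Both integral overloads lose their sdepth parameter here: the sum depth is now derived from the source (CV_32S for the asserted CV_8UC1 input) and the squared sum is created as CV_32FC1 unconditionally, which is also why the TYPE/build_option plumbing goes away. A hedged sketch of the post-change call pattern (variable names are illustrative, not from the tree):

    cv::ocl::oclMat src8u;                 // CV_8UC1, as required by the CV_Assert above
    cv::ocl::oclMat sum, sqsum;
    cv::ocl::integral(src8u, sum);         // sum: CV_32SC1, sized (rows+1) x (cols+1)
    cv::ocl::integral(src8u, sum, sqsum);  // sqsum: CV_32FC1, same geometry
    // Callers that previously passed sdepth (e.g. CV_64F) must convert afterwards.
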
@@ -245,15 +245,12 @@ namespace cv
void matchTemplate_CCORR_NORMED(
const oclMat &image, const oclMat &templ, oclMat &result, MatchTemplateBuf &buf)
{
cv::ocl::oclMat temp;
matchTemplate_CCORR(image, templ, result, buf);
buf.image_sums.resize(1);
buf.image_sqsums.resize(1);
integral(image.reshape(1), buf.image_sums[0], temp);
if(temp.depth() == CV_64F)
temp.convertTo(buf.image_sqsums[0], CV_32FC1);
else
buf.image_sqsums[0] = temp;

integral(image.reshape(1), buf.image_sums[0], buf.image_sqsums[0]);

unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];

Context *clCxt = image.clCxt;
@@ -419,12 +416,7 @@ namespace cv
{
buf.image_sums.resize(1);
buf.image_sqsums.resize(1);
cv::ocl::oclMat temp;
integral(image, buf.image_sums[0], temp);
if(temp.depth() == CV_64F)
temp.convertTo(buf.image_sqsums[0], CV_32FC1);
else
buf.image_sqsums[0] = temp;
integral(image, buf.image_sums[0], buf.image_sqsums[0]);

templ_sum[0] = (float)sum(templ)[0];

@@ -460,14 +452,10 @@ namespace cv
templ_sum *= scale;
buf.image_sums.resize(buf.images.size());
buf.image_sqsums.resize(buf.images.size());
cv::ocl::oclMat temp;

for(int i = 0; i < image.oclchannels(); i ++)
{
integral(buf.images[i], buf.image_sums[i], temp);
if(temp.depth() == CV_64F)
temp.convertTo(buf.image_sqsums[i], CV_32FC1);
else
buf.image_sqsums[i] = temp;
integral(buf.images[i], buf.image_sums[i], buf.image_sqsums[i]);
}

switch(image.oclchannels())

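With the squared integral arriving as CV_32F, matchTemplate_CCORR_NORMED and the mean-shifted variants can feed buf.image_sqsums[i] straight into the normalization, which divides each cross-correlation value by sqrt(templ_sqsum * window_sqsum). A hedged reference for the window term, using the standard four-corner integral-image identity (helper name is hypothetical):

    // sqsum is (rows+1) x (cols+1) CV_32F; (x, y) is the window's top-left corner.
    static inline float windowSqSum(const cv::Mat &sqsum, int x, int y, int w, int h)
    {
        return sqsum.at<float>(y, x) + sqsum.at<float>(y + h, x + w)
             - sqsum.at<float>(y, x + w) - sqsum.at<float>(y + h, x);
    }
    // result(x, y) /= sqrt((float)templ_sqsum * windowSqSum(sqsum, x, y, templ.cols, templ.rows));
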
@@ -62,13 +62,13 @@ typedef struct __attribute__((aligned (128) )) GpuHidHaarTreeNode
GpuHidHaarTreeNode;


//typedef struct __attribute__((aligned (32))) GpuHidHaarClassifier
//{
// int count __attribute__((aligned (4)));
// GpuHidHaarTreeNode* node __attribute__((aligned (8)));
// float* alpha __attribute__((aligned (8)));
//}
//GpuHidHaarClassifier;
typedef struct __attribute__((aligned (32))) GpuHidHaarClassifier
{
int count __attribute__((aligned (4)));
GpuHidHaarTreeNode* node __attribute__((aligned (8)));
float* alpha __attribute__((aligned (8)));
}
GpuHidHaarClassifier;


typedef struct __attribute__((aligned (64))) GpuHidHaarStageClassifier
@@ -84,22 +84,22 @@ typedef struct __attribute__((aligned (64))) GpuHidHaarStageClassifier
GpuHidHaarStageClassifier;


//typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
//{
// int count __attribute__((aligned (4)));
// int is_stump_based __attribute__((aligned (4)));
// int has_tilted_features __attribute__((aligned (4)));
// int is_tree __attribute__((aligned (4)));
// int pq0 __attribute__((aligned (4)));
// int pq1 __attribute__((aligned (4)));
// int pq2 __attribute__((aligned (4)));
// int pq3 __attribute__((aligned (4)));
// int p0 __attribute__((aligned (4)));
// int p1 __attribute__((aligned (4)));
// int p2 __attribute__((aligned (4)));
// int p3 __attribute__((aligned (4)));
// float inv_window_area __attribute__((aligned (4)));
//} GpuHidHaarClassifierCascade;
typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
{
int count __attribute__((aligned (4)));
int is_stump_based __attribute__((aligned (4)));
int has_tilted_features __attribute__((aligned (4)));
int is_tree __attribute__((aligned (4)));
int pq0 __attribute__((aligned (4)));
int pq1 __attribute__((aligned (4)));
int pq2 __attribute__((aligned (4)));
int pq3 __attribute__((aligned (4)));
int p0 __attribute__((aligned (4)));
int p1 __attribute__((aligned (4)));
int p2 __attribute__((aligned (4)));
int p3 __attribute__((aligned (4)));
float inv_window_area __attribute__((aligned (4)));
} GpuHidHaarClassifierCascade;


#ifdef PACKED_CLASSIFIER
@@ -196,12 +196,10 @@ __kernel void gpuRunHaarClassifierCascadePacked(
for(int stageloop = start_stage; (stageloop < end_stage) && result; stageloop++ )
{// iterate until candidate is valid
float stage_sum = 0.0f;
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
int lcl_off = (yl*DATA_SIZE_X)+(xl);
int stagecount = stageinfo->count;
float stagethreshold = stageinfo->threshold;
for(int nodeloop = 0; nodeloop < stagecount; nodecounter++,nodeloop++ )
int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
float stagethreshold = as_float(stageinfo.y);
int lcl_off = (lid_y*DATA_SIZE_X)+(lid_x);
for(int nodeloop = 0; nodeloop < stageinfo.x; nodecounter++,nodeloop++ )
{
// simple macro to extract shorts from int
#define M0(_t) ((_t)&0xFFFF)
@@ -357,17 +355,14 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
variance_norm_factor = variance_norm_factor * correction - mean * mean;
variance_norm_factor = variance_norm_factor >=0.f ? sqrt(variance_norm_factor) : 1.f;

for(int stageloop = start_stage; (stageloop < split_stage) && result; stageloop++ )
for(int stageloop = start_stage; (stageloop < split_stage) && result; stageloop++ )
{
float stage_sum = 0.f;
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
int stagecount = stageinfo->count;
float stagethreshold = stageinfo->threshold;
for(int nodeloop = 0; nodeloop < stagecount; )
int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
float stagethreshold = as_float(stageinfo.y);
for(int nodeloop = 0; nodeloop < stageinfo.x; )
{
__global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
(((__global uchar*)nodeptr) + nodecounter * sizeof(GpuHidHaarTreeNode));
__global GpuHidHaarTreeNode* currentnodeptr = (nodeptr + nodecounter);

int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
@@ -423,7 +418,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
#endif
}

result = (stage_sum >= stagethreshold) ? 1 : 0;
result = (stage_sum >= stagethreshold);
}
if(factor < 2)
{
@@ -452,17 +447,14 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
lclcount[0]=0;
barrier(CLK_LOCAL_MEM_FENCE);

//int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
int stagecount = stageinfo->count;
float stagethreshold = stageinfo->threshold;
int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
float stagethreshold = as_float(stageinfo.y);

int perfscale = queuecount > 4 ? 3 : 2;
int queuecount_loop = (queuecount + (1<<perfscale)-1) >> perfscale;
int lcl_compute_win = lcl_sz >> perfscale;
int lcl_compute_win_id = (lcl_id >>(6-perfscale));
int lcl_loops = (stagecount + lcl_compute_win -1) >> (6-perfscale);
int lcl_loops = (stageinfo.x + lcl_compute_win -1) >> (6-perfscale);
int lcl_compute_id = lcl_id - (lcl_compute_win_id << (6-perfscale));
for(int queueloop=0; queueloop<queuecount_loop; queueloop++)
{
@@ -477,10 +469,10 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
float part_sum = 0.f;
const int stump_factor = STUMP_BASED ? 1 : 2;
int root_offset = 0;
for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stagecount;)
for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stageinfo.x;)
{
__global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
(((__global uchar*)nodeptr) + sizeof(GpuHidHaarTreeNode) * ((nodecounter + tempnodecounter) * stump_factor + root_offset));
__global GpuHidHaarTreeNode* currentnodeptr =
nodeptr + (nodecounter + tempnodecounter) * stump_factor + root_offset;

int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
@@ -557,7 +549,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa

queuecount = lclcount[0];
barrier(CLK_LOCAL_MEM_FENCE);
nodecounter += stagecount;
nodecounter += stageinfo.x;
}//end for(int stageloop = splitstage; stageloop< endstage && queuecount>0;stageloop++)

if(lcl_id<queuecount)

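In the kernel hunks above, per-stage access switches from byte arithmetic on a uchar* to a single int2 load: count and threshold are the first two 4-byte fields of the 64-byte-aligned GpuHidHaarStageClassifier, so one vector load yields both, with as_float reinterpreting the second lane. A hedged host-side equivalent of that reinterpretation (struct layout assumed from the kernel, not copied from a header):

    #include <cstdint>
    #include <cstring>
    struct StageHead { int32_t count; float threshold; }; // first 8 bytes of a stage
    static inline float as_float(int32_t bits)            // OpenCL as_float() analogue
    {
        float f;
        std::memcpy(&f, &bits, sizeof f);                 // bit copy, no numeric conversion
        return f;
    }
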
@@ -59,13 +59,13 @@ typedef struct __attribute__((aligned(128))) GpuHidHaarTreeNode
int right __attribute__((aligned(4)));
}
GpuHidHaarTreeNode;
//typedef struct __attribute__((aligned(32))) GpuHidHaarClassifier
//{
// int count __attribute__((aligned(4)));
// GpuHidHaarTreeNode *node __attribute__((aligned(8)));
// float *alpha __attribute__((aligned(8)));
//}
//GpuHidHaarClassifier;
typedef struct __attribute__((aligned(32))) GpuHidHaarClassifier
{
int count __attribute__((aligned(4)));
GpuHidHaarTreeNode *node __attribute__((aligned(8)));
float *alpha __attribute__((aligned(8)));
}
GpuHidHaarClassifier;
typedef struct __attribute__((aligned(64))) GpuHidHaarStageClassifier
{
int count __attribute__((aligned(4)));
@@ -77,29 +77,29 @@ typedef struct __attribute__((aligned(64))) GpuHidHaarStageClassifier
int reserved3 __attribute__((aligned(8)));
}
GpuHidHaarStageClassifier;
//typedef struct __attribute__((aligned(64))) GpuHidHaarClassifierCascade
//{
// int count __attribute__((aligned(4)));
// int is_stump_based __attribute__((aligned(4)));
// int has_tilted_features __attribute__((aligned(4)));
// int is_tree __attribute__((aligned(4)));
// int pq0 __attribute__((aligned(4)));
// int pq1 __attribute__((aligned(4)));
// int pq2 __attribute__((aligned(4)));
// int pq3 __attribute__((aligned(4)));
// int p0 __attribute__((aligned(4)));
// int p1 __attribute__((aligned(4)));
// int p2 __attribute__((aligned(4)));
// int p3 __attribute__((aligned(4)));
// float inv_window_area __attribute__((aligned(4)));
//} GpuHidHaarClassifierCascade;
typedef struct __attribute__((aligned(64))) GpuHidHaarClassifierCascade
{
int count __attribute__((aligned(4)));
int is_stump_based __attribute__((aligned(4)));
int has_tilted_features __attribute__((aligned(4)));
int is_tree __attribute__((aligned(4)));
int pq0 __attribute__((aligned(4)));
int pq1 __attribute__((aligned(4)));
int pq2 __attribute__((aligned(4)));
int pq3 __attribute__((aligned(4)));
int p0 __attribute__((aligned(4)));
int p1 __attribute__((aligned(4)));
int p2 __attribute__((aligned(4)));
int p3 __attribute__((aligned(4)));
float inv_window_area __attribute__((aligned(4)));
} GpuHidHaarClassifierCascade;

__kernel void gpuRunHaarClassifierCascade_scaled2(
global GpuHidHaarStageClassifier *stagecascadeptr_,
global GpuHidHaarStageClassifier *stagecascadeptr,
global int4 *info,
global GpuHidHaarTreeNode *nodeptr_,
global GpuHidHaarTreeNode *nodeptr,
global const int *restrict sum,
global const float *restrict sqsum,
global const float *restrict sqsum,
global int4 *candidate,
const int rows,
const int cols,
@@ -132,7 +132,8 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
int max_idx = rows * cols - 1;
for (int scalei = 0; scalei < loopcount; scalei++)
{
int4 scaleinfo1 = info[scalei];
int4 scaleinfo1;
scaleinfo1 = info[scalei];
int grpnumperline = (scaleinfo1.y & 0xffff0000) >> 16;
int totalgrp = scaleinfo1.y & 0xffff;
float factor = as_float(scaleinfo1.w);
@@ -173,18 +174,15 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
for (int stageloop = start_stage; (stageloop < end_stage) && result; stageloop++)
{
float stage_sum = 0.f;
__global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
(((__global uchar*)stagecascadeptr_)+stageloop*sizeof(GpuHidHaarStageClassifier));
int stagecount = stageinfo->count;
int stagecount = stagecascadeptr[stageloop].count;
for (int nodeloop = 0; nodeloop < stagecount;)
{
__global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
(((__global uchar*)nodeptr_) + nodecounter * sizeof(GpuHidHaarTreeNode));
__global GpuHidHaarTreeNode *currentnodeptr = (nodeptr + nodecounter);
int4 info1 = *(__global int4 *)(&(currentnodeptr->p[0][0]));
int4 info2 = *(__global int4 *)(&(currentnodeptr->p[1][0]));
int4 info3 = *(__global int4 *)(&(currentnodeptr->p[2][0]));
float4 w = *(__global float4 *)(&(currentnodeptr->weight[0]));
float3 alpha3 = *(__global float3*)(&(currentnodeptr->alpha[0]));
float3 alpha3 = *(__global float3 *)(&(currentnodeptr->alpha[0]));
float nodethreshold = w.w * variance_norm_factor;

info1.x += p_offset;
@@ -206,7 +204,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
sum[clamp(mad24(info3.w, step, info3.x), 0, max_idx)]
+ sum[clamp(mad24(info3.w, step, info3.z), 0, max_idx)]) * w.z;

bool passThres = (classsum >= nodethreshold) ? 1 : 0;
bool passThres = classsum >= nodethreshold;

#if STUMP_BASED
stage_sum += passThres ? alpha3.y : alpha3.x;
@@ -236,8 +234,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
}
#endif
}

result = (stage_sum >= stageinfo->threshold) ? 1 : 0;
result = (int)(stage_sum >= stagecascadeptr[stageloop].threshold);
}

barrier(CLK_LOCAL_MEM_FENCE);
@@ -284,14 +281,11 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
}
}
}
__kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuHidHaarTreeNode *newnode, float scale, float weight_scale, const int nodenum)
__kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuHidHaarTreeNode *newnode, float scale, float weight_scale, int nodenum)
{
const int counter = get_global_id(0);
int counter = get_global_id(0);
int tr_x[3], tr_y[3], tr_h[3], tr_w[3], i = 0;
GpuHidHaarTreeNode t1 = *(__global GpuHidHaarTreeNode*)
(((__global uchar*)orinode) + counter * sizeof(GpuHidHaarTreeNode));
__global GpuHidHaarTreeNode* pNew = (__global GpuHidHaarTreeNode*)
(((__global uchar*)newnode) + (counter + nodenum) * sizeof(GpuHidHaarTreeNode));
GpuHidHaarTreeNode t1 = *(orinode + counter);

#pragma unroll
for (i = 0; i < 3; i++)
@@ -303,21 +297,22 @@ __kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuH
}

t1.weight[0] = -(t1.weight[1] * tr_h[1] * tr_w[1] + t1.weight[2] * tr_h[2] * tr_w[2]) / (tr_h[0] * tr_w[0]);
counter += nodenum;

#pragma unroll
for (i = 0; i < 3; i++)
{
pNew->p[i][0] = tr_x[i];
pNew->p[i][1] = tr_y[i];
pNew->p[i][2] = tr_x[i] + tr_w[i];
pNew->p[i][3] = tr_y[i] + tr_h[i];
pNew->weight[i] = t1.weight[i] * weight_scale;
newnode[counter].p[i][0] = tr_x[i];
newnode[counter].p[i][1] = tr_y[i];
newnode[counter].p[i][2] = tr_x[i] + tr_w[i];
newnode[counter].p[i][3] = tr_y[i] + tr_h[i];
newnode[counter].weight[i] = t1.weight[i] * weight_scale;
}

pNew->left = t1.left;
pNew->right = t1.right;
pNew->threshold = t1.threshold;
pNew->alpha[0] = t1.alpha[0];
pNew->alpha[1] = t1.alpha[1];
pNew->alpha[2] = t1.alpha[2];
newnode[counter].left = t1.left;
newnode[counter].right = t1.right;
newnode[counter].threshold = t1.threshold;
newnode[counter].alpha[0] = t1.alpha[0];
newnode[counter].alpha[1] = t1.alpha[1];
newnode[counter].alpha[2] = t1.alpha[2];
}

@@ -49,9 +49,6 @@
#elif defined (cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
#define CONVERT(step) ((step)>>1)
#else
#define CONVERT(step) ((step))
#endif

#define LSIZE 256
@@ -64,17 +61,17 @@
#define GET_CONFLICT_OFFSET(lid) ((lid) >> LOG_NUM_BANKS)


kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global TYPE *sqsum,
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step,int dst1_step)
kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global float *sqsum,
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
int4 src_t[2], sum_t[2];
TYPE4 sqsum_t[2];
float4 sqsum_t[2];
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local int* sum_p;
__local TYPE* sqsum_p;
__local float* sqsum_p;
src_step = src_step >> 2;
gid = gid << 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
@@ -83,237 +80,17 @@ kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global TY
src_t[1] = (i + lid < rows ? convert_int4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : 0);

sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);

int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
lm_sqsum[0][bf_loc] = convert_float4(src_t[0] * src_t[0]);

lm_sum[1][bf_loc] = src_t[1];
lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);

int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
{
barrier(CLK_LOCAL_MEM_FENCE);
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
}
offset <<= 1;
}
barrier(CLK_LOCAL_MEM_FENCE);
if(lid < 2)
{
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
}
for(int d = 1; d < LSIZE; d <<= 1)
{
barrier(CLK_LOCAL_MEM_FENCE);
offset >>= 1;
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];

lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step /4, loc_s1 = loc_s0 + dst_step ;
int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE),loc_sq1 = loc_sq0 + CONVERT(dst1_step);
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sum[1][bf_loc] += sum_t[1];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
}
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
}
}


kernel void integral_rows_D4(__global int4 *srcsum,__global TYPE4 * srcsqsum,__global int *sum ,
__global TYPE *sqsum,int rows,int cols,int src_step,int src1_step,int sum_step,
int sqsum_step,int sum_offset,int sqsum_offset)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
int4 src_t[2], sum_t[2];
TYPE4 sqsrc_t[2],sqsum_t[2];
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local int *sum_p;
__local TYPE *sqsum_p;
src_step = src_step >> 4;
src1_step = (src1_step / sizeof(TYPE)) >> 2 ;
gid <<= 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid ] : (int4)0;
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid ] : (TYPE4)0;
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid + 1] : (int4)0;
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid + 1] : (TYPE4)0;

sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);

int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
lm_sqsum[0][bf_loc] = sqsrc_t[0];

lm_sum[1][bf_loc] = src_t[1];
lm_sqsum[1][bf_loc] = sqsrc_t[1];

int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
{
barrier(CLK_LOCAL_MEM_FENCE);
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
}
offset <<= 1;
}
barrier(CLK_LOCAL_MEM_FENCE);
if(lid < 2)
{
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
}
for(int d = 1; d < LSIZE; d <<= 1)
{
barrier(CLK_LOCAL_MEM_FENCE);
offset >>= 1;
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];

lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
if(gid == 0 && (i + lid) <= rows)
{
sum[sum_offset + i + lid] = 0;
sqsum[sqsum_offset + i + lid] = 0;
}
if(i + lid == 0)
{
int loc0 = gid * sum_step;
int loc1 = gid * CONVERT(sqsum_step);
for(int k = 1; k <= 8; k++)
{
if(gid * 4 + k > cols) break;
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
}
}
int loc_s0 = sum_offset + gid * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
int loc_sq0 = sqsum_offset + gid * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;

if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sum[1][bf_loc] += sum_t[1];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k >= cols) break;
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
}
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + 4 + k >= cols) break;
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
}
}

kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global TYPE *sqsum,
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step, int dst1_step)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
float4 src_t[2], sum_t[2];
TYPE4 sqsum_t[2];
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float* sum_p;
__local TYPE* sqsum_p;
src_step = src_step >> 2;
gid = gid << 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
src_t[0] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid, cols - 1)]) : (float4)0);
src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : (float4)0);

sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);

int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);

lm_sum[1][bf_loc] = src_t[1];
lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);
lm_sqsum[1][bf_loc] = convert_float4(src_t[1] * src_t[1]);

int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
@@ -355,28 +132,27 @@ kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global
}
barrier(CLK_LOCAL_MEM_FENCE);
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step ;
int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE), loc_sq1 = loc_sq0 + CONVERT(dst1_step);
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sum[1][bf_loc] += sum_t[1];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
sqsum[loc_s0 + k * dst_step / 4] = sqsum_p[k];
}
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
sqsum[loc_s1 + k * dst_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
@@ -384,31 +160,30 @@ kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global
}


kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,__global float *sum ,
__global TYPE *sqsum,int rows,int cols,int src_step,int src1_step, int sum_step,
kernel void integral_rows_D4(__global int4 *srcsum,__global float4 * srcsqsum,__global int *sum ,
__global float *sqsum,int rows,int cols,int src_step,int sum_step,
int sqsum_step,int sum_offset,int sqsum_offset)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
float4 src_t[2], sum_t[2];
TYPE4 sqsrc_t[2],sqsum_t[2];
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float *sum_p;
__local TYPE *sqsum_p;
int4 src_t[2], sum_t[2];
float4 sqsrc_t[2],sqsum_t[2];
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local int *sum_p;
__local float *sqsum_p;
src_step = src_step >> 4;
src1_step = (src1_step / sizeof(TYPE)) >> 2;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (float4)0;
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid * 2] : (TYPE4)0;
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid * 2 + 1] : (TYPE4)0;
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (int4)0;
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2] : (float4)0;
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (int4)0;
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;

sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);

int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
@@ -465,16 +240,114 @@ kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,_
if(i + lid == 0)
{
int loc0 = gid * 2 * sum_step;
int loc1 = gid * 2 * CONVERT(sqsum_step);
int loc1 = gid * 2 * sqsum_step;
for(int k = 1; k <= 8; k++)
{
if(gid * 8 + k > cols) break;
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
sqsum[sqsum_offset + loc1 + k * sqsum_step / 4] = 0;
}
}
int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
int loc_sq0 = sqsum_offset + gid * 2 * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;
int loc_sq0 = sqsum_offset + gid * 2 * sqsum_step + sqsum_step / 4 + i + lid, loc_sq1 = loc_sq0 + sqsum_step ;
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sum[1][bf_loc] += sum_t[1];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + k >= cols) break;
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq0 + k * sqsum_step / 4] = sqsum_p[k];
}
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + 4 + k >= cols) break;
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq1 + k * sqsum_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
}
}

kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global float *sqsum,
int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
float4 src_t[2], sum_t[2];
float4 sqsum_t[2];
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float* sum_p;
__local float* sqsum_p;
src_step = src_step >> 2;
gid = gid << 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
src_t[0] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid, cols - 1)]) : (float4)0);
src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : (float4)0);

sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);

int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
lm_sqsum[0][bf_loc] = convert_float4(src_t[0] * src_t[0]);

lm_sum[1][bf_loc] = src_t[1];
lm_sqsum[1][bf_loc] = convert_float4(src_t[1] * src_t[1]);

int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
{
barrier(CLK_LOCAL_MEM_FENCE);
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
}
offset <<= 1;
}
barrier(CLK_LOCAL_MEM_FENCE);
if(lid < 2)
{
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
}
for(int d = 1; d < LSIZE; d <<= 1)
{
barrier(CLK_LOCAL_MEM_FENCE);
offset >>= 1;
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];

lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step ;
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
@@ -482,20 +355,138 @@ kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,_
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + k >= cols) break;
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
sqsum[loc_s0 + k * dst_step / 4] = sqsum_p[k];
}
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + 4 + k >= cols) break;
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
sqsum[loc_s1 + k * dst_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
}
}


kernel void integral_rows_D5(__global float4 *srcsum,__global float4 * srcsqsum,__global float *sum ,
__global float *sqsum,int rows,int cols,int src_step,int sum_step,
int sqsum_step,int sum_offset,int sqsum_offset)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
float4 src_t[2], sum_t[2];
float4 sqsrc_t[2],sqsum_t[2];
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
__local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float *sum_p;
__local float *sqsum_p;
src_step = src_step >> 4;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (float4)0;
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2] : (float4)0;
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;

sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);

int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
lm_sqsum[0][bf_loc] = sqsrc_t[0];

lm_sum[1][bf_loc] = src_t[1];
lm_sqsum[1][bf_loc] = sqsrc_t[1];

int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
{
barrier(CLK_LOCAL_MEM_FENCE);
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
}
offset <<= 1;
}
barrier(CLK_LOCAL_MEM_FENCE);
if(lid < 2)
{
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
}
for(int d = 1; d < LSIZE; d <<= 1)
{
barrier(CLK_LOCAL_MEM_FENCE);
offset >>= 1;
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
ai += GET_CONFLICT_OFFSET(ai);
bi += GET_CONFLICT_OFFSET(bi);

if((lid & 127) < d)
{
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];

lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
if(gid == 0 && (i + lid) <= rows)
{
sum[sum_offset + i + lid] = 0;
sqsum[sqsum_offset + i + lid] = 0;
}
if(i + lid == 0)
{
int loc0 = gid * 2 * sum_step;
int loc1 = gid * 2 * sqsum_step;
for(int k = 1; k <= 8; k++)
{
if(gid * 8 + k > cols) break;
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
sqsum[sqsum_offset + loc1 + k * sqsum_step / 4] = 0;
}
}
int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
int loc_sq0 = sqsum_offset + gid * 2 * sqsum_step + sqsum_step / 4 + i + lid, loc_sq1 = loc_sq0 + sqsum_step ;
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sum[1][bf_loc] += sum_t[1];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + k >= cols) break;
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq0 + k * sqsum_step / 4] = sqsum_p[k];
}
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + 4 + k >= cols) break;
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
sqsum[loc_sq1 + k * sqsum_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);

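The cols/rows kernel pairs above implement a separable integral image: a bank-conflict-aware, Blelloch-style scan down the columns, then the same scan across the rows of the transposed intermediate. As a correctness reference, a hedged CPU version of what the two passes compute together (float accumulation mirrors the CV_32F sqsum this commit settles on; a double accumulator would be more precise):

    // sum(y, x)   = sum of src over [0, y) x [0, x); outputs are (rows+1) x (cols+1).
    // sqsum(y, x) = the same sum over src squared.
    void integral_ref(const unsigned char *src, int rows, int cols, int stride,
                      int *sum, float *sqsum)             // output row pitch: cols + 1
    {
        for (int x = 0; x <= cols; x++) { sum[x] = 0; sqsum[x] = 0.f; }
        for (int y = 1; y <= rows; y++) {
            int rowsum = 0; float rowsq = 0.f;
            sum[y * (cols + 1)] = 0; sqsum[y * (cols + 1)] = 0.f;
            for (int x = 1; x <= cols; x++) {
                int v = src[(y - 1) * stride + (x - 1)];
                rowsum += v; rowsq += (float)(v * v);
                sum[y * (cols + 1) + x] = sum[(y - 1) * (cols + 1) + x] + rowsum;
                sqsum[y * (cols + 1) + x] = sqsum[(y - 1) * (cols + 1) + x] + rowsq;
            }
        }
    }
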
@@ -295,33 +295,23 @@ OCL_TEST_P(CornerHarris, Mat)

//////////////////////////////////integral/////////////////////////////////////////////////

struct Integral :
public ImgprocTestBase
{
int sdepth;
typedef ImgprocTestBase Integral;

virtual void SetUp()
{
type = GET_PARAM(0);
blockSize = GET_PARAM(1);
sdepth = GET_PARAM(2);
useRoi = GET_PARAM(3);
}
};
OCL_TEST_P(Integral, Mat1)
{
for (int j = 0; j < LOOP_TIMES; j++)
{
random_roi();

ocl::integral(gsrc_roi, gdst_roi, sdepth);
integral(src_roi, dst_roi, sdepth);
ocl::integral(gsrc_roi, gdst_roi);
integral(src_roi, dst_roi);

Near();
}
}

OCL_TEST_P(Integral, Mat2)
// TODO wrong output type
OCL_TEST_P(Integral, DISABLED_Mat2)
{
Mat dst1;
ocl::oclMat gdst1;
@@ -330,12 +320,10 @@ OCL_TEST_P(Integral, Mat2)
{
random_roi();

integral(src_roi, dst_roi, dst1, sdepth);
ocl::integral(gsrc_roi, gdst_roi, gdst1, sdepth);
integral(src_roi, dst1, dst_roi);
ocl::integral(gsrc_roi, gdst1, gdst_roi);

Near();
if(gdst1.clCxt->supportsFeature(ocl::FEATURE_CL_DOUBLE))
EXPECT_MAT_NEAR(dst1, Mat(gdst1), 0.);
}
}

@@ -575,7 +563,7 @@ INSTANTIATE_TEST_CASE_P(Imgproc, CornerHarris, Combine(
INSTANTIATE_TEST_CASE_P(Imgproc, Integral, Combine(
Values((MatType)CV_8UC1), // TODO does not work with CV_32F, CV_64F
Values(0), // not used
Values((MatType)CV_32SC1, (MatType)CV_32FC1),
Values(0), // not used
Bool()));

INSTANTIATE_TEST_CASE_P(Imgproc, Threshold, Combine(

@@ -426,7 +426,7 @@ class table(object):
if r == 0:
css = css[:-1] + "border-top:2px solid #6678B1;\""
out.write(" <td%s%s>\n" % (attr, css))
if th is not None:
if td is not None:
out.write(" %s\n" % htmlEncode(td.text))
out.write(" </td>\n")
i += colspan

@@ -1,8 +1,8 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv.engine"
android:versionCode="216@ANDROID_PLATFORM_VERSION_CODE@"
android:versionName="2.16" >
android:versionCode="218@ANDROID_PLATFORM_VERSION_CODE@"
android:versionName="2.18" >

<uses-sdk android:minSdkVersion="@ANDROID_NATIVE_API_LEVEL@" />
<uses-feature android:name="android.hardware.touchscreen" android:required="false"/>

@@ -15,7 +15,7 @@ using namespace android;

const int OpenCVEngine::Platform = DetectKnownPlatforms();
const int OpenCVEngine::CpuID = GetCpuID();
const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400, 2040500, 2040600, 2040700, 2040701, 2040800};
const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400, 2040500, 2040600, 2040700, 2040701, 2040800, 2040900};

bool OpenCVEngine::ValidateVersion(int version)
{

@@ -117,7 +117,7 @@ public class ManagerActivity extends Activity
}
else
{
HardwarePlatformView.setText("Tegra 5");
HardwarePlatformView.setText("Tegra K1");
}
}
else

@@ -14,20 +14,20 @@ manually using adb tool:

.. code-block:: sh

adb install OpenCV-2.4.8-android-sdk/apk/OpenCV_2.4.8_Manager_2.16_<platform>.apk
adb install OpenCV-2.4.9-android-sdk/apk/OpenCV_2.4.9_Manager_2.18_<platform>.apk

Use the table below to determine proper OpenCV Manager package for your device:

+------------------------------+--------------+----------------------------------------------------+
| Hardware Platform | Android ver. | Package name |
+==============================+==============+====================================================+
| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.8_Manager_2.16_armv7a-neon.apk |
| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.9_Manager_2.18_armv7a-neon.apk |
+------------------------------+--------------+----------------------------------------------------+
| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.8_Manager_2.16_armv7a-neon-android8.apk |
| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.9_Manager_2.18_armv7a-neon-android8.apk |
+------------------------------+--------------+----------------------------------------------------+
| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.8_Manager_2.16_armeabi.apk |
| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.9_Manager_2.18_armeabi.apk |
+------------------------------+--------------+----------------------------------------------------+
| Intel x86 | >= 2.3 | OpenCV_2.4.8_Manager_2.16_x86.apk |
| Intel x86 | >= 2.3 | OpenCV_2.4.9_Manager_2.18_x86.apk |
+------------------------------+--------------+----------------------------------------------------+
| MIPS | >= 2.3 | OpenCV_2.4.8_Manager_2.16_mips.apk |
| MIPS | >= 2.3 | OpenCV_2.4.9_Manager_2.18_mips.apk |
+------------------------------+--------------+----------------------------------------------------+

@@ -16,7 +16,7 @@ USAGE
--feature - Feature to use. Can be sift, surf of orb. Append '-flann' to feature name
to use Flann-based matcher instead bruteforce.

Press left mouse button on a feature point to see its mathcing point.
Press left mouse button on a feature point to see its matching point.
'''

import numpy as np

@@ -9,7 +9,7 @@ USAGE
--feature - Feature to use. Can be sift, surf of orb. Append '-flann' to feature name
to use Flann-based matcher instead bruteforce.

Press left mouse button on a feature point to see its mathcing point.
Press left mouse button on a feature point to see its matching point.
'''

import numpy as np