Merge branch '2.4'

Andrey Kamaev 2013-02-12 16:30:18 +04:00
commit b44b920997
107 changed files with 1382 additions and 1806 deletions

View File

@ -1,15 +1,18 @@
#build TBB for Android from source
if(NOT ANDROID)
message(FATAL_ERROR "The script is designed for Android only!")
endif()
#Cross compile TBB from source
project(tbb)
# 4.1 update 1 - works fine
set(tbb_ver "tbb41_20121003oss")
set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20121003oss_src.tgz")
set(tbb_md5 "2a684fefb855d2d0318d1ef09afa75ff")
# 4.1 update 2 - works fine
set(tbb_ver "tbb41_20130116oss")
set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130116oss_src.tgz")
set(tbb_md5 "3809790e1001a1b32d59c9fee590ee85")
set(tbb_version_file "version_string.ver")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow)
# 4.1 update 1 - works fine
#set(tbb_ver "tbb41_20121003oss")
#set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20121003oss_src.tgz")
#set(tbb_md5 "2a684fefb855d2d0318d1ef09afa75ff")
#set(tbb_version_file "version_string.ver")
# 4.1 - works fine
#set(tbb_ver "tbb41_20120718oss")
@ -121,9 +124,9 @@ list(APPEND lib_srcs "${tbb_src_dir}/src/rml/client/rml_tbb.cpp")
add_definitions(-D__TBB_DYNAMIC_LOAD_ENABLED=0 #required
-D__TBB_BUILD=1 #required
-D__TBB_SURVIVE_THREAD_SWITCH=0 #no cilk on Android ?
-DUSE_PTHREAD #required
-DTBB_USE_GCC_BUILTINS=1 #required
-D__TBB_SURVIVE_THREAD_SWITCH=0 #no cilk support
-DUSE_PTHREAD #required for Unix
-DTBB_USE_GCC_BUILTINS=1 #required for ARM GCC
-DTBB_USE_DEBUG=0 #just to be sure
-DTBB_NO_LEGACY=1 #don't need backward compatibility
-DDO_ITT_NOTIFY=0 #it seems that we don't need these notifications
@ -140,14 +143,24 @@ if(tbb_need_GENERIC_DWORD_LOAD_STORE)
set(tbb_need_GENERIC_DWORD_LOAD_STORE ON PARENT_SCOPE)
endif()
add_library(tbb STATIC ${lib_srcs} ${lib_hdrs} "${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h" "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}")
set(TBB_SOURCE_FILES ${lib_srcs} ${lib_hdrs})
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm")
if (NOT ANDROID)
set(TBB_SOURCE_FILES ${TBB_SOURCE_FILES} "${CMAKE_CURRENT_SOURCE_DIR}/arm_linux_stub.cpp")
endif()
set(TBB_SOURCE_FILES ${TBB_SOURCE_FILES} "${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include \"${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h\"")
endif()
set(TBB_SOURCE_FILES ${TBB_SOURCE_FILES} "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}")
add_library(tbb ${TBB_SOURCE_FILES})
target_link_libraries(tbb c m dl)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations)
string(REPLACE "-Werror=non-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include \"${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h\"")
set_target_properties(tbb
PROPERTIES OUTPUT_NAME tbb
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
@ -164,4 +177,3 @@ endif()
# get TBB version
ocv_parse_header("${tbb_src_dir}/include/tbb/tbb_stddef.h" TBB_VERSION_LINES TBB_VERSION_MAJOR TBB_VERSION_MINOR TBB_INTERFACE_VERSION CACHE)

3rdparty/tbb/arm_linux_stub.cpp vendored Normal file
View File

@ -0,0 +1,10 @@
#include "tbb/tbb_misc.h"
namespace tbb {
namespace internal {
void affinity_helper::protect_affinity_mask() {}
affinity_helper::~affinity_helper() {}
}
}

View File

@ -158,18 +158,18 @@ OCV_OPTION(BUILD_PERF_TESTS "Build performance tests"
OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT IOS) )
OCV_OPTION(BUILD_WITH_DEBUG_INFO "Include debug info into debug libs (not MSVC only)" ON )
OCV_OPTION(BUILD_WITH_STATIC_CRT "Enables use of statically linked CRT for statically linked OpenCV" ON IF MSVC )
OCV_OPTION(BUILD_FAT_JAVA_LIB "Create fat java wrapper containing the whole OpenCV library" ON IF ANDROID AND NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX )
OCV_OPTION(BUILD_FAT_JAVA_LIB "Create fat java wrapper containing the whole OpenCV library" ON IF NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX )
OCV_OPTION(BUILD_ANDROID_SERVICE "Build OpenCV Manager for Google Play" OFF IF ANDROID AND ANDROID_SOURCE_TREE )
OCV_OPTION(BUILD_ANDROID_PACKAGE "Build platform-specific package for Google Play" OFF IF ANDROID )
# 3rd party libs
OCV_OPTION(BUILD_ZLIB "Build zlib from source" WIN32 OR APPLE OR CARMA )
OCV_OPTION(BUILD_TIFF "Build libtiff from source" WIN32 OR ANDROID OR APPLE OR CARMA )
OCV_OPTION(BUILD_JASPER "Build libjasper from source" WIN32 OR ANDROID OR APPLE OR CARMA )
OCV_OPTION(BUILD_JPEG "Build libjpeg from source" WIN32 OR ANDROID OR APPLE OR CARMA )
OCV_OPTION(BUILD_PNG "Build libpng from source" WIN32 OR ANDROID OR APPLE OR CARMA )
OCV_OPTION(BUILD_OPENEXR "Build openexr from source" WIN32 OR ANDROID OR APPLE OR CARMA )
OCV_OPTION(BUILD_TBB "Download and build TBB from source" ANDROID IF CMAKE_COMPILER_IS_GNUCXX )
# OpenCV installation options
# ===================================================

View File

@ -86,7 +86,7 @@ add_custom_command(
COMMAND ${CMAKE_COMMAND} -E touch "${APK_NAME}"
WORKING_DIRECTORY "${PACKAGE_DIR}"
MAIN_DEPENDENCY "${PACKAGE_DIR}/${ANDROID_MANIFEST_FILE}"
DEPENDS "${OpenCV_BINARY_DIR}/bin/.classes.jar.dephelper" "${PACKAGE_DIR}/res/values/strings.xml" "${PACKAGE_DIR}/res/drawable/icon.png" ${camera_wrappers} opencv_java
DEPENDS "${OpenCV_BINARY_DIR}/bin/classes.jar.dephelper" "${PACKAGE_DIR}/res/values/strings.xml" "${PACKAGE_DIR}/res/drawable/icon.png" ${camera_wrappers} opencv_java
)
install(FILES "${APK_NAME}" DESTINATION "apk/" COMPONENT main)

View File

@ -46,7 +46,9 @@ bool JavaBasedPackageManager::InstallPackage(const PackageInfo& package)
LOGD("Calling java package manager with package name %s\n", package.GetFullName().c_str());
jobject jpkgname = jenv->NewStringUTF(package.GetFullName().c_str());
bool result = jenv->CallNonvirtualBooleanMethod(JavaPackageManager, jclazz, jmethod, jpkgname);
jenv->DeleteLocalRef(jpkgname);
jenv->DeleteLocalRef(jclazz);
if (self_attached)
{
@ -104,9 +106,12 @@ vector<PackageInfo> JavaBasedPackageManager::GetInstalledPackages()
if (tmp.IsValid())
result.push_back(tmp);
jenv->DeleteLocalRef(jtmp);
}
jenv->DeleteLocalRef(jpkgs);
jenv->DeleteLocalRef(jclazz);
if (self_attached)
{
@ -118,6 +123,16 @@ vector<PackageInfo> JavaBasedPackageManager::GetInstalledPackages()
return result;
}
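// Helper: queries android.os.Build.VERSION.SDK_INT via JNI to obtain the device API level.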
static jint GetAndroidVersion(JNIEnv* jenv)
{
jclass jclazz = jenv->FindClass("android/os/Build$VERSION");
jfieldID jfield = jenv->GetStaticFieldID(jclazz, "SDK_INT", "I");
jint api_level = jenv->GetStaticIntField(jclazz, jfield);
jenv->DeleteLocalRef(jclazz);
return api_level;
}
// IMPORTANT: This method can be called only if thread is attached to Dalvik
PackageInfo JavaBasedPackageManager::ConvertPackageFromJava(jobject package, JNIEnv* jenv)
{
@ -133,23 +148,27 @@ PackageInfo JavaBasedPackageManager::ConvertPackageFromJava(jobject package, JNI
const char* jversionstr = jenv->GetStringUTFChars(jversionobj, NULL);
string verison(jversionstr);
jenv->DeleteLocalRef(jversionobj);
jenv->DeleteLocalRef(jclazz);
static const jint api_level = GetAndroidVersion(jenv);
string path;
jclazz = jenv->FindClass("android/os/Build$VERSION");
jfield = jenv->GetStaticFieldID(jclazz, "SDK_INT", "I");
jint api_level = jenv->GetStaticIntField(jclazz, jfield);
if (api_level > 8)
{
jclazz = jenv->GetObjectClass(package);
jfield = jenv->GetFieldID(jclazz, "applicationInfo", "Landroid/content/pm/ApplicationInfo;");
jobject japp_info = jenv->GetObjectField(package, jfield);
jenv->DeleteLocalRef(jclazz);
jclazz = jenv->GetObjectClass(japp_info);
jfield = jenv->GetFieldID(jclazz, "nativeLibraryDir", "Ljava/lang/String;");
jstring jpathobj = static_cast<jstring>(jenv->GetObjectField(japp_info, jfield));
const char* jpathstr = jenv->GetStringUTFChars(jpathobj, NULL);
path = string(jpathstr);
jenv->ReleaseStringUTFChars(jpathobj, jpathstr);
jenv->DeleteLocalRef(japp_info);
jenv->DeleteLocalRef(jpathobj);
jenv->DeleteLocalRef(jclazz);
}
else
{

View File

@ -19,4 +19,4 @@ private:
JavaBasedPackageManager();
PackageInfo ConvertPackageFromJava(jobject package, JNIEnv* jenv);
};
};

View File

@ -1,4 +1,4 @@
if(ANDROID AND NOT MIPS)
if(BUILD_TBB)
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/tbb")
include_directories(SYSTEM ${TBB_INCLUDE_DIRS})
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} tbb)

View File

@ -6,26 +6,28 @@ Mat - The Basic Image Container
Goal
====
We have multiple ways to acquire digital images from the real world: digital cameras, scanners, computed tomography or magnetic resonance imaging to just name a few. In every case what we (humans) see are images. However, when transforming this to our digital devices what we record are numerical values for each of the points of the image.
We have multiple ways to acquire digital images from the real world: digital cameras, scanners, computed tomography, and magnetic resonance imaging to name a few. In every case what we (humans) see are images. However, when transforming this to our digital devices what we record are numerical values for each of the points of the image.
.. image:: images/MatBasicImageForComputer.jpg
:alt: A matrix of the mirror of a car
:align: center
For example in the above image you can see that the mirror of the care is nothing more than a matrix containing all the intensity values of the pixel points. Now, how we get and store the pixels values may vary according to what fits best our need, in the end all images inside a computer world may be reduced to numerical matrices and some other information's describing the matric itself. *OpenCV* is a computer vision library whose main focus is to process and manipulate these information to find out further ones. Therefore, the first thing you need to learn and get accommodated with is how OpenCV stores and handles images.
For example in the above image you can see that the mirror of the car is nothing more than a matrix containing all the intensity values of the pixel points. How we get and store the pixel values may vary according to our needs, but in the end all images inside a computer world may be reduced to numerical matrices and other information describing the matrix itself. *OpenCV* is a computer vision library whose main focus is to process and manipulate this information. Therefore, the first thing you need to be familiar with is how OpenCV stores and handles images.
*Mat*
=====
OpenCV has been around ever since 2001. In those days the library was built around a *C* interface. In those days to store the image in the memory they used a C structure entitled *IplImage*. This is the one you'll see in most of the older tutorials and educational materials. The problem with this is that it brings to the table all the minuses of the C language. The biggest issue is the manual management. It builds on the assumption that the user is responsible for taking care of memory allocation and deallocation. While this is no issue in case of smaller programs once your code base start to grove larger and larger it will be more and more a struggle to handle all this rather than focusing on actually solving your development goal.
OpenCV has been around since 2001. In those days the library was built around a *C* interface and to store the image in the memory they used a C structure called *IplImage*. This is the one you'll see in most of the older tutorials and educational materials. The problem with this is that it brings to the table all the minuses of the C language. The biggest issue is the manual memory management. It builds on the assumption that the user is responsible for taking care of memory allocation and deallocation. While this is not a problem with smaller programs, once your code base grows it will be more of a struggle to handle all this rather than focusing on solving your development goal.
Luckily C++ came around and introduced the concept of classes making possible to build another road for the user: automatic memory management (more or less). The good news is that C++ if fully compatible with C so no compatibility issues can arise from making the change. Therefore, OpenCV with its 2.0 version introduced a new C++ interface that by taking advantage of these offers a new way of doing things. A way, in which you do not need to fiddle with memory management; making your code concise (less to write, to achieve more). The only main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting this platform, there's no point on using the *old* methods (unless you're a masochist programmer and you're asking for trouble).
Luckily C++ came around and introduced the concept of classes, making it easier for the user through automatic memory management (more or less). The good news is that C++ is fully compatible with C so no compatibility issues can arise from making the change. Therefore, OpenCV 2.0 introduced a new C++ interface that offers a new way of doing things: you no longer need to fiddle with memory management, making your code concise (less to write, to achieve more). The main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting embedded platforms, there's no point in using the *old* methods (unless you're a masochist programmer and you're asking for trouble).
The first thing you need to know about *Mat* is that you no longer need to manually allocate its size and release it as soon as you do not need it. While doing this is still a possibility, most of the OpenCV functions will allocate its output data manually. As a nice bonus if you pass on an already existing *Mat* object, what already has allocated the required space for the matrix, this will be reused. In other words we use at all times only as much memory as much we must to perform the task.
The first thing you need to know about *Mat* is that you no longer need to manually allocate its memory and release it as soon as you do not need it. While doing this is still a possibility, most of the OpenCV functions will allocate their output data automatically. As a nice bonus, if you pass an already existing *Mat* object, which has already allocated the required space for the matrix, this will be reused. In other words we use at all times only as much memory as we need to perform the task.
*Mat* is basically a class having two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, at which address is the matrix stored and so on) and a pointer to the matrix containing the pixel values (may take any dimensionality depending on the method chosen for storing) . The matrix header size is constant. However, the size of the matrix itself may vary from image to image and usually is larger by order of magnitudes. Therefore, when you're passing on images in your program and at some point you need to create a copy of the image the big price you will need to build is for the matrix itself rather than its header. OpenCV is an image processing library. It contains a large collection of image processing functions. To solve a computational challenge most of the time you will end up using multiple functions of the library. Due to this passing on images to functions is a common practice. We should not forget that we are talking about image processing algorithms, which tend to be quite computational heavy. The last thing we want to do is to further decrease the speed of your program by making unnecessary copies of potentially *large* images.
*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, at which address the matrix is stored, and so on) and a pointer to the matrix containing the pixel values (taking any dimensionality depending on the method chosen for storing). The matrix header size is constant; however, the size of the matrix itself may vary from image to image and usually is larger by orders of magnitude.
To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instance of them by having their matrix pointer point to the same address. Moreover, the copy operators **will only copy the headers**, and as also copy the pointer to the large matrix too, however not the matrix itself.
OpenCV is an image processing library. It contains a large collection of image processing functions. To solve a computational challenge, most of the time you will end up using multiple functions of the library. Because of this, passing images to functions is a common practice. We should not forget that we are talking about image processing algorithms, which tend to be quite computationally heavy. The last thing we want to do is further decrease the speed of your program by making unnecessary copies of potentially *large* images.
To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instances of them by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself.
.. code-block:: cpp
:linenos:
@ -37,7 +39,7 @@ To tackle this issue OpenCV uses a reference counting system. The idea is that e
C = A; // Assignment operator
All the above objects, in the end point to the same single data matrix. Their headers are different, however making any modification using either one of them will affect all the other ones too. In practice the different objects just provide different access method to the same underlying data. Nevertheless, their header parts are different. The real interesting part comes that you can create headers that refer only to a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries:
All the above objects, in the end, point to the same single data matrix. Their headers are different, however, and making a modification using any of them will affect all the other ones as well. In practice the different objects just provide different access methods to the same underlying data. Nevertheless, their header parts are different. The really interesting part is that you can create headers which refer to only a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries:
.. code-block:: cpp
:linenos:
@ -45,7 +47,7 @@ All the above objects, in the end point to the same single data matrix. Their he
Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
Now you may ask if the matrix itself may belong to multiple *Mat* objects who will take responsibility for its cleaning when it's no longer needed. The short answer is: the last object that used it. For this a reference counting mechanism is used. Whenever somebody copies a header of a *Mat* object a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix too is freed. Because, sometimes you will still want to copy the matrix itself too, there exists the :basicstructures:`clone() <mat-clone>` or the :basicstructures:`copyTo() <mat-copyto>` function.
Now you may ask, if the matrix itself may belong to multiple *Mat* objects, who takes responsibility for cleaning it up when it's no longer needed? The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix is freed too. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() <mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.
.. code-block:: cpp
:linenos:
@ -59,34 +61,34 @@ Now modifying *F* or *G* will not affect the matrix pointed by the *Mat* header.
.. container:: enumeratevisibleitemswithsquare
* Output image allocation for OpenCV functions is automatic (unless specified otherwise).
* No need to think about memory freeing with OpenCVs C++ interface.
* The assignment operator and the copy constructor (*ctor*)copies only the header.
* Use the :basicstructures:`clone()<mat-clone>` or the :basicstructures:`copyTo() <mat-copyto>` function to copy the underlying matrix of an image.
* You do not need to think about memory management with OpenCV's C++ interface.
* The assignment operator and the copy constructor only copy the header.
* The underlying matrix of an image may be copied using the :basicstructures:`clone()<mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.
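A small recap sketch of these rules (only standard *Mat* calls are used; the values are illustrative):

.. code-block:: cpp

   Mat A = Mat::zeros(3, 3, CV_8UC1); // allocates the 3x3 data matrix
   Mat B = A;                         // copies only the header; B shares A's data
   B.at<uchar>(0,0) = 7;              // the change is visible through A as well
   Mat C = A.clone();                 // deep copy; C gets its own data
   C.at<uchar>(0,0) = 42;             // A stays untouched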
*Storing* methods
=================
This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is the gray scale. Here the colors at our disposal are black and white. The combination of these allows us to create many shades of gray.
This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is the gray scale where the colors at our disposal are black and white. The combination of these allows us to create many shades of gray.
For *colorful* ways we have a lot more of methods to choose from. However, every one of them breaks it down to three or four basic components and the combination of this will give all others. The most popular one of this is RGB, mainly because this is also how our eye builds up colors in our eyes. Its base colors are red, green and blue. To code the transparency of a color sometimes a fourth element: alpha (A) is added.
For *colorful* ways we have a lot more methods to choose from. Each of them breaks it down to three or four basic components and we can use the combination of these to create the others. The most popular one is RGB, mainly because this is also how our eye builds up colors. To code the transparency of a color, a fourth element, alpha (A), is sometimes added.
However, they are many color systems each with their own advantages:
There are, however, many other color systems each with their own advantages:
.. container:: enumeratevisibleitemswithsquare
* RGB is the most common as our eyes use something similar, and our display systems also compose colors using these.
* The HSV and HLS decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. Using you may for example dismiss the last component, making your algorithm less sensible to light conditions of the input image.
* The HSV and HLS decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. You might, for example, dismiss the last component, making your algorithm less sensitive to the light conditions of the input image.
* YCrCb is used by the popular JPEG image format.
* CIE L*a*b* is a perceptually uniform color space, which comes in handy if you need to measure the *distance* of a given color to another color.
Now each of the building components has their own valid domains. This leads to the data type used. How we store a component defines just how fine control we have over its domain. The smallest data type possible is *char*, which means one byte or 8 bits. This may be unsigned (so can store values from 0 to 255) or signed (values from -127 to +127). Although in case of three components this already gives 16 million possible colors to represent (like in case of RGB) we may acquire an even finer control by using the float (4 byte = 32 bit) or double (8 byte = 64 bit) data types for each component. Nevertheless, remember that increasing the size of a component also increases the size of the whole picture in the memory.
Each of the building components has its own valid domain. This leads to the data type used. How we store a component defines the control we have over its domain. The smallest data type possible is *char*, which means one byte or 8 bits. This may be unsigned (so it can store values from 0 to 255) or signed (values from -127 to +127). Although in the case of three components this already gives 16 million possible colors to represent (as in the case of RGB), we may acquire an even finer control by using the float (4 byte = 32 bit) or double (8 byte = 64 bit) data types for each component. Nevertheless, remember that increasing the size of a component also increases the size of the whole picture in the memory.
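To make the memory trade-off concrete, here is a small sketch (the byte counts assume a continuous single-channel matrix):

.. code-block:: cpp

   Mat gray8 (480, 640, CV_8UC1);  // 640 x 480 x 1 byte  ~ 0.3 MB
   Mat gray32(480, 640, CV_32FC1); // 640 x 480 x 4 bytes ~ 1.2 MB for the same image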
Creating explicitly a *Mat* object
Creating a *Mat* object explicitly
==================================
In the :ref:`Load_Save_Image` tutorial you could already see how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can achieve this via the << operator of *Mat*. However, be aware that this only works for two dimensional matrices.
In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:`imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
Although *Mat* is a great class as image container it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways:
Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways:
.. container:: enumeratevisibleitemswithsquare
@ -103,13 +105,13 @@ Although *Mat* is a great class as image container it is also a general matrix c
For two dimensional and multichannel images we first define their size: row and column count wise.
Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions made according to the following convention:
Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions constructed according to the following convention:
.. code-block:: cpp
CV_[The number of bits per item][Signed or Unsigned][Type Prefix]C[The channel number]
For instance, *CV_8UC3* means we use unsigned char types that are 8 bit long and each pixel has three items of this to form the three channels. This are predefined for up to four channel numbers. The :basicstructures:`Scalar <scalar>` is four element short vector. Specify this and you can initialize all matrix points with a custom value. However if you need more you can create the type with the upper macro and putting the channel number in parenthesis as you can see below.
For instance, *CV_8UC3* means we use unsigned char types that are 8 bit long and each pixel has three of these to form the three channels. These are predefined for up to four channels. The :basicstructures:`Scalar <scalar>` is a four-element short vector. Specify it and you can initialize all matrix points with a custom value. If you need more channels you can create the type with the macro above, setting the channel number in parentheses as you can see below.
+ Use C\\C++ arrays and initialize via constructor
@ -176,7 +178,7 @@ Although *Mat* is a great class as image container it is also a general matrix c
.. note::
You can fill out a matrix with random values using the :operationsOnArrays:`randu() <randu>` function. You need to give the lower and upper value between what you want the random values:
You can fill out a matrix with random values using the :operationsOnArrays:`randu() <randu>` function. You need to give the lower and upper value for the random values:
.. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
:language: cpp
@ -184,10 +186,10 @@ Although *Mat* is a great class as image container it is also a general matrix c
:lines: 57-58
Print out formatting
====================
Output formatting
=================
In the above examples you could see the default formatting option. Nevertheless, OpenCV allows you to format your matrix output format to fit the rules of:
In the above examples you could see the default formatting option. OpenCV, however, allows you to format your matrix output:
.. container:: enumeratevisibleitemswithsquare
@ -246,10 +248,10 @@ In the above examples you could see the default formatting option. Nevertheless,
:alt: Default Output
:align: center
Print for other common items
Output of other common items
============================
OpenCV offers support for print of other common OpenCV data structures too via the << operator like:
OpenCV offers support for output of other common OpenCV data structures too via the << operator:
.. container:: enumeratevisibleitemswithsquare
@ -298,9 +300,9 @@ OpenCV offers support for print of other common OpenCV data structures too via t
:alt: Default Output
:align: center
Most of the samples here have been included into a small console application. You can download it from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp>` or in the core section of the cpp samples.
Most of the samples here have been included in a small console application. You can download it from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp>` or in the core section of the cpp samples.
A quick video demonstration of this you can find on `YouTube <https://www.youtube.com/watch?v=1tibU7vGWpk>`_.
You can also find a quick video demonstration of this on `YouTube <https://www.youtube.com/watch?v=1tibU7vGWpk>`_.
.. raw:: html

View File

@ -78,39 +78,33 @@ See the "15-puzzle" OpenCV sample for details.
.. code-block:: java
:linenos:
public class MyActivity extends Activity implements HelperCallbackInterface
{
private BaseLoaderCallback mOpenCVCallBack = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
// Create and set View
mView = new puzzle15View(mAppContext);
setContentView(mView);
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public class Sample1Java extends Activity implements CvCameraViewListener {
/** Call on every application resume **/
@Override
protected void onResume()
{
Log.i(TAG, "called onResume");
super.onResume();
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
Log.i(TAG, "Trying to load OpenCV library");
if (!OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_2, this, mOpenCVCallBack))
@Override
public void onResume()
{
Log.e(TAG, "Cannot connect to OpenCV Manager");
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
...
}
In this case the application works with OpenCV Manager in an asynchronous fashion. ``OnManagerConnected``
@ -297,110 +291,43 @@ application. It will be capable of accessing camera output, processing it and di
result.
#. Open Eclipse IDE, create a new clean workspace, create a new Android project
:menuselection:`File --> New --> Android Project`.
:menuselection:`File --> New --> Android Project`
#. Set name, target, package and ``minSDKVersion`` accordingly.
#. Set name, target, package and ``minSDKVersion`` accordingly. The minimal SDK version for building
with OpenCV4Android SDK is 11. The minimal device API Level (for the application manifest) is 8.
#. Create a new class :menuselection:`File -> New -> Class`. Name it for example:
*HelloOpenCVView*.
#. Allow Eclipse to create default activity. Let's name the activity ``HelloOpenCvActivity``.
.. image:: images/dev_OCV_new_class.png
:alt: Add a new class.
#. Choose Blank Activity with full screen layout. Let's name the layout ``HelloOpenCvLayout``.
#. Import OpenCV library project to your workspace.
#. Reference OpenCV library within your project properties.
.. image:: images/dev_OCV_reference.png
:alt: Reference OpenCV library.
:align: center
* It should extend ``SurfaceView`` class.
* It also should implement ``SurfaceHolder.Callback``, ``Runnable``.
#. Edit your layout file as an xml file and put the following layout there:
#. Edit ``HelloOpenCVView`` class.
* Add an ``import`` line for ``android.content.context``.
* Modify autogenerated stubs: ``HelloOpenCVView``, ``surfaceCreated``, ``surfaceDestroyed`` and
``surfaceChanged``.
.. code-block:: java
.. code-block:: xml
:linenos:
package com.hello.opencv.test;
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
xmlns:opencv="http://schemas.android.com/apk/res-auto"
android:layout_width="match_parent"
android:layout_height="match_parent" >
import android.content.Context;
<org.opencv.android.JavaCameraView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:visibility="gone"
android:id="@+id/HelloOpenCvView"
opencv:show_fps="true"
opencv:camera_id="any" />
public class HelloOpenCVView extends SurfaceView implements Callback, Runnable {
public HelloOpenCVView(Context context) {
super(context);
getHolder().addCallback(this);
}
public void surfaceCreated(SurfaceHolder holder) {
(new Thread(this)).start();
}
public void surfaceDestroyed(SurfaceHolder holder) {
cameraRelease();
}
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
cameraSetup(width, height);
}
* Add ``cameraOpen``, ``cameraRelease`` and ``cameraSetup`` voids as shown below.
* Also, don't forget to add the public void ``run()`` as follows:
.. code-block:: java
:linenos:
public void run() {
// TODO: loop { getFrame(), processFrame(), drawFrame() }
}
public boolean cameraOpen() {
return false; //TODO: open camera
}
private void cameraRelease() {
// TODO release camera
}
private void cameraSetup(int width, int height) {
// TODO setup camera
}
#. Create a new ``Activity`` :menuselection:`New -> Other -> Android -> Android Activity` and name
it, for example: *HelloOpenCVActivity*. For this activity define ``onCreate``, ``onResume`` and
``onPause`` voids.
.. code-block:: java
:linenos:
public void onCreate (Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
mView = new HelloOpenCVView(this);
setContentView (mView);
}
protected void onPause() {
super.onPause();
mView.cameraRelease();
}
protected void onResume() {
super.onResume();
if( !mView.cameraOpen() ) {
// MessageBox and exit app
AlertDialog ad = new AlertDialog.Builder(this).create();
ad.setCancelable(false); // This blocks the "BACK" button
ad.setMessage("Fatal error: can't open camera!");
ad.setButton("OK", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
dialog.dismiss();
finish();
}
});
ad.show();
}
}
</LinearLayout>
#. Add the following permissions to the :file:`AndroidManifest.xml` file:
@ -409,108 +336,120 @@ result.
</application>
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera" />
<uses-feature android:name="android.hardware.camera.autofocus" />
<uses-permission android:name="android.permission.CAMERA"/>
#. Reference OpenCV library within your project properties.
<uses-feature android:name="android.hardware.camera" android:required="false"/>
<uses-feature android:name="android.hardware.camera.autofocus" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front.autofocus" android:required="false"/>
.. image:: images/dev_OCV_reference.png
:alt: Reference OpenCV library.
:align: center
#. Set the application theme in :file:`AndroidManifest.xml` to hide the title and system buttons.
#. We now need some code to handle the camera. Update the ``HelloOpenCVView`` class as follows:
.. code-block:: xml
:linenos:
<application
android:icon="@drawable/icon"
android:label="@string/app_name"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen" >
#. Add OpenCV library initialization to your activity. Fix errors by adding the required imports.
.. code-block:: java
:linenos:
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
@Override
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
#. Define that your activity implements the ``CvCameraViewListener`` interface and fix activity-related
errors by defining the missing methods. For this activity define ``onCreate``, ``onDestroy`` and
``onPause`` and implement them according to the code snippet below. Fix errors by adding the required
imports.
.. code-block:: java
:linenos:
private VideoCapture mCamera;
private CameraBridgeViewBase mOpenCvCameraView;
public boolean cameraOpen() {
synchronized (this) {
cameraRelease();
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
if (!mCamera.isOpened()) {
mCamera.release();
mCamera = null;
Log.e("HelloOpenCVView", "Failed to open native camera");
return false;
}
}
return true;
}
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "called onCreate");
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.HelloOpenCvLayout);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.HelloOpenCvView);
mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
mOpenCvCameraView.setCvCameraViewListener(this);
}
public void cameraRelease() {
synchronized(this) {
if (mCamera != null) {
mCamera.release();
mCamera = null;
}
}
}
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
private void cameraSetup(int width, int height) {
synchronized (this) {
if (mCamera != null && mCamera.isOpened()) {
List<Size> sizes = mCamera.getSupportedPreviewSizes();
int mFrameWidth = width;
int mFrameHeight = height;
{ // selecting optimal camera preview size
double minDiff = Double.MAX_VALUE;
for (Size size : sizes) {
if (Math.abs(size.height - height) < minDiff) {
mFrameWidth = (int) size.width;
mFrameHeight = (int) size.height;
minDiff = Math.abs(size.height - height);
}
}
}
mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, mFrameWidth);
mCamera.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, mFrameHeight);
}
}
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
#. The last step would be to update the ``run()`` void in ``HelloOpenCVView`` class as follows:
public void onCameraViewStarted(int width, int height) {
}
.. code-block:: java
:linenos:
public void onCameraViewStopped() {
}
public void run() {
while (true) {
Bitmap bmp = null;
synchronized (this) {
if (mCamera == null)
break;
if (!mCamera.grab())
break;
public Mat onCameraFrame(Mat inputFrame) {
return inputFrame;
}
bmp = processFrame(mCamera);
}
if (bmp != null) {
Canvas canvas = getHolder().lockCanvas();
if (canvas != null) {
canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2,
(canvas.getHeight() - bmp.getHeight()) / 2, null);
getHolder().unlockCanvasAndPost(canvas);
#. Run your application on device or emulator.
}
bmp.recycle();
}
}
}
Let's discuss some of the most important steps. Every Android application with a UI must implement an Activity
and a View. In the first steps we create a blank activity and a default view layout. The simplest
OpenCV-centric application must implement OpenCV initialization, create its own view to show
the camera preview and implement the ``CvCameraViewListener`` interface to get frames from the camera and
process them.
protected Bitmap processFrame(VideoCapture capture) {
Mat mRgba = new Mat();
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
//process mRgba
Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
try {
Utils.matToBitmap(mRgba, bmp);
} catch(Exception e) {
Log.e("processFrame", "Utils.matToBitmap() throws an exception: " + e.getMessage());
bmp.recycle();
bmp = null;
}
return bmp;
}
First of all we create our application view using an xml layout. Our layout consists of a single
full screen component of class ``org.opencv.android.JavaCameraView``. This class is
implemented inside the OpenCV library. It is inherited from ``CameraBridgeViewBase``, which extends
``SurfaceView`` and uses the standard Android camera API. Alternatively you can use the
``org.opencv.android.NativeCameraView`` class, which implements the same interface but uses the
``VideoCapture`` class as the camera access back-end. The ``opencv:show_fps="true"`` and
``opencv:camera_id="any"`` options enable the FPS message and allow the use of any camera on the device.
The application tries to use the back camera first.
After creating the layout we need to implement the ``Activity`` class. The OpenCV initialization process has
already been discussed above. In this sample we use asynchronous initialization. Implementing the
``CvCameraViewListener`` interface allows you to add processing steps after a frame is grabbed from the
camera and before it is rendered on screen. The most important function is ``onCameraFrame``. It is a
callback function that is called each time a frame is retrieved from the camera. The callback input is the
frame from the camera. The RGBA format is used by default. You can change this behavior with the
``SetCaptureFormat`` method of the ``View`` class. ``Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA`` and
``Highgui.CV_CAP_ANDROID_GREY_FRAME`` are supported. The function is expected to return the RGBA frame that
will be drawn on the screen.

View File

@ -137,7 +137,7 @@ Here you can read tutorials about how to set up your computer to work with the O
================ =================================================
|AndroidLogo| **Title:** :ref:`dev_with_OCV_on_Android`
*Compatibility:* > OpenCV 2.4.2
*Compatibility:* > OpenCV 2.4.3
*Author:* |Author_VsevolodG|

View File

@ -857,105 +857,120 @@ Rect getValidDisparityROI( Rect roi1, Rect roi2,
}
namespace
{
template <typename T>
void filterSpecklesImpl(cv::Mat& img, int newVal, int maxSpeckleSize, int maxDiff, cv::Mat& _buf)
{
using namespace cv;
int width = img.cols, height = img.rows, npixels = width*height;
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
_buf.create(1, (int)bufSize, CV_8U);
uchar* buf = _buf.data;
int i, j, dstep = (int)(img.step/sizeof(T));
int* labels = (int*)buf;
buf += npixels*sizeof(labels[0]);
Point2s* wbuf = (Point2s*)buf;
buf += npixels*sizeof(wbuf[0]);
uchar* rtype = (uchar*)buf;
int curlabel = 0;
// clear out label assignments
memset(labels, 0, npixels*sizeof(labels[0]));
for( i = 0; i < height; i++ )
{
T* ds = img.ptr<T>(i);
int* ls = labels + width*i;
for( j = 0; j < width; j++ )
{
if( ds[j] != newVal ) // not a bad disparity
{
if( ls[j] ) // has a label, check for bad label
{
if( rtype[ls[j]] ) // small region, zero out disparity
ds[j] = (T)newVal;
}
// no label, assign and propagate
else
{
Point2s* ws = wbuf; // initialize wavefront
Point2s p((short)j, (short)i); // current pixel
curlabel++; // next label
int count = 0; // current region size
ls[j] = curlabel;
// wavefront propagation
while( ws >= wbuf ) // wavefront not empty
{
count++;
// put neighbors onto wavefront
T* dpp = &img.at<T>(p.y, p.x);
T dp = *dpp;
int* lpp = labels + width*p.y + p.x;
if( p.x < width-1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff )
{
lpp[+1] = curlabel;
*ws++ = Point2s(p.x+1, p.y);
}
if( p.x > 0 && !lpp[-1] && dpp[-1] != newVal && std::abs(dp - dpp[-1]) <= maxDiff )
{
lpp[-1] = curlabel;
*ws++ = Point2s(p.x-1, p.y);
}
if( p.y < height-1 && !lpp[+width] && dpp[+dstep] != newVal && std::abs(dp - dpp[+dstep]) <= maxDiff )
{
lpp[+width] = curlabel;
*ws++ = Point2s(p.x, p.y+1);
}
if( p.y > 0 && !lpp[-width] && dpp[-dstep] != newVal && std::abs(dp - dpp[-dstep]) <= maxDiff )
{
lpp[-width] = curlabel;
*ws++ = Point2s(p.x, p.y-1);
}
// pop most recent and propagate
// NB: could try least recent, maybe better convergence
p = *--ws;
}
// assign label type
if( count <= maxSpeckleSize ) // speckle region
{
rtype[ls[j]] = 1; // small region label
ds[j] = (T)newVal;
}
else
rtype[ls[j]] = 0; // large region label
}
}
}
}
}
}
void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSize,
double _maxDiff, InputOutputArray __buf )
{
Mat img = _img.getMat();
Mat temp, &_buf = __buf.needed() ? __buf.getMatRef() : temp;
CV_Assert( img.type() == CV_16SC1 );
CV_Assert( img.type() == CV_8UC1 || img.type() == CV_16SC1 );
int newVal = cvRound(_newval);
int maxDiff = cvRound(_maxDiff);
int width = img.cols, height = img.rows, npixels = width*height;
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
_buf.create(1, (int)bufSize, CV_8U);
uchar* buf = _buf.data;
int i, j, dstep = (int)(img.step/sizeof(short));
int* labels = (int*)buf;
buf += npixels*sizeof(labels[0]);
Point2s* wbuf = (Point2s*)buf;
buf += npixels*sizeof(wbuf[0]);
uchar* rtype = (uchar*)buf;
int curlabel = 0;
// clear out label assignments
memset(labels, 0, npixels*sizeof(labels[0]));
for( i = 0; i < height; i++ )
{
short* ds = img.ptr<short>(i);
int* ls = labels + width*i;
for( j = 0; j < width; j++ )
{
if( ds[j] != newVal ) // not a bad disparity
{
if( ls[j] ) // has a label, check for bad label
{
if( rtype[ls[j]] ) // small region, zero out disparity
ds[j] = (short)newVal;
}
// no label, assign and propagate
else
{
Point2s* ws = wbuf; // initialize wavefront
Point2s p((short)j, (short)i); // current pixel
curlabel++; // next label
int count = 0; // current region size
ls[j] = curlabel;
// wavefront propagation
while( ws >= wbuf ) // wavefront not empty
{
count++;
// put neighbors onto wavefront
short* dpp = &img.at<short>(p.y, p.x);
short dp = *dpp;
int* lpp = labels + width*p.y + p.x;
if( p.x < width-1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff )
{
lpp[+1] = curlabel;
*ws++ = Point2s(p.x+1, p.y);
}
if( p.x > 0 && !lpp[-1] && dpp[-1] != newVal && std::abs(dp - dpp[-1]) <= maxDiff )
{
lpp[-1] = curlabel;
*ws++ = Point2s(p.x-1, p.y);
}
if( p.y < height-1 && !lpp[+width] && dpp[+dstep] != newVal && std::abs(dp - dpp[+dstep]) <= maxDiff )
{
lpp[+width] = curlabel;
*ws++ = Point2s(p.x, p.y+1);
}
if( p.y > 0 && !lpp[-width] && dpp[-dstep] != newVal && std::abs(dp - dpp[-dstep]) <= maxDiff )
{
lpp[-width] = curlabel;
*ws++ = Point2s(p.x, p.y-1);
}
// pop most recent and propagate
// NB: could try least recent, maybe better convergence
p = *--ws;
}
// assign label type
if( count <= maxSpeckleSize ) // speckle region
{
rtype[ls[j]] = 1; // small region label
ds[j] = (short)newVal;
}
else
rtype[ls[j]] = 0; // large region label
}
}
}
}
if (img.type() == CV_8UC1)
filterSpecklesImpl<uchar>(img, newVal, maxSpeckleSize, maxDiff, _buf);
else
filterSpecklesImpl<short>(img, newVal, maxSpeckleSize, maxDiff, _buf);
}
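With the templated filterSpecklesImpl above, cv::filterSpeckles accepts both CV_8UC1 and CV_16SC1 disparity maps. A minimal usage sketch (the parameter values are illustrative):

    cv::Mat disp(240, 320, CV_16SC1, cv::Scalar(0)); // disparity map, e.g. from StereoBM
    cv::Mat buf;                                     // optional scratch buffer, reused across calls
    cv::filterSpeckles(disp, /*newVal=*/0, /*maxSpeckleSize=*/50, /*maxDiff=*/4, buf);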
void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDisparity,

View File

@ -1243,10 +1243,14 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
bool haveMask = !_mask.empty();
bool reallocate = false;
bool src1Scalar = checkScalar(src1, src2.type(), kind1, kind2);
bool src2Scalar = checkScalar(src2, src1.type(), kind2, kind1);
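// take the fast path only when both operands or neither look like a scalar;
// otherwise an Nx1/1xN Mat (e.g. a column view) could be mistaken for a Scalar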
if( (kind1 == kind2 || src1.channels() == 1) && src1.dims <= 2 && src2.dims <= 2 &&
src1.size() == src2.size() && src1.type() == src2.type() &&
!haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == src1.depth())) ||
(_dst.fixedType() && _dst.type() == _src1.type())) )
(_dst.fixedType() && _dst.type() == _src1.type())) &&
((src1Scalar && src2Scalar) || (!src1Scalar && !src2Scalar)) )
{
_dst.create(src1.size(), src1.type());
Mat dst = _dst.getMat();

View File

@ -94,7 +94,7 @@ template<typename T1, typename T2=T1, typename T3=T1> struct OpAdd
typedef T1 type1;
typedef T2 type2;
typedef T3 rtype;
T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(a + b); }
T3 operator ()(const T1 a, const T2 b) const { return saturate_cast<T3>(a + b); }
};
template<typename T1, typename T2=T1, typename T3=T1> struct OpSub
@ -102,7 +102,7 @@ template<typename T1, typename T2=T1, typename T3=T1> struct OpSub
typedef T1 type1;
typedef T2 type2;
typedef T3 rtype;
T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(a - b); }
T3 operator ()(const T1 a, const T2 b) const { return saturate_cast<T3>(a - b); }
};
template<typename T1, typename T2=T1, typename T3=T1> struct OpRSub
@ -110,7 +110,7 @@ template<typename T1, typename T2=T1, typename T3=T1> struct OpRSub
typedef T1 type1;
typedef T2 type2;
typedef T3 rtype;
T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(b - a); }
T3 operator ()(const T1 a, const T2 b) const { return saturate_cast<T3>(b - a); }
};
template<typename T> struct OpMin
@ -118,7 +118,7 @@ template<typename T> struct OpMin
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(T a, T b) const { return std::min(a, b); }
T operator ()(const T a, const T b) const { return std::min(a, b); }
};
template<typename T> struct OpMax
@ -126,7 +126,7 @@ template<typename T> struct OpMax
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(T a, T b) const { return std::max(a, b); }
T operator ()(const T a, const T b) const { return std::max(a, b); }
};
inline Size getContinuousSize( const Mat& m1, int widthScale=1 )

View File

@ -1530,4 +1530,24 @@ TEST(Multiply, FloatingPointRounding)
cv::multiply(src, s, dst, 1, CV_16U);
// with CV_32F this produce result 16202
ASSERT_EQ(dst.at<ushort>(0,0), 16201);
}
}
TEST(Core_Add, AddToColumnWhen3Rows)
{
cv::Mat m1 = (cv::Mat_<double>(3, 2) << 1, 2, 3, 4, 5, 6);
m1.col(1) += 10;
cv::Mat m2 = (cv::Mat_<double>(3, 2) << 1, 12, 3, 14, 5, 16);
ASSERT_EQ(0, countNonZero(m1 - m2));
}
TEST(Core_Add, AddToColumnWhen4Rows)
{
cv::Mat m1 = (cv::Mat_<double>(4, 2) << 1, 2, 3, 4, 5, 6, 7, 8);
m1.col(1) += 10;
cv::Mat m2 = (cv::Mat_<double>(4, 2) << 1, 12, 3, 14, 5, 16, 7, 18);
ASSERT_EQ(0, countNonZero(m1 - m2));
}

View File

@ -182,7 +182,7 @@ Enables the :ocv:class:`gpu::StereoBeliefPropagation` constructors.
.. math::
DataCost = data \_ weight \cdot \min ( \lvert I_2-I_1 \rvert , max \_ data \_ term)
DataCost = data \_ weight \cdot \min ( \lvert Img\_Left(x,y)-Img\_Right(x-d,y) \rvert , max \_ data \_ term)
.. math::

View File

@ -57,32 +57,11 @@ static CvCreateVideoWriter_Plugin icvCreateVideoWriter_FFMPEG_p = 0;
static CvReleaseVideoWriter_Plugin icvReleaseVideoWriter_FFMPEG_p = 0;
static CvWriteFrame_Plugin icvWriteFrame_FFMPEG_p = 0;
static cv::Mutex _icvInitFFMPEG_mutex;
class icvInitFFMPEG
static void
icvInitFFMPEG(void)
{
public:
static void Init()
{
cv::AutoLock al(_icvInitFFMPEG_mutex);
static icvInitFFMPEG init;
}
private:
#if defined WIN32 || defined _WIN32
HMODULE icvFFOpenCV;
~icvInitFFMPEG()
{
if (icvFFOpenCV)
{
FreeLibrary(icvFFOpenCV);
icvFFOpenCV = 0;
}
}
#endif
icvInitFFMPEG()
static int ffmpegInitialized = 0;
if( !ffmpegInitialized )
{
#if defined WIN32 || defined _WIN32
const char* module_name = "opencv_ffmpeg"
@ -92,7 +71,7 @@ private:
#endif
".dll";
icvFFOpenCV = LoadLibrary( module_name );
static HMODULE icvFFOpenCV = LoadLibrary( module_name );
if( icvFFOpenCV )
{
icvCreateFileCapture_FFMPEG_p =
@ -138,8 +117,10 @@ private:
icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
#endif
ffmpegInitialized = 1;
}
};
}
class CvCapture_FFMPEG_proxy : public CvCapture
@ -174,9 +155,9 @@ public:
}
virtual bool open( const char* filename )
{
icvInitFFMPEG::Init();
close();
icvInitFFMPEG();
if( !icvCreateFileCapture_FFMPEG_p )
return false;
ffmpegCapture = icvCreateFileCapture_FFMPEG_p( filename );
@ -209,6 +190,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
#endif
}
class CvVideoWriter_FFMPEG_proxy : public CvVideoWriter
{
public:
@ -226,8 +208,8 @@ public:
}
virtual bool open( const char* filename, int fourcc, double fps, CvSize frameSize, bool isColor )
{
icvInitFFMPEG::Init();
close();
icvInitFFMPEG();
if( !icvCreateVideoWriter_FFMPEG_p )
return false;
ffmpegWriter = icvCreateVideoWriter_FFMPEG_p( filename, fourcc, fps, frameSize.width, frameSize.height, isColor );

View File

@ -328,179 +328,28 @@ void CvCapture_FFMPEG::close()
#define AVSEEK_FLAG_ANY 1
#endif
class ImplMutex
static void icvInitFFMPEG_internal()
{
public:
ImplMutex();
~ImplMutex();
void lock();
bool trylock();
void unlock();
struct Impl;
protected:
Impl* impl;
private:
ImplMutex(const ImplMutex&);
ImplMutex& operator = (const ImplMutex& m);
};
#if defined WIN32 || defined _WIN32 || defined WINCE
struct ImplMutex::Impl
{
Impl() { InitializeCriticalSection(&cs); refcount = 1; }
~Impl() { DeleteCriticalSection(&cs); }
void lock() { EnterCriticalSection(&cs); }
bool trylock() { return TryEnterCriticalSection(&cs) != 0; }
void unlock() { LeaveCriticalSection(&cs); }
CRITICAL_SECTION cs;
int refcount;
};
#ifndef __GNUC__
static int _interlockedExchangeAdd(int* addr, int delta)
{
#if defined _MSC_VER && _MSC_VER >= 1500
return (int)_InterlockedExchangeAdd((long volatile*)addr, delta);
#else
return (int)InterlockedExchangeAdd((long volatile*)addr, delta);
#endif
}
#endif // __GNUC__
#elif defined __APPLE__
#include <libkern/OSAtomic.h>
struct ImplMutex::Impl
{
Impl() { sl = OS_SPINLOCK_INIT; refcount = 1; }
~Impl() {}
void lock() { OSSpinLockLock(&sl); }
bool trylock() { return OSSpinLockTry(&sl); }
void unlock() { OSSpinLockUnlock(&sl); }
OSSpinLock sl;
int refcount;
};
#elif defined __linux__ && !defined ANDROID
struct ImplMutex::Impl
{
Impl() { pthread_spin_init(&sl, 0); refcount = 1; }
~Impl() { pthread_spin_destroy(&sl); }
void lock() { pthread_spin_lock(&sl); }
bool trylock() { return pthread_spin_trylock(&sl) == 0; }
void unlock() { pthread_spin_unlock(&sl); }
pthread_spinlock_t sl;
int refcount;
};
#else
struct ImplMutex::Impl
{
Impl() { pthread_mutex_init(&sl, 0); refcount = 1; }
~Impl() { pthread_mutex_destroy(&sl); }
void lock() { pthread_mutex_lock(&sl); }
bool trylock() { return pthread_mutex_trylock(&sl) == 0; }
void unlock() { pthread_mutex_unlock(&sl); }
pthread_mutex_t sl;
int refcount;
};
#endif
ImplMutex::ImplMutex()
{
impl = new ImplMutex::Impl;
}
ImplMutex::~ImplMutex()
{
delete impl;
impl = 0;
}
void ImplMutex::lock() { impl->lock(); }
void ImplMutex::unlock() { impl->unlock(); }
bool ImplMutex::trylock() { return impl->trylock(); }
static int LockCallBack(void **mutex, AVLockOp op)
{
switch (op)
static volatile bool initialized = false;
if( !initialized )
{
case AV_LOCK_CREATE:
*mutex = reinterpret_cast<void*>(new ImplMutex());
if (!*mutex)
return 1;
break;
case AV_LOCK_OBTAIN:
reinterpret_cast<ImplMutex*>(*mutex)->lock();
break;
case AV_LOCK_RELEASE:
reinterpret_cast<ImplMutex*>(*mutex)->unlock();
break;
case AV_LOCK_DESTROY:
ImplMutex* cv_mutex = reinterpret_cast<ImplMutex*>(*mutex);
delete cv_mutex;
cv_mutex = NULL;
break;
}
return 0;
}
static ImplMutex _InternalFFMpegRegister_mutex;
class InternalFFMpegRegister
{
public:
static void Register()
{
_InternalFFMpegRegister_mutex.lock();
static InternalFFMpegRegister init;
_InternalFFMpegRegister_mutex.unlock();
}
~InternalFFMpegRegister()
{
av_lockmgr_register(NULL);
}
private:
InternalFFMpegRegister()
{
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
avformat_network_init();
#endif
#endif
/* register all codecs, demux and protocols */
av_register_all();
/* register a callback function for synchronization */
av_lockmgr_register(&LockCallBack);
av_log_set_level(AV_LOG_ERROR);
initialized = true;
}
};
}
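
Note that the replacement icvInitFFMPEG_internal() guards its one-time setup with a plain static volatile bool, which does not exclude two threads opening captures at once — both can observe initialized == false and register twice (harmless for av_register_all, but racy in principle). A race-free POSIX-only sketch — an assumption, not what this patch does:

    #include <pthread.h>

    static pthread_once_t ffmpeg_init_once = PTHREAD_ONCE_INIT;

    static void icvInitFFMPEG_once(void)
    {
        av_register_all();               // codecs, demuxers and protocols
        av_log_set_level(AV_LOG_ERROR);  // keep FFMPEG quiet
    }

    static void icvInitFFMPEG_internal_safe(void)
    {
        // pthread_once() runs the callback exactly once, even when
        // several threads race to open the first capture.
        pthread_once(&ffmpeg_init_once, icvInitFFMPEG_once);
    }
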
bool CvCapture_FFMPEG::open( const char* _filename )
{
InternalFFMpegRegister::Register();
icvInitFFMPEG_internal();
unsigned i;
bool valid = false;
@ -512,8 +361,7 @@ bool CvCapture_FFMPEG::open( const char* _filename )
int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
#endif
if (err < 0)
{
if (err < 0) {
CV_WARN("Error opening file");
goto exit_func;
}
@ -523,8 +371,7 @@ bool CvCapture_FFMPEG::open( const char* _filename )
#else
av_find_stream_info(ic);
#endif
if (err < 0)
{
if (err < 0) {
CV_WARN("Could not find codec parameters");
goto exit_func;
}
@ -546,8 +393,7 @@ bool CvCapture_FFMPEG::open( const char* _filename )
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
#endif
if( AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream < 0)
{
if( AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream < 0) {
AVCodec *codec = avcodec_find_decoder(enc->codec_id);
if (!codec ||
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
@ -555,8 +401,7 @@ bool CvCapture_FFMPEG::open( const char* _filename )
#else
avcodec_open(enc, codec)
#endif
< 0)
goto exit_func;
< 0) goto exit_func;
video_stream = i;
video_st = ic->streams[i];
@ -1430,7 +1275,7 @@ void CvVideoWriter_FFMPEG::close()
bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
double fps, int width, int height, bool is_color )
{
InternalFFMpegRegister::Register();
icvInitFFMPEG_internal();
CodecID codec_id = CODEC_ID_NONE;
int err, codec_pix_fmt;
@ -1650,7 +1495,6 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
frame_width = width;
frame_height = height;
ok = true;
return true;
}
@ -1662,7 +1506,6 @@ CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG( const char* filename )
capture->init();
if( capture->open( filename ))
return capture;
capture->close();
free(capture);
return 0;
@ -1711,6 +1554,7 @@ CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG( const char* filename, int four
return 0;
}
void cvReleaseVideoWriter_FFMPEG( CvVideoWriter_FFMPEG** writer )
{
if( writer && *writer )
@ -1897,12 +1741,15 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CodecID
bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height, double fps)
{
InternalFFMpegRegister::Register();
fmt_ = 0;
oc_ = 0;
video_st_ = 0;
// tell FFMPEG to register codecs
av_register_all();
av_log_set_level(AV_LOG_ERROR);
// auto detect the output format from the name and fourcc code
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
fmt_ = av_guess_format(NULL, fileName, NULL);
@ -2073,8 +1920,6 @@ private:
bool InputMediaStream_FFMPEG::open(const char* fileName, int* codec, int* chroma_format, int* width, int* height)
{
InternalFFMpegRegister::Register();
int err;
ctx_ = 0;
@ -2085,6 +1930,11 @@ bool InputMediaStream_FFMPEG::open(const char* fileName, int* codec, int* chroma
avformat_network_init();
#endif
// register all codecs, demux and protocols
av_register_all();
av_log_set_level(AV_LOG_ERROR);
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 6, 0)
err = avformat_open_input(&ctx_, fileName, 0, 0);
#else
@ -2204,7 +2054,7 @@ bool InputMediaStream_FFMPEG::read(unsigned char** data, int* size, int* endOfFi
if (ret < 0)
{
if (ret == (int)AVERROR_EOF)
if (ret == AVERROR_EOF)
*endOfFile = true;
return false;
}

View File

@ -241,7 +241,9 @@ public:
virtual void operator() (const Range& range) const
{
CV_Assert((range.start + 1) == range.end);
if((range.start + 1) != range.end)
return;
VideoWriter* writer = writers->operator[](range.start);
CV_Assert(writer != NULL);
CV_Assert(writer->isOpened());
@ -303,7 +305,9 @@ public:
virtual void operator() (const Range& range) const
{
CV_Assert(range.start + 1 == range.end);
if((range.start + 1) != range.end)
return;
VideoCapture* capture = readers->operator[](range.start);
CV_Assert(capture != NULL);
CV_Assert(capture->isOpened());
@ -355,7 +359,7 @@ private:
bool ReadImageAndTest::next;
TEST(Highgui_Video_parallel_writers_and_readers, accuracy)
TEST(Highgui_Video_parallel_writers_and_readers, DISABLED_accuracy)
{
const unsigned int threadsCount = 4;
cvtest::TS* ts = cvtest::TS::ptr();

View File

@ -0,0 +1,81 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
CV_ENUM(MethodType, CV_TM_SQDIFF, CV_TM_SQDIFF_NORMED, CV_TM_CCORR, CV_TM_CCORR_NORMED, CV_TM_CCOEFF, CV_TM_CCOEFF_NORMED)
typedef std::tr1::tuple<Size, Size, MethodType> ImgSize_TmplSize_Method_t;
typedef perf::TestBaseWithParam<ImgSize_TmplSize_Method_t> ImgSize_TmplSize_Method;
PERF_TEST_P(ImgSize_TmplSize_Method, matchTemplateSmall,
testing::Combine(
testing::Values(szSmall128, cv::Size(320, 240),
cv::Size(640, 480), cv::Size(800, 600),
cv::Size(1024, 768), cv::Size(1280, 1024)),
testing::Values(cv::Size(12, 12), cv::Size(28, 9),
cv::Size(8, 30), cv::Size(16, 16)),
testing::ValuesIn(MethodType::all())
)
)
{
Size imgSz = get<0>(GetParam());
Size tmplSz = get<1>(GetParam());
int method = get<2>(GetParam());
Mat img(imgSz, CV_8UC1);
Mat tmpl(tmplSz, CV_8UC1);
Mat result(imgSz - tmplSz + Size(1,1), CV_32F);
declare
.in(img, WARMUP_RNG)
.in(tmpl, WARMUP_RNG)
.out(result);
TEST_CYCLE() matchTemplate(img, tmpl, result, method);
bool isNormed =
method == CV_TM_CCORR_NORMED ||
method == CV_TM_SQDIFF_NORMED ||
method == CV_TM_CCOEFF_NORMED;
double eps = isNormed ? 1e-6
: 255 * 255 * tmpl.total() * 1e-6;
SANITY_CHECK(result, eps);
}
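
For scale: with the largest 16x16 template above, the unnormalized tolerance works out to eps = 255 * 255 * tmpl.total() * 1e-6 = 65025 * 256 * 1e-6 ≈ 16.6, i.e. one millionth of the largest possible correlation sum, while the normed methods, whose responses stay within roughly [-1, 1], keep the flat 1e-6.
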
PERF_TEST_P(ImgSize_TmplSize_Method, matchTemplateBig,
testing::Combine(
testing::Values(cv::Size(1280, 1024)),
testing::Values(cv::Size(1260, 1000), cv::Size(1261, 1013)),
testing::ValuesIn(MethodType::all())
)
)
{
Size imgSz = get<0>(GetParam());
Size tmplSz = get<1>(GetParam());
int method = get<2>(GetParam());
Mat img(imgSz, CV_8UC1);
Mat tmpl(tmplSz, CV_8UC1);
Mat result(imgSz - tmplSz + Size(1,1), CV_32F);
declare
.in(img, WARMUP_RNG)
.in(tmpl, WARMUP_RNG)
.out(result);
TEST_CYCLE() matchTemplate(img, tmpl, result, method);
bool isNormed =
method == CV_TM_CCORR_NORMED ||
method == CV_TM_SQDIFF_NORMED ||
method == CV_TM_CCOEFF_NORMED;
double eps = isNormed ? 1e-6
: 255 * 255 * tmpl.total() * 1e-6;
SANITY_CHECK(result, eps);
}

View File

@ -56,7 +56,7 @@ template<typename T> struct MinOp
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(T a, T b) const { return std::min(a, b); }
T operator ()(const T a, const T b) const { return std::min(a, b); }
};
template<typename T> struct MaxOp
@ -64,7 +64,7 @@ template<typename T> struct MaxOp
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(T a, T b) const { return std::max(a, b); }
T operator ()(const T a, const T b) const { return std::max(a, b); }
};
#undef CV_MIN_8U
@ -72,8 +72,8 @@ template<typename T> struct MaxOp
#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))
template<> inline uchar MinOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); }
template<> inline uchar MinOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MAX_8U(a, b); }
struct MorphRowNoVec
{

View File

@ -252,6 +252,11 @@ void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result,
_result.create(corrSize, CV_32F);
Mat result = _result.getMat();
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::matchTemplate(img, templ, result, method))
return;
#endif
int cn = img.channels();
crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);

View File

@ -47,11 +47,15 @@ CV_IMPL CvSeq* cvPointSeqFromMat( int seq_kind, const CvArr* arr,
CV_Assert( arr != 0 && contour_header != 0 && block != 0 );
int eltype;
CvMat hdr;
CvMat* mat = (CvMat*)arr;
if( !CV_IS_MAT( mat ))
CV_Error( CV_StsBadArg, "Input array is not a valid matrix" );
if( CV_MAT_CN(mat->type) == 1 && mat->width == 2 )
mat = cvReshape(mat, &hdr, 2);
eltype = CV_MAT_TYPE( mat->type );
if( eltype != CV_32SC2 && eltype != CV_32FC2 )
CV_Error( CV_StsUnsupportedFormat,

View File

@ -166,11 +166,7 @@ if(ANDROID)
set(android_copied_files "")
set(android_step3_input_files "")
foreach(file ${handwrittren_lib_project_files_rel})
add_custom_command(OUTPUT "${OpenCV_BINARY_DIR}/${file}"
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/android_lib/${file}" "${OpenCV_BINARY_DIR}/${file}"
MAIN_DEPENDENCY "${CMAKE_CURRENT_SOURCE_DIR}/android_lib/${file}"
COMMENT "Generating ${file}"
)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/android_lib/${file}" "${OpenCV_BINARY_DIR}/${file}" @ONLY)
list(APPEND android_copied_files "${OpenCV_BINARY_DIR}/${file}")
list(APPEND android_step3_input_files "${CMAKE_CURRENT_SOURCE_DIR}/android_lib/${file}")
@ -198,7 +194,7 @@ if(ANDROID AND ANDROID_EXECUTABLE)
ocv_list_add_prefix(lib_target_files "${OpenCV_BINARY_DIR}/")
android_get_compatible_target(lib_target_sdk_target ${ANDROID_NATIVE_API_LEVEL} ${ANDROID_SDK_TARGET} 11)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/android_lib/${ANDROID_MANIFEST_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ANDROID_MANIFEST_FILE}")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/android_lib/${ANDROID_MANIFEST_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ANDROID_MANIFEST_FILE}" @ONLY)
add_custom_command(OUTPUT ${lib_target_files} "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}"
COMMAND ${CMAKE_COMMAND} -E remove ${lib_target_files}
@ -257,6 +253,13 @@ else(ANDROID)
DEPENDS ${step3_depends}
COMMENT "Generating ${JAR_NAME}"
)
if(WIN32)
set(JAR_INSTALL_DIR java)
else(WIN32)
set(JAR_INSTALL_DIR share/OpenCV/java)
endif(WIN32)
install(FILES ${JAR_FILE} DESTINATION ${JAR_INSTALL_DIR} COMPONENT main)
endif(ANDROID)
# step 5: build native part
@ -291,7 +294,8 @@ endif()
# Additional target properties
set_target_properties(${the_module} PROPERTIES
OUTPUT_NAME "${the_module}"
OUTPUT_NAME "${the_module}${OPENCV_DLLVERSION}"
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
INSTALL_NAME_DIR ${OPENCV_LIB_INSTALL_PATH}
@ -308,9 +312,15 @@ if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_module} PROPERTIES FOLDER "bindings")
endif()
install(TARGETS ${the_module}
LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main
ARCHIVE DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
if(ANDROID)
install(TARGETS ${the_module}
LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main
ARCHIVE DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
else()
install(TARGETS ${the_module}
RUNTIME DESTINATION ${JAR_INSTALL_DIR} COMPONENT main
LIBRARY DESTINATION ${JAR_INSTALL_DIR} COMPONENT main)
endif()
######################################################################################################################################

View File

@ -1,2 +1,6 @@
# fixing project properties
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
org.eclipse.jdt.core.compiler.compliance=1.6
org.eclipse.jdt.core.compiler.source=1.6
org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=ignore

View File

@ -36,7 +36,7 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
private int mState = STOPPED;
private Bitmap mCacheBitmap;
private CvCameraViewListener mListener;
private CvCameraViewListener2 mListener;
private boolean mSurfaceExist;
private Object mSyncObject = new Object();
@ -92,9 +92,75 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
* TODO: pass the parameters specifying the format of the frame (BPP, YUV or RGB, etc.)
*/
public Mat onCameraFrame(Mat inputFrame);
}
public interface CvCameraViewListener2 {
/**
* This method is invoked when camera preview has started. After this method is invoked
* the frames will start to be delivered to the client via the onCameraFrame() callback.
* @param width - the width of the frames that will be delivered
* @param height - the height of the frames that will be delivered
*/
public void onCameraViewStarted(int width, int height);
/**
* This method is invoked when camera preview has been stopped for some reason.
* No frames will be delivered via onCameraFrame() callback after this method is called.
*/
public void onCameraViewStopped();
/**
* This method is invoked when delivery of the frame needs to be done.
* The returned value is the modified frame that will be displayed on the screen.
* TODO: pass the parameters specifying the format of the frame (BPP, YUV or RGB, etc.)
*/
public Mat onCameraFrame(CvCameraViewFrame inputFrame);
};
protected class CvCameraViewListenerAdapter implements CvCameraViewListener2 {
public CvCameraViewListenerAdapter(CvCameraViewListener oldStyleListener) {
mOldStyleListener = oldStyleListener;
}
public void onCameraViewStarted(int width, int height) {
mOldStyleListener.onCameraViewStarted(width, height);
}
public void onCameraViewStopped() {
mOldStyleListener.onCameraViewStopped();
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
Mat result = null;
switch (mPreviewFormat) {
case Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA:
result = mOldStyleListener.onCameraFrame(inputFrame.rgba());
break;
case Highgui.CV_CAP_ANDROID_GREY_FRAME:
result = mOldStyleListener.onCameraFrame(inputFrame.gray());
break;
default:
Log.e(TAG, "Invalid frame format! Only RGBA and Gray Scale are supported!");
};
return result;
}
public void setFrameFormat(int format) {
mPreviewFormat = format;
}
private CvCameraViewListenerAdapter() {}
private int mPreviewFormat = Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA;
private CvCameraViewListener mOldStyleListener;
};
public interface CvCameraViewFrame {
public abstract Mat rgba();
public abstract Mat gray();
};
public void surfaceChanged(SurfaceHolder arg0, int arg1, int arg2, int arg3) {
Log.d(TAG, "call surfaceChanged event");
synchronized(mSyncObject) {
@ -165,10 +231,16 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
* @param listener
*/
public void setCvCameraViewListener(CvCameraViewListener listener) {
public void setCvCameraViewListener(CvCameraViewListener2 listener) {
mListener = listener;
}
public void setCvCameraViewListener(CvCameraViewListener listener) {
CvCameraViewListenerAdapter adapter = new CvCameraViewListenerAdapter(listener);
adapter.setFrameFormat(mPreviewFormat);
mListener = adapter;
}
/**
* This method sets the maximum size that the camera frame is allowed to be. When selecting
* a size, the biggest size which is less than or equal to the set size will be selected.
@ -186,6 +258,10 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
public void SetCaptureFormat(int format)
{
mPreviewFormat = format;
if (mListener instanceof CvCameraViewListenerAdapter) {
CvCameraViewListenerAdapter adapter = (CvCameraViewListenerAdapter) mListener;
adapter.setFrameFormat(mPreviewFormat);
}
}
/**
@ -276,13 +352,13 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
* then displayed on the screen.
* @param frame - the current frame to be delivered
*/
protected void deliverAndDrawFrame(Mat frame) {
protected void deliverAndDrawFrame(CvCameraViewFrame frame) {
Mat modified;
if (mListener != null) {
modified = mListener.onCameraFrame(frame);
} else {
modified = frame;
modified = frame.rgba();
}
boolean bmpValid = true;

View File

@ -16,7 +16,6 @@ import android.view.SurfaceHolder;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
/**
@ -33,7 +32,6 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
private static final int MAGIC_TEXTURE_ID = 10;
private static final String TAG = "JavaCameraView";
private Mat mBaseMat;
private byte mBuffer[];
private Mat[] mFrameChain;
private int mChainIdx = 0;
@ -41,7 +39,7 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame mCameraFrame;
private SurfaceTexture mSurfaceTexture;
public static class JavaCameraSizeAccessor implements ListItemAccessor {
@ -146,14 +144,14 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
mCamera.addCallbackBuffer(mBuffer);
mCamera.setPreviewCallbackWithBuffer(this);
mBaseMat = new Mat(mFrameHeight + (mFrameHeight/2), mFrameWidth, CvType.CV_8UC1);
mFrameChain = new Mat[2];
mFrameChain[0] = new Mat();
mFrameChain[1] = new Mat();
mFrameChain[0] = new Mat(mFrameHeight + (mFrameHeight/2), mFrameWidth, CvType.CV_8UC1);
mFrameChain[1] = new Mat(mFrameHeight + (mFrameHeight/2), mFrameWidth, CvType.CV_8UC1);
AllocateCache();
mCameraFrame = new JavaCameraFrame(mFrameChain[mChainIdx], mFrameWidth, mFrameHeight);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
mSurfaceTexture = new SurfaceTexture(MAGIC_TEXTURE_ID);
getHolder().setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
@ -183,12 +181,12 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
mCamera.release();
}
mCamera = null;
if (mBaseMat != null)
mBaseMat.release();
if (mFrameChain != null) {
mFrameChain[0].release();
mFrameChain[1].release();
}
if (mCameraFrame != null)
mCameraFrame.release();
}
}
@ -200,7 +198,7 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
*/
/* First step - initialize camera connection */
Log.d(TAG, "Connecting to camera");
if (!initializeCamera(getWidth(), getHeight()))
if (!initializeCamera(width, height))
return false;
/* now we can start update thread */
@ -242,13 +240,45 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
Log.i(TAG, "Frame size is " + frame.length);
synchronized (this)
{
mBaseMat.put(0, 0, frame);
mFrameChain[1 - mChainIdx].put(0, 0, frame);
this.notify();
}
if (mCamera != null)
mCamera.addCallbackBuffer(mBuffer);
}
private class JavaCameraFrame implements CvCameraViewFrame
{
public Mat gray() {
return mYuvFrameData.submat(0, mHeight, 0, mWidth);
}
public Mat rgba() {
Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2RGBA_NV21, 4); // Android preview frames are NV21; convert to RGBA for display
return mRgba;
}
public JavaCameraFrame(Mat Yuv420sp, int width, int height) {
super();
mWidth = width;
mHeight = height;
mYuvFrameData = Yuv420sp;
mRgba = new Mat();
}
public void release() {
mRgba.release();
}
private JavaCameraFrame(CvCameraViewFrame obj) {
}
private Mat mYuvFrameData;
private Mat mRgba;
private int mWidth;
private int mHeight;
};
private class CameraWorker implements Runnable {
public void run() {
@ -263,18 +293,8 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
}
if (!mStopThread) {
switch (mPreviewFormat) {
case Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA:
Imgproc.cvtColor(mBaseMat, mFrameChain[mChainIdx], Imgproc.COLOR_YUV2RGBA_NV21, 4);
break;
case Highgui.CV_CAP_ANDROID_GREY_FRAME:
mFrameChain[mChainIdx] = mBaseMat.submat(0, mFrameHeight, 0, mFrameWidth);
break;
default:
Log.e(TAG, "Invalid frame format! Only RGBA and Gray Scale are supported!");
};
if (!mFrameChain[mChainIdx].empty())
deliverAndDrawFrame(mFrameChain[mChainIdx]);
deliverAndDrawFrame(mCameraFrame);
mChainIdx = 1 - mChainIdx;
}
} while (!mStopThread);

View File

@ -37,7 +37,7 @@ public class NativeCameraView extends CameraBridgeViewBase {
* 2. We need to start thread which will be getting frames
*/
/* First step - initialize camera connection */
if (!initializeCamera(getWidth(), getHeight()))
if (!initializeCamera(width, height))
return false;
/* now we can start update thread */
@ -125,6 +125,31 @@ public class NativeCameraView extends CameraBridgeViewBase {
}
}
private class NativeCameraFrame implements CvCameraViewFrame {
@Override
public Mat rgba() {
mCamera.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
return mRgba;
}
@Override
public Mat gray() {
mCamera.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME);
return mGray;
}
public NativeCameraFrame(VideoCapture capture) {
mCapture = capture;
mGray = new Mat();
mRgba = new Mat();
}
private VideoCapture mCapture;
private Mat mRgba;
private Mat mGray;
};
private class CameraWorker implements Runnable {
private Mat mRgba = new Mat();
@ -137,22 +162,9 @@ public class NativeCameraView extends CameraBridgeViewBase {
break;
}
switch (mPreviewFormat) {
case Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA:
{
mCamera.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
deliverAndDrawFrame(mRgba);
} break;
case Highgui.CV_CAP_ANDROID_GREY_FRAME:
mCamera.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME);
deliverAndDrawFrame(mGray);
break;
default:
Log.e(TAG, "Invalid frame format! Only RGBA and Gray Scale are supported!");
}
deliverAndDrawFrame(new NativeCameraFrame(mCamera));
} while (!mStopThread);
}
}

View File

@ -363,22 +363,20 @@ int CV_DetectorTest::validate( int detectorIdx, vector<vector<Rect> >& objects )
}
noPair += (int)count_if( map.begin(), map.end(), isZero );
totalNoPair += noPair;
if( noPair > cvRound(valRects.size()*eps.noPair)+1 )
EXPECT_LE(noPair, cvRound(valRects.size()*eps.noPair)+1)
<< "detector " << detectorNames[detectorIdx] << " has an overestimated count of rectangles without a pair on "
<< imageFilenames[imageIdx] << " image";
if (::testing::Test::HasFailure())
break;
}
if( imageIdx < (int)imageFilenames.size() )
{
char msg[500];
sprintf( msg, "detector %s has overrated count of rectangles without pair on %s-image\n",
detectorNames[detectorIdx].c_str(), imageFilenames[imageIdx].c_str() );
ts->printf( cvtest::TS::LOG, msg );
EXPECT_LE(totalNoPair, cvRound(totalValRectCount*eps./*total*/noPair)+1)
<< "detector " << detectorNames[detectorIdx] << " has an overestimated count of rectangles without a pair on the whole image set";
if (::testing::Test::HasFailure())
return cvtest::TS::FAIL_BAD_ACCURACY;
}
if ( totalNoPair > cvRound(totalValRectCount*eps./*total*/noPair)+1 )
{
ts->printf( cvtest::TS::LOG, "overrated count of rectangles without pair on all images set" );
return cvtest::TS::FAIL_BAD_ACCURACY;
}
return cvtest::TS::OK;
}
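
The refactoring above replaces the manual ts->printf bookkeeping with non-fatal gtest expectations; ::testing::Test::HasFailure() then lets the loop stop after the first offending image instead of flooding the log. A self-contained sketch of the same pattern (illustrative values, not from this test):

    #include <gtest/gtest.h>
    #include <vector>

    // EXPECT_LE records a non-fatal failure with a streamed diagnostic;
    // HasFailure() reports whether the current test has failed so far,
    // which allows an early break after the first violation.
    TEST(PatternDemo, NonFatalExpectWithEarlyBreak)
    {
        std::vector<int> counts(10, 0); // all within the limit, so this passes
        const int limit = 3;
        for (size_t i = 0; i < counts.size(); ++i)
        {
            EXPECT_LE(counts[i], limit) << "element " << i << " exceeded the limit";
            if (::testing::Test::HasFailure())
                break; // report the first violation only
        }
    }
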

View File

@ -1,7 +1,7 @@
Data Structures
=============================
.. ocv:class:: oclMat
.. ocv:class:: ocl::oclMat
OpenCV C++ 1-D or 2-D dense array class ::

View File

@ -77,8 +77,8 @@ void cv::ocl::blendLinear(const oclMat &img1, const oclMat &img2, const oclMat &
int cols = img1.cols;
int istep = img1.step1();
int wstep = weights1.step1();
size_t globalSize[] = {cols * channels, rows, 1};
size_t localSize[] = {16, 16, 1};
size_t globalSize[] = {cols * channels / 4, rows, 1};
size_t localSize[] = {256, 1, 1};
vector< pair<size_t, const void *> > args;
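
This launch geometry matches the vectorized OpenCL kernels later in this patch: each work-item now loads and stores a 4-element vector (uchar4/float4), so the global x size is the element count divided by 4 and the work-group becomes {256, 1, 1}. A hypothetical helper makes the relation explicit — note it rounds up, relying on the kernel-side `idx << 2 < cols` guard for the tail, whereas the code above truncates and so assumes the width is a multiple of 4:

    #include <cstddef>

    // Grid geometry when every work-item handles a 4-element vector.
    // Rounding up keeps the last partial vector covered; the kernel's
    // bounds check skips the out-of-range lanes.
    static void vec4GlobalSize(int cols, int rows, int channels, size_t globalSize[3])
    {
        globalSize[0] = (static_cast<size_t>(cols) * channels + 3) / 4;
        globalSize[1] = static_cast<size_t>(rows);
        globalSize[2] = 1;
    }
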

View File

@ -45,29 +45,28 @@
#include <iomanip>
#include "precomp.hpp"
#ifdef HAVE_CLAMDFFT
using namespace cv;
using namespace cv::ocl;
using namespace std;
#if !defined HAVE_OPENCL
void cv::ocl::dft(const oclMat &src, oclMat &dst, int flags)
void cv::ocl::dft(const oclMat&, oclMat&, Size, int)
{
throw_nogpu();
}
#elif !defined HAVE_CLAMDFFT
void cv::ocl::dft(const oclMat &src, oclMat &dst, int flags)
void cv::ocl::dft(const oclMat&, oclMat&, Size, int)
{
CV_Error(CV_StsNotImplemented, "OpenCL DFT is not implemented");
}
#else
#include <clAmdFft.h>
#include "clAmdFft.h"
namespace cv
{
namespace ocl
{
void fft_setup();
void fft_teardown();
enum FftType
{
C2R = 1, // complex to real
@ -76,73 +75,94 @@ namespace cv
};
struct FftPlan
{
friend void fft_setup();
friend void fft_teardown();
~FftPlan();
protected:
clAmdFftPlanHandle plHandle;
FftPlan& operator=(const FftPlan&);
public:
FftPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type);
~FftPlan();
inline clAmdFftPlanHandle getPlanHandle() { return plHandle; }
const Size dft_size;
const int src_step, dst_step;
const int flags;
const FftType type;
clAmdFftPlanHandle plHandle;
static vector<FftPlan *> planStore;
static bool started;
static clAmdFftSetupData *setupData;
};
class PlanCache
{
protected:
PlanCache();
~PlanCache();
friend class auto_ptr<PlanCache>;
static auto_ptr<PlanCache> planCache;
bool started;
vector<FftPlan *> planStore;
clAmdFftSetupData *setupData;
public:
friend void fft_setup();
friend void fft_teardown();
static PlanCache* getPlanCache()
{
if( NULL == planCache.get())
planCache.reset(new PlanCache());
return planCache.get();
}
// return a baked plan.
// if there is a matching plan, return it
// if not, bake a new one, put it into the planStore and return it.
static clAmdFftPlanHandle getPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type);
static FftPlan* getPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type);
// remove a single plan from the store
// return true if the plan is successfully removed,
// false otherwise
static bool removePlan(clAmdFftPlanHandle);
};
}
}
bool cv::ocl::FftPlan::started = false;
vector<cv::ocl::FftPlan *> cv::ocl::FftPlan::planStore = vector<cv::ocl::FftPlan *>();
clAmdFftSetupData *cv::ocl::FftPlan::setupData = 0;
auto_ptr<PlanCache> PlanCache::planCache;
void cv::ocl::fft_setup()
{
if(FftPlan::started)
PlanCache& pCache = *PlanCache::getPlanCache();
if(pCache.started)
{
return;
}
FftPlan::setupData = new clAmdFftSetupData;
openCLSafeCall(clAmdFftInitSetupData( FftPlan::setupData ));
FftPlan::started = true;
pCache.setupData = new clAmdFftSetupData;
openCLSafeCall(clAmdFftInitSetupData( pCache.setupData ));
pCache.started = true;
}
void cv::ocl::fft_teardown()
{
if(!FftPlan::started)
PlanCache& pCache = *PlanCache::getPlanCache();
if(!pCache.started)
{
return;
}
delete FftPlan::setupData;
for(int i = 0; i < FftPlan::planStore.size(); i ++)
delete pCache.setupData;
for(size_t i = 0; i < pCache.planStore.size(); i ++)
{
delete FftPlan::planStore[i];
delete pCache.planStore[i];
}
FftPlan::planStore.clear();
pCache.planStore.clear();
openCLSafeCall( clAmdFftTeardown( ) );
FftPlan::started = false;
pCache.started = false;
}
// bake a new plan
cv::ocl::FftPlan::FftPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type)
: dft_size(_dft_size), src_step(_src_step), dst_step(_dst_step), flags(_flags), type(_type), plHandle(0)
: plHandle(0), dft_size(_dft_size), src_step(_src_step), dst_step(_dst_step), flags(_flags), type(_type)
{
if(!FftPlan::started)
{
// implicitly do fft setup
fft_setup();
}
fft_setup();
bool is_1d_input = (_dft_size.height == 1);
int is_row_dft = flags & DFT_ROWS;
int is_scaled_dft = flags & DFT_SCALE;
int is_inverse = flags & DFT_INVERSE;
int is_scaled_dft = flags & DFT_SCALE;
int is_inverse = flags & DFT_INVERSE;
clAmdFftResultLocation place;
//clAmdFftResultLocation place;
clAmdFftLayout inLayout;
clAmdFftLayout outLayout;
clAmdFftDim dim = is_1d_input || is_row_dft ? CLFFT_1D : CLFFT_2D;
@ -150,7 +170,7 @@ cv::ocl::FftPlan::FftPlan(Size _dft_size, int _src_step, int _dst_step, int _fla
size_t batchSize = is_row_dft ? dft_size.height : 1;
size_t clLengthsIn[ 3 ] = {1, 1, 1};
size_t clStridesIn[ 3 ] = {1, 1, 1};
size_t clLengthsOut[ 3 ] = {1, 1, 1};
//size_t clLengthsOut[ 3 ] = {1, 1, 1};
size_t clStridesOut[ 3 ] = {1, 1, 1};
clLengthsIn[0] = dft_size.width;
clLengthsIn[1] = is_row_dft ? 1 : dft_size.height;
@ -166,14 +186,12 @@ cv::ocl::FftPlan::FftPlan(Size _dft_size, int _src_step, int _dst_step, int _fla
clStridesOut[1] = clStridesIn[1];
break;
case R2C:
CV_Assert(!is_row_dft); // this is not supported yet
inLayout = CLFFT_REAL;
outLayout = CLFFT_HERMITIAN_INTERLEAVED;
clStridesIn[1] = src_step / sizeof(float);
clStridesOut[1] = dst_step / sizeof(std::complex<float>);
break;
case C2R:
CV_Assert(!is_row_dft); // this is not supported yet
inLayout = CLFFT_HERMITIAN_INTERLEAVED;
outLayout = CLFFT_REAL;
clStridesIn[1] = src_step / sizeof(std::complex<float>);
@ -197,27 +215,39 @@ cv::ocl::FftPlan::FftPlan(Size _dft_size, int _src_step, int _dst_step, int _fla
openCLSafeCall( clAmdFftSetPlanInStride ( plHandle, dim, clStridesIn ) );
openCLSafeCall( clAmdFftSetPlanOutStride ( plHandle, dim, clStridesOut ) );
openCLSafeCall( clAmdFftSetPlanDistance ( plHandle, clStridesIn[ dim ], clStridesIn[ dim ]) );
openCLSafeCall( clAmdFftSetPlanDistance ( plHandle, clStridesIn[ dim ], clStridesOut[ dim ]) );
float scale_ = is_scaled_dft ? 1.f / _dft_size.area() : 1.f;
openCLSafeCall( clAmdFftSetPlanScale ( plHandle, is_inverse ? CLFFT_BACKWARD : CLFFT_FORWARD, scale_ ) );
//ready to bake
openCLSafeCall( clAmdFftBakePlan( plHandle, 1, &(Context::getContext()->impl->clCmdQueue), NULL, NULL ) );
}
cv::ocl::FftPlan::~FftPlan()
{
for(int i = 0; i < planStore.size(); i ++)
{
if(planStore[i]->plHandle == plHandle)
{
planStore.erase(planStore.begin() + i);
}
}
openCLSafeCall( clAmdFftDestroyPlan( &plHandle ) );
}
clAmdFftPlanHandle cv::ocl::FftPlan::getPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type)
cv::ocl::PlanCache::PlanCache()
: started(false),
planStore(vector<cv::ocl::FftPlan *>()),
setupData(NULL)
{
}
cv::ocl::PlanCache::~PlanCache()
{
fft_teardown();
}
FftPlan* cv::ocl::PlanCache::getPlan(Size _dft_size, int _src_step, int _dst_step, int _flags, FftType _type)
{
PlanCache& pCache = *PlanCache::getPlanCache();
vector<FftPlan *>& pStore = pCache.planStore;
// search the store for an already-baked matching plan
for(int i = 0; i < planStore.size(); i ++)
for(size_t i = 0; i < pStore.size(); i ++)
{
FftPlan *plan = planStore[i];
FftPlan *plan = pStore[i];
if(
plan->dft_size.width == _dft_size.width &&
plan->dft_size.height == _dft_size.height &&
@ -225,15 +255,31 @@ clAmdFftPlanHandle cv::ocl::FftPlan::getPlan(Size _dft_size, int _src_step, int
plan->src_step == _src_step &&
plan->dst_step == _dst_step &&
plan->type == _type
)
)
{
return plan->plHandle;
return plan;
}
}
// no baked plan is found
FftPlan *newPlan = new FftPlan(_dft_size, _src_step, _dst_step, _flags, _type);
planStore.push_back(newPlan);
return newPlan->plHandle;
pStore.push_back(newPlan);
return newPlan;
}
bool cv::ocl::PlanCache::removePlan(clAmdFftPlanHandle plHandle)
{
PlanCache& pCache = *PlanCache::getPlanCache();
vector<FftPlan *>& pStore = pCache.planStore;
for(size_t i = 0; i < pStore.size(); i ++)
{
if(pStore[i]->getPlanHandle() == plHandle)
{
FftPlan *plan = pStore[i];
pStore.erase(pStore.begin() + i);
delete plan; // delete the saved pointer, not the element shifted into slot i
return true;
}
}
return false;
}
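
The point of the cache is that repeated transforms with identical geometry reuse one baked plan. A file-local usage sketch (PlanCache, FftPlan and the FftType enum as defined above; assumes an initialized OpenCL context, since baking calls clAmdFftBakePlan):

    #include <complex>

    static void checkPlanReuse()
    {
        cv::Size dftSize(512, 512);
        const int srcStep = 512 * (int)sizeof(std::complex<float>);
        const int dstStep = srcStep;
        cv::ocl::FftPlan* p1 = cv::ocl::PlanCache::getPlan(dftSize, srcStep, dstStep, 0, cv::ocl::C2C);
        cv::ocl::FftPlan* p2 = cv::ocl::PlanCache::getPlan(dftSize, srcStep, dstStep, 0, cv::ocl::C2C);
        CV_Assert(p1 == p2); // the second lookup hits the store instead of baking again
    }
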
void cv::ocl::dft(const oclMat &src, oclMat &dst, Size dft_size, int flags)
@ -245,19 +291,20 @@ void cv::ocl::dft(const oclMat &src, oclMat &dst, Size dft_size, int flags)
// check if the given dft size is of optimal dft size
CV_Assert(dft_size.area() == getOptimalDFTSize(dft_size.area()));
// the two flags are not compatible
CV_Assert( !((flags & DFT_SCALE) && (flags & DFT_ROWS)) );
// similar assertions with cuda module
CV_Assert(src.type() == CV_32F || src.type() == CV_32FC2);
// we don't support DFT_SCALE flag
CV_Assert(!(DFT_SCALE & flags));
bool is_1d_input = (src.rows == 1);
int is_row_dft = flags & DFT_ROWS;
int is_scaled_dft = flags & DFT_SCALE;
//bool is_1d_input = (src.rows == 1);
//int is_row_dft = flags & DFT_ROWS;
//int is_scaled_dft = flags & DFT_SCALE;
int is_inverse = flags & DFT_INVERSE;
bool is_complex_input = src.channels() == 2;
bool is_complex_output = !(flags & DFT_REAL_OUTPUT);
// We don't support real-to-real transform
CV_Assert(is_complex_input || is_complex_output);
FftType type = (FftType)(is_complex_input << 0 | is_complex_output << 1);
@ -268,12 +315,10 @@ void cv::ocl::dft(const oclMat &src, oclMat &dst, Size dft_size, int flags)
dst.create(src.rows, src.cols, CV_32FC2);
break;
case R2C:
CV_Assert(!is_row_dft); // this is not supported yet
dst.create(src.rows, src.cols / 2 + 1, CV_32FC2);
break;
case C2R:
CV_Assert(dft_size.width / 2 + 1 == src.cols && dft_size.height == src.rows);
CV_Assert(!is_row_dft); // this is not supported yet
dst.create(src.rows, dft_size.width, CV_32FC1);
break;
default:
@ -282,13 +327,14 @@ void cv::ocl::dft(const oclMat &src, oclMat &dst, Size dft_size, int flags)
throw exception();
break;
}
clAmdFftPlanHandle plHandle = FftPlan::getPlan(dft_size, src.step, dst.step, flags, type);
clAmdFftPlanHandle plHandle = PlanCache::getPlan(dft_size, src.step, dst.step, flags, type)->getPlanHandle();
//get the buffersize
size_t buffersize = 0;
openCLSafeCall( clAmdFftGetTmpBufSize(plHandle, &buffersize ) );
//allocate the intermediate buffer
// TODO: bind this to the current FftPlan
cl_mem clMedBuffer = NULL;
if (buffersize)
{
@ -297,17 +343,17 @@ void cv::ocl::dft(const oclMat &src, oclMat &dst, Size dft_size, int flags)
openCLSafeCall( medstatus );
}
openCLSafeCall( clAmdFftEnqueueTransform( plHandle,
is_inverse ? CLFFT_BACKWARD : CLFFT_FORWARD,
1,
&src.clCxt->impl->clCmdQueue,
0, NULL, NULL,
(cl_mem *)&src.data, (cl_mem *)&dst.data, clMedBuffer ) );
is_inverse ? CLFFT_BACKWARD : CLFFT_FORWARD,
1,
&src.clCxt->impl->clCmdQueue,
0, NULL, NULL,
(cl_mem *)&src.data, (cl_mem *)&dst.data, clMedBuffer ) );
openCLSafeCall( clFinish(src.clCxt->impl->clCmdQueue) );
if(clMedBuffer)
{
openCLFree(clMedBuffer);
}
//fft_teardown();
}
#endif
#endif //HAVE_CLAMDFFT

View File

@ -19,6 +19,7 @@
// Jia Haipeng, jiahaipeng95@gmail.com
// Zero Lin, Zero.Lin@amd.com
// Zhang Ying, zhangying913@gmail.com
// Yao Wang, bitwangyaoyao@gmail.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
@ -309,21 +310,22 @@ public:
namespace
{
typedef void (*GPUMorfFilter_t)(const oclMat & , oclMat & , oclMat & , Size &, const Point);
typedef void (*GPUMorfFilter_t)(const oclMat & , oclMat & , oclMat & , Size &, const Point, bool rectKernel, bool usrROI);
class MorphFilter_GPU : public BaseFilter_GPU
{
public:
MorphFilter_GPU(const Size &ksize_, const Point &anchor_, const oclMat &kernel_, GPUMorfFilter_t func_) :
BaseFilter_GPU(ksize_, anchor_, BORDER_CONSTANT), kernel(kernel_), func(func_) {}
BaseFilter_GPU(ksize_, anchor_, BORDER_CONSTANT), kernel(kernel_), func(func_), rectKernel(false) {}
virtual void operator()(const oclMat &src, oclMat &dst)
{
func(src, dst, kernel, ksize, anchor) ;
func(src, dst, kernel, ksize, anchor, rectKernel, false) ;
}
oclMat kernel;
GPUMorfFilter_t func;
bool rectKernel;
};
}
@ -332,7 +334,8 @@ public:
**Extend this if necessary later.
**Note that the kernel needs to be further refined.
*/
static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &ksize, const Point anchor)
static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel,
Size &ksize, const Point anchor, bool rectKernel, bool useROI)
{
//Normalize the result by default
//float alpha = ksize.height * ksize.width;
@ -388,7 +391,11 @@ static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &k
}
char compile_option[128];
sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D ERODE %s", anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], s);
sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D ERODE %s %s %s",
anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1],
rectKernel?"-D RECTKERNEL":"",
useROI?"-D USEROI":"",
s);
vector< pair<size_t, const void *> > args;
args.push_back(make_pair(sizeof(cl_mem), (void *)&src.data));
args.push_back(make_pair(sizeof(cl_mem), (void *)&dst.data));
@ -407,7 +414,8 @@ static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &k
//! data type supported: CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4
static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &ksize, const Point anchor)
static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel,
Size &ksize, const Point anchor, bool rectKernel, bool useROI)
{
//Normalize the result by default
//float alpha = ksize.height * ksize.width;
@ -426,12 +434,13 @@ static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &
Context *clCxt = src.clCxt;
string kernelName;
size_t localThreads[3] = {16, 16, 1};
size_t globalThreads[3] = {(src.cols + localThreads[0]) / localThreads[0] *localThreads[0], (src.rows + localThreads[1]) / localThreads[1] *localThreads[1], 1};
size_t globalThreads[3] = {(src.cols + localThreads[0] - 1) / localThreads[0] *localThreads[0],
(src.rows + localThreads[1] - 1) / localThreads[1] *localThreads[1], 1};
if (src.type() == CV_8UC1)
{
kernelName = "morph_C1_D0";
globalThreads[0] = ((src.cols + 3) / 4 + localThreads[0]) / localThreads[0] * localThreads[0];
globalThreads[0] = ((src.cols + 3) / 4 + localThreads[0] - 1) / localThreads[0] * localThreads[0];
CV_Assert(localThreads[0]*localThreads[1] * 8 >= (localThreads[0] * 4 + ksize.width - 1) * (localThreads[1] + ksize.height - 1));
}
else
@ -463,7 +472,11 @@ static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &
}
char compile_option[128];
sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D DILATE %s", anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], s);
sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D DILATE %s %s %s",
anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1],
rectKernel?"-D RECTKERNEL":"",
useROI?"-D USEROI":"",
s);
vector< pair<size_t, const void *> > args;
args.push_back(make_pair(sizeof(cl_mem), (void *)&src.data));
args.push_back(make_pair(sizeof(cl_mem), (void *)&dst.data));
@ -495,7 +508,14 @@ Ptr<BaseFilter_GPU> cv::ocl::getMorphologyFilter_GPU(int op, int type, const Mat
normalizeKernel(kernel, gpu_krnl);
normalizeAnchor(anchor, ksize);
return Ptr<BaseFilter_GPU>(new MorphFilter_GPU(ksize, anchor, gpu_krnl, GPUMorfFilter_callers[op][CV_MAT_CN(type)]));
bool noZero = true;
for(int i = 0; i < kernel.rows * kernel.cols; ++i)
if(kernel.data[i] != 1)
noZero = false;
MorphFilter_GPU* mfgpu=new MorphFilter_GPU(ksize, anchor, gpu_krnl, GPUMorfFilter_callers[op][CV_MAT_CN(type)]);
if(noZero)
mfgpu->rectKernel = true;
return Ptr<BaseFilter_GPU>(mfgpu);
}
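
The noZero flag above is really an "all ones" test: a normalized kernel with every tap equal to 1 is a full rectangle, which lets the OpenCL build define RECTKERNEL and drop the per-tap mask lookup. An equivalent one-liner over a continuous CV_8UC1 kernel (hypothetical helper, not in the patch):

    // True when every element of the normalized CV_8UC1 kernel is 1,
    // i.e. the structuring element is a full rectangle.
    static bool isRectKernel(const cv::Mat& kernel)
    {
        CV_Assert(kernel.type() == CV_8UC1 && kernel.isContinuous());
        return cv::countNonZero(kernel == 1) == static_cast<int>(kernel.total());
    }
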
namespace

View File

@ -538,10 +538,12 @@ namespace cv
filename = clCxt->impl->Binpath + kernelName + "_" + clCxt->impl->devName + ".clb";
}
FILE *fp;
fp = fopen(filename.c_str(), "rb");
if(fp == NULL || clCxt->impl->Binpath.size() == 0) //we should genetate a binary file for the first time.
FILE *fp = fopen(filename.c_str(), "rb");
if(fp == NULL || clCxt->impl->Binpath.size() == 0) //we should generate a binary file for the first time.
{
if(fp != NULL)
fclose(fp);
program = clCreateProgramWithSource(
clCxt->impl->clContext, 1, source, NULL, &status);
openCLVerifyCall(status);

View File

@ -43,11 +43,11 @@
//
//M*/
__kernel void BlendLinear_C1_D0(
__global uchar *dst,
__global uchar *img1,
__global uchar *img2,
__global float *weight1,
__global float *weight2,
__global uchar4 *dst,
__global uchar4 *img1,
__global uchar4 *img2,
__global float4 *weight1,
__global float4 *weight2,
int rows,
int cols,
int istep,
@ -56,47 +56,20 @@ __kernel void BlendLinear_C1_D0(
{
int idx = get_global_id(0);
int idy = get_global_id(1);
if (idx < cols && idy < rows)
if (idx << 2 < cols && idy < rows)
{
int pos = mad24(idy,istep,idx);
int wpos = mad24(idy,wstep,idx);
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
int pos = mad24(idy,istep >> 2,idx);
int wpos = mad24(idy,wstep >> 2,idx);
float4 w1 = weight1[wpos], w2 = weight2[wpos];
dst[pos] = convert_uchar4((convert_float4(img1[pos]) * w1 +
convert_float4(img2[pos]) * w2) / (w1 + w2 + 1e-5f));
}
}
__kernel void BlendLinear_C4_D0(
__global uchar *dst,
__global uchar *img1,
__global uchar *img2,
__global float *weight1,
__global float *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
int x = idx / 4;
int y = idy;
if (x < cols && y < rows)
{
int pos = mad24(idy,istep,idx);
int wpos = mad24(idy,wstep,x);
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C1_D5(
__global float *dst,
__global float *img1,
__global float *img2,
__global uchar4 *dst,
__global uchar4 *img1,
__global uchar4 *img2,
__global float *weight1,
__global float *weight2,
int rows,
@ -109,18 +82,43 @@ __kernel void BlendLinear_C1_D5(
int idy = get_global_id(1);
if (idx < cols && idy < rows)
{
int pos = mad24(idy,istep,idx);
int wpos = mad24(idy,wstep,idx);
int pos = mad24(idy,istep >> 2,idx);
int wpos = mad24(idy,wstep, idx);
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = convert_uchar4((convert_float4(img1[pos]) * w1 +
convert_float4(img2[pos]) * w2) / (w1 + w2 + 1e-5f));
}
}
__kernel void BlendLinear_C1_D5(
__global float4 *dst,
__global float4 *img1,
__global float4 *img2,
__global float4 *weight1,
__global float4 *weight2,
int rows,
int cols,
int istep,
int wstep
)
{
int idx = get_global_id(0);
int idy = get_global_id(1);
if (idx << 2 < cols && idy < rows)
{
int pos = mad24(idy,istep >> 2,idx);
int wpos = mad24(idy,wstep >> 2,idx);
float4 w1 = weight1[wpos], w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);
}
}
__kernel void BlendLinear_C4_D5(
__global float *dst,
__global float *img1,
__global float *img2,
__global float4 *dst,
__global float4 *img1,
__global float4 *img2,
__global float *weight1,
__global float *weight2,
int rows,
@ -131,12 +129,10 @@ __kernel void BlendLinear_C4_D5(
{
int idx = get_global_id(0);
int idy = get_global_id(1);
int x = idx / 4;
int y = idy;
if (x < cols && y < rows)
if (idx < cols && idy < rows)
{
int pos = mad24(idy,istep,idx);
int wpos = mad24(idy,wstep,x);
int pos = mad24(idy,istep >> 2,idx);
int wpos = mad24(idy,wstep, idx);
float w1 = weight1[wpos];
float w2 = weight2[wpos];
dst[pos] = (img1[pos] * w1 + img2[pos] * w2) / (w1 + w2 + 1e-5f);

View File

@ -8,6 +8,7 @@
// @Authors
// Niko Li, newlife20080214@gmail.com
// Zero Lin, zero.lin@amd.com
// Yao Wang, bitwangyaoyao@gmail.com
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
@ -100,14 +101,26 @@ __kernel void morph_C1_D0(__global const uchar * restrict src,
LDS_DAT[point2] = temp1;
barrier(CLK_LOCAL_MEM_FENCE);
uchar4 res = (uchar4)VAL;
for(int i=0;i<2*RADIUSY+1;i++)
for(int j=0;j<2*RADIUSX+1;j++)
for(int i=0; i<2*RADIUSY+1; i++)
for(int j=0; j<2*RADIUSX+1; j++)
{
res =mat_kernel[i*(2*RADIUSX+1)+j]? MORPH_OP(res,vload4(0,(__local uchar*)&LDS_DAT[mad24((l_y+i),width,l_x)]+offset+j)):res;
res =
#ifndef RECTKERNEL
mat_kernel[i*(2*RADIUSX+1)+j] ?
#endif
MORPH_OP(res,vload4(0,(__local uchar*)&LDS_DAT[mad24((l_y+i),width,l_x)]+offset+j))
#ifndef RECTKERNEL
:res
#endif
;
}
int gidx = get_global_id(0)<<2;
int gidy = get_global_id(1);
int out_addr = mad24(gidy,dst_step_in_pixel,gidx+dst_offset_in_pixel);
#ifdef USEROI
if(gidx+3<cols && gidy<rows && ((dst_offset_in_pixel&3)==0)) // & binds looser than ==; parenthesize the alignment test
{
*(__global uchar4*)&dst[out_addr] = res;
@ -137,16 +150,19 @@ __kernel void morph_C1_D0(__global const uchar * restrict src,
dst[out_addr] = res.x;
}
}
#else
*(__global uchar4*)&dst[out_addr] = res;
#endif
}
#else
__kernel void morph(__global const GENTYPE * restrict src,
__global GENTYPE *dst,
int src_offset_x, int src_offset_y,
int cols, int rows,
int src_step_in_pixel, int dst_step_in_pixel,
__constant uchar * mat_kernel,
int src_whole_cols, int src_whole_rows,
int dst_offset_in_pixel)
__global GENTYPE *dst,
int src_offset_x, int src_offset_y,
int cols, int rows,
int src_step_in_pixel, int dst_step_in_pixel,
__constant uchar * mat_kernel,
int src_whole_cols, int src_whole_rows,
int dst_offset_in_pixel)
{
int l_x = get_local_id(0);
int l_y = get_local_id(1);
@ -154,7 +170,7 @@ __kernel void morph(__global const GENTYPE * restrict src,
int y = get_group_id(1)*LSIZE1;
int start_x = x+src_offset_x-RADIUSX;
int end_x = x + src_offset_x+LSIZE0+RADIUSX;
int width = end_x -start_x+1;
int width = end_x -(x+src_offset_x-RADIUSX)+1;
int start_y = y+src_offset_y-RADIUSY;
int point1 = mad24(l_y,LSIZE0,l_x);
int point2 = point1 + LSIZE0*LSIZE1;
@ -188,10 +204,18 @@ __kernel void morph(__global const GENTYPE * restrict src,
LDS_DAT[point2] = temp1;
barrier(CLK_LOCAL_MEM_FENCE);
GENTYPE res = (GENTYPE)VAL;
for(int i=0;i<2*RADIUSY+1;i++)
for(int j=0;j<2*RADIUSX+1;j++)
for(int i=0; i<2*RADIUSY+1; i++)
for(int j=0; j<2*RADIUSX+1; j++)
{
res =mat_kernel[i*(2*RADIUSX+1)+j]? MORPH_OP(res,LDS_DAT[mad24(l_y+i,width,l_x+j)]):res;
res =
#ifndef RECTKERNEL
mat_kernel[i*(2*RADIUSX+1)+j] ?
#endif
MORPH_OP(res,LDS_DAT[mad24(l_y+i,width,l_x+j)])
#ifndef RECTKERNEL
:res
#endif
;
}
int gidx = get_global_id(0);
int gidy = get_global_id(1);

View File

@ -703,7 +703,7 @@ static cl_mem bindTexture(const oclMat &mat, int depth, int channels)
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = mat.step / mat.elemSize();
desc.image_height = mat.rows;
desc.image_depth = NULL;
desc.image_depth = 0;
desc.image_array_size = 1;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;

View File

@ -48,50 +48,59 @@ using namespace std;
#ifdef HAVE_CLAMDFFT
////////////////////////////////////////////////////////////////////////////
// Dft
PARAM_TEST_CASE(Dft, cv::Size, bool)
PARAM_TEST_CASE(Dft, cv::Size, int)
{
cv::Size dft_size;
bool dft_rows;
//std::vector<cv::ocl::Info> oclinfo;
int dft_flags;
virtual void SetUp()
{
//int devnums = getDevice(oclinfo);
// CV_Assert(devnums > 0);
dft_size = GET_PARAM(0);
dft_rows = GET_PARAM(1);
dft_size = GET_PARAM(0);
dft_flags = GET_PARAM(1);
}
};
TEST_P(Dft, C2C)
{
cv::Mat a = randomMat(dft_size, CV_32FC2, 0.0, 10.0);
cv::Mat a = randomMat(dft_size, CV_32FC2, 0.0, 100.0);
cv::Mat b_gold;
int flags = 0;
flags |= dft_rows ? cv::DFT_ROWS : 0;
cv::ocl::oclMat d_b;
cv::dft(a, b_gold, flags);
cv::ocl::dft(cv::ocl::oclMat(a), d_b, a.size(), flags);
cv::dft(a, b_gold, dft_flags);
cv::ocl::dft(cv::ocl::oclMat(a), d_b, a.size(), dft_flags);
EXPECT_MAT_NEAR(b_gold, cv::Mat(d_b), a.size().area() * 1e-4, "");
}
TEST_P(Dft, R2C)
{
cv::Mat a = randomMat(dft_size, CV_32FC1, 0.0, 100.0);
cv::Mat b_gold, b_gold_roi;
cv::ocl::oclMat d_b, d_c;
cv::ocl::dft(cv::ocl::oclMat(a), d_b, a.size(), dft_flags);
cv::dft(a, b_gold, cv::DFT_COMPLEX_OUTPUT | dft_flags);
b_gold_roi = b_gold(cv::Rect(0, 0, d_b.cols, d_b.rows));
EXPECT_MAT_NEAR(b_gold_roi, cv::Mat(d_b), a.size().area() * 1e-4, "");
cv::Mat c_gold;
cv::dft(b_gold, c_gold, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
EXPECT_MAT_NEAR(b_gold_roi, cv::Mat(d_b), a.size().area() * 1e-4, "");
}
TEST_P(Dft, R2CthenC2R)
{
cv::Mat a = randomMat(dft_size, CV_32FC1, 0.0, 10.0);
int flags = 0;
//flags |= dft_rows ? cv::DFT_ROWS : 0; // not supported yet
cv::ocl::oclMat d_b, d_c;
cv::ocl::dft(cv::ocl::oclMat(a), d_b, a.size(), flags);
cv::ocl::dft(d_b, d_c, a.size(), flags + cv::DFT_INVERSE + cv::DFT_REAL_OUTPUT);
cv::ocl::dft(cv::ocl::oclMat(a), d_b, a.size(), 0);
cv::ocl::dft(d_b, d_c, a.size(), cv::DFT_SCALE | cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
EXPECT_MAT_NEAR(a, d_c, a.size().area() * 1e-4, "");
}
INSTANTIATE_TEST_CASE_P(ocl_DFT, Dft, testing::Combine(
testing::Values(cv::Size(5, 4), cv::Size(20, 20)),
testing::Values(false, true)));
INSTANTIATE_TEST_CASE_P(OCL_ImgProc, Dft, testing::Combine(
testing::Values(cv::Size(2, 3), cv::Size(5, 4), cv::Size(25, 20), cv::Size(512, 1), cv::Size(1024, 768)),
testing::Values(0, (int)cv::DFT_ROWS, (int)cv::DFT_SCALE) ));
#endif // HAVE_CLAMDFFT

View File

@ -365,10 +365,10 @@ TEST_P(Laplacian, Accuracy)
/////////////////////////////////////////////////////////////////////////////////////////////////
// erode & dilate
PARAM_TEST_CASE(ErodeDilateBase, MatType, bool)
PARAM_TEST_CASE(ErodeDilateBase, MatType, int)
{
int type;
//int iterations;
int iterations;
//erode or dilate kernel
cv::Mat kernel;
@ -399,7 +399,7 @@ PARAM_TEST_CASE(ErodeDilateBase, MatType, bool)
virtual void SetUp()
{
type = GET_PARAM(0);
// iterations = GET_PARAM(1);
iterations = GET_PARAM(1);
cv::RNG &rng = TS::ptr()->get_rng();
cv::Size size(MWIDTH, MHEIGHT);
@ -409,10 +409,6 @@ PARAM_TEST_CASE(ErodeDilateBase, MatType, bool)
// rng.fill(kernel, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
kernel = randomMat(rng, Size(3, 3), CV_8UC1, 0, 3, false);
//int devnums = getDevice(oclinfo, OPENCV_DEFAULT_OPENCL_DEVICE);
//CV_Assert(devnums > 0);
////if you want to use undefault device, set it here
////setDevice(oclinfo[0]);
}
void random_roi()
@ -456,12 +452,9 @@ TEST_P(Erode, Mat)
for(int j = 0; j < LOOP_TIMES; j++)
{
random_roi();
//int iterations =3;
//cv::erode(mat1_roi, dst_roi, kernel, Point(-1, -1), iterations);
//cv::ocl::erode(gmat1, gdst, kernel, Point(-1, -1), iterations);
cv::erode(mat1_roi, dst_roi, kernel);
cv::ocl::erode(gmat1, gdst, kernel);
cv::erode(mat1_roi, dst_roi, kernel, Point(-1, -1), iterations);
cv::ocl::erode(gmat1, gdst, kernel, Point(-1, -1), iterations);
cv::Mat cpu_dst;
gdst_whole.download(cpu_dst);
@ -486,12 +479,8 @@ TEST_P(Dilate, Mat)
for(int j = 0; j < LOOP_TIMES; j++)
{
random_roi();
//int iterations =3;
// cv::erode(mat1_roi, dst_roi, kernel, Point(-1, -1), iterations);
// cv::ocl::erode(gmat1, gdst, kernel, Point(-1, -1), iterations);
cv::dilate(mat1_roi, dst_roi, kernel);
cv::ocl::dilate(gmat1, gdst, kernel);
cv::dilate(mat1_roi, dst_roi, kernel, Point(-1, -1), iterations);
cv::ocl::dilate(gmat1, gdst, kernel, Point(-1, -1), iterations);
cv::Mat cpu_dst;
gdst_whole.download(cpu_dst);
@ -831,13 +820,13 @@ INSTANTIATE_TEST_CASE_P(Filters, Laplacian, Combine(
Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),
Values(1, 3)));
//INSTANTIATE_TEST_CASE_P(Filter, ErodeDilate, Combine(Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(1, 2, 3)));
INSTANTIATE_TEST_CASE_P(Filter, Erode, Combine(Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(1)));
INSTANTIATE_TEST_CASE_P(Filter, Erode, Combine(Values(CV_8UC1, CV_8UC1), Values(false)));
//INSTANTIATE_TEST_CASE_P(Filter, Erode, Combine(Values(CV_8UC1, CV_8UC1), Values(false)));
//INSTANTIATE_TEST_CASE_P(Filter, ErodeDilate, Combine(Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(1, 2, 3)));
INSTANTIATE_TEST_CASE_P(Filter, Dilate, Combine(Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(1)));
INSTANTIATE_TEST_CASE_P(Filter, Dilate, Combine(Values(CV_8UC1, CV_8UC1), Values(false)));
//INSTANTIATE_TEST_CASE_P(Filter, Dilate, Combine(Values(CV_8UC1, CV_8UC1), Values(false)));
INSTANTIATE_TEST_CASE_P(Filter, Sobel, Combine(Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),

View File

@ -35,14 +35,14 @@ class NewOpenCVTests(unittest.TestCase):
# Tests to run first; check the handful of basic operations that the later tests rely on
class Hackathon244Tests(NewOpenCVTests):
def test_int_array(self):
a = np.array([-1, 2, -3, 4, -5])
absa0 = np.abs(a)
self.assert_(cv2.norm(a, cv2.NORM_L1) == 15)
absa1 = cv2.absdiff(a, 0)
self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
def test_imencode(self):
a = np.zeros((480, 640), dtype=np.uint8)
flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
@ -50,7 +50,7 @@ class Hackathon244Tests(NewOpenCVTests):
self.assertEqual(ajpg.dtype, np.uint8)
self.assertGreater(ajpg.shape[0], 1)
self.assertEqual(ajpg.shape[1], 1)
def test_projectPoints(self):
objpt = np.float64([[1,2,3]])
imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
@ -59,7 +59,7 @@ class Hackathon244Tests(NewOpenCVTests):
self.assertEqual(imgpt1.shape, imgpt0.shape)
self.assertEqual(jac0.shape, jac1.shape)
self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
def test_estimateAffine3D(self):
pattern_size = (11, 8)
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
@ -71,7 +71,7 @@ class Hackathon244Tests(NewOpenCVTests):
out[2,2]=1
self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
fd = cv2.FastFeatureDetector(30, True)
img = self.get_sample("samples/cpp/right02.jpg", 0)
@ -82,8 +82,39 @@ class Hackathon244Tests(NewOpenCVTests):
for kpt in keypoints:
self.assertNotEqual(kpt.response, 0)
def check_close_angles(self, a, b, angle_delta):
self.assert_(abs(a - b) <= angle_delta or
abs(360 - abs(a - b)) <= angle_delta)
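
The second clause handles wraparound: for a = 359 and b = 2, |a - b| = 357 but 360 - 357 = 3, so the pair still counts as close for any angle_delta >= 3.
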
def check_close_pairs(self, a, b, delta):
self.assertLessEqual(abs(a[0] - b[0]), delta)
self.assertLessEqual(abs(a[1] - b[1]), delta)
def check_close_boxes(self, a, b, delta, angle_delta):
self.check_close_pairs(a[0], b[0], delta)
self.check_close_pairs(a[1], b[1], delta)
self.check_close_angles(a[2], b[2], angle_delta)
def test_geometry(self):
npt = 100
np.random.seed(244)
a = np.random.randn(npt,2).astype('float32')*50 + 150
img = np.zeros((300, 300, 3), dtype='uint8')
be = cv2.fitEllipse(a)
br = cv2.minAreaRect(a)
mc, mr = cv2.minEnclosingCircle(a)
be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977
self.check_close_boxes(be, be0, 5, 15)
self.check_close_boxes(br, br0, 5, 15)
self.check_close_pairs(mc, mc0, 5)
self.assertLessEqual(abs(mr - mr0), 5)
if __name__ == '__main__':
print "testing", cv.__version__
print "testing", cv2.__version__
random.seed(0)
unittest.main()

View File

@ -15,115 +15,145 @@ if __name__ == "__main__":
help = "output results in text format (can be 'txt', 'html' or 'auto' - default)",
metavar = 'FMT', default = 'auto')
parser.add_option("--failed-only", action = "store_true", dest = "failedOnly",
help = "print only failed tests", default = False)
(options, args) = parser.parse_args()
if 1 != len(args):
parser.print_help()
exit(0)
options.generateHtml = detectHtmlOutputType(options.format)
input_file = args[0]
file = os.path.abspath(input_file)
if not os.path.isfile(file):
sys.stderr.write("IOError reading \"" + file + "\"" + os.linesep)
parser.print_help()
exit(0)
test_sets = []
try:
tests = testlog_parser.parseLogFile(file)
if tests:
test_sets.append((os.path.basename(file), tests))
except IOError as err:
sys.stderr.write("IOError reading \"" + file + "\" - " + str(err) + os.linesep)
except xml.parsers.expat.ExpatError as err:
sys.stderr.write("ExpatError reading \"" + file + "\" - " + str(err) + os.linesep)
if not test_sets:
sys.stderr.write("Error: no test data found" + os.linesep)
exit(0)
# find matches
setsCount = len(test_sets)
test_cases = {}
name_extractor = lambda name: str(name)
for i in range(setsCount):
for case in test_sets[i][1]:
name = name_extractor(case)
if name not in test_cases:
test_cases[name] = [None] * setsCount
test_cases[name][i] = case
testsuits = [] # testsuit name, time, num, flag for failed tests
files = []
testsuits = [] # testsuit module, name, time, num, flag for failed tests
overall_time = 0
prevGroupName = None
suit_time = 0
suit_num = 0
fails_num = 0
for name in sorted(test_cases.iterkeys(), key=alphanum_keyselector):
cases = test_cases[name]
groupName = next(c for c in cases if c).shortName()
if groupName != prevGroupName:
if prevGroupName != None:
suit_time = suit_time/60 #from seconds to minutes
testsuits.append({'name': prevGroupName, 'time': suit_time, \
'num': suit_num, 'failed': fails_num})
overall_time += suit_time
suit_time = 0
suit_num = 0
fails_num = 0
prevGroupName = groupName
seen = set()
for arg in args:
if ("*" in arg) or ("?" in arg):
flist = [os.path.abspath(f) for f in glob.glob(arg)]
flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
files.extend([ x for x in flist if x not in seen and not seen.add(x)])
else:
fname = os.path.abspath(arg)
if fname not in seen and not seen.add(fname):
files.append(fname)
for i in range(setsCount):
case = cases[i]
if not case is None:
suit_num += 1
if case.get('status') == 'run':
suit_time += case.get('time')
if case.get('status') == 'failed':
fails_num += 1
file = os.path.abspath(fname)
if not os.path.isfile(file):
sys.stderr.write("IOError reading \"" + file + "\"" + os.linesep)
parser.print_help()
exit(0)
testsuits.append({'name': prevGroupName, 'time': suit_time, \
'num': suit_num, 'failed': fails_num})
fname = os.path.basename(fname)
find_module_name = re.search(r'([^_]*)', fname)
module_name = find_module_name.group(0)
test_sets = []
try:
tests = testlog_parser.parseLogFile(file)
if tests:
test_sets.append((os.path.basename(file), tests))
except IOError as err:
sys.stderr.write("IOError reading \"" + file + "\" - " + str(err) + os.linesep)
except xml.parsers.expat.ExpatError as err:
sys.stderr.write("ExpatError reading \"" + file + "\" - " + str(err) + os.linesep)
if not test_sets:
continue
# find matches
setsCount = len(test_sets)
test_cases = {}
name_extractor = lambda name: str(name)
for i in range(setsCount):
for case in test_sets[i][1]:
name = name_extractor(case)
if name not in test_cases:
test_cases[name] = [None] * setsCount
test_cases[name][i] = case
prevGroupName = None
suit_time = 0
suit_num = 0
fails_num = 0
for name in sorted(test_cases.iterkeys(), key=alphanum_keyselector):
cases = test_cases[name]
groupName = next(c for c in cases if c).shortName()
if groupName != prevGroupName:
if prevGroupName != None:
suit_time = suit_time/60 #from seconds to minutes
testsuits.append({'module': module_name, 'name': prevGroupName, \
'time': suit_time, 'num': suit_num, 'failed': fails_num})
overall_time += suit_time
suit_time = 0
suit_num = 0
fails_num = 0
prevGroupName = groupName
for i in range(setsCount):
case = cases[i]
if not case is None:
suit_num += 1
if case.get('status') == 'run':
suit_time += case.get('time')
if case.get('status') == 'failed':
fails_num += 1
# last testsuit processing
suit_time = suit_time/60
testsuits.append({'module': module_name, 'name': prevGroupName, \
'time': suit_time, 'num': suit_num, 'failed': fails_num})
overall_time += suit_time
if len(testsuits)==0:
print 'No testsuits found'
exit(0)
tbl = table()
rows = 0
# header
tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
tbl.newColumn('time', 'Time (min)', align = 'center', cssclass = 'col_name')
tbl.newColumn('num', 'Num of tests', align = 'center', cssclass = 'col_name')
tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')
if not options.failedOnly:
tbl.newColumn('module', 'Module', align = 'left', cssclass = 'col_name')
tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
tbl.newColumn('time', 'Time (min)', align = 'center', cssclass = 'col_name')
tbl.newColumn('num', 'Num of tests', align = 'center', cssclass = 'col_name')
tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')
# rows
for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
tbl.newRow()
tbl.newCell('name', suit['name'])
tbl.newCell('time', formatValue(suit['time'], '', ''), suit['time'])
tbl.newCell('num', suit['num'])
if (suit['failed'] != 0):
tbl.newCell('failed', suit['failed'])
else:
tbl.newCell('failed', ' ')
# rows
for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
tbl.newRow()
tbl.newCell('module', suit['module'])
tbl.newCell('name', suit['name'])
tbl.newCell('time', formatValue(suit['time'], '', ''), suit['time'])
tbl.newCell('num', suit['num'])
if (suit['failed'] != 0):
tbl.newCell('failed', suit['failed'])
else:
tbl.newCell('failed', ' ')
rows += 1
else:
tbl.newColumn('module', 'Module', align = 'left', cssclass = 'col_name')
tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')
# rows
for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
if (suit['failed'] != 0):
tbl.newRow()
tbl.newCell('module', suit['module'])
tbl.newCell('name', suit['name'])
tbl.newCell('failed', suit['failed'])
rows += 1
# output table
if options.generateHtml:
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
input_file = re.sub(r'^[\.\/]*', '', input_file)
find_module_name = re.search(r'([^_]*)', input_file)
module_name = find_module_name.group(0)
splitter = 15 * '*'
print '\n%s\n %s\n%s\n' % (splitter, module_name, splitter)
print 'Overall time: %.2f min\n' % overall_time
tbl.consolePrintTable(sys.stdout)
print 4 * '\n'
if rows:
if options.generateHtml:
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
if not options.failedOnly:
print '\nOverall time: %.2f min\n' % overall_time
tbl.consolePrintTable(sys.stdout)
print 2 * '\n'


@ -139,12 +139,12 @@ Regression& Regression::add(TestBase* test, const std::string& name, cv::InputAr
Regression& Regression::addKeypoints(TestBase* test, const std::string& name, const std::vector<cv::KeyPoint>& array, double eps, ERROR_TYPE err)
{
int len = (int)array.size();
cv::Mat pt (len, 1, CV_32FC2, (void*)&array[0].pt, sizeof(cv::KeyPoint));
cv::Mat size (len, 1, CV_32FC1, (void*)&array[0].size, sizeof(cv::KeyPoint));
cv::Mat angle (len, 1, CV_32FC1, (void*)&array[0].angle, sizeof(cv::KeyPoint));
cv::Mat response(len, 1, CV_32FC1, (void*)&array[0].response, sizeof(cv::KeyPoint));
cv::Mat octave (len, 1, CV_32SC1, (void*)&array[0].octave, sizeof(cv::KeyPoint));
cv::Mat class_id(len, 1, CV_32SC1, (void*)&array[0].class_id, sizeof(cv::KeyPoint));
cv::Mat pt (len, 1, CV_32FC2, len ? (void*)&array[0].pt : 0, sizeof(cv::KeyPoint));
cv::Mat size (len, 1, CV_32FC1, len ? (void*)&array[0].size : 0, sizeof(cv::KeyPoint));
cv::Mat angle (len, 1, CV_32FC1, len ? (void*)&array[0].angle : 0, sizeof(cv::KeyPoint));
cv::Mat response(len, 1, CV_32FC1, len ? (void*)&array[0].response : 0, sizeof(cv::KeyPoint));
cv::Mat octave (len, 1, CV_32SC1, len ? (void*)&array[0].octave : 0, sizeof(cv::KeyPoint));
cv::Mat class_id(len, 1, CV_32SC1, len ? (void*)&array[0].class_id : 0, sizeof(cv::KeyPoint));
return Regression::add(test, name + "-pt", pt, eps, ERROR_ABSOLUTE)
(name + "-size", size, eps, ERROR_ABSOLUTE)
@ -157,10 +157,10 @@ Regression& Regression::addKeypoints(TestBase* test, const std::string& name, co
Regression& Regression::addMatches(TestBase* test, const std::string& name, const std::vector<cv::DMatch>& array, double eps, ERROR_TYPE err)
{
int len = (int)array.size();
cv::Mat queryIdx(len, 1, CV_32SC1, (void*)&array[0].queryIdx, sizeof(cv::DMatch));
cv::Mat trainIdx(len, 1, CV_32SC1, (void*)&array[0].trainIdx, sizeof(cv::DMatch));
cv::Mat imgIdx (len, 1, CV_32SC1, (void*)&array[0].imgIdx, sizeof(cv::DMatch));
cv::Mat distance(len, 1, CV_32FC1, (void*)&array[0].distance, sizeof(cv::DMatch));
cv::Mat queryIdx(len, 1, CV_32SC1, len ? (void*)&array[0].queryIdx : 0, sizeof(cv::DMatch));
cv::Mat trainIdx(len, 1, CV_32SC1, len ? (void*)&array[0].trainIdx : 0, sizeof(cv::DMatch));
cv::Mat imgIdx (len, 1, CV_32SC1, len ? (void*)&array[0].imgIdx : 0, sizeof(cv::DMatch));
cv::Mat distance(len, 1, CV_32FC1, len ? (void*)&array[0].distance : 0, sizeof(cv::DMatch));
return Regression::add(test, name + "-queryIdx", queryIdx, DBL_EPSILON, ERROR_ABSOLUTE)
(name + "-trainIdx", trainIdx, DBL_EPSILON, ERROR_ABSOLUTE)


@ -0,0 +1,100 @@
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_VERSION 1)
set(CMAKE_SYSTEM_PROCESSOR arm)
if (CARMA)
set(GCC_COMPILER_VERSION "4.5" CACHE STRING "GCC Compiler version")
else()
set(GCC_COMPILER_VERSION "4.6" CACHE STRING "GCC Compiler version")
endif()
set(FLOAT_ABI_SUFFIX "")
if (NOT SOFTFP)
set(FLOAT_ABI_SUFFIX "hf")
endif()
set(CMAKE_C_COMPILER arm-linux-gnueabi${FLOAT_ABI_SUFFIX}-gcc-${GCC_COMPILER_VERSION})
set(CMAKE_CXX_COMPILER arm-linux-gnueabi${FLOAT_ABI_SUFFIX}-g++-${GCC_COMPILER_VERSION})
set(ARM_LINUX_SYSROOT /usr/arm-linux-gnueabi${FLOAT_ABI_SUFFIX} CACHE PATH "ARM cross compilation system root")
set(CMAKE_CXX_FLAGS "" CACHE STRING "c++ flags")
set(CMAKE_C_FLAGS "" CACHE STRING "c flags")
set(CMAKE_SHARED_LINKER_FLAGS "" CACHE STRING "shared linker flags")
set(CMAKE_MODULE_LINKER_FLAGS "" CACHE STRING "module linker flags")
set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,nocopyreloc" CACHE STRING "executable linker flags")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mthumb -fdata-sections -Wa,--noexecstack -fsigned-char -Wno-psabi")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mthumb -fdata-sections -Wa,--noexecstack -fsigned-char -Wno-psabi")
set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--fix-cortex-a8 -Wl,--no-undefined -Wl,--gc-sections -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now ${CMAKE_SHARED_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "-Wl,--fix-cortex-a8 -Wl,--no-undefined -Wl,--gc-sections -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now ${CMAKE_MODULE_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "-Wl,--fix-cortex-a8 -Wl,--no-undefined -Wl,--gc-sections -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now ${CMAKE_EXE_LINKER_FLAGS}")
if(USE_NEON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon")
elseif(USE_VFPV3)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfpv3")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfpv3")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfpv3-d16")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfpv3-d16")
endif()
set(CMAKE_FIND_ROOT_PATH ${CMAKE_FIND_ROOT_PATH} ${ARM_LINUX_SYSROOT})
if(EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
set(CMAKE_FIND_ROOT_PATH ${CMAKE_FIND_ROOT_PATH} ${CUDA_TOOLKIT_ROOT_DIR})
endif()
set( CMAKE_SKIP_RPATH TRUE CACHE BOOL "If set, runtime paths are not added when using shared libraries." )
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
if (CARMA)
add_definitions(-DCARMA)
endif()
# macro to find programs on the host OS
macro( find_host_program )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER )
if( CMAKE_HOST_WIN32 )
SET( WIN32 1 )
SET( UNIX )
elseif( CMAKE_HOST_APPLE )
SET( APPLE 1 )
SET( UNIX )
endif()
find_program( ${ARGN} )
SET( WIN32 )
SET( APPLE )
SET( UNIX 1 )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
endmacro()
# macro to find packages on the host OS
macro( find_host_package )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER )
if( CMAKE_HOST_WIN32 )
SET( WIN32 1 )
SET( UNIX )
elseif( CMAKE_HOST_APPLE )
SET( APPLE 1 )
SET( UNIX )
endif()
find_package( ${ARGN} )
SET( WIN32 )
SET( APPLE )
SET( UNIX 1 )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
endmacro()


@ -0,0 +1,7 @@
#!/bin/sh
cd `dirname $0`/..
mkdir -p build_hardfp
cd build_hardfp
cmake -DCMAKE_TOOLCHAIN_FILE=../arm-gnueabi.toolchain.cmake $@ ../../..


@ -0,0 +1,7 @@
#!/bin/sh
cd `dirname $0`/..
mkdir -p build_softfp
cd build_softfp
cmake -DSOFTFP=ON -DCMAKE_TOOLCHAIN_FILE=../arm-gnueabi.toolchain.cmake $@ ../../..


@ -0,0 +1,8 @@
#!/bin/sh
mkdir -p build_carma
cd build_carma
cmake -DSOFTFP=ON -DCARMA=ON -DWITH_TBB=ON -DBUILD_TBB=ON -DUSE_NEON=ON -DCUDA_TOOLKIT_ROOT_DIR=/usr/arm-linux-gnueabi/cuda/ \
-DCUDA_ARCH_BIN="2.1(2.0)" -DCUDA_ARCH_PTX="" -DCMAKE_SKIP_RPATH=ON -DWITH_CUDA=ON -DWITH_CUBLAS=ON \
-DCMAKE_TOOLCHAIN_FILE=../arm-gnueabi.toolchain.cmake $@ ../../..

platforms/readme.txt (new file)

@ -0,0 +1 @@
This folder contains toolchains and additional files that are needed for cross compilation.


@ -63,9 +63,9 @@ public class Puzzle15Activity extends Activity implements CvCameraViewListener,
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -109,10 +109,6 @@ public class Puzzle15Activity extends Activity implements CvCameraViewListener,
public void onCameraViewStopped() {
}
public Mat onCameraFrame(Mat inputFrame) {
return mPuzzle15.puzzleFrame(inputFrame);
}
public boolean onTouch(View view, MotionEvent event) {
int xpos, ypos;
@ -129,4 +125,8 @@ public class Puzzle15Activity extends Activity implements CvCameraViewListener,
return false;
}
public Mat onCameraFrame(Mat inputFrame) {
return mPuzzle15.puzzleFrame(inputFrame);
}
}
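
A note on the onPause() reordering applied across these samples: the camera view is now disabled before delegating to super.onPause(), so the camera is released while the activity is still winding down rather than after the framework has already run its own pause handling. A minimal sketch of the resulting pattern (the class name is illustrative, not from the samples):

import org.opencv.android.CameraBridgeViewBase;

import android.app.Activity;

public class LifecycleSketchActivity extends Activity {
    private CameraBridgeViewBase mOpenCvCameraView;

    @Override
    public void onPause() {
        // Release the camera first, while the activity is still active...
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
        // ...then let the framework finish pausing the activity.
        super.onPause();
    }
}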


@ -11,11 +11,9 @@ add_subdirectory(face-detection)
add_subdirectory(image-manipulations)
add_subdirectory(color-blob-detection)
add_subdirectory(tutorial-1-addopencv)
add_subdirectory(tutorial-2-opencvcamera)
add_subdirectory(tutorial-3-native)
add_subdirectory(tutorial-4-mixed)
add_subdirectory(tutorial-5-cameracontrol)
add_subdirectory(tutorial-1-camerapreview)
add_subdirectory(tutorial-2-mixedprocessing)
add_subdirectory(tutorial-3-cameracontrol)
#hello-android sample
if(HAVE_opencv_highgui)


@ -3,6 +3,7 @@ package org.opencv.samples.colorblobdetect;
import java.util.List;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
@ -13,7 +14,7 @@ import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.imgproc.Imgproc;
import android.app.Activity;
@ -25,7 +26,7 @@ import android.view.Window;
import android.view.WindowManager;
import android.view.View.OnTouchListener;
public class ColorBlobDetectionActivity extends Activity implements OnTouchListener, CvCameraViewListener {
public class ColorBlobDetectionActivity extends Activity implements OnTouchListener, CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private boolean mIsColorSelected = false;
@ -78,9 +79,9 @@ public class ColorBlobDetectionActivity extends Activity implements OnTouchListe
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -160,8 +161,8 @@ public class ColorBlobDetectionActivity extends Activity implements OnTouchListe
return false; // don't need subsequent touch events
}
public Mat onCameraFrame(Mat inputFrame) {
inputFrame.copyTo(mRgba);
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
if (mIsColorSelected) {
mDetector.process(mRgba);
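
The common thread in this and the following sample diffs is the migration from CvCameraViewListener to CvCameraViewListener2: onCameraFrame() now receives a CvCameraViewFrame and pulls whichever representation it needs on demand via rgba() or gray(), instead of being handed a single Mat in a fixed capture format. A minimal sketch of the new callback shape (the class name is illustrative):

import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.core.Mat;

public class ListenerSketch implements CvCameraViewListener2 {
    private Mat mRgba;

    public void onCameraViewStarted(int width, int height) { }

    public void onCameraViewStopped() { }

    public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
        // Color and gray views of the same frame are both available on demand.
        mRgba = inputFrame.rgba();
        Mat gray = inputFrame.gray();
        // ... analyze gray, draw results into mRgba ...
        return mRgba; // the returned Mat is what gets rendered
    }
}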


@ -6,6 +6,7 @@ import java.io.IOException;
import java.io.InputStream;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
@ -15,8 +16,7 @@ import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.imgproc.Imgproc;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.objdetect.CascadeClassifier;
import android.app.Activity;
@ -27,7 +27,7 @@ import android.view.Menu;
import android.view.MenuItem;
import android.view.WindowManager;
public class FdActivity extends Activity implements CvCameraViewListener {
public class FdActivity extends Activity implements CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private static final Scalar FACE_RECT_COLOR = new Scalar(0, 255, 0, 255);
@ -130,9 +130,9 @@ public class FdActivity extends Activity implements CvCameraViewListener {
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -157,10 +157,10 @@ public class FdActivity extends Activity implements CvCameraViewListener {
mRgba.release();
}
public Mat onCameraFrame(Mat inputFrame) {
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
inputFrame.copyTo(mRgba);
Imgproc.cvtColor(inputFrame, mGray, Imgproc.COLOR_RGBA2GRAY);
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows();


@ -1,50 +0,0 @@
package org.opencv.samples.facedetect;
import java.text.DecimalFormat;
import org.opencv.core.Core;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.util.Log;
public class FpsMeter {
private static final String TAG = "OCVSample::FpsMeter";
int step;
int framesCouner;
double freq;
long prevFrameTime;
String strfps;
DecimalFormat twoPlaces = new DecimalFormat("0.00");
Paint paint;
public void init() {
step = 20;
framesCouner = 0;
freq = Core.getTickFrequency();
prevFrameTime = Core.getTickCount();
strfps = "";
paint = new Paint();
paint.setColor(Color.BLUE);
paint.setTextSize(50);
}
public void measure() {
framesCouner++;
if (framesCouner % step == 0) {
long time = Core.getTickCount();
double fps = step * freq / (time - prevFrameTime);
prevFrameTime = time;
DecimalFormat twoPlaces = new DecimalFormat("0.00");
strfps = twoPlaces.format(fps) + " FPS";
Log.i(TAG, strfps);
}
}
public void draw(Canvas canvas, float offsetx, float offsety) {
canvas.drawText(strfps, 20 + offsetx, 10 + 50 + offsety, paint);
}
}


@ -1,50 +0,0 @@
package org.opencv.samples.imagemanipulations;
import java.text.DecimalFormat;
import org.opencv.core.Core;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.util.Log;
public class FpsMeter {
private static final String TAG = "OCVSample::FpsMeter";
int step;
int framesCouner;
double freq;
long prevFrameTime;
String strfps;
DecimalFormat twoPlaces = new DecimalFormat("0.00");
Paint paint;
public void init() {
step = 20;
framesCouner = 0;
freq = Core.getTickFrequency();
prevFrameTime = Core.getTickCount();
strfps = "";
paint = new Paint();
paint.setColor(Color.BLUE);
paint.setTextSize(50);
}
public void measure() {
framesCouner++;
if (framesCouner % step == 0) {
long time = Core.getTickCount();
double fps = step * freq / (time - prevFrameTime);
prevFrameTime = time;
DecimalFormat twoPlaces = new DecimalFormat("0.00");
strfps = twoPlaces.format(fps) + " FPS";
Log.i(TAG, strfps);
}
}
public void draw(Canvas canvas, float offsetx, float offsety) {
canvas.drawText(strfps, 20 + offsetx, 10 + 50 + offsety, paint);
}
}


@ -3,6 +3,7 @@ package org.opencv.samples.imagemanipulations;
import java.util.Arrays;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
@ -14,7 +15,7 @@ import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.imgproc.Imgproc;
import android.app.Activity;
@ -24,7 +25,7 @@ import android.view.Menu;
import android.view.MenuItem;
import android.view.WindowManager;
public class ImageManipulationsActivity extends Activity implements CvCameraViewListener {
public class ImageManipulationsActivity extends Activity implements CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
public static final int VIEW_MODE_RGBA = 0;
@ -111,9 +112,9 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -258,8 +259,8 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
mZoomWindow = null;
}
public Mat onCameraFrame(Mat inputFrame) {
inputFrame.copyTo(mRgba);
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
switch (ImageManipulationsActivity.viewMode) {
case ImageManipulationsActivity.VIEW_MODE_RGBA:
@ -315,7 +316,7 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
break;
case ImageManipulationsActivity.VIEW_MODE_SOBEL:
Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_RGBA2GRAY);
mGray = inputFrame.gray();
if ((mRgbaInnerWindow == null) || (mGrayInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();


@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>OpenCV Tutorial 5 - Camera Control</name>
<name>OpenCV Tutorial 1 - Camera Preview</name>
<comment></comment>
<projects>
</projects>


@ -9,7 +9,7 @@
android:icon="@drawable/icon"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen" >
<activity android:name="Sample1Java"
<activity android:name="Tutorial1Activity"
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="keyboardHidden|orientation">


@ -1,7 +1,6 @@
set(sample example-tutorial-1-addopencv)
set(sample example-tutorial-1-camerapreview)
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET})
if(TARGET ${sample})
add_dependencies(opencv_android_examples ${sample})
endif()

Binary image file changed (2.0 KiB before and after).

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">OCV T1 Add OpenCV</string>
<string name="app_name">OCV T1 Preview</string>
</resources>


@ -1,11 +1,12 @@
package org.opencv.samples.tutorial1;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Mat;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import android.app.Activity;
import android.os.Bundle;
@ -16,7 +17,7 @@ import android.view.SurfaceView;
import android.view.WindowManager;
import android.widget.Toast;
public class Sample1Java extends Activity implements CvCameraViewListener {
public class Tutorial1Activity extends Activity implements CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private CameraBridgeViewBase mOpenCvCameraView;
@ -40,7 +41,7 @@ public class Sample1Java extends Activity implements CvCameraViewListener {
}
};
public Sample1Java() {
public Tutorial1Activity() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
@ -66,9 +67,9 @@ public class Sample1Java extends Activity implements CvCameraViewListener {
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -87,7 +88,7 @@ public class Sample1Java extends Activity implements CvCameraViewListener {
@Override
public boolean onCreateOptionsMenu(Menu menu) {
Log.i(TAG, "called onCreateOptionsMenu");
mItemSwitchCamera = menu.add("Switch camera");
mItemSwitchCamera = menu.add("Toggle Native/Java camera");
return true;
}
@ -124,7 +125,7 @@ public class Sample1Java extends Activity implements CvCameraViewListener {
public void onCameraViewStopped() {
}
public Mat onCameraFrame(Mat inputFrame) {
return inputFrame;
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
return inputFrame.rgba();
}
}


@ -60,7 +60,7 @@
</cconfiguration>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<project id="OpenCV Tutorial 4 - Mix Java+Native OpenCV.null.1819504790" name="OpenCV Tutorial 4 - Mix Java+Native OpenCV"/>
<project id="OpenCV Tutorial 2 - Mixed Processing OpenCV.null.1819504790" name="OpenCV Tutorial 2 - Mixed Processing"/>
</storageModule>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
@ -69,6 +69,6 @@
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="refreshScope" versionNumber="1">
<resource resourceType="PROJECT" workspacePath="/OpenCV Tutorial 4 - Mix Java+Native OpenCV"/>
<resource resourceType="PROJECT" workspacePath="/OpenCV Tutorial 2 - Mixed Processing"/>
</storageModule>
</cproject>


@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>OpenCV Tutorial 3 - Add Native OpenCV</name>
<name>OpenCV Tutorial 2 - Mixed Processing</name>
<comment></comment>
<projects>
</projects>


@ -9,7 +9,7 @@
android:icon="@drawable/icon"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen" >
<activity android:name="Sample2NativeCamera"
<activity android:name="Tutorial2Activity"
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="keyboardHidden|orientation">


@ -1,4 +1,4 @@
set(sample example-tutorial-3-native)
set(sample example-tutorial-2-mixedprocessing)
if(BUILD_FAT_JAVA_LIB)
set(native_deps opencv_java)


@ -8,9 +8,9 @@ using namespace std;
using namespace cv;
extern "C" {
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba);
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba);
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba)
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba)
{
Mat& mGr = *(Mat*)addrGray;
Mat& mRgb = *(Mat*)addrRgba;

Binary image file changed (2.0 KiB before and after).

@ -3,7 +3,7 @@
android:layout_width="match_parent"
android:layout_height="match_parent" >
<org.opencv.android.NativeCameraView
<org.opencv.android.JavaCameraView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:id="@+id/tutorial2_activity_surface_view" />


@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">OCV T2 Use OpenCV Camera</string>
<string name="app_name">OCV T2 Mixed Processing</string>
</resources>


@ -1,13 +1,13 @@
package org.opencv.samples.tutorial4;
package org.opencv.samples.tutorial2;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.highgui.Highgui;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.imgproc.Imgproc;
import android.app.Activity;
@ -17,7 +17,7 @@ import android.view.Menu;
import android.view.MenuItem;
import android.view.WindowManager;
public class Sample4Mixed extends Activity implements CvCameraViewListener {
public class Tutorial2Activity extends Activity implements CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private static final int VIEW_MODE_RGBA = 0;
@ -28,7 +28,7 @@ public class Sample4Mixed extends Activity implements CvCameraViewListener {
private int mViewMode;
private Mat mRgba;
private Mat mIntermediateMat;
private Mat mGrayMat;
private Mat mGray;
private MenuItem mItemPreviewRGBA;
private MenuItem mItemPreviewGray;
@ -58,7 +58,7 @@ public class Sample4Mixed extends Activity implements CvCameraViewListener {
}
};
public Sample4Mixed() {
public Tutorial2Activity() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
@ -69,9 +69,9 @@ public class Sample4Mixed extends Activity implements CvCameraViewListener {
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.tutorial4_surface_view);
setContentView(R.layout.tutorial2_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial4_activity_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial2_activity_surface_view);
mOpenCvCameraView.setCvCameraViewListener(this);
}
@ -88,9 +88,9 @@ public class Sample4Mixed extends Activity implements CvCameraViewListener {
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -109,37 +109,37 @@ public class Sample4Mixed extends Activity implements CvCameraViewListener {
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mIntermediateMat = new Mat(height, width, CvType.CV_8UC4);
mGrayMat = new Mat(height, width, CvType.CV_8UC1);
mGray = new Mat(height, width, CvType.CV_8UC1);
}
public void onCameraViewStopped() {
mRgba.release();
mGrayMat.release();
mGray.release();
mIntermediateMat.release();
}
public Mat onCameraFrame(Mat inputFrame) {
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
final int viewMode = mViewMode;
switch (viewMode) {
case VIEW_MODE_GRAY:
// input frame has gray scale format
Imgproc.cvtColor(inputFrame, mRgba, Imgproc.COLOR_GRAY2RGBA, 4);
Imgproc.cvtColor(inputFrame.gray(), mRgba, Imgproc.COLOR_GRAY2RGBA, 4);
break;
case VIEW_MODE_RGBA:
// input frame has RBGA format
inputFrame.copyTo(mRgba);
mRgba = inputFrame.rgba();
break;
case VIEW_MODE_CANNY:
// input frame has gray scale format
Imgproc.Canny(inputFrame, mIntermediateMat, 80, 100);
Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2BGRA, 4);
mRgba = inputFrame.rgba();
Imgproc.Canny(inputFrame.gray(), mIntermediateMat, 80, 100);
Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2RGBA, 4);
break;
case VIEW_MODE_FEATURES:
// input frame has RGBA format
inputFrame.copyTo(mRgba);
Imgproc.cvtColor(mRgba, mGrayMat, Imgproc.COLOR_RGBA2GRAY);
FindFeatures(mGrayMat.getNativeObjAddr(), mRgba.getNativeObjAddr());
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
FindFeatures(mGray.getNativeObjAddr(), mRgba.getNativeObjAddr());
break;
}
@ -150,17 +150,13 @@ public class Sample4Mixed extends Activity implements CvCameraViewListener {
Log.i(TAG, "called onOptionsItemSelected; selected item: " + item);
if (item == mItemPreviewRGBA) {
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
mViewMode = VIEW_MODE_RGBA;
} else if (item == mItemPreviewGray) {
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_GREY_FRAME);
mViewMode = VIEW_MODE_GRAY;
} else if (item == mItemPreviewCanny) {
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_GREY_FRAME);
mViewMode = VIEW_MODE_CANNY;
} else if (item == mItemPreviewFeatures) {
mViewMode = VIEW_MODE_FEATURES;
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
}
return true;


@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>OpenCV Tutorial 2 - Use OpenCV Camera</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.ApkBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>com.android.ide.eclipse.adt.AndroidNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>


@ -1,155 +0,0 @@
package org.opencv.samples.tutorial2;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.WindowManager;
public class Sample2NativeCamera extends Activity implements CvCameraViewListener {
private static final String TAG = "OCVSample::Activity";
public static final int VIEW_MODE_RGBA = 0;
public static final int VIEW_MODE_GRAY = 1;
public static final int VIEW_MODE_CANNY = 2;
private static int viewMode = VIEW_MODE_RGBA;
private MenuItem mItemPreviewRGBA;
private MenuItem mItemPreviewGray;
private MenuItem mItemPreviewCanny;
private Mat mRgba;
private Mat mIntermediateMat;
private CameraBridgeViewBase mOpenCvCameraView;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public Sample2NativeCamera() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "called onCreate");
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.tutorial2_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial2_activity_surface_view);
mOpenCvCameraView.setCvCameraViewListener(this);
}
@Override
public void onPause()
{
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mIntermediateMat = new Mat(height, width, CvType.CV_8UC4);
}
public void onCameraViewStopped() {
mRgba.release();
mIntermediateMat.release();
}
public Mat onCameraFrame(Mat inputFrame) {
switch (Sample2NativeCamera.viewMode) {
case Sample2NativeCamera.VIEW_MODE_GRAY:
{
Imgproc.cvtColor(inputFrame, mRgba, Imgproc.COLOR_GRAY2RGBA, 4);
} break;
case Sample2NativeCamera.VIEW_MODE_RGBA:
{
inputFrame.copyTo(mRgba);
Core.putText(mRgba, "OpenCV+Android", new Point(10, inputFrame.rows() - 10), 3, 1, new Scalar(255, 0, 0, 255), 2);
} break;
case Sample2NativeCamera.VIEW_MODE_CANNY:
{
Imgproc.Canny(inputFrame, mIntermediateMat, 80, 100);
Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2BGRA, 4);
} break;
}
return mRgba;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
Log.i(TAG, "called onCreateOptionsMenu");
mItemPreviewRGBA = menu.add("Preview RGBA");
mItemPreviewGray = menu.add("Preview GRAY");
mItemPreviewCanny = menu.add("Canny");
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
Log.i(TAG, "called onOptionsItemSelected; selected item: " + item);
if (item == mItemPreviewRGBA)
{
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
viewMode = VIEW_MODE_RGBA;
}
else if (item == mItemPreviewGray)
{
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_GREY_FRAME);
viewMode = VIEW_MODE_GRAY;
}
else if (item == mItemPreviewCanny)
{
mOpenCvCameraView.SetCaptureFormat(Highgui.CV_CAP_ANDROID_GREY_FRAME);
viewMode = VIEW_MODE_CANNY;
}
return true;
}
}


@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>OpenCV Tutorial 1 - Add OpenCV</name>
<name>OpenCV Tutorial 3 - Camera Control</name>
<comment></comment>
<projects>
</projects>


@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv.samples.tutorial5"
package="org.opencv.samples.tutorial3"
android:versionCode="21"
android:versionName="2.1">
@ -9,7 +9,7 @@
android:icon="@drawable/icon"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen" >
<activity android:name="Sample5CameraControl"
<activity android:name="Tutorial3Activity"
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="keyboardHidden|orientation">


@ -1,7 +1,6 @@
set(sample example-tutorial-2-opencvcamera)
set(sample example-tutorial-3-cameracontrol)
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET})
if(TARGET ${sample})
add_dependencies(opencv_android_examples ${sample})
endif()

Binary image file changed (2.0 KiB before and after).

@ -3,10 +3,10 @@
android:layout_width="match_parent"
android:layout_height="match_parent" >
<org.opencv.samples.tutorial5.SampleJavaCameraView
<org.opencv.samples.tutorial3.Tutorial3View
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:visibility="gone"
android:id="@+id/tutorial5_activity_java_surface_view" />
android:id="@+id/tutorial3_activity_java_surface_view" />
</LinearLayout>


@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">OCV T5 Camera Control</string>
<string name="app_name">OCV T3 Camera Control</string>
</resources>


@ -1,4 +1,4 @@
package org.opencv.samples.tutorial5;
package org.opencv.samples.tutorial3;
import java.text.SimpleDateFormat;
import java.util.Date;
@ -6,10 +6,11 @@ import java.util.List;
import java.util.ListIterator;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Mat;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import android.annotation.SuppressLint;
import android.app.Activity;
@ -27,10 +28,10 @@ import android.view.View.OnTouchListener;
import android.view.WindowManager;
import android.widget.Toast;
public class Sample5CameraControl extends Activity implements CvCameraViewListener, OnTouchListener {
public class Tutorial3Activity extends Activity implements CvCameraViewListener2, OnTouchListener {
private static final String TAG = "OCVSample::Activity";
private SampleJavaCameraView mOpenCvCameraView;
private Tutorial3View mOpenCvCameraView;
private List<Size> mResolutionList;
private MenuItem[] mEffectMenuItems;
private SubMenu mColorEffectsMenu;
@ -45,7 +46,7 @@ public class Sample5CameraControl extends Activity implements CvCameraViewListen
{
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
mOpenCvCameraView.setOnTouchListener(Sample5CameraControl.this);
mOpenCvCameraView.setOnTouchListener(Tutorial3Activity.this);
} break;
default:
{
@ -55,7 +56,7 @@ public class Sample5CameraControl extends Activity implements CvCameraViewListen
}
};
public Sample5CameraControl() {
public Tutorial3Activity() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
@ -66,9 +67,9 @@ public class Sample5CameraControl extends Activity implements CvCameraViewListen
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.tutorial5_surface_view);
setContentView(R.layout.tutorial3_surface_view);
mOpenCvCameraView = (SampleJavaCameraView) findViewById(R.id.tutorial5_activity_java_surface_view);
mOpenCvCameraView = (Tutorial3View) findViewById(R.id.tutorial3_activity_java_surface_view);
mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
@ -78,9 +79,9 @@ public class Sample5CameraControl extends Activity implements CvCameraViewListen
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
@ -102,8 +103,8 @@ public class Sample5CameraControl extends Activity implements CvCameraViewListen
public void onCameraViewStopped() {
}
public Mat onCameraFrame(Mat inputFrame) {
return inputFrame;
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
return inputFrame.rgba();
}
@Override


@ -1,4 +1,4 @@
package org.opencv.samples.tutorial5;
package org.opencv.samples.tutorial3;
import java.io.FileOutputStream;
import java.util.List;
@ -14,11 +14,11 @@ import android.hardware.Camera.Size;
import android.util.AttributeSet;
import android.util.Log;
public class SampleJavaCameraView extends JavaCameraView {
public class Tutorial3View extends JavaCameraView {
private static final String TAG = "Sample::SampleJavaCameraView";
private static final String TAG = "Sample::Tutorial3View";
public SampleJavaCameraView(Context context, AttributeSet attrs) {
public Tutorial3View(Context context, AttributeSet attrs) {
super(context, attrs);
}
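
Tutorial3View subclasses JavaCameraView so the camera-control sample can reach the underlying android.hardware.Camera, which the bridge view manages internally. A sketch of the kind of accessors such a subclass can expose (this assumes JavaCameraView keeps its camera in a protected mCamera field; the method names are illustrative):

import java.util.List;

import org.opencv.android.JavaCameraView;

import android.content.Context;
import android.hardware.Camera;
import android.util.AttributeSet;

public class CameraControlSketchView extends JavaCameraView {

    public CameraControlSketchView(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    public List<Camera.Size> getResolutionList() {
        // Query supported preview sizes straight from the camera driver.
        return mCamera.getParameters().getSupportedPreviewSizes();
    }

    public void setEffect(String effect) {
        // Color effects (mono, sepia, ...) are a stock Camera.Parameters feature.
        Camera.Parameters params = mCamera.getParameters();
        params.setColorEffect(effect);
        mCamera.setParameters(params);
    }
}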


@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
<classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
<classpathentry kind="src" path="src"/>
<classpathentry kind="src" path="gen"/>
<classpathentry kind="output" path="bin/classes"/>
</classpath>


@ -1,75 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?fileVersion 4.0.0?>
<cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="0.1851062783">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="0.1851062783" moduleId="org.eclipse.cdt.core.settings" name="Default">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.VCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="${ProjName}" buildProperties="" description="" id="0.1851062783" name="Default" parent="org.eclipse.cdt.build.core.prefbase.cfg">
<folderInfo id="0.1851062783." name="/" resourcePath="">
<toolChain id="org.eclipse.cdt.build.core.prefbase.toolchain.114738979" name="No ToolChain" resourceTypeBasedDiscovery="false" superClass="org.eclipse.cdt.build.core.prefbase.toolchain">
<targetPlatform id="org.eclipse.cdt.build.core.prefbase.toolchain.114738979.901399641" name=""/>
<builder autoBuildTarget="" command="${NDKROOT}/ndk-build.cmd" enableAutoBuild="true" enableCleanBuild="false" id="org.eclipse.cdt.build.core.settings.default.builder.1153158428" incrementalBuildTarget="" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" superClass="org.eclipse.cdt.build.core.settings.default.builder"/>
<tool id="org.eclipse.cdt.build.core.settings.holder.libs.835006420" name="holder for library settings" superClass="org.eclipse.cdt.build.core.settings.holder.libs"/>
<tool id="org.eclipse.cdt.build.core.settings.holder.1350943194" name="Assembly" superClass="org.eclipse.cdt.build.core.settings.holder">
<inputType id="org.eclipse.cdt.build.core.settings.holder.inType.1640025837" languageId="org.eclipse.cdt.core.assembly" languageName="Assembly" sourceContentType="org.eclipse.cdt.core.asmSource" superClass="org.eclipse.cdt.build.core.settings.holder.inType"/>
</tool>
<tool id="org.eclipse.cdt.build.core.settings.holder.982773030" name="GNU C++" superClass="org.eclipse.cdt.build.core.settings.holder">
<option id="org.eclipse.cdt.build.core.settings.holder.incpaths.332905639" name="Include Paths" superClass="org.eclipse.cdt.build.core.settings.holder.incpaths" valueType="includePath">
<listOptionValue builtIn="false" value="&quot;${NDKROOT}/platforms/android-9/arch-arm/usr/include&quot;"/>
<listOptionValue builtIn="false" value="&quot;${NDKROOT}/sources/cxx-stl/gnu-libstdc++/4.6/include&quot;"/>
<listOptionValue builtIn="false" value="&quot;${NDKROOT}/sources/cxx-stl/gnu-libstdc++/4.6/libs/armeabi-v7a/include&quot;"/>
<listOptionValue builtIn="false" value="&quot;${ProjDirPath}/../../sdk/native/jni/include&quot;"/>
</option>
<option id="org.eclipse.cdt.build.core.settings.holder.symbols.1475512260" name="Symbols" superClass="org.eclipse.cdt.build.core.settings.holder.symbols" valueType="definedSymbols">
<listOptionValue builtIn="false" value="ANDROID=1"/>
</option>
<inputType id="org.eclipse.cdt.build.core.settings.holder.inType.1082980466" languageId="org.eclipse.cdt.core.g++" languageName="GNU C++" sourceContentType="org.eclipse.cdt.core.cxxSource,org.eclipse.cdt.core.cxxHeader" superClass="org.eclipse.cdt.build.core.settings.holder.inType"/>
</tool>
<tool id="org.eclipse.cdt.build.core.settings.holder.472513352" name="GNU C" superClass="org.eclipse.cdt.build.core.settings.holder">
<option id="org.eclipse.cdt.build.core.settings.holder.incpaths.1490236166" name="Include Paths" superClass="org.eclipse.cdt.build.core.settings.holder.incpaths" valueType="includePath">
<listOptionValue builtIn="false" value="&quot;${NDKROOT}/platforms/android-9/arch-arm/usr/include&quot;"/>
<listOptionValue builtIn="false" value="&quot;${NDKROOT}/sources/cxx-stl/gnu-libstdc++/4.6/include&quot;"/>
<listOptionValue builtIn="false" value="&quot;${NDKROOT}/sources/cxx-stl/gnu-libstdc++/4.6/libs/armeabi-v7a/include&quot;"/>
<listOptionValue builtIn="false" value="&quot;${ProjDirPath}/../../sdk/native/jni/include&quot;"/>
</option>
<option id="org.eclipse.cdt.build.core.settings.holder.symbols.945696849" name="Symbols" superClass="org.eclipse.cdt.build.core.settings.holder.symbols" valueType="definedSymbols">
<listOptionValue builtIn="false" value="ANDROID=1"/>
</option>
<inputType id="org.eclipse.cdt.build.core.settings.holder.inType.775624510" languageId="org.eclipse.cdt.core.gcc" languageName="GNU C" sourceContentType="org.eclipse.cdt.core.cSource,org.eclipse.cdt.core.cHeader" superClass="org.eclipse.cdt.build.core.settings.holder.inType"/>
</tool>
</toolChain>
</folderInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name="jni"/>
</sourceEntries>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<project id="OpenCV Tutorial 3 - Add Native OpenCV.null.1740260315" name="OpenCV Tutorial 3 - Add Native OpenCV"/>
</storageModule>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
<scannerConfigBuildInfo instanceId="0.1851062783">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="refreshScope" versionNumber="1">
<resource resourceType="PROJECT" workspacePath="/OpenCV Tutorial 3 - Add Native OpenCV"/>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.internal.ui.text.commentOwnerProjectMappings"/>
</cproject>


@ -1,38 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv.samples.tutorial3"
android:versionCode="21"
android:versionName="2.1">
<application
android:label="@string/app_name"
android:icon="@drawable/icon"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen" >
<activity android:name="Sample3Native"
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="keyboardHidden|orientation">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
<supports-screens android:resizeable="true"
android:smallScreens="true"
android:normalScreens="true"
android:largeScreens="true"
android:anyDensity="true" />
<uses-sdk android:minSdkVersion="8" android:targetSdkVersion="11" />
<uses-permission android:name="android.permission.CAMERA"/>
<uses-feature android:name="android.hardware.camera" android:required="false"/>
<uses-feature android:name="android.hardware.camera.autofocus" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front.autofocus" android:required="false"/>
</manifest>


@ -1,11 +0,0 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
include ../../sdk/native/jni/OpenCV.mk
LOCAL_MODULE := native_sample
LOCAL_SRC_FILES := jni_part.cpp
LOCAL_LDLIBS += -llog -ldl
include $(BUILD_SHARED_LIBRARY)


@ -1,11 +0,0 @@
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent" >
<org.opencv.android.JavaCameraView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:id="@+id/tutorial4_activity_surface_view" />
</LinearLayout>


@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">OCV T3 Add Native OpenCV</string>
</resources>


@ -1,102 +0,0 @@
package org.opencv.samples.tutorial3;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.imgproc.Imgproc;
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.WindowManager;
public class Sample3Native extends Activity implements CvCameraViewListener {
private static final String TAG = "OCVSample::Activity";
private Mat mRgba;
private Mat mGrayMat;
private CameraBridgeViewBase mOpenCvCameraView;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
// Load native library after(!) OpenCV initialization
System.loadLibrary("native_sample");
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public Sample3Native() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "called onCreate");
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.tutorial3_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial4_activity_surface_view);
mOpenCvCameraView.setCvCameraViewListener(this);
}
@Override
public void onPause()
{
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
super.onPause();
}
@Override
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mGrayMat = new Mat(height, width, CvType.CV_8UC1);
}
public void onCameraViewStopped() {
mRgba.release();
mGrayMat.release();
}
public Mat onCameraFrame(Mat inputFrame) {
inputFrame.copyTo(mRgba);
Imgproc.cvtColor(mRgba, mGrayMat, Imgproc.COLOR_RGBA2GRAY);
FindFeatures(mGrayMat.getNativeObjAddr(), mRgba.getNativeObjAddr());
return mRgba;
}
public native void FindFeatures(long matAddrGr, long matAddrRgba);
}


@ -1,101 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>OpenCV Tutorial 4 - Mix Java+Native OpenCV</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
<triggers>auto,full,incremental,</triggers>
<arguments>
<dictionary>
<key>?name?</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.append_environment</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.autoBuildTarget</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildArguments</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildCommand</key>
<value>${NDKROOT}/ndk-build.cmd</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.cleanBuildTarget</key>
<value>clean</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.contents</key>
<value>org.eclipse.cdt.make.core.activeConfigSettings</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableAutoBuild</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableCleanBuild</key>
<value>false</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableFullBuild</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.fullBuildTarget</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.stopOnError</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.useDefaultBuildCmd</key>
<value>false</value>
</dictionary>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.ApkBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
<triggers>full,incremental,</triggers>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>com.android.ide.eclipse.adt.AndroidNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.cdt.core.cnature</nature>
<nature>org.eclipse.cdt.core.ccnature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
</natures>
</projectDescription>


@ -1,4 +0,0 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
org.eclipse.jdt.core.compiler.compliance=1.6
org.eclipse.jdt.core.compiler.source=1.6


@ -1,38 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv.samples.tutorial4"
android:versionCode="21"
android:versionName="2.1">
<application
android:label="@string/app_name"
android:icon="@drawable/icon"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen" >
<activity android:name="Sample4Mixed"
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="keyboardHidden|orientation">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
<supports-screens android:resizeable="true"
android:smallScreens="true"
android:normalScreens="true"
android:largeScreens="true"
android:anyDensity="true" />
<uses-sdk android:minSdkVersion="8" />
<uses-permission android:name="android.permission.CAMERA"/>
<uses-feature android:name="android.hardware.camera" android:required="false"/>
<uses-feature android:name="android.hardware.camera.autofocus" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front.autofocus" android:required="false"/>
</manifest>


@ -1,12 +0,0 @@
set(sample example-tutorial-4-mixed)
if(BUILD_FAT_JAVA_LIB)
set(native_deps opencv_java)
else()
set(native_deps opencv_features2d)
endif()
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET} NATIVE_DEPS ${native_deps})
if(TARGET ${sample})
add_dependencies(opencv_android_examples ${sample})
endif()


@ -1,4 +0,0 @@
APP_STL := gnustl_static
APP_CPPFLAGS := -frtti -fexceptions
APP_ABI := armeabi-v7a
APP_PLATFORM := android-8


@ -1,27 +0,0 @@
#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace std;
using namespace cv;
extern "C" {
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial4_Sample4Mixed_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba);
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial4_Sample4Mixed_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba)
{
Mat& mGr = *(Mat*)addrGray;
Mat& mRgb = *(Mat*)addrRgba;
vector<KeyPoint> v;
FastFeatureDetector detector(50);
detector.detect(mGr, v);
for( unsigned int i = 0; i < v.size(); i++ )
{
const KeyPoint& kp = v[i];
circle(mRgb, Point(kp.pt.x, kp.pt.y), 10, Scalar(255,0,0,255));
}
}
}

Some files were not shown because too many files have changed in this diff.