Merge pull request #2356 from SpecLad:merge-2.4

commit bcf4307345
Roman Donchenko 2014-02-18 12:14:57 +04:00, committed by OpenCV Buildbot
22 changed files with 475 additions and 331 deletions

View File

@@ -54,7 +54,7 @@
/* Native cpu byte order: 1 if big-endian (Motorola) or 0 if little-endian
(Intel) */
#define HOST_BIGENDIAN 0
#define HOST_BIGENDIAN @WORDS_BIGENDIAN@
/* Set the native cpu bit order (FILLORDER_LSB2MSB or FILLORDER_MSB2LSB) */
#define HOST_FILLORDER FILLORDER_LSB2MSB
@@ -156,15 +156,7 @@
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
#cmakedefine WORDS_BIGENDIAN 1
/* Support Deflate compression */
#define ZIP_SUPPORT 1
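
Note on the two endianness macros above: libtiff records the build machine's byte order so it can byte-swap samples when a file's endianness differs from the host's. The build now bakes the answer in at configure time, but the property is easy to illustrate with a runtime check; a minimal C++ sketch (illustrative only, not part of this commit):

    #include <cstdint>
    #include <cstring>

    // Returns true on a big-endian host, i.e. when the most significant
    // byte of a multi-byte value is stored first (Motorola/SPARC order).
    static bool hostIsBigEndian()
    {
        const std::uint32_t probe = 0x01020304;
        unsigned char bytes[sizeof probe];
        std::memcpy(bytes, &probe, sizeof probe);
        return bytes[0] == 0x01;
    }

CMake's test_big_endian() performs the equivalent check at configure time and stores the result in WORDS_BIGENDIAN, which configure_file() then substitutes into the @WORDS_BIGENDIAN@ and #cmakedefine lines shown here.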

View File

@@ -132,7 +132,7 @@ OCV_OPTION(WITH_JASPER "Include JPEG2K support" ON
OCV_OPTION(WITH_JPEG "Include JPEG support" ON)
OCV_OPTION(WITH_WEBP "Include WebP support" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID AND NOT APPLE) )
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID) )
OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_PNG "Include PNG support" ON)
OCV_OPTION(WITH_PVAPI "Include Prosilica GigE support" ON IF (NOT ANDROID AND NOT IOS) )
@@ -415,6 +415,12 @@ endif()
include(cmake/OpenCVPCHSupport.cmake)
include(cmake/OpenCVModule.cmake)
# ----------------------------------------------------------------------------
# Detect endianness of build platform
# ----------------------------------------------------------------------------
include(TestBigEndian)
test_big_endian(WORDS_BIGENDIAN)
# ----------------------------------------------------------------------------
# Detect 3rd-party libraries
# ----------------------------------------------------------------------------
@@ -553,16 +559,46 @@ include(cmake/OpenCVGenConfig.cmake)
include(cmake/OpenCVGenInfoPlist.cmake)
# Generate environment setup file
if(INSTALL_TESTS AND OPENCV_TEST_DATA_PATH AND UNIX AND NOT ANDROID)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_testing.sh.in"
"${CMAKE_BINARY_DIR}/unix-install/opencv_testing.sh" @ONLY)
install(FILES "${CMAKE_BINARY_DIR}/unix-install/opencv_testing.sh"
DESTINATION /etc/profile.d/ COMPONENT tests)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_run_all_tests.sh.in"
"${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh" @ONLY)
install(FILES "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh"
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE
DESTINATION ${OPENCV_TEST_INSTALL_PATH} COMPONENT tests)
if(INSTALL_TESTS AND OPENCV_TEST_DATA_PATH AND UNIX)
if(ANDROID)
get_filename_component(TEST_PATH ${OPENCV_TEST_INSTALL_PATH} DIRECTORY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_run_all_tests_android.sh.in"
"${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh" @ONLY)
install(PROGRAMS "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh"
DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT tests)
else()
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_testing.sh.in"
"${CMAKE_BINARY_DIR}/unix-install/opencv_testing.sh" @ONLY)
install(FILES "${CMAKE_BINARY_DIR}/unix-install/opencv_testing.sh"
DESTINATION /etc/profile.d/ COMPONENT tests)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_run_all_tests_unix.sh.in"
"${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh" @ONLY)
install(PROGRAMS "${CMAKE_BINARY_DIR}/unix-install/opencv_run_all_tests.sh"
DESTINATION ${OPENCV_TEST_INSTALL_PATH} COMPONENT tests)
endif()
endif()
if(NOT OPENCV_README_FILE)
if(ANDROID)
set(OPENCV_README_FILE ${CMAKE_CURRENT_SOURCE_DIR}/platforms/android/README.android)
endif()
endif()
if(NOT OPENCV_LICENSE_FILE)
set(OPENCV_LICENSE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)
endif()
# for UNIX it does not make sense as LICENSE and readme will be part of the package automatically
if(ANDROID OR NOT UNIX)
install(FILES ${OPENCV_LICENSE_FILE}
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ
DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT libs)
if(OPENCV_README_FILE)
install(FILES ${OPENCV_README_FILE}
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ
DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT libs)
endif()
endif()
# ----------------------------------------------------------------------------

View File

@@ -1,5 +1,7 @@
### OpenCV: Open Source Computer Vision Library
[![Gittip](http://img.shields.io/gittip/OpenCV.png)](https://www.gittip.com/OpenCV/)
#### Resources
* Homepage: <http://opencv.org>
@@ -18,6 +20,3 @@ Summary of guidelines:
* Include tests and documentation;
* Clean up "oops" commits before submitting;
* Follow the coding style guide.
[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/gittip1.png)](https://www.gittip.com/OpenCV/)
[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/paypal-donate-button.png)](https://www.paypal.com/cgi-bin/webscr?item_name=Donation+to+OpenCV&cmd=_donations&business=accountant%40opencv.org)

View File

@@ -135,13 +135,13 @@ macro(ocv_add_module _name)
# parse list of dependencies
if("${ARGV1}" STREQUAL "INTERNAL" OR "${ARGV1}" STREQUAL "BINDINGS")
set(OPENCV_MODULE_${the_module}_CLASS "${ARGV1}" CACHE INTERNAL "The cathegory of the module")
set(OPENCV_MODULE_${the_module}_CLASS "${ARGV1}" CACHE INTERNAL "The category of the module")
set(__ocv_argn__ ${ARGN})
list(REMOVE_AT __ocv_argn__ 0)
ocv_add_dependencies(${the_module} ${__ocv_argn__})
unset(__ocv_argn__)
else()
set(OPENCV_MODULE_${the_module}_CLASS "PUBLIC" CACHE INTERNAL "The cathegory of the module")
set(OPENCV_MODULE_${the_module}_CLASS "PUBLIC" CACHE INTERNAL "The category of the module")
ocv_add_dependencies(${the_module} ${ARGN})
if(BUILD_${the_module})
set(OPENCV_MODULES_PUBLIC ${OPENCV_MODULES_PUBLIC} "${the_module}" CACHE INTERNAL "List of OpenCV modules marked for export")

View File

@@ -167,6 +167,6 @@
/* Xine video library */
#cmakedefine HAVE_XINE
/* Define to 1 if your processor stores words with the most significant byte
/* Define if your processor stores words with the most significant byte
first (like Motorola and SPARC, unlike Intel and VAX). */
#cmakedefine WORDS_BIGENDIAN

View File

@@ -0,0 +1,51 @@
#!/bin/sh
BASE_DIR=`dirname $0`
OPENCV_TEST_PATH=$BASE_DIR/@TEST_PATH@
OPENCV_TEST_DATA_PATH=$BASE_DIR/sdk/etc/testdata/
if [ $# -ne 1 ]; then
echo "Device architecture is not preset in command line"
echo "Tests are available for architectures: `ls -m ${OPENCV_TEST_PATH}`"
echo "Usage: $0 <target_device_arch>"
exit 1
else
TARGET_ARCH=$1
fi
if [ -z `which adb` ]; then
echo "adb command was not found in PATH"
exit 1
fi
adb push $OPENCV_TEST_DATA_PATH /sdcard/opencv_testdata
adb shell "mkdir -p /data/local/tmp/opencv_test"
SUMMARY_STATUS=0
for t in "$OPENCV_TEST_PATH/$TARGET_ARCH/"opencv_test_* "$OPENCV_TEST_PATH/$TARGET_ARCH/"opencv_perf_*;
do
test_name=`basename "$t"`
report="$test_name-`date --rfc-3339=date`.xml"
adb push $t /data/local/tmp/opencv_test/
adb shell "export OPENCV_TEST_DATA_PATH=/sdcard/opencv_testdata && /data/local/tmp/opencv_test/$test_name --perf_min_samples=1 --perf_force_samples=1 --gtest_output=xml:/data/local/tmp/opencv_test/$report"
adb pull "/data/local/tmp/opencv_test/$report" $report
TEST_STATUS=0
if [ -e $report ]; then
if [ `grep -c "<fail" $report` -ne 0 ]; then
TEST_STATUS=2
fi
else
TEST_STATUS=3
fi
if [ $TEST_STATUS -ne 0 ]; then
SUMMARY_STATUS=$TEST_STATUS
fi
done
if [ $SUMMARY_STATUS -eq 0 ]; then
echo "All OpenCV tests finished successfully"
else
echo "OpenCV tests finished with status $SUMMARY_STATUS"
fi
exit $SUMMARY_STATUS
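
Illustrative usage of this script, assuming an installed SDK and a device reachable over adb (the argument must name an architecture directory under the test path), e.g. `sh opencv_run_all_tests.sh armeabi-v7a`. The script pushes the test data and each test binary to the device, runs them one by one, and pulls back an XML report per test; a non-zero exit status means at least one report contained failures or was missing.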

View File

@@ -382,7 +382,7 @@ result.
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_6, this, mLoaderCallback);
}
#. Defines that your activity implements ``CvViewFrameListener2`` interface and fix activity related
#. Defines that your activity implements ``CvCameraViewListener2`` interface and fix activity related
errors by defining the missing methods. For this activity define ``onCreate``, ``onDestroy`` and
``onPause`` and implement them according to the code snippet below. Fix errors by adding the required
imports.
@@ -432,7 +432,7 @@ result.
Let's discuss the most important steps. Every Android application with UI must implement an Activity
and a View. In the first steps we create a blank activity and a default view layout. The simplest
OpenCV-centric application must implement OpenCV initialization, create its own view to show
preview from camera and implements ``CvViewFrameListener2`` interface to get frames from camera and
preview from camera and implements ``CvCameraViewListener2`` interface to get frames from camera and
process them.
First of all we create our application view using xml layout. Our layout consists of the only

View File

@@ -114,7 +114,7 @@ void computeProjectiveMatrix( const Mat& ksi, Mat& Rt )
{
CV_Assert( ksi.size() == Size(1,6) && ksi.type() == CV_64FC1 );
#if defined(HAVE_EIGEN) && EIGEN_WORLD_VERSION == 3
#if defined(HAVE_EIGEN) && EIGEN_WORLD_VERSION == 3 && (!defined _MSC_VER || !defined _M_X64 || _MSC_VER > 1500)
const double* ksi_ptr = reinterpret_cast<const double*>(ksi.ptr(0));
Eigen::Matrix<double,4,4> twist, g;
twist << 0., -ksi_ptr[2], ksi_ptr[1], ksi_ptr[3],
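
For background (a summary, not part of the diff): ksi here is a 6-vector twist \xi = (\omega, \nu) parameterizing a rigid motion, and the code assembles its 4x4 matrix form before exponentiating it with Eigen:

    \hat{\xi} = \begin{pmatrix} [\omega]_\times & \nu \\ 0 & 0 \end{pmatrix}, \qquad Rt = \exp(\hat{\xi})

where [\omega]_\times is the skew-symmetric cross-product matrix; the coefficients visible in the snippet (0, -ksi[2], ksi[1], ksi[3], ...) are exactly its first row. The extra guard disables this Eigen path for 64-bit builds with MSVC 2008 and older.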

View File

@@ -158,7 +158,7 @@ enum {
CV_StsVecLengthErr= -28, /* incorrect vector length */
CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */
CV_StsKernelStructContentErr= -30, /* incorr. transform kernel content */
CV_StsFilterOffsetErr= -31, /* incorrect filter ofset value */
CV_StsFilterOffsetErr= -31, /* incorrect filter offset value */
CV_StsBadSize= -201, /* the input/output structure size is incorrect */
CV_StsDivByZero= -202, /* division by zero */
CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */

View File

@@ -44,22 +44,27 @@
#include "gl_core_3_1.hpp"
#ifdef HAVE_OPENGL
#if defined(__APPLE__)
#include <mach-o/dyld.h>
#ifdef __APPLE__
#include <dlfcn.h>
static void* AppleGLGetProcAddress (const char* name)
{
static const struct mach_header* image = 0;
if (!image)
image = NSAddImage("/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL", NSADDIMAGE_OPTION_RETURN_ON_ERROR);
static bool initialized = false;
static void * handle = NULL;
if (!handle)
{
if (!initialized)
{
initialized = true;
const char * const path = "/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL";
// prepend a '_' for the Unix C symbol mangling convention
String symbolName = "_";
symbolName += String(name);
NSSymbol symbol = image ? NSLookupSymbolInImage(image, &symbolName[0], NSLOOKUPSYMBOLINIMAGE_OPTION_BIND | NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR) : 0;
return symbol ? NSAddressOfSymbol(symbol) : 0;
handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL);
}
if (!handle)
return NULL;
}
return dlsym(handle, name);
}
#endif // __APPLE__
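
Background on the replacement above: NSAddImage()/NSLookupSymbolInImage() belong to the long-deprecated mach-o dyld API, while dlopen()/dlsym() is the supported interface. dlsym() also takes plain C symbol names, so the leading underscore the old code prepended is no longer needed; RTLD_LAZY defers symbol resolution until first use, and RTLD_GLOBAL makes the framework's symbols available to subsequent lookups.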

View File

@@ -243,7 +243,14 @@ PERF_TEST_P(Sz_Type_Op, AlphaComp,
TEST_CYCLE() cv::cuda::alphaComp(d_img1, d_img2, dst, alpha_op);
CUDA_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
if (CV_MAT_DEPTH(type) < CV_32F)
{
CUDA_SANITY_CHECK(dst, 1);
}
else
{
CUDA_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
}
}
else
{

View File

@@ -42,8 +42,6 @@
#if !defined CUDA_DISABLER
#include <utility>
#include <algorithm>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/transform.hpp"
@@ -463,7 +461,10 @@ namespace canny
count = min(count, map.cols * map.rows);
std::swap(st1, st2);
//std::swap(st1, st2);
short2* tmp = st1;
st1 = st2;
st2 = tmp;
}
}
}

View File

@@ -405,13 +405,15 @@ CUDA_TEST_P(OpticalFlowBM, Accuracy)
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::resize(frame0, frame0, cv::Size(), 0.5, 0.5);
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::resize(frame1, frame1, cv::Size(), 0.5, 0.5);
cv::Size block_size(16, 16);
cv::Size block_size(8, 8);
cv::Size shift_size(1, 1);
cv::Size max_range(16, 16);
cv::Size max_range(8, 8);
cv::cuda::GpuMat d_velx, d_vely, buf;
cv::cuda::calcOpticalFlowBM(loadMat(frame0), loadMat(frame1),

View File

@@ -118,18 +118,21 @@ bool TiffDecoder::readHeader()
bool result = false;
close();
TIFF* tif = TIFFOpen( m_filename.c_str(), "rb" );
// TIFFOpen() mode flags are different to fopen(). A 'b' in mode "rb" has no effect when reading.
// http://www.remotesensing.org/libtiff/man/TIFFOpen.3tiff.html
TIFF* tif = TIFFOpen( m_filename.c_str(), "r" );
if( tif )
{
int wdth = 0, hght = 0, photometric = 0;
uint32 wdth = 0, hght = 0;
uint16 photometric = 0;
m_tif = tif;
if( TIFFGetField( tif, TIFFTAG_IMAGEWIDTH, &wdth ) &&
TIFFGetField( tif, TIFFTAG_IMAGELENGTH, &hght ) &&
TIFFGetField( tif, TIFFTAG_PHOTOMETRIC, &photometric ))
{
int bpp=8, ncn = photometric > 1 ? 3 : 1;
uint16 bpp=8, ncn = photometric > 1 ? 3 : 1;
TIFFGetField( tif, TIFFTAG_BITSPERSAMPLE, &bpp );
TIFFGetField( tif, TIFFTAG_SAMPLESPERPIXEL, &ncn );
@@ -195,12 +198,12 @@ bool TiffDecoder::readData( Mat& img )
if( m_tif && m_width && m_height )
{
TIFF* tif = (TIFF*)m_tif;
int tile_width0 = m_width, tile_height0 = 0;
uint32 tile_width0 = m_width, tile_height0 = 0;
int x, y, i;
int is_tiled = TIFFIsTiled(tif);
int photometric;
uint16 photometric;
TIFFGetField( tif, TIFFTAG_PHOTOMETRIC, &photometric );
int bpp = 8, ncn = photometric > 1 ? 3 : 1;
uint16 bpp = 8, ncn = photometric > 1 ? 3 : 1;
TIFFGetField( tif, TIFFTAG_BITSPERSAMPLE, &bpp );
TIFFGetField( tif, TIFFTAG_SAMPLESPERPIXEL, &ncn );
const int bitsPerByte = 8;
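
The integer-type changes above are the substance of this fix: TIFFGetField() writes through a varargs pointer using the size declared for the tag, and TIFFTAG_BITSPERSAMPLE, TIFFTAG_SAMPLESPERPIXEL and TIFFTAG_PHOTOMETRIC are 16-bit tags, so passing an int* fills only two of its four bytes. On a little-endian host a zero-initialized int happens to read back correctly, but on a big-endian host the two written bytes land at the wrong end and the value comes back scrambled. A minimal sketch of the corrected pattern (assuming libtiff's tiffio.h):

    #include <tiffio.h> // libtiff: defines TIFF, uint16, TIFFGetField

    static uint16 readBitsPerSample(TIFF* tif)
    {
        uint16 bpp = 8; // default used when the tag is absent
        TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bpp); // destination type matches the tag
        return bpp;
    }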

View File

@@ -412,8 +412,8 @@ TEST(Highgui_Tiff, decode_tile16384x16384)
try
{
cv::imread(file3);
EXPECT_NO_THROW(cv::imread(file4));
cv::imread(file3, IMREAD_UNCHANGED);
EXPECT_NO_THROW(cv::imread(file4, IMREAD_UNCHANGED));
}
catch(const std::bad_alloc&)
{
@@ -423,6 +423,54 @@ TEST(Highgui_Tiff, decode_tile16384x16384)
remove(file3.c_str());
remove(file4.c_str());
}
TEST(Highgui_Tiff, write_read_16bit_big_little_endian)
{
// see issue #2601 "16-bit Grayscale TIFF Load Failures Due to Buffer Underflow and Endianness"
// Setup data for two minimal 16-bit grayscale TIFF files in both endian formats
uchar tiff_sample_data[2][86] = { {
// Little endian
0x49, 0x49, 0x2a, 0x00, 0x0c, 0x00, 0x00, 0x00, 0xad, 0xde, 0xef, 0xbe, 0x06, 0x00, 0x00, 0x01,
0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x01, 0x00,
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x01, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00,
0x00, 0x00, 0x06, 0x01, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x01,
0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x17, 0x01, 0x04, 0x00, 0x01, 0x00,
0x00, 0x00, 0x04, 0x00, 0x00, 0x00 }, {
// Big endian
0x4d, 0x4d, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x0c, 0xde, 0xad, 0xbe, 0xef, 0x00, 0x06, 0x01, 0x00,
0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00, 0x01, 0x01, 0x00, 0x03, 0x00, 0x00,
0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x02, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x10,
0x00, 0x00, 0x01, 0x06, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x11,
0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x01, 0x17, 0x00, 0x04, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0x04 }
};
// Test imread() for both a little endian TIFF and big endian TIFF
for (int i = 0; i < 2; i++)
{
string filename = cv::tempfile(".tiff");
// Write sample TIFF file
FILE* fp = fopen(filename.c_str(), "wb");
ASSERT_TRUE(fp != NULL);
ASSERT_EQ((size_t)1, fwrite(tiff_sample_data[i], 86, 1, fp));
fclose(fp);
Mat img = imread(filename, IMREAD_UNCHANGED);
EXPECT_EQ(1, img.rows);
EXPECT_EQ(2, img.cols);
EXPECT_EQ(CV_16U, img.type());
EXPECT_EQ(sizeof(ushort), img.elemSize());
EXPECT_EQ(1, img.channels());
EXPECT_EQ(0xDEAD, img.at<ushort>(0,0));
EXPECT_EQ(0xBEEF, img.at<ushort>(0,1));
remove(filename.c_str());
}
}
#endif
#ifdef HAVE_WEBP
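
To make the embedded bytes in the test above easier to read: a TIFF file opens with a two-byte order mark, "II" (0x49 0x49) for little-endian and "MM" (0x4d 0x4d) for big-endian, followed by the magic number 42 encoded in that order; both samples then describe the same 2x1-pixel 16-bit grayscale image whose pixels are 0xDEAD and 0xBEEF. A small helper for illustration (not part of the test):

    // Classify a TIFF header's byte order from its first two bytes.
    static bool tiffIsBigEndian(const unsigned char* header)
    {
        return header[0] == 'M' && header[1] == 'M'; // "II" would be little-endian
    }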

View File

@@ -87,7 +87,6 @@ public class TermCriteria {
@Override
public String toString() {
if (this == null) return "null";
return "{ type: " + type + ", maxCount: " + maxCount + ", epsilon: " + epsilon + "}";
}
}
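
Note on the deleted line: inside an instance method `this` can never be null in Java, so the removed check was unreachable dead code.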

View File

@@ -61,6 +61,7 @@ TEST(CUDA_BruteForceNonLocalMeans, Regression)
cv::Mat bgr = readImage("../gpu/denoising/lena_noised_gaussian_sigma=20_multi_0.png", cv::IMREAD_COLOR);
ASSERT_FALSE(bgr.empty());
cv::resize(bgr, bgr, cv::Size(256, 256));
cv::Mat gray;
cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
@@ -77,6 +78,8 @@
cv::Mat bgr_gold = readImage("../gpu/denoising/nlm_denoised_lena_bgr.png", cv::IMREAD_COLOR);
cv::Mat gray_gold = readImage("../gpu/denoising/nlm_denoised_lena_gray.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(bgr_gold.empty() || gray_gold.empty());
cv::resize(bgr_gold, bgr_gold, cv::Size(256, 256));
cv::resize(gray_gold, gray_gold, cv::Size(256, 256));
EXPECT_MAT_NEAR(bgr_gold, dbgr, 1e-4);
EXPECT_MAT_NEAR(gray_gold, dgray, 1e-4);

View File

@@ -21,6 +21,7 @@ public class CvNativeActivity extends Activity {
System.loadLibrary("native_activity");
Intent intent = new Intent(CvNativeActivity.this, android.app.NativeActivity.class);
CvNativeActivity.this.startActivity(intent);
CvNativeActivity.this.finish();
} break;
default:
{
@@ -34,7 +35,7 @@ public class CvNativeActivity extends Activity {
Log.i(TAG, "Instantiated new " + this.getClass());
}
@Override
@Override
public void onResume()
{
super.onResume();
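
Note: calling finish() right after startActivity() removes this stub loader activity from the back stack, so pressing Back in the launched native activity exits the application instead of returning to an empty screen.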

View File

@@ -7,6 +7,6 @@ FIND_PACKAGE( OpenCV REQUIRED )
find_package (OpenGL REQUIRED)
ADD_EXECUTABLE(OpenGL_Qt_Binding main.cpp)
ADD_EXECUTABLE(OpenGL_Qt_Binding qt_opengl.cpp)
TARGET_LINK_LIBRARIES(OpenGL_Qt_Binding ${OpenCV_LIBS} ${OPENGL_LIBRARIES} )
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cube4.avi ${CMAKE_CURRENT_BINARY_DIR}/cube4.avi COPYONLY)

View File

@@ -1,271 +0,0 @@
//Yannick Verdie 2010
//--- Please read help() below: ---
#include <iostream>
#include <vector>
#include <opencv2/core/core_c.h>
#include <opencv2/calib3d/calib3d_c.h>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/legacy/compat.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h>
#undef small
#undef min
#undef max
#undef abs
#endif
#ifdef __APPLE__
#include <OpenGL/gl.h>
#else
#include <GL/gl.h>
#endif
using namespace std;
using namespace cv;
static void help()
{
cout << "\nThis demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
" and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
"It works off of the video: cube4.avi\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n\n"
" 1). This demo is mainly based on work from Javier Barandiaran Martirena\n"
" See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
" 2). This is a demo to illustrate how to use **OpenGL Callback**.\n"
" 3). You need Qt binding to compile this sample with OpenGL support enabled.\n"
" 4). The features' detection is very basic and could highly be improved \n"
" (basic thresholding tuned for the specific video) but 2).\n"
" 5) THANKS TO Google Summer of Code 2010 for supporting this work!\n" << endl;
}
#define FOCAL_LENGTH 600
#define CUBE_SIZE 10
static void renderCube(float size)
{
glBegin(GL_QUADS);
// Front Face
glNormal3f( 0.0f, 0.0f, 1.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( size, 0.0f, 0.0f);
glVertex3f( size, size, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
// Back Face
glNormal3f( 0.0f, 0.0f,-1.0f);
glVertex3f( 0.0f, 0.0f, size);
glVertex3f( 0.0f, size, size);
glVertex3f( size, size, size);
glVertex3f( size, 0.0f, size);
// Top Face
glNormal3f( 0.0f, 1.0f, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
glVertex3f( size, size, 0.0f);
glVertex3f( size, size, size);
glVertex3f( 0.0f, size, size);
// Bottom Face
glNormal3f( 0.0f,-1.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, size);
glVertex3f( size, 0.0f, size);
glVertex3f( size, 0.0f, 0.0f);
// Right face
glNormal3f( 1.0f, 0.0f, 0.0f);
glVertex3f( size, 0.0f, 0.0f);
glVertex3f( size, 0.0f, size);
glVertex3f( size, size, size);
glVertex3f( size, size, 0.0f);
// Left Face
glNormal3f(-1.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
glVertex3f( 0.0f, size, size);
glVertex3f( 0.0f, 0.0f, size);
glEnd();
}
static void on_opengl(void* param)
{
//Draw the object with the estimated pose
glLoadIdentity();
glScalef( 1.0f, 1.0f, -1.0f);
glMultMatrixf( (float*)param );
glEnable( GL_LIGHTING );
glEnable( GL_LIGHT0 );
glEnable( GL_BLEND );
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
renderCube( CUBE_SIZE );
glDisable(GL_BLEND);
glDisable( GL_LIGHTING );
}
static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
{
//Create the model pointss
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
{
cvtColor(source, grayImage, COLOR_RGB2GRAY);
GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(grayImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);
Point p;
vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));
if (contours.size() == srcImagePoints_temp.size())
{
for(size_t i = 0 ; i<contours.size(); i++ )
{
p.x = p.y = 0;
for(size_t j = 0 ; j<contours[i].size(); j++ )
p+=contours[i][j];
srcImagePoints_temp.at(i)=cvPoint2D32f(float(p.x)/contours[i].size(),float(p.y)/contours[i].size());
}
//Need to keep the same order
//> y = 0
//> x = 1
//< x = 2
//< y = 3
//get point 0;
size_t index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
index = i;
}
srcImagePoints->at(0) = srcImagePoints_temp.at(index);
//get point 1;
index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
index = i;
}
srcImagePoints->at(1) = srcImagePoints_temp.at(index);
//get point 2;
index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
index = i;
}
srcImagePoints->at(2) = srcImagePoints_temp.at(index);
//get point 3;
index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
index = i;
}
srcImagePoints->at(3) = srcImagePoints_temp.at(index);
Mat Msource = source;
stringstream ss;
for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
{
ss<<i;
circle(Msource,srcImagePoints->at(i),5,Scalar(0,0,255));
putText(Msource,ss.str(),srcImagePoints->at(i),FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,255));
ss.str("");
//new coordinate system in the middle of the frame and reversed (camera coordinate system)
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source.cols/2,source.rows/2-srcImagePoints_temp.at(i).y);
}
}
}
static void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)
{
//coordinate system returned is relative to the first 3D input point
for (int f=0; f<3; f++)
{
for (int c=0; c<3; c++)
{
posePOSIT[c*4+f] = rotationMatrix[f*3+c]; //transposed
}
}
posePOSIT[3] = 0.0;
posePOSIT[7] = 0.0;
posePOSIT[11] = 0.0;
posePOSIT[12] = translationVector[0];
posePOSIT[13] = translationVector[1];
posePOSIT[14] = translationVector[2];
posePOSIT[15] = 1.0;
}
int main(void)
{
help();
VideoCapture video("cube4.avi");
CV_Assert(video.isOpened());
Mat source, grayImage;
video >> source;
namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints);
//Create the POSIT object with the model points
CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );
CvMatr32f rotation_matrix = new float[9];
CvVect32f translation_vector = new float[3];
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
while(waitKey(33) != 27)
{
video >> source;
imshow("original",source);
foundCorners(&srcImagePoints, source, grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
imshow("POSIT",source);
if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
video.set(CAP_PROP_POS_AVI_RATIO, 0);
}
destroyAllWindows();
cvReleasePOSITObject(&positObject);
return 0;
}

View File

@@ -0,0 +1,268 @@
// Yannick Verdie 2010
// --- Please read help() below: ---
#include <iostream>
#include <vector>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/legacy/compat.hpp>
#ifdef __APPLE__
#include <OpenGL/gl.h>
#else
#include <GL/gl.h>
#endif
using namespace std;
using namespace cv;
static void help()
{
cout << "This demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
"and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
"It works off of the video: cube4.avi\n"
"Using OpenCV version " << CV_VERSION << "\n\n"
" 1) This demo is mainly based on work from Javier Barandiaran Martirena\n"
" See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
" 2) This is a demo to illustrate how to use **OpenGL Callback**.\n"
" 3) You need Qt binding to compile this sample with OpenGL support enabled.\n"
" 4) The features' detection is very basic and could highly be improved\n"
" (basic thresholding tuned for the specific video) but 2).\n"
" 5) Thanks to Google Summer of Code 2010 for supporting this work!\n" << endl;
}
#define FOCAL_LENGTH 600
#define CUBE_SIZE 0.5
static void renderCube(float size)
{
glBegin(GL_QUADS);
// Front Face
glNormal3f( 0.0f, 0.0f, 1.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( size, 0.0f, 0.0f);
glVertex3f( size, size, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
// Back Face
glNormal3f( 0.0f, 0.0f,-1.0f);
glVertex3f( 0.0f, 0.0f, size);
glVertex3f( 0.0f, size, size);
glVertex3f( size, size, size);
glVertex3f( size, 0.0f, size);
// Top Face
glNormal3f( 0.0f, 1.0f, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
glVertex3f( size, size, 0.0f);
glVertex3f( size, size, size);
glVertex3f( 0.0f, size, size);
// Bottom Face
glNormal3f( 0.0f,-1.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, size);
glVertex3f( size, 0.0f, size);
glVertex3f( size, 0.0f, 0.0f);
// Right face
glNormal3f( 1.0f, 0.0f, 0.0f);
glVertex3f( size, 0.0f, 0.0f);
glVertex3f( size, 0.0f, size);
glVertex3f( size, size, size);
glVertex3f( size, size, 0.0f);
// Left Face
glNormal3f(-1.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, size, 0.0f);
glVertex3f( 0.0f, size, size);
glVertex3f( 0.0f, 0.0f, size);
glEnd();
}
static void on_opengl(void* param)
{
//Draw the object with the estimated pose
glLoadIdentity();
glScalef( 1.0f, 1.0f, -1.0f);
glMultMatrixf( (float*)param );
glEnable( GL_LIGHTING );
glEnable( GL_LIGHT0 );
glEnable( GL_BLEND );
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
renderCube( CUBE_SIZE );
glDisable(GL_BLEND);
glDisable( GL_LIGHTING );
}
static void initPOSIT(std::vector<CvPoint3D32f> * modelPoints)
{
// Create the model points
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); // The first must be (0, 0, 0)
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
static void foundCorners(vector<CvPoint2D32f> * srcImagePoints, const Mat & source, Mat & grayImage)
{
cvtColor(source, grayImage, COLOR_RGB2GRAY);
GaussianBlur(grayImage, grayImage, Size(11, 11), 0, 0);
normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
Mat MgrayImage = grayImage;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
Point p;
vector<CvPoint2D32f> srcImagePoints_temp(4, cvPoint2D32f(0, 0));
if (contours.size() == srcImagePoints_temp.size())
{
for (size_t i = 0; i < contours.size(); i++ )
{
p.x = p.y = 0;
for (size_t j = 0 ; j < contours[i].size(); j++)
p += contours[i][j];
srcImagePoints_temp.at(i) = cvPoint2D32f(float(p.x) / contours[i].size(), float(p.y) / contours[i].size());
}
// Need to keep the same order
// > y = 0
// > x = 1
// < x = 2
// < y = 3
// get point 0;
size_t index = 0;
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
index = i;
srcImagePoints->at(0) = srcImagePoints_temp.at(index);
// get point 1;
index = 0;
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
index = i;
srcImagePoints->at(1) = srcImagePoints_temp.at(index);
// get point 2;
index = 0;
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
index = i;
srcImagePoints->at(2) = srcImagePoints_temp.at(index);
// get point 3;
index = 0;
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
index = i;
srcImagePoints->at(3) = srcImagePoints_temp.at(index);
Mat Msource = source;
stringstream ss;
for (size_t i = 0; i<srcImagePoints_temp.size(); i++ )
{
ss << i;
circle(Msource, srcImagePoints->at(i), 5, Scalar(0, 0, 255));
putText(Msource, ss.str(), srcImagePoints->at(i), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255));
ss.str("");
// new coordinate system in the middle of the frame and reversed (camera coordinate system)
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x - source.cols / 2,
source.rows / 2 - srcImagePoints_temp.at(i).y);
}
}
}
static void createOpenGLMatrixFrom(float * posePOSIT, const CvMatr32f & rotationMatrix,
const CvVect32f & translationVector)
{
// coordinate system returned is relative to the first 3D input point
for (int f = 0; f < 3; f++)
for (int c = 0; c < 3; c++)
posePOSIT[c * 4 + f] = rotationMatrix[f * 3 + c]; // transposed
posePOSIT[3] = 0.0f;
posePOSIT[7] = 0.0f;
posePOSIT[11] = 0.0f;
// OpenGL matrices are column-major: the translation occupies elements 12..14
posePOSIT[12] = translationVector[0];
posePOSIT[13] = translationVector[1];
posePOSIT[14] = translationVector[2];
posePOSIT[15] = 1.0f;
}
int main(void)
{
help();
string fileName = "cube4.avi";
VideoCapture video(fileName);
if (!video.isOpened())
{
cerr << "Video file " << fileName << " could not be opened" << endl;
return EXIT_FAILURE;
}
Mat source, grayImage;
video >> source;
namedWindow("Original", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_OPENGL | CV_WINDOW_FREERATIO);
resizeWindow("POSIT", source.cols, source.rows);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear).\n"
"This demo is only to illustrate how to use OpenGL callback.\n"
" -- Press ESC to exit.", 10000);
float OpenGLMatrix[] = { 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 };
setOpenGlContext("POSIT");
setOpenGlDrawCallback("POSIT", on_opengl, OpenGLMatrix);
vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints);
// Create the POSIT object with the model points
CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size());
CvMatr32f rotation_matrix = new float[9];
CvVect32f translation_vector = new float[3];
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1e-4f);
vector<CvPoint2D32f> srcImagePoints(4, cvPoint2D32f(0, 0));
while (waitKey(33) != 27)
{
video >> source;
if (source.empty())
break;
imshow("Original", source);
foundCorners(&srcImagePoints, source, grayImage);
cvPOSIT(positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector);
createOpenGLMatrixFrom(OpenGLMatrix, rotation_matrix, translation_vector);
updateWindow("POSIT");
if (video.get(CV_CAP_PROP_POS_AVI_RATIO) > 0.99)
video.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
}
setOpenGlDrawCallback("POSIT", NULL, NULL);
destroyAllWindows();
cvReleasePOSITObject(&positObject);
delete[]rotation_matrix;
delete[]translation_vector;
return EXIT_SUCCESS;
}
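
Background on the algorithm this sample drives (a summary, not part of the commit): POSIT (DeMenthon and Davis) estimates pose from at least four non-coplanar model points under a scaled-orthographic camera model, which is why foundCorners() re-centres the detected corners on the image centre before cvPOSIT() is called. Under that approximation a model point (X, Y, Z) near reference depth Z_0 projects as

    x \approx f \frac{X}{Z_0}, \qquad y \approx f \frac{Y}{Z_0}

with f the focal length in pixels (FOCAL_LENGTH above); POSIT solves this weak-perspective system and iterates until the pose converges.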