Merge remote-tracking branch 'upstream/master'

Conflicts:
	modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
This commit is contained in:
edgarriba
2014-08-05 12:59:47 +02:00
1011 changed files with 94453 additions and 196987 deletions

View File

@@ -10,7 +10,6 @@ if(NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_LIST_DIR)
#
# ----------------------------------------------------------------------------
add_subdirectory(c)
add_subdirectory(cpp)
add_subdirectory(gpu)
add_subdirectory(tapi)
@@ -60,7 +59,6 @@ if(MSVC)
endif()
endif()
add_subdirectory(c)
add_subdirectory(cpp)
# FIXIT: can't use cvconfig.h in samples: add_subdirectory(gpu)

View File

@@ -1,20 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>${EXECUTABLE_NAME}</string>
<key>CFBundleIdentifier</key>
<string>de.rwth-aachen.ient.FaceTracker</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1.0</string>
</dict>
</plist>

View File

@@ -1,86 +0,0 @@
#include <OpenCV/OpenCV.h>
#include <cassert>
#include <iostream>
const char * WINDOW_NAME = "Face Tracker";
const CFIndex CASCADE_NAME_LEN = 2048;
char CASCADE_NAME[CASCADE_NAME_LEN] = "~/opencv/data/haarcascades/haarcascade_frontalface_alt2.xml";
using namespace std;
int main (int argc, char * const argv[])
{
const int scale = 2;
// locate haar cascade from inside application bundle
// (this is the mac way to package application resources)
CFBundleRef mainBundle = CFBundleGetMainBundle ();
assert (mainBundle);
CFURLRef cascade_url = CFBundleCopyResourceURL (mainBundle, CFSTR("haarcascade_frontalface_alt2"), CFSTR("xml"), NULL);
assert (cascade_url);
Boolean got_it = CFURLGetFileSystemRepresentation (cascade_url, true,
reinterpret_cast<UInt8 *>(CASCADE_NAME), CASCADE_NAME_LEN);
if (! got_it)
abort ();
// create all necessary instances
cvNamedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE);
CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME, 0, 0, 0);
CvMemStorage* storage = cvCreateMemStorage(0);
assert (storage);
// you do own an iSight, don't you ?!?
if (! camera)
abort ();
// did we load the cascade?!?
if (! cascade)
abort ();
// get an initial frame and duplicate it for later work
IplImage * current_frame = cvQueryFrame (camera);
IplImage * draw_image = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 3);
IplImage * gray_image = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
IplImage * small_image = cvCreateImage(cvSize (current_frame->width / scale, current_frame->height / scale), IPL_DEPTH_8U, 1);
assert (current_frame && gray_image && draw_image);
// as long as there are images ...
while (current_frame = cvQueryFrame (camera))
{
// convert to gray and downsize
cvCvtColor (current_frame, gray_image, CV_BGR2GRAY);
cvResize (gray_image, small_image, CV_INTER_LINEAR);
// detect faces
CvSeq* faces = cvHaarDetectObjects (small_image, cascade, storage,
1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
cvSize (30, 30));
// draw faces
cvFlip (current_frame, draw_image, 1);
for (int i = 0; i < (faces ? faces->total : 0); i++)
{
CvRect* r = (CvRect*) cvGetSeqElem (faces, i);
CvPoint center;
int radius;
center.x = cvRound((small_image->width - r->width*0.5 - r->x) *scale);
center.y = cvRound((r->y + r->height*0.5)*scale);
radius = cvRound((r->width + r->height)*0.25*scale);
cvCircle (draw_image, center, radius, CV_RGB(0,255,0), 3, 8, 0 );
}
// just show the image
cvShowImage (WINDOW_NAME, draw_image);
// wait a tenth of a second for keypress and window drawing
int key = cvWaitKey (100);
if (key == 'q' || key == 'Q')
break;
}
// be nice and return no error
return 0;
}

View File

@@ -1,262 +0,0 @@
// !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 42;
objects = {
/* Begin PBXBuildFile section */
4D7DBE8E0C04A90C00D8835D /* FaceTracker.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 08FB7796FE84155DC02AAC07 /* FaceTracker.cpp */; };
4D95C9BE0C0577B200983E4D /* OpenCV.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4D06E1E00C039982004AF23F /* OpenCV.framework */; };
4D95C9D80C0577BD00983E4D /* OpenCV.framework in CopyFiles */ = {isa = PBXBuildFile; fileRef = 4D06E1E00C039982004AF23F /* OpenCV.framework */; };
4DBF87310C05731500880673 /* haarcascade_frontalface_alt2.xml in Resources */ = {isa = PBXBuildFile; fileRef = 4DBF87300C05731500880673 /* haarcascade_frontalface_alt2.xml */; };
/* End PBXBuildFile section */
/* Begin PBXCopyFilesBuildPhase section */
4D7DBE8F0C04A93300D8835D /* CopyFiles */ = {
isa = PBXCopyFilesBuildPhase;
buildActionMask = 2147483647;
dstPath = "";
dstSubfolderSpec = 10;
files = (
4D95C9D80C0577BD00983E4D /* OpenCV.framework in CopyFiles */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXCopyFilesBuildPhase section */
/* Begin PBXFileReference section */
08FB7796FE84155DC02AAC07 /* FaceTracker.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FaceTracker.cpp; sourceTree = "<group>"; };
4D06E1E00C039982004AF23F /* OpenCV.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = OpenCV.framework; path = ../../../OpenCV.framework; sourceTree = SOURCE_ROOT; };
4D4CDBCC0C0630060001A8A2 /* README.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = README.txt; sourceTree = "<group>"; };
4D7DBE570C04A8FF00D8835D /* FaceTracker.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = FaceTracker.app; sourceTree = BUILT_PRODUCTS_DIR; };
4D7DBE590C04A8FF00D8835D /* FaceTracker-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = "FaceTracker-Info.plist"; sourceTree = "<group>"; };
4DBF87300C05731500880673 /* haarcascade_frontalface_alt2.xml */ = {isa = PBXFileReference; fileEncoding = 5; lastKnownFileType = text.xml; name = haarcascade_frontalface_alt2.xml; path = ../../../data/haarcascades/haarcascade_frontalface_alt2.xml; sourceTree = SOURCE_ROOT; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
4D7DBE550C04A8FF00D8835D /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
4D95C9BE0C0577B200983E4D /* OpenCV.framework in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
08FB7794FE84155DC02AAC07 /* FrameworkTest */ = {
isa = PBXGroup;
children = (
4D4CDBCC0C0630060001A8A2 /* README.txt */,
08FB7795FE84155DC02AAC07 /* Source */,
4DBF872C0C0572BC00880673 /* Resources */,
4D9D40B00C04AC1600EEFFD0 /* Frameworks */,
1AB674ADFE9D54B511CA2CBB /* Products */,
);
name = FrameworkTest;
sourceTree = "<group>";
};
08FB7795FE84155DC02AAC07 /* Source */ = {
isa = PBXGroup;
children = (
08FB7796FE84155DC02AAC07 /* FaceTracker.cpp */,
);
name = Source;
sourceTree = "<group>";
};
1AB674ADFE9D54B511CA2CBB /* Products */ = {
isa = PBXGroup;
children = (
4D7DBE570C04A8FF00D8835D /* FaceTracker.app */,
);
name = Products;
sourceTree = "<group>";
};
4D9D40B00C04AC1600EEFFD0 /* Frameworks */ = {
isa = PBXGroup;
children = (
4D06E1E00C039982004AF23F /* OpenCV.framework */,
);
name = Frameworks;
sourceTree = "<group>";
};
4DBF872C0C0572BC00880673 /* Resources */ = {
isa = PBXGroup;
children = (
4DBF87300C05731500880673 /* haarcascade_frontalface_alt2.xml */,
4D7DBE590C04A8FF00D8835D /* FaceTracker-Info.plist */,
);
name = Resources;
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
4D7DBE560C04A8FF00D8835D /* FaceTracker */ = {
isa = PBXNativeTarget;
buildConfigurationList = 4D7DBE5A0C04A8FF00D8835D /* Build configuration list for PBXNativeTarget "FaceTracker" */;
buildPhases = (
4D7DBE530C04A8FF00D8835D /* Resources */,
4D7DBE540C04A8FF00D8835D /* Sources */,
4D7DBE550C04A8FF00D8835D /* Frameworks */,
4D7DBE8F0C04A93300D8835D /* CopyFiles */,
);
buildRules = (
);
dependencies = (
);
name = FaceTracker;
productName = FaceTracker;
productReference = 4D7DBE570C04A8FF00D8835D /* FaceTracker.app */;
productType = "com.apple.product-type.application";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
08FB7793FE84155DC02AAC07 /* Project object */ = {
isa = PBXProject;
buildConfigurationList = 1DEB923508733DC60010E9CD /* Build configuration list for PBXProject "FaceTracker" */;
hasScannedForEncodings = 1;
mainGroup = 08FB7794FE84155DC02AAC07 /* FrameworkTest */;
projectDirPath = "";
targets = (
4D7DBE560C04A8FF00D8835D /* FaceTracker */,
);
};
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
4D7DBE530C04A8FF00D8835D /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
4DBF87310C05731500880673 /* haarcascade_frontalface_alt2.xml in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
4D7DBE540C04A8FF00D8835D /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
4D7DBE8E0C04A90C00D8835D /* FaceTracker.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin XCBuildConfiguration section */
1DEB923608733DC60010E9CD /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
PREBINDING = NO;
SDKROOT = /Developer/SDKs/MacOSX10.4u.sdk;
};
name = Debug;
};
1DEB923708733DC60010E9CD /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ARCHS = (
ppc,
i386,
);
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
PREBINDING = NO;
SDKROOT = /Developer/SDKs/MacOSX10.4u.sdk;
};
name = Release;
};
4D7DBE5B0C04A8FF00D8835D /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
COPY_PHASE_STRIP = NO;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
"$(FRAMEWORK_SEARCH_PATHS_QUOTED_1)",
"$(FRAMEWORK_SEARCH_PATHS_QUOTED_2)",
);
FRAMEWORK_SEARCH_PATHS_QUOTED_1 = "\"$(SRCROOT)/../opencv\"";
FRAMEWORK_SEARCH_PATHS_QUOTED_2 = "\"$(SRCROOT)/../../..\"";
GCC_DYNAMIC_NO_PIC = NO;
GCC_ENABLE_FIX_AND_CONTINUE = YES;
GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
GCC_MODEL_TUNING = G5;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PRECOMPILE_PREFIX_HEADER = YES;
GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Carbon.framework/Headers/Carbon.h";
INFOPLIST_FILE = "FaceTracker-Info.plist";
INSTALL_PATH = "$(HOME)/Applications";
OTHER_LDFLAGS = (
"-framework",
Carbon,
);
PREBINDING = NO;
PRODUCT_NAME = FaceTracker;
WRAPPER_EXTENSION = app;
ZERO_LINK = YES;
};
name = Debug;
};
4D7DBE5C0C04A8FF00D8835D /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
COPY_PHASE_STRIP = YES;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
"$(FRAMEWORK_SEARCH_PATHS_QUOTED_1)",
"$(FRAMEWORK_SEARCH_PATHS_QUOTED_2)",
);
FRAMEWORK_SEARCH_PATHS_QUOTED_1 = "\"$(SRCROOT)/../opencv\"";
FRAMEWORK_SEARCH_PATHS_QUOTED_2 = "\"$(SRCROOT)/../../..\"";
GCC_ENABLE_FIX_AND_CONTINUE = NO;
GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
GCC_MODEL_TUNING = G5;
GCC_PRECOMPILE_PREFIX_HEADER = YES;
GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Carbon.framework/Headers/Carbon.h";
INFOPLIST_FILE = "FaceTracker-Info.plist";
INSTALL_PATH = "$(HOME)/Applications";
OTHER_LDFLAGS = (
"-framework",
Carbon,
);
PREBINDING = NO;
PRODUCT_NAME = FaceTracker;
WRAPPER_EXTENSION = app;
ZERO_LINK = NO;
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
1DEB923508733DC60010E9CD /* Build configuration list for PBXProject "FaceTracker" */ = {
isa = XCConfigurationList;
buildConfigurations = (
1DEB923608733DC60010E9CD /* Debug */,
1DEB923708733DC60010E9CD /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
4D7DBE5A0C04A8FF00D8835D /* Build configuration list for PBXNativeTarget "FaceTracker" */ = {
isa = XCConfigurationList;
buildConfigurations = (
4D7DBE5B0C04A8FF00D8835D /* Debug */,
4D7DBE5C0C04A8FF00D8835D /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = 08FB7793FE84155DC02AAC07 /* Project object */;
}

View File

@@ -1,35 +0,0 @@
FaceTracker/REAME.txt
2007-05-24, Mark Asbach <asbach@ient.rwth-aachen.de>
Objective:
This document is intended to get you up and running with an OpenCV Framework on Mac OS X
Building the OpenCV.framework:
In the main directory of the opencv distribution, you will find a shell script called
'make_frameworks.sh' that does all of the typical unixy './configure && make' stuff required
to build a universal binary framework. Invoke this script from Terminal.app, wait some minutes
and you are done.
OpenCV is a Private Framework:
On Mac OS X the concept of Framework bundles is meant to simplify distribution of shared libraries,
accompanying headers and documentation. There are however to subtly different 'flavours' of
Frameworks: public and private ones. The public frameworks get installed into the Frameworks
diretories in /Library, /System/Library or ~/Library and are meant to be shared amongst
applications. The private frameworks are only distributed as parts of an Application Bundle.
This makes it easier to deploy applications because they bring their own framework invisibly to
the user. No installation of the framework is necessary and different applications can bring
different versions of the same framework without any conflict.
Since OpenCV is still a moving target, it seems best to avoid any installation and versioning issues
for an end user. The OpenCV framework that currently comes with this demo application therefore
is a Private Framework.
Use it for targets that result in an Application Bundle:
Since it is a Private Framework, it must be copied to the Frameworks/ directory of an Application
Bundle, which means, it is useless for plain unix console applications. You should create a Carbon
or a Cocoa application target in XCode for your projects. Then add the OpenCV.framework just like
in this demo and add a Copy Files build phase to your target. Let that phase copy to the Framework
directory and drop the OpenCV.framework on the build phase (again just like in this demo code).
The resulting application bundle will be self contained and if you set compiler option correctly
(in the "Build" tab of the "Project Info" window you should find 'i386 ppc' for the architectures),
your application can just be copied to any OS 10.4 Mac and used without further installation.

View File

@@ -76,7 +76,13 @@ public class Puzzle15Activity extends Activity implements CvCameraViewListener,
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -19,9 +19,9 @@ add_subdirectory(native-activity)
# hello-android sample
if(HAVE_opencv_highgui)
ocv_include_modules_recurse(opencv_highgui opencv_core)
ocv_include_modules_recurse(opencv_imgcodecs opencv_videoio opencv_highgui opencv_core)
add_executable(hello-android hello-android/main.cpp)
target_link_libraries(hello-android ${OPENCV_LINKER_LIBS} opencv_highgui opencv_core)
target_link_libraries(hello-android ${OPENCV_LINKER_LIBS} opencv_imgcodecs opencv_videoio opencv_highgui opencv_core)
set_target_properties(hello-android PROPERTIES OUTPUT_NAME hello-android RUNTIME_OUTPUT_DIRECTORY "${EXECUTABLE_OUTPUT_PATH}")
add_dependencies(opencv_android_examples hello-android)
endif()

View File

@@ -92,7 +92,13 @@ public class CameraCalibrationActivity extends Activity implements CvCameraViewL
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_2, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_2, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -88,7 +88,13 @@ public class ColorBlobDetectionActivity extends Activity implements OnTouchListe
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -1,6 +1,6 @@
#include <DetectionBasedTracker_jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <opencv2/objdetect.hpp>
#include <string>
#include <vector>

View File

@@ -139,7 +139,13 @@ public class FdActivity extends Activity implements CvCameraViewListener2 {
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -1,4 +1,5 @@
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

View File

@@ -111,7 +111,13 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -3,7 +3,7 @@ set(sample example-native-activity)
if(BUILD_FAT_JAVA_LIB)
set(native_deps opencv_java)
else()
set(native_deps opencv_highgui opencv_imgproc)
set(native_deps opencv_videoio opencv_imgcodecs opencv_highgui opencv_imgproc)
endif()
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 9 ${ANDROID_SDK_TARGET} NATIVE_DEPS ${native_deps})

View File

@@ -39,6 +39,12 @@ public class CvNativeActivity extends Activity {
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
}

View File

@@ -76,7 +76,13 @@ public class Tutorial1Activity extends Activity implements CvCameraViewListener2
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -97,7 +97,13 @@ public class Tutorial2Activity extends Activity implements CvCameraViewListener2
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -88,7 +88,13 @@ public class Tutorial3Activity extends Activity implements CvCameraViewListener2
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {

View File

@@ -1,59 +0,0 @@
# ----------------------------------------------------------------------------
# CMake file for C samples. See root CMakeLists.txt
#
# ----------------------------------------------------------------------------
SET(OPENCV_C_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree
opencv_features2d opencv_calib3d opencv_legacy opencv_contrib)
ocv_check_dependencies(${OPENCV_C_SAMPLES_REQUIRED_DEPS})
if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
project(c_samples)
if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function")
endif()
ocv_include_modules(${OPENCV_C_SAMPLES_REQUIRED_DEPS})
# ---------------------------------------------
# Define executable targets
# ---------------------------------------------
MACRO(OPENCV_DEFINE_C_EXAMPLE name srcs)
set(the_target "example_${name}")
add_executable(${the_target} ${srcs})
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_C_SAMPLES_REQUIRED_DEPS})
set_target_properties(${the_target} PROPERTIES
OUTPUT_NAME "c-example-${name}"
PROJECT_LABEL "(EXAMPLE) ${name}")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_target} PROPERTIES FOLDER "samples//c")
endif()
if(WIN32)
if(MSVC AND NOT BUILD_SHARED_LIBS)
set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
endif()
install(TARGETS ${the_target}
RUNTIME DESTINATION "${OPENCV_SAMPLES_BIN_INSTALL_PATH}/c" COMPONENT samples)
endif()
ENDMACRO()
file(GLOB cpp_samples RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp *.c)
foreach(sample_filename ${cpp_samples})
get_filename_component(sample ${sample_filename} NAME_WE)
OPENCV_DEFINE_C_EXAMPLE(${sample} ${sample_filename})
endforeach()
endif()
if(INSTALL_C_EXAMPLES AND NOT WIN32)
file(GLOB C_SAMPLES *.c *.cpp *.jpg *.png *.data makefile.* build_all.sh *.dsp *.cmd )
install(FILES ${C_SAMPLES}
DESTINATION ${OPENCV_SAMPLES_SRC_INSTALL_PATH}/c
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ COMPONENT samples)
endif ()

Binary file not shown.

Before

Width:  |  Height:  |  Size: 372 B

View File

@@ -1,415 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install, copy or use the software.
//
// Copyright (C) 2009, Farhad Dadgostar
// Intel Corporation and third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <cstdio>
#include <cstring>
#include <ctime>
#include "opencv2/contrib/compat.hpp"
#include "opencv2/highgui/highgui_c.h"
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
static void help(char **argv)
{
std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
<< "Usage: " << std::endl <<
argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<
"Example: " << std::endl <<
argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<
" iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
"If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
"Please note: Background should not contain large surfaces with skin tone." <<
"\n\n ESC will stop\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< std::endl;
}
class ASDFrameHolder
{
private:
IplImage *image;
double timeStamp;
public:
ASDFrameHolder();
virtual ~ASDFrameHolder();
virtual void assignFrame(IplImage *sourceImage, double frameTime);
inline IplImage *getImage();
inline double getTimeStamp();
virtual void setImage(IplImage *sourceImage);
};
class ASDFrameSequencer
{
public:
virtual ~ASDFrameSequencer();
virtual IplImage *getNextImage();
virtual void close();
virtual bool isOpen();
virtual void getFrameCaption(char *caption);
};
class ASDCVFrameSequencer : public ASDFrameSequencer
{
protected:
CvCapture *capture;
public:
virtual IplImage *getNextImage();
virtual void close();
virtual bool isOpen();
};
class ASDFrameSequencerWebCam : public ASDCVFrameSequencer
{
public:
virtual bool open(int cameraIndex);
};
class ASDFrameSequencerVideoFile : public ASDCVFrameSequencer
{
public:
virtual bool open(const char *fileName);
};
class ASDFrameSequencerImageFile : public ASDFrameSequencer {
private:
char sFileNameMask[2048];
int nCurrentIndex, nStartIndex, nEndIndex;
public:
virtual void open(const char *fileNameMask, int startIndex, int endIndex);
virtual void getFrameCaption(char *caption);
virtual IplImage *getNextImage();
virtual void close();
virtual bool isOpen();
};
//-------------------- ASDFrameHolder -----------------------//
ASDFrameHolder::ASDFrameHolder( )
{
image = NULL;
timeStamp = 0;
}
ASDFrameHolder::~ASDFrameHolder( )
{
cvReleaseImage(&image);
}
void ASDFrameHolder::assignFrame(IplImage *sourceImage, double frameTime)
{
if (image != NULL)
{
cvReleaseImage(&image);
image = NULL;
}
image = cvCloneImage(sourceImage);
timeStamp = frameTime;
}
IplImage *ASDFrameHolder::getImage()
{
return image;
}
double ASDFrameHolder::getTimeStamp()
{
return timeStamp;
}
void ASDFrameHolder::setImage(IplImage *sourceImage)
{
image = sourceImage;
}
//-------------------- ASDFrameSequencer -----------------------//
ASDFrameSequencer::~ASDFrameSequencer()
{
close();
}
IplImage *ASDFrameSequencer::getNextImage()
{
return NULL;
}
void ASDFrameSequencer::close()
{
}
bool ASDFrameSequencer::isOpen()
{
return false;
}
void ASDFrameSequencer::getFrameCaption(char* /*caption*/) {
return;
}
IplImage* ASDCVFrameSequencer::getNextImage()
{
IplImage *image;
image = cvQueryFrame(capture);
if (image != NULL)
{
return cvCloneImage(image);
}
else
{
return NULL;
}
}
void ASDCVFrameSequencer::close()
{
if (capture != NULL)
{
cvReleaseCapture(&capture);
}
}
bool ASDCVFrameSequencer::isOpen()
{
return (capture != NULL);
}
//-------------------- ASDFrameSequencerWebCam -----------------------//
bool ASDFrameSequencerWebCam::open(int cameraIndex)
{
close();
capture = cvCaptureFromCAM(cameraIndex);
if (!capture)
{
return false;
}
else
{
return true;
}
}
//-------------------- ASDFrameSequencerVideoFile -----------------------//
bool ASDFrameSequencerVideoFile::open(const char *fileName)
{
close();
capture = cvCaptureFromFile(fileName);
if (!capture)
{
return false;
}
else
{
return true;
}
}
//-------------------- ASDFrameSequencerImageFile -----------------------//
void ASDFrameSequencerImageFile::open(const char *fileNameMask, int startIndex, int endIndex)
{
nCurrentIndex = startIndex-1;
nStartIndex = startIndex;
nEndIndex = endIndex;
std::sprintf(sFileNameMask, "%s", fileNameMask);
}
void ASDFrameSequencerImageFile::getFrameCaption(char *caption) {
std::sprintf(caption, sFileNameMask, nCurrentIndex);
}
IplImage* ASDFrameSequencerImageFile::getNextImage()
{
char fileName[2048];
nCurrentIndex++;
if (nCurrentIndex > nEndIndex)
return NULL;
std::sprintf(fileName, sFileNameMask, nCurrentIndex);
IplImage* img = cvLoadImage(fileName);
return img;
}
void ASDFrameSequencerImageFile::close()
{
nCurrentIndex = nEndIndex+1;
}
bool ASDFrameSequencerImageFile::isOpen()
{
return (nCurrentIndex <= nEndIndex);
}
static void putTextWithShadow(IplImage *img, const char *str, CvPoint point, CvFont *font, CvScalar color = CV_RGB(255, 255, 128))
{
cvPutText(img, str, cvPoint(point.x-1,point.y-1), font, CV_RGB(0, 0, 0));
cvPutText(img, str, point, font, color);
}
#define ASD_RGB_SET_PIXEL(pointer, r, g, b) { (*pointer) = (unsigned char)b; (*(pointer+1)) = (unsigned char)g; (*(pointer+2)) = (unsigned char)r; }
#define ASD_RGB_GET_PIXEL(pointer, r, g, b) {b = (unsigned char)(*(pointer)); g = (unsigned char)(*(pointer+1)); r = (unsigned char)(*(pointer+2));}
static void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gValue, int bValue)
{
int x, y, nWidth, nHeight;
double destX, destY, dx, dy;
uchar c;
unsigned char *pSrc;
nWidth = buffer->width;
nHeight = buffer->height;
dx = double(rgbDestImage->width)/double(nWidth);
dy = double(rgbDestImage->height)/double(nHeight);
destX = 0;
for (x = 0; x < nWidth; x++)
{
destY = 0;
for (y = 0; y < nHeight; y++)
{
c = ((uchar*)(buffer->imageData + buffer->widthStep*y))[x];
if (c)
{
pSrc = (unsigned char *)rgbDestImage->imageData + rgbDestImage->widthStep*int(destY) + (int(destX)*rgbDestImage->nChannels);
ASD_RGB_SET_PIXEL(pSrc, rValue, gValue, bValue);
}
destY += dy;
}
destY = 0;
destX += dx;
}
}
// Demo driver for CvAdaptiveSkinDetector: reads frames either from the
// default webcam (no/short argument list) or from a printf-style image
// sequence (mask, start index, end index), runs skin detection on each
// frame, overlays the mask in green and shows timing statistics.
int main(int argc, char** argv )
{
    IplImage *img, *filterMask = NULL;
    // 1 = sampling divider; erode+dilate morphology cleans up the mask.
    CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
    ASDFrameSequencer *sequencer;
    CvFont base_font;
    char caption[2048], s[256], windowName[256];
    long int clockTotal = 0, numFrames = 0;
    std::clock_t clock;  // shadows std::clock on purpose; holds the per-frame start tick
    if (argc < 4)
    {
        // Not enough arguments for an image sequence: fall back to the webcam.
        help(argv);
        sequencer = new ASDFrameSequencerWebCam();
        (dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);
        if (! sequencer->isOpen())
        {
            std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;
        }
    }
    else
    {
        sequencer = new ASDFrameSequencerImageFile();
        (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here
    }
    std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");
    cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
    cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);
    // Usage:
    // c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000
    std::cout << "Press ESC to stop." << std::endl << std::endl;
    while ((img = sequencer->getNextImage()) != 0)
    {
        numFrames++;
        // Lazily allocate the mask once the first frame's size is known.
        if (filterMask == NULL)
        {
            filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);
        }
        clock = std::clock();
        filter.process(img, filterMask);    // DETECT SKIN
        clockTotal += (std::clock() - clock);
        // Paint the detected skin pixels green on top of the frame.
        displayBuffer(img, filterMask, 0, 255, 0);
        sequencer->getFrameCaption(caption);
        std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);
        putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);
        std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);
        putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);
        cvShowImage (windowName, img);
        cvReleaseImage(&img);   // each frame is freshly loaded, so release it here
        if (cvWaitKey(1) == 27) // ESC stops the loop
            break;
    }
    sequencer->close();
    delete sequencer;
    cvReleaseImage(&filterMask);
    cvDestroyWindow(windowName);
    std::cout << "Finished, " << numFrames << " frames processed." << std::endl;
    return 0;
}

File diff suppressed because it is too large Load Diff

Binary file not shown.

Before

Width:  |  Height:  |  Size: 176 KiB

View File

@@ -1,241 +0,0 @@
// Background average sample code done with averages and done with codebooks
// (adapted from the OpenCV book sample)
//
// NOTE: To get the keyboard to work, you *have* to have one of the video windows be active
// and NOT the console window.
//
// Gary Bradski Oct 3, 2008.
//
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
************************************************** */
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
using namespace std;
using namespace cv;
//VARIABLES for CODEBOOK METHOD:
CvBGCodeBookModel* model = 0;  // global codebook background model, shared with the keyboard handlers in main()
const int NCHANNELS = 3;       // number of YUV channels tracked per codebook entry
bool ch[NCHANNELS]={true,true,true}; // This sets what channels should be adjusted for background bounds
// Prints usage and the interactive key bindings to stdout.
// Fix: "consol" -> "console" in the user-facing help text.
static void help()
{
    printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
        "Originally from the book: Learning OpenCV by O'Reilly press\n"
        "\nUSAGE:\n"
        " bgfg_codebook [--nframes(-nf)=300] [--movie_filename(-mf)=tree.avi] [--camera(-c), use camera or not]\n"
        "***Keep the focus on the video windows, NOT the console***\n\n"
        "INTERACTIVE PARAMETERS:\n"
        "\tESC,q,Q - quit the program\n"
        "\th    - print this help\n"
        "\tp    - pause toggle\n"
        "\ts    - single step\n"
        "\tr    - run mode (single step off)\n"
        "=== AVG PARAMS ===\n"
        "\t-    - bump high threshold UP by 0.25\n"
        "\t=    - bump high threshold DOWN by 0.25\n"
        "\t[    - bump low threshold UP by 0.25\n"
        "\t]    - bump low threshold DOWN by 0.25\n"
        "=== CODEBOOK PARAMS ===\n"
        "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
        "\ta    - adjust all 3 channels at once\n"
        "\tb    - adjust both 2 and 3 at once\n"
        "\ti,o  - bump upper threshold up,down by 1\n"
        "\tk,l  - bump lower threshold up,down by 1\n"
        "\tSPACE - reset the model\n"
        );
}
//
//USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
// cv::CommandLineParser specification: each entry is
// "{short long |default |description}".
const char *keys =
{
    "{nf nframes |300 |frames number}"
    "{c camera |false |use the camera or not}"
    "{mf movie_file|tree.avi |used movie video file}"
};
// Interactive demo of the codebook background-subtraction model: learns
// the background for the first nframesToLearnBG frames, then segments
// the foreground and lets the user tune the model thresholds per YUV
// channel from the keyboard.
int main(int argc, const char** argv)
{
    help();
    CommandLineParser parser(argc, argv, keys);
    int nframesToLearnBG = parser.get<int>("nf");
    bool useCamera = parser.has("c");
    string filename = parser.get<string>("mf");
    IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
    IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
    CvCapture* capture = 0;
    int c, n, nframes = 0;
    model = cvCreateBGCodeBookModel();
    //Set color thresholds to default values
    model->modMin[0] = 3;
    model->modMin[1] = model->modMin[2] = 3;
    model->modMax[0] = 10;
    model->modMax[1] = model->modMax[2] = 10;
    model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;
    bool pause = false;
    bool singlestep = false;
    if( useCamera )
    {
        printf("Capture from camera\n");
        capture = cvCaptureFromCAM( 0 );
    }
    else
    {
        printf("Capture from file %s\n",filename.c_str());
        capture = cvCreateFileCapture( filename.c_str() );
    }
    if( !capture )
    {
        printf( "Can not initialize video capturing\n\n" );
        help();
        return -1;
    }
    //MAIN PROCESSING LOOP:
    for(;;)
    {
        if( !pause )
        {
            rawImage = cvQueryFrame( capture );
            ++nframes;
            if(!rawImage)
                break;  // end of stream
        }
        if( singlestep )
            pause = true;  // stop again after advancing one frame
        //First time:
        if( nframes == 1 && rawImage )
        {
            // CODEBOOK METHOD ALLOCATION
            yuvImage = cvCloneImage(rawImage);
            ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
            ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
            cvSet(ImaskCodeBook,cvScalar(255));
            cvNamedWindow( "Raw", 1 );
            cvNamedWindow( "ForegroundCodeBook",1);
            cvNamedWindow( "CodeBook_ConnectComp",1);
        }
        // If we've got an rawImage and are good to go:
        if( rawImage )
        {
            cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
            //This is where we build our background model
            if( !pause && nframes-1 < nframesToLearnBG  )
                cvBGCodeBookUpdate( model, yuvImage );
            // Learning phase just ended: drop stale codebook entries once.
            if( nframes-1 == nframesToLearnBG  )
                cvBGCodeBookClearStale( model, model->t/2 );
            //Find the foreground if any
            if( nframes-1 >= nframesToLearnBG  )
            {
                // Find foreground by codebook method
                cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );
                // This part just to visualize bounding boxes and centers if desired
                cvCopy(ImaskCodeBook,ImaskCodeBookCC);
                cvSegmentFGMask( ImaskCodeBookCC );
            }
            //Display
            cvShowImage( "Raw", rawImage );
            cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
            cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);
        }
        // User input:
        c = cvWaitKey(10)&0xFF;
        c = tolower(c);
        // End processing on ESC, q or Q
        if(c == 27 || c == 'q')
            break;
        //Else check for user input
        switch( c )
        {
        case 'h':
            help();
            break;
        case 'p':
            pause = !pause;
            break;
        case 's':
            singlestep = !singlestep;
            pause = false;
            break;
        case 'r':
            pause = false;
            singlestep = false;
            break;
        case ' ':
            // Reset the model and restart background learning.
            cvBGCodeBookClearStale( model, 0 );
            nframes = 0;
            break;
            //CODEBOOK PARAMS
        case 'y': case '0':
        case 'u': case '1':
        case 'v': case '2':
        case 'a': case '3':
        case 'b':
            // Select which YUV channels the i/o/k/l threshold keys affect.
            ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';
            ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';
            ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';
            printf("CodeBook YUV Channels active: %d, %d, %d\n", ch[0], ch[1], ch[2] );
            break;
        case 'i': //modify max classification bounds (max bound goes higher)
        case 'o': //modify max classification bounds (max bound goes lower)
        case 'k': //modify min classification bounds (min bound goes lower)
        case 'l': //modify min classification bounds (min bound goes higher)
        {
            uchar* ptr = c == 'i' || c == 'o' ? model->modMax : model->modMin;
            for(n=0; n<NCHANNELS; n++)
            {
                if( ch[n] )
                {
                    int v = ptr[n] + (c == 'i' || c == 'l' ? 1 : -1);
                    ptr[n] = cv::saturate_cast<uchar>(v);  // clamp to [0,255]
                }
                printf("%d,", ptr[n]);
            }
            printf(" CodeBook %s Side\n", c == 'i' || c == 'o' ? "High" : "Low" );
        }
        break;
        }
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Raw" );
    cvDestroyWindow( "ForegroundCodeBook");
    cvDestroyWindow( "CodeBook_ConnectComp");
    return 0;
}

View File

@@ -1,755 +0,0 @@
#include "opencv2/video/background_segm.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <stdio.h>
/* Select appropriate case insensitive string comparison function: */
/* Windows/MSVC and POSIX spell these differently; normalize behind
 * MY_* names so the rest of the file is platform independent. */
#if defined WIN32 || defined _MSC_VER
# define MY_STRNICMP _strnicmp
# define MY_STRICMP _stricmp
# define MY_STRDUP _strdup
#else
# define MY_STRNICMP strncasecmp
# define MY_STRICMP strcasecmp
# define MY_STRDUP strdup
#endif
/* List of foreground (FG) DETECTION modules: */
/* Each module family below follows the same pattern: a factory
 * function type, a {create, nickname, description} record, and a
 * NULL-terminated table that main() scans by nickname. */
static CvFGDetector* cvCreateFGDetector0 () { return cvCreateFGDetectorBase(CV_BG_MODEL_FGD, NULL); }
static CvFGDetector* cvCreateFGDetector0Simple() { return cvCreateFGDetectorBase(CV_BG_MODEL_FGD_SIMPLE, NULL); }
static CvFGDetector* cvCreateFGDetector1 () { return cvCreateFGDetectorBase(CV_BG_MODEL_MOG, NULL); }
typedef struct DefModule_FGDetector
{
    CvFGDetector* (*create)();
    const char* nickname;
    const char* description;
} DefModule_FGDetector;
DefModule_FGDetector FGDetector_Modules[] =
{
    {cvCreateFGDetector0,"FG_0","Foreground Object Detection from Videos Containing Complex Background. ACM MM2003."},
    {cvCreateFGDetector0Simple,"FG_0S","Simplified version of FG_0"},
    {cvCreateFGDetector1,"FG_1","Adaptive background mixture models for real-time tracking. CVPR1999"},
    {NULL,NULL,NULL}
};
/* List of BLOB DETECTION modules: */
typedef struct DefModule_BlobDetector
{
    CvBlobDetector* (*create)();
    const char* nickname;
    const char* description;
} DefModule_BlobDetector;
DefModule_BlobDetector BlobDetector_Modules[] =
{
    {cvCreateBlobDetectorCC,"BD_CC","Detect new blob by tracking CC of FG mask"},
    {cvCreateBlobDetectorSimple,"BD_Simple","Detect new blob by uniform moving of connected components of FG mask"},
    {NULL,NULL,NULL}
};
/* List of BLOB TRACKING modules: */
typedef struct DefModule_BlobTracker
{
    CvBlobTracker* (*create)();
    const char* nickname;
    const char* description;
} DefModule_BlobTracker;
DefModule_BlobTracker BlobTracker_Modules[] =
{
    {cvCreateBlobTrackerCCMSPF,"CCMSPF","connected component tracking and MSPF resolver for collision"},
    {cvCreateBlobTrackerCC,"CC","Simple connected component tracking"},
    {cvCreateBlobTrackerMS,"MS","Mean shift algorithm "},
    {cvCreateBlobTrackerMSFG,"MSFG","Mean shift algorithm with FG mask using"},
    {cvCreateBlobTrackerMSPF,"MSPF","Particle filtering based on MS weight"},
    {NULL,NULL,NULL}
};
/* List of BLOB TRAJECTORY GENERATION modules: */
typedef struct DefModule_BlobTrackGen
{
    CvBlobTrackGen* (*create)();
    const char* nickname;
    const char* description;
} DefModule_BlobTrackGen;
DefModule_BlobTrackGen BlobTrackGen_Modules[] =
{
    {cvCreateModuleBlobTrackGenYML,"YML","Generate track record in YML format as synthetic video data"},
    {cvCreateModuleBlobTrackGen1,"RawTracks","Generate raw track record (x,y,sx,sy),()... in each line"},
    {NULL,NULL,NULL}
};
/* List of BLOB TRAJECTORY POST PROCESSING modules: */
typedef struct DefModule_BlobTrackPostProc
{
    CvBlobTrackPostProc* (*create)();
    const char* nickname;
    const char* description;
} DefModule_BlobTrackPostProc;
DefModule_BlobTrackPostProc BlobTrackPostProc_Modules[] =
{
    {cvCreateModuleBlobTrackPostProcKalman,"Kalman","Kalman filtering of blob position and size"},
    {NULL,"None","No post processing filter"},
//    {cvCreateModuleBlobTrackPostProcTimeAverRect,"TimeAverRect","Average by time using rectangle window"},
//    {cvCreateModuleBlobTrackPostProcTimeAverExp,"TimeAverExp","Average by time using exponential window"},
    {NULL,NULL,NULL}
};
/* List of BLOB TRAJECTORY ANALYSIS modules: */
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisDetector();
typedef struct DefModule_BlobTrackAnalysis
{
    CvBlobTrackAnalysis* (*create)();
    const char* nickname;
    const char* description;
} DefModule_BlobTrackAnalysis;
DefModule_BlobTrackAnalysis BlobTrackAnalysis_Modules[] =
{
    {cvCreateModuleBlobTrackAnalysisHistPVS,"HistPVS","Histogram of 5D feature vector analysis (x,y,vx,vy,state)"},
    {NULL,"None","No trajectory analiser"},
    {cvCreateModuleBlobTrackAnalysisHistP,"HistP","Histogram of 2D feature vector analysis (x,y)"},
    {cvCreateModuleBlobTrackAnalysisHistPV,"HistPV","Histogram of 4D feature vector analysis (x,y,vx,vy)"},
    {cvCreateModuleBlobTrackAnalysisHistSS,"HistSS","Histogram of 4D feature vector analysis (startpos,endpos)"},
    {cvCreateModuleBlobTrackAnalysisTrackDist,"TrackDist","Compare tracks directly"},
    {cvCreateModuleBlobTrackAnalysisIOR,"IOR","Integrator (by OR operation) of several analysers "},
    {NULL,NULL,NULL}
};
/* List of Blob Trajectory ANALYSIS modules: */
/*================= END MODULES DESCRIPTION ===================================*/
/* Run pipeline on all frames: */
/* Pulls frames from pCap, feeds them through the assembled
 * CvBlobTrackerAuto pipeline, visualizes the FG mask and tracked blobs,
 * and optionally writes both visualizations to AVI files.
 * ESC stops the loop; any other key single-steps; 'r' resumes running. */
static int RunBlobTrackingAuto( CvCapture* pCap, CvBlobTrackerAuto* pTracker,char* fgavi_name = NULL, char* btavi_name = NULL )
{
    int                     OneFrameProcess = 0;
    int                     key;
    int                     FrameNum = 0;
    CvVideoWriter*          pFGAvi = NULL;
    CvVideoWriter*          pBTAvi = NULL;
    //cvNamedWindow( "FG", 0 );
    /* Main loop: */
    for( FrameNum=0; pCap && (key=cvWaitKey(OneFrameProcess?0:1))!=27;
         FrameNum++)
    {   /* Main loop: */
        IplImage*   pImg  = NULL;
        IplImage*   pMask = NULL;
        if(key!=-1)
        {
            OneFrameProcess = 1;    // any key pauses into single-step mode
            if(key=='r')OneFrameProcess = 0;
        }
        pImg = cvQueryFrame(pCap);
        if(pImg == NULL) break;
        /* Process: */
        pTracker->Process(pImg, pMask);
        if(fgavi_name)
            if(pTracker->GetFGMask())
            {   /* Debug FG: */
                IplImage*           pFG = pTracker->GetFGMask();
                CvSize              S = cvSize(pFG->width,pFG->height);
                // NOTE(review): pI is a function-static scratch image that is
                // allocated once and intentionally never released.
                static IplImage*    pI = NULL;
                if(pI==NULL)pI = cvCreateImage(S,pFG->depth,3);
                cvCvtColor( pFG, pI, CV_GRAY2BGR );
                if(fgavi_name)
                {   /* Save fg to avi file: */
                    if(pFGAvi==NULL)
                    {
                        pFGAvi=cvCreateVideoWriter(
                            fgavi_name,
                            CV_FOURCC('x','v','i','d'),
                            25,
                            S );
                    }
                    cvWriteFrame( pFGAvi, pI );
                }
                if(pTracker->GetBlobNum()>0)
                {   /* Draw detected blobs: */
                    int i;
                    for(i=pTracker->GetBlobNum();i>0;i--)
                    {
                        CvBlob* pB = pTracker->GetBlob(i-1);
                        CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB));
                        CvSize  s = cvSize(MAX(1,cvRound(CV_BLOB_RX(pB))), MAX(1,cvRound(CV_BLOB_RY(pB))));
                        // Color encodes the tracker's confidence/state for the blob.
                        int c = cvRound(255*pTracker->GetState(CV_BLOB_ID(pB)));
                        cvEllipse( pI,
                            p,
                            s,
                            0, 0, 360,
                            CV_RGB(c,255-c,0), cvRound(1+(3*c)/255) );
                    }   /* Next blob: */;
                }
                cvNamedWindow( "FG",0);
                cvShowImage( "FG",pI);
            }   /* Debug FG. */
        /* Draw debug info: */
        if(pImg)
        {   /* Draw all information about test sequence: */
            char        str[1024];
            int         line_type = CV_AA;   // Change it to 8 to see non-antialiased graphics.
            CvFont      font;
            int         i;
            IplImage*   pI = cvCloneImage(pImg);
            cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, line_type );
            for(i=pTracker->GetBlobNum(); i>0; i--)
            {
                CvSize  TextSize;
                CvBlob* pB = pTracker->GetBlob(i-1);
                // Blob coordinates are scaled by 256 for sub-pixel ellipse drawing.
                CvPoint p = cvPoint(cvRound(pB->x*256),cvRound(pB->y*256));
                CvSize  s = cvSize(MAX(1,cvRound(CV_BLOB_RX(pB)*256)), MAX(1,cvRound(CV_BLOB_RY(pB)*256)));
                int c = cvRound(255*pTracker->GetState(CV_BLOB_ID(pB)));
                cvEllipse( pI,
                    p,
                    s,
                    0, 0, 360,
                    CV_RGB(c,255-c,0), cvRound(1+(3*0)/255), CV_AA, 8 );
                p.x >>= 8;
                p.y >>= 8;
                s.width >>= 8;
                s.height >>= 8;
                sprintf(str,"%03d",CV_BLOB_ID(pB));
                cvGetTextSize( str, &font, &TextSize, NULL );
                p.y -= s.height;
                cvPutText( pI, str, p, &font, CV_RGB(0,255,255));
                {
                    // Print the tracker's multi-line state description under the ID.
                    const char* pS = pTracker->GetStateDesc(CV_BLOB_ID(pB));
                    if(pS)
                    {
                        char* pStr = MY_STRDUP(pS);
                        char* pStrFree = pStr;
                        while (pStr && strlen(pStr) > 0)
                        {
                            char* str_next = strchr(pStr,'\n');
                            if(str_next)
                            {
                                str_next[0] = 0;
                                str_next++;
                            }
                            p.y += TextSize.height+1;
                            cvPutText( pI, pStr, p, &font, CV_RGB(0,255,255));
                            pStr = str_next;
                        }
                        free(pStrFree);
                    }
                }
            }   /* Next blob. */;
            cvNamedWindow( "Tracking", 0);
            cvShowImage( "Tracking",pI );
            if(btavi_name && pI)
            {   /* Save to avi file: */
                CvSize      S = cvSize(pI->width,pI->height);
                if(pBTAvi==NULL)
                {
                    pBTAvi=cvCreateVideoWriter(
                        btavi_name,
                        CV_FOURCC('x','v','i','d'),
                        25,
                        S );
                }
                cvWriteFrame( pBTAvi, pI );
            }
            cvReleaseImage(&pI);
        }   /* Draw all information about test sequence. */
    }   /* Main loop. */
    if(pFGAvi)cvReleaseVideoWriter( &pFGAvi );
    if(pBTAvi)cvReleaseVideoWriter( &pBTAvi );
    return 0;
}   /* RunBlobTrackingAuto */
/* Read parameters from command line
 * and transfer to specified module:
 */
/* Scans argv for arguments of the form "<prefix>:<param>=<value>" and
 * forwards each matching pair to module pM via SetParamStr; calls
 * ParamUpdate() once at the end so the module re-reads its settings. */
static void set_params(int argc, char* argv[], CvVSModule* pM, const char* prefix, const char* module)
{
    int prefix_len = (int)strlen(prefix);
    int i;
    for(i=0; i<argc; ++i)
    {
        int j;
        char* ptr_eq = NULL;
        int cmd_param_len=0;
        char* cmd = argv[i];
        if(MY_STRNICMP(prefix,cmd,prefix_len)!=0) continue;  // wrong module prefix
        cmd += prefix_len;
        if(cmd[0]!=':')continue;
        cmd++;
        ptr_eq = strchr(cmd,'=');
        if(ptr_eq)
            cmd_param_len = (int)(ptr_eq-cmd);
        for(j=0; ; ++j)
        {
            int     param_len;
            const char*   param = pM->GetParamName(j);
            if(param==NULL) break;  // end of the module's parameter list
            param_len = (int)strlen(param);
            if(cmd_param_len!=param_len) continue;
            if(MY_STRNICMP(param,cmd,param_len)!=0) continue;
            cmd+=param_len;
            if(cmd[0]!='=')continue;
            cmd++;
            pM->SetParamStr(param,cmd);
            printf("%s:%s param set to %g\n",module,param,pM->GetParam(param));
        }
    }
    pM->ParamUpdate();
}   /* set_params */
/* Print all parameter values for given module: */
/* Writes every (name, value) pair exposed by module pM to stdout and,
 * when log_name is non-NULL, appends the same lines to that log file.
 * Fix: the log file is now opened only after the "no parameters"
 * early-out, so the FILE handle can no longer leak on that path. */
static void print_params(CvVSModule* pM, const char* module, const char* log_name)
{
    FILE* log;
    int i;
    if(pM->GetParamName(0) == NULL ) return;  /* module exposes no parameters */
    log = log_name?fopen(log_name,"at"):NULL;
    printf("%s(%s) module parameters:\n",module,pM->GetNickName());
    if(log)
        fprintf(log,"%s(%s) module parameters:\n",module,pM->GetNickName());
    for (i=0; ; ++i)
    {
        const char*   param = pM->GetParamName(i);
        const char*   str = param?pM->GetParamStr(param):NULL;
        if(param == NULL)break;
        if(str)
        {   /* string-valued parameter */
            printf(" %s: %s\n",param,str);
            if(log)
                fprintf(log," %s: %s\n",param,str);
        }
        else
        {   /* numeric parameter */
            printf(" %s: %g\n",param,pM->GetParam(param));
            if(log)
                fprintf(log," %s: %g\n",param,pM->GetParam(param));
        }
    }
    if(log) fclose(log);
}   /* print_params */
/* Entry point: parses "name=value" command-line options, looks up the
 * requested FG/blob-detector/tracker/post-proc/analysis modules in the
 * tables above, assembles a CvBlobTrackerAuto pipeline, optionally
 * loads/saves module state from YML files, and runs it on the AVI. */
int main(int argc, char* argv[])
{   /* Main function: */
    CvCapture*                  pCap = NULL;
    CvBlobTrackerAutoParam1     param = {0,0,0,0,0,0,0,0};
    CvBlobTrackerAuto*          pTracker = NULL;
    //float       scale = 1;
    const char* scale_name = NULL;
    char*       yml_name = NULL;
    char**      yml_video_names = NULL;
    int         yml_video_num = 0;
    char*       avi_name = NULL;
    const char* fg_name = NULL;
    char*       fgavi_name = NULL;
    char*       btavi_name = NULL;
    const char* bd_name = NULL;
    const char* bt_name = NULL;
    const char* btgen_name = NULL;
    const char* btpp_name = NULL;
    const char* bta_name = NULL;
    char*       bta_data_name = NULL;
    char*       track_name = NULL;
    //char*       comment_name = NULL;
    char*       FGTrainFrames = NULL;
    char*       log_name = NULL;
    char*       savestate_name = NULL;
    char*       loadstate_name = NULL;
    const char* bt_corr = NULL;
    DefModule_FGDetector*           pFGModule = NULL;
    DefModule_BlobDetector*         pBDModule = NULL;
    DefModule_BlobTracker*          pBTModule = NULL;
    DefModule_BlobTrackPostProc*    pBTPostProcModule = NULL;
    DefModule_BlobTrackGen*         pBTGenModule = NULL;
    DefModule_BlobTrackAnalysis*    pBTAnalysisModule = NULL;
    cvInitSystem(argc, argv);
    if(argc < 2)
    {   /* Print help: */
        int i;
        printf("blobtrack [fg=<fg_name>] [bd=<bd_name>]\n"
            "          [bt=<bt_name>] [btpp=<btpp_name>]\n"
            "          [bta=<bta_name>\n"
            "          [bta_data=<bta_data_name>\n"
            "          [bt_corr=<bt_corr_way>]\n"
            "          [btgen=<btgen_name>]\n"
            "          [track=<track_file_name>]\n"
            "          [scale=<scale val>] [noise=<noise_name>] [IVar=<IVar_name>]\n"
            "          [FGTrainFrames=<FGTrainFrames>]\n"
            "          [btavi=<avi output>] [fgavi=<avi output on FG>]\n"
            "          <avi_file>\n");
        printf("  <bt_corr_way> is the method of blob position correction for the \"Blob Tracking\" module\n"
            "     <bt_corr_way>=none,PostProcRes\n"
            "  <FGTrainFrames> is number of frames for FG training\n"
            "  <track_file_name> is file name for save tracked trajectories\n"
            "  <bta_data> is file name for data base of trajectory analysis module\n"
            "  <avi_file> is file name of avi to process by BlobTrackerAuto\n");
        puts("\nModules:");
        // PR expands to a loop that lists every nickname in a module table.
#define PR(_name,_m,_mt)\
        printf("<%s> is \"%s\" module name and can be:\n",_name,_mt);\
        for(i=0; _m[i].nickname; ++i)\
        {\
            printf("  %d. %s",i+1,_m[i].nickname);\
            if(_m[i].description)printf(" - %s",_m[i].description);\
            printf("\n");\
        }
        PR("fg_name",FGDetector_Modules,"FG/BG Detection");
        PR("bd_name",BlobDetector_Modules,"Blob Entrance Detection");
        PR("bt_name",BlobTracker_Modules,"Blob Tracking");
        PR("btpp_name",BlobTrackPostProc_Modules, "Blob Trajectory Post Processing");
        PR("btgen_name",BlobTrackGen_Modules, "Blob Trajectory Generation");
        PR("bta_name",BlobTrackAnalysis_Modules, "Blob Trajectory Analysis");
#undef PR
        return 0;
    }   /* Print help. */
    {   /* Parse arguments: */
        int i;
        for(i=1; i<argc; ++i)
        {
            int bParsed = 0;
            size_t len = strlen(argv[i]);
            // RO matches a "name=" prefix and captures the value pointer.
#define RO(_n1,_n2) if(strncmp(argv[i],_n1,strlen(_n1))==0) {_n2 = argv[i]+strlen(_n1);bParsed=1;};
            RO("fg=",fg_name);
            RO("fgavi=",fgavi_name);
            RO("btavi=",btavi_name);
            RO("bd=",bd_name);
            RO("bt=",bt_name);
            RO("bt_corr=",bt_corr);
            RO("btpp=",btpp_name);
            RO("bta=",bta_name);
            RO("bta_data=",bta_data_name);
            RO("btgen=",btgen_name);
            RO("track=",track_name);
            //RO("comment=",comment_name);
            RO("FGTrainFrames=",FGTrainFrames);
            RO("log=",log_name);
            RO("savestate=",savestate_name);
            RO("loadstate=",loadstate_name);
#undef RO
            {
                // Any bare argument ending in ".avi" is the input video.
                char* ext = argv[i] + len-4;
                if( strrchr(argv[i],'=') == NULL &&
                    !bParsed &&
                    (len>3 && (MY_STRICMP(ext,".avi") == 0 )))
                {
                    avi_name = argv[i];
                    break;
                }
            }   /* Next argument. */
        }
    }   /* Parse arguments. */
    if(track_name)
    {   /* Set Trajectory Generator module: */
        int i;
        if(!btgen_name)btgen_name=BlobTrackGen_Modules[0].nickname;
        for(i=0; BlobTrackGen_Modules[i].nickname; ++i)
        {
            if(MY_STRICMP(BlobTrackGen_Modules[i].nickname,btgen_name)==0)
                pBTGenModule = BlobTrackGen_Modules + i;
        }
    }   /* Set Trajectory Generato module. */
    /* Initialize postprocessing module if tracker
     * correction by postprocessing is required.
     */
    if(bt_corr && MY_STRICMP(bt_corr,"PostProcRes")!=0 && !btpp_name)
    {
        btpp_name = bt_corr;
        if(MY_STRICMP(btpp_name,"none")!=0)bt_corr = "PostProcRes";
    }
    {   /* Set default parameters for one processing: */
        if(!bt_corr) bt_corr = "none";
        if(!fg_name) fg_name = FGDetector_Modules[0].nickname;
        if(!bd_name) bd_name = BlobDetector_Modules[0].nickname;
        if(!bt_name) bt_name = BlobTracker_Modules[0].nickname;
        if(!btpp_name) btpp_name = BlobTrackPostProc_Modules[0].nickname;
        if(!bta_name) bta_name = BlobTrackAnalysis_Modules[0].nickname;
        if(!scale_name) scale_name = "1";
    }
//    if(scale_name)
//        scale = (float)atof(scale_name);
    /* Resolve each requested nickname to its table entry; an unknown
     * nickname leaves the cursor on the NULL sentinel record. */
    for(pFGModule=FGDetector_Modules; pFGModule->nickname; ++pFGModule)
        if( fg_name && MY_STRICMP(fg_name,pFGModule->nickname)==0 ) break;
    for(pBDModule=BlobDetector_Modules; pBDModule->nickname; ++pBDModule)
        if( bd_name && MY_STRICMP(bd_name,pBDModule->nickname)==0 ) break;
    for(pBTModule=BlobTracker_Modules; pBTModule->nickname; ++pBTModule)
        if( bt_name && MY_STRICMP(bt_name,pBTModule->nickname)==0 ) break;
    for(pBTPostProcModule=BlobTrackPostProc_Modules; pBTPostProcModule->nickname; ++pBTPostProcModule)
        if( btpp_name && MY_STRICMP(btpp_name,pBTPostProcModule->nickname)==0 ) break;
    for(pBTAnalysisModule=BlobTrackAnalysis_Modules; pBTAnalysisModule->nickname; ++pBTAnalysisModule)
        if( bta_name && MY_STRICMP(bta_name,pBTAnalysisModule->nickname)==0 ) break;
    /* Create source video: */
    if(avi_name)
        pCap = cvCaptureFromFile(avi_name);
    if(pCap==NULL)
    {
        // NOTE(review): if no .avi argument was given, avi_name is NULL
        // here and printing it with %s is undefined behavior — confirm.
        printf("Can't open %s file\n",avi_name);
        return -1;
    }
    {   /* Display parameters: */
        int i;
        FILE* log = log_name?fopen(log_name,"at"):NULL;
        if(log)
        {   /* Print to log file: */
            fprintf(log,"\n=== Blob Tracking pipline in processing mode===\n");
            if(avi_name)
            {
                fprintf(log,"AVIFile: %s\n",avi_name);
            }
            fprintf(log,"FGDetector:   %s\n", pFGModule->nickname);
            fprintf(log,"BlobDetector: %s\n", pBDModule->nickname);
            fprintf(log,"BlobTracker:  %s\n", pBTModule->nickname);
            fprintf(log,"BlobTrackPostProc:  %s\n", pBTPostProcModule->nickname);
            fprintf(log,"BlobCorrection:  %s\n", bt_corr);
            fprintf(log,"Blob Trajectory Generator:  %s (%s)\n",
                pBTGenModule?pBTGenModule->nickname:"None",
                track_name?track_name:"none");
            fprintf(log,"BlobTrackAnalysis:  %s\n", pBTAnalysisModule->nickname);
            fclose(log);
        }
        printf("\n=== Blob Tracking pipline in %s mode===\n","processing");
        if(yml_name)
        {
            // NOTE(review): yml_name is never set anywhere in this file,
            // so this branch appears to be dead code — confirm.
            printf("ConfigFile: %s\n",yml_name);
            printf("BG: %s\n",yml_video_names[0]);
            printf("FG: ");
            for(i=1;i<(yml_video_num);++i){printf("%s",yml_video_names[i]);if((i+1)<yml_video_num)printf("|");};
            printf("\n");
        }
        if(avi_name)
        {
            printf("AVIFile: %s\n",avi_name);
        }
        printf("FGDetector:   %s\n", pFGModule->nickname);
        printf("BlobDetector: %s\n", pBDModule->nickname);
        printf("BlobTracker:  %s\n", pBTModule->nickname);
        printf("BlobTrackPostProc:  %s\n", pBTPostProcModule->nickname);
        printf("BlobCorrection:  %s\n", bt_corr);
        printf("Blob Trajectory Generator:  %s (%s)\n",
            pBTGenModule?pBTGenModule->nickname:"None",
            track_name?track_name:"none");
        printf("BlobTrackAnalysis:  %s\n", pBTAnalysisModule->nickname);
    }   /* Display parameters. */
    {   /* Create autotracker module and its components: */
        param.FGTrainFrames = FGTrainFrames?atoi(FGTrainFrames):0;
        /* Create FG Detection module: */
        param.pFG = pFGModule->create();
        if(!param.pFG)
            puts("Can not create FGDetector module");
        param.pFG->SetNickName(pFGModule->nickname);
        set_params(argc, argv, param.pFG, "fg", pFGModule->nickname);
        /* Create Blob Entrance Detection module: */
        param.pBD = pBDModule->create();
        if(!param.pBD)
            puts("Can not create BlobDetector module");
        param.pBD->SetNickName(pBDModule->nickname);
        set_params(argc, argv, param.pBD, "bd", pBDModule->nickname);
        /* Create blob tracker module: */
        param.pBT = pBTModule->create();
        if(!param.pBT)
            puts("Can not create BlobTracker module");
        param.pBT->SetNickName(pBTModule->nickname);
        set_params(argc, argv, param.pBT, "bt", pBTModule->nickname);
        /* Create blob trajectory generation module: */
        param.pBTGen = NULL;
        if(pBTGenModule && track_name && pBTGenModule->create)
        {
            param.pBTGen = pBTGenModule->create();
            param.pBTGen->SetFileName(track_name);
        }
        if(param.pBTGen)
        {
            param.pBTGen->SetNickName(pBTGenModule->nickname);
            set_params(argc, argv, param.pBTGen, "btgen", pBTGenModule->nickname);
        }
        /* Create blob trajectory post processing module: */
        param.pBTPP = NULL;
        if(pBTPostProcModule && pBTPostProcModule->create)
        {
            param.pBTPP = pBTPostProcModule->create();
        }
        if(param.pBTPP)
        {
            param.pBTPP->SetNickName(pBTPostProcModule->nickname);
            set_params(argc, argv, param.pBTPP, "btpp", pBTPostProcModule->nickname);
        }
        param.UsePPData = (bt_corr && MY_STRICMP(bt_corr,"PostProcRes")==0);
        /* Create blob trajectory analysis module: */
        param.pBTA = NULL;
        if(pBTAnalysisModule && pBTAnalysisModule->create)
        {
            param.pBTA = pBTAnalysisModule->create();
            param.pBTA->SetFileName(bta_data_name);
        }
        if(param.pBTA)
        {
            param.pBTA->SetNickName(pBTAnalysisModule->nickname);
            set_params(argc, argv, param.pBTA, "bta", pBTAnalysisModule->nickname);
        }
        /* Create whole pipline: */
        pTracker = cvCreateBlobTrackerAuto1(&param);
        if(!pTracker)
            puts("Can not create BlobTrackerAuto");
    }
    {   /* Load states of each module from state file: */
        CvFileStorage* fs = NULL;
        if(loadstate_name)
            fs=cvOpenFileStorage(loadstate_name,NULL,CV_STORAGE_READ);
        if(fs)
        {
            printf("Load states for modules...\n");
            if(param.pBT)
            {
                CvFileNode* fn = cvGetFileNodeByName(fs,NULL,"BlobTracker");
                param.pBT->LoadState(fs,fn);
            }
            if(param.pBTA)
            {
                CvFileNode* fn = cvGetFileNodeByName(fs,NULL,"BlobTrackAnalyser");
                param.pBTA->LoadState(fs,fn);
            }
            if(pTracker)
            {
                CvFileNode* fn = cvGetFileNodeByName(fs,NULL,"BlobTrackerAuto");
                pTracker->LoadState(fs,fn);
            }
            cvReleaseFileStorage(&fs);
            printf("... Modules states loaded\n");
        }
    }   /* Load states of each module. */
    {   /* Print module parameters: */
        struct DefMMM
        {
            CvVSModule* pM;
            const char* name;
        } Modules[] = {
            {(CvVSModule*)param.pFG,"FGdetector"},
            {(CvVSModule*)param.pBD,"BlobDetector"},
            {(CvVSModule*)param.pBT,"BlobTracker"},
            {(CvVSModule*)param.pBTGen,"TrackGen"},
            {(CvVSModule*)param.pBTPP,"PostProcessing"},
            {(CvVSModule*)param.pBTA,"TrackAnalysis"},
            {NULL,NULL}
        };
        int i;
        for(i=0; Modules[i].name; ++i)
        {
            if(Modules[i].pM)
                print_params(Modules[i].pM,Modules[i].name,log_name);
        }
    }   /* Print module parameters. */
    /* Run pipeline: */
    RunBlobTrackingAuto( pCap, pTracker, fgavi_name, btavi_name );
    {   /* Save state and release modules: */
        CvFileStorage* fs = NULL;
        if(savestate_name)
        {
            fs=cvOpenFileStorage(savestate_name,NULL,CV_STORAGE_WRITE);
        }
        if(fs)
        {
            cvStartWriteStruct(fs,"BlobTracker",CV_NODE_MAP);
            if(param.pBT)param.pBT->SaveState(fs);
            cvEndWriteStruct(fs);
            cvStartWriteStruct(fs,"BlobTrackerAuto",CV_NODE_MAP);
            if(pTracker)pTracker->SaveState(fs);
            cvEndWriteStruct(fs);
            cvStartWriteStruct(fs,"BlobTrackAnalyser",CV_NODE_MAP);
            if(param.pBTA)param.pBTA->SaveState(fs);
            cvEndWriteStruct(fs);
            cvReleaseFileStorage(&fs);
        }
        if(param.pBT)cvReleaseBlobTracker(&param.pBT);
        if(param.pBD)cvReleaseBlobDetector(&param.pBD);
        if(param.pBTGen)cvReleaseBlobTrackGen(&param.pBTGen);
        if(param.pBTA)cvReleaseBlobTrackAnalysis(&param.pBTA);
        if(param.pFG)cvReleaseFGDetector(&param.pFG);
        if(pTracker)cvReleaseBlobTrackerAuto(&pTracker);
    }   /* Save state and release modules. */
    if(pCap)
        cvReleaseCapture(&pCap);
    return 0;
}   /* main() */

View File

@@ -1,16 +0,0 @@
#!/bin/sh
# Build helper for the C samples: with an argument, compile that single
# .c sample; with no arguments, compile every .c and .cpp file in the
# current directory. Fix: quote all variable expansions so file names
# containing spaces or glob characters do not break the commands.
if [ $# -gt 0 ] ; then
    base=`basename "$1" .c`
    echo "compiling $base"
    gcc -ggdb `pkg-config opencv --cflags --libs` "$base.c" -o "$base"
else
    for i in *.c; do
        echo "compiling $i"
        gcc -ggdb `pkg-config --cflags opencv` -o `basename "$i" .c` "$i" `pkg-config --libs opencv`;
    done
    for i in *.cpp; do
        echo "compiling $i"
        g++ -ggdb `pkg-config --cflags opencv` -o `basename "$i" .cpp` "$i" `pkg-config --libs opencv`;
    done
fi

View File

@@ -1,10 +0,0 @@
<?xml version="1.0"?>
<!-- Training parameters loaded via OpenCV FileStorage; presumably for a
     randomized-tree / Calonder-style keypoint classifier sample, given
     numTrees/depth/views/reducedNumDim - confirm against the sample
     that reads this file. -->
<opencv_storage>
<numTrees>20</numTrees>
<depth>7</depth>
<views>1000</views>
<patchSize>20</patchSize>
<reducedNumDim>30</reducedNumDim>
<numQuantBits>4</numQuantBits>
<printStatus>1</printStatus>
</opencv_storage>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

View File

@@ -1,167 +0,0 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
/* Prints a short description of the demo and its usage to stdout. */
static void help(void)
{
    printf("\nThis program creates an image to demonstrate the use of the \"c\" contour\n"
            "functions: cvFindContours() and cvApproxPoly() along with the storage\n"
            "functions cvCreateMemStorage() and cvDrawContours().\n"
            "It also shows the use of a trackbar to control contour retrieval.\n"
            "\n"
            "Usage :\n"
            "./contours\n");
}
#define w 500        /* width and height of the square demo canvas */
int levels = 3;      /* trackbar position; drawn depth is (levels - 3) */
CvSeq* contours = 0; /* contour list produced by cvFindContours in main() */
/* Trackbar callback: redraws the contour hierarchy at the selected
 * nesting depth. Fix: the original walked three h_next links without
 * any NULL checks and would crash if the contour chain had fewer than
 * three siblings; the walk is now guarded. */
static void on_trackbar(int pos)
{
    IplImage* cnt_img = cvCreateImage( cvSize(w,w), 8, 3 );
    CvSeq* _contours = contours;
    int _levels = levels - 3;
    (void)pos;
    if( _levels <= 0 ) // get to the nearest face to make it look more funny
    {
        int skip;
        /* Advance up to three siblings, stopping early at end of chain. */
        for( skip = 0; skip < 3 && _contours && _contours->h_next; skip++ )
            _contours = _contours->h_next;
    }
    cvZero( cnt_img );
    cvDrawContours( cnt_img, _contours, CV_RGB(255,0,0), CV_RGB(0,255,0), _levels, 3, CV_AA, cvPoint(0,0) );
    cvShowImage( "contours", cnt_img );
    cvReleaseImage( &cnt_img );
}
/* Labels the connected components of img in place: flood-fills each
 * unvisited region with a fresh integer label (starting from 1), using
 * a border-initialized mask to track which pixels were already filled. */
static void findCComp( IplImage* img )
{
    int x, y, cidx = 1;  /* cidx: next component label to assign */
    /* Mask must be 2 pixels larger than img for cvFloodFill; the 1-pixel
     * border is pre-marked so the fill never runs off the image. */
    IplImage* mask = cvCreateImage( cvSize(img->width+2, img->height+2), 8, 1 );
    cvZero(mask);
    cvRectangle( mask, cvPoint(0, 0), cvPoint(mask->width-1, mask->height-1),
                 cvScalarAll(1), 1, 8, 0 );
    for( y = 0; y < img->height; y++ )
        for( x = 0; x < img->width; x++ )
        {
            if( CV_IMAGE_ELEM(mask, uchar, y+1, x+1) != 0 )
                continue;  /* pixel already belongs to a labeled component */
            cvFloodFill(img, cvPoint(x,y), cvScalarAll(cidx),
                        cvScalarAll(0), cvScalarAll(0), 0, 4, mask);
            cidx++;
        }
}
/* Draw a synthetic test image of six cartoon "faces", label its connected
 * components, extract the contour hierarchy, round-trip it through XML
 * serialization, approximate it with polygons and display the result with
 * an interactive trackbar (handled by on_trackbar above). */
int main(int argc, char* argv[])
{
int i, j;
CvMemStorage* storage = cvCreateMemStorage(0);
IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 );
IplImage* img32f = cvCreateImage( cvSize(w,w), IPL_DEPTH_32F, 1 );
IplImage* img32s = cvCreateImage( cvSize(w,w), IPL_DEPTH_32S, 1 );
IplImage* img3 = cvCreateImage( cvSize(w,w), 8, 3 );
(void)argc; (void)argv;
help();
cvZero( img );
/* Lay out a 2x3 grid of faces; face 0 additionally gets a row of "hair". */
for( i=0; i < 6; i++ )
{
int dx = (i%2)*250 - 30;
int dy = (i/2)*150;
CvScalar white = cvRealScalar(255);
CvScalar black = cvRealScalar(0);
if( i == 0 )
{
for( j = 0; j <= 10; j++ )
{
double angle = (j+5)*CV_PI/21;
cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)),
cvRound(dy+100-90*sin(angle))),
cvPoint(cvRound(dx+100+j*10-30*cos(angle)),
cvRound(dy+100-30*sin(angle))), white, 3, 8, 0);
}
}
/* Head, eyes (outline / iris / pupil), nose, mouth, and ears. */
cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
}
cvNamedWindow( "image", 1 );
cvShowImage( "image", img );
/* Label components in a float image, then convert to 32S, which is what
 * cvFindContours accepts for CV_RETR_CCOMP retrieval on labelled input. */
cvConvert( img, img32f );
findCComp( img32f );
cvConvert( img32f, img32s );
cvFindContours( img32s, storage, &contours, sizeof(CvContour),
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
//cvFindContours( img, storage, &contours, sizeof(CvContour),
// CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
/* Demonstrate (de)serialization of the whole contour tree. */
{
const char* attrs[] = {"recursive", "1", 0};
cvSave("contours.xml", contours, 0, 0, cvAttrList(attrs, 0));
contours = (CvSeq*)cvLoad("contours.xml", storage, 0, 0);
}
// comment this out if you do not want approximation
contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );
cvNamedWindow( "contours", 1 );
cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );
/* Paint each top-level contour (and its first-level holes) in a random color. */
{
CvRNG rng = cvRNG(-1);
CvSeq* tcontours = contours;
cvCvtColor( img, img3, CV_GRAY2BGR );
while( tcontours->h_next )
tcontours = tcontours->h_next;
for( ; tcontours != 0; tcontours = tcontours->h_prev )
{
CvScalar color;
color.val[0] = cvRandInt(&rng) % 256;
color.val[1] = cvRandInt(&rng) % 256;
color.val[2] = cvRandInt(&rng) % 256;
color.val[3] = cvRandInt(&rng) % 256;
cvDrawContours(img3, tcontours, color, color, 0, -1, 8, cvPoint(0,0));
if( tcontours->v_next )
{
color.val[0] = cvRandInt(&rng) % 256;
color.val[1] = cvRandInt(&rng) % 256;
color.val[2] = cvRandInt(&rng) % 256;
color.val[3] = cvRandInt(&rng) % 256;
cvDrawContours(img3, tcontours->v_next, color, color, 1, -1, 8, cvPoint(0,0));
}
}
}
cvShowImage( "colored", img3 );
on_trackbar(0);
cvWaitKey(0);
cvReleaseMemStorage( &storage );
cvReleaseImage( &img );
cvReleaseImage( &img32f );
cvReleaseImage( &img32s );
cvReleaseImage( &img3 );
return 0;
}
#ifdef _EiC
main(1,"");
#endif

View File

@@ -1,50 +0,0 @@
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <ctype.h>
#include <stdio.h>
/* Usage banner. Fix: the user-facing text misspelled "conversion" as
 * "convertation" (twice). */
static void help(void)
{
    printf("\n This sample demonstrates cascade conversion \n"
           "Usage:\n"
           "./convert_cascade --size=\"<width>x<height>\"<conversion size> \n"
           " input_cascade_path \n"
           " output_cascade_filename\n"
           "Example: \n"
           "./convert_cascade --size=640x480 ../../opencv/data/haarcascades/haarcascade_eye.xml ../../opencv/data/haarcascades/test_cascade.xml \n"
           );
}
/* Load a Haar cascade, rescale it to the window size given by --size,
 * and save the converted cascade under a new name.
 * argv: [1] --size=WxH  [2] input cascade path  [3] output cascade path.
 * Returns 0 on success, -1 on bad arguments or a missing input cascade. */
int main( int argc, char** argv )
{
    const char* size_opt = "--size=";
    char comment[1024];
    CvHaarClassifierCascade* cascade = 0;
    CvSize size;

    help();

    if( argc != 4 || strncmp( argv[1], size_opt, strlen(size_opt) ) != 0 )
    {
        help();
        return -1;
    }

    /* Fix: CvSize members are int, so parse with %d (the original's %u is
     * a format/type mismatch), and actually check that both numbers parsed. */
    if( sscanf( argv[1], "--size=%dx%d", &size.width, &size.height ) != 2 ||
        size.width <= 0 || size.height <= 0 )
    {
        fprintf( stderr, "Invalid --size argument: %s\n", argv[1] );
        return -1;
    }

    cascade = cvLoadHaarClassifierCascade( argv[2], size );

    if( !cascade )
    {
        fprintf( stderr, "Input cascade could not be found/opened\n" );
        return -1;
    }

    /* Fix: bound the write -- a long input path could overflow the
     * original sprintf'ed buffer. */
    snprintf( comment, sizeof(comment),
              "Automatically converted from %s, window size = %dx%d",
              argv[2], size.width, size.height );
    cvSave( argv[3], cascade, 0, comment, cvAttrList(0,0) );

    /* Fix: release the cascade (leaked in the original). */
    cvReleaseHaarClassifierCascade( &cascade );

    return 0;
}
#ifdef _EiC
main(1,"facedetect.c");
#endif

View File

@@ -1,92 +0,0 @@
# Microsoft Developer Studio Project File - Name="cvsample" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Console Application" 0x0103
CFG=cvsample - Win32 Release
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
!MESSAGE NMAKE /f "cvsample.mak".
!MESSAGE
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
!MESSAGE NMAKE /f "cvsample.mak" CFG="cvsample - Win32 Release"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
!MESSAGE "cvsample - Win32 Release" (based on "Win32 (x86) Console Application")
!MESSAGE "cvsample - Win32 Debug" (based on "Win32 (x86) Console Application")
!MESSAGE
# Begin Project
# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
RSC=rc.exe
!IF "$(CFG)" == "cvsample - Win32 Release"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 0
# PROP BASE Output_Dir "Release"
# PROP BASE Intermediate_Dir "Release"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 0
# PROP Output_Dir "..\..\_temp\cvsample_Release"
# PROP Intermediate_Dir "..\..\_temp\cvsample_Release"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
F90=df.exe
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD CPP /nologo /MD /W4 /Gm /GX /Zi /O2 /I "../../cxcore/include" /I "../../cv/include" /I "../../otherlibs/highgui" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib cxcore.lib cv.lib highgui.lib /nologo /subsystem:console /debug /machine:I386 /nodefaultlib:"libmmd.lib" /out:".\cvsample.exe" /libpath:"../../lib"
!ELSEIF "$(CFG)" == "cvsample - Win32 Debug"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 1
# PROP BASE Output_Dir "Debug"
# PROP BASE Intermediate_Dir "Debug"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 1
# PROP Output_Dir "..\..\_temp\cvsample_Debug"
# PROP Intermediate_Dir "..\..\_temp\cvsample_Debug"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
F90=df.exe
# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
# ADD CPP /nologo /MDd /W4 /Gm /GX /Zi /Od /I "../../cxcore/include" /I "../../cv/include" /I "../../otherlibs/highgui" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib cxcored.lib cvd.lib highguid.lib /nologo /subsystem:console /debug /machine:I386 /nodefaultlib:"libmmdd.lib" /out:".\cvsampled.exe" /pdbtype:sept /libpath:"../../lib"
!ENDIF
# Begin Target
# Name "cvsample - Win32 Release"
# Name "cvsample - Win32 Debug"
# Begin Source File
SOURCE=.\squares.c
# End Source File
# End Target
# End Project

View File

@@ -1,413 +0,0 @@
<?xml version="1.0" encoding="windows-1251"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="8,00"
Name="cvsample"
ProjectGUID="{2820F96A-13D2-4EFE-BC9F-A9AF482026AE}"
RootNamespace="cvsample"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\..\..\_temp\cvsample_Dbg/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Dbg/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcored.lib cvd.lib highguid.lib"
OutputFile=".\cvsampled.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmdd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName)d.pdb"
SubSystem="1"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\..\..\_temp\cvsample_Dbg64/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;WIN64;EM64T;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Dbg64/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcored_64.lib cvd_64.lib highguid_64.lib"
OutputFile=".\cvsampled_64.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmdd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName)d_64.pdb"
SubSystem="1"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\..\..\_temp\cvsample_Rls/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
MinimalRebuild="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Rls/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcore.lib cv.lib highgui.lib"
OutputFile=".\cvsample.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName).pdb"
SubSystem="1"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(TEMP)\opencv.build\$(ProjectName)_$(ConfigurationName).$(PlatformName)"
IntermediateDirectory="$(OutDir)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\..\..\_temp\cvsample_Rls64/cvsample.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../../cxcore/include,../../cv/include,../../otherlibs/highgui"
PreprocessorDefinitions="WIN32;WIN64;EM64T;NDEBUG;_CONSOLE"
StringPooling="true"
MinimalRebuild="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\..\..\_temp\cvsample_Rls64/cvsample.pch"
AssemblerListingLocation="$(IntDir)\"
ObjectFile="$(IntDir)\"
ProgramDataBaseFileName="$(IntDir)\"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1033"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
AdditionalDependencies="odbc32.lib odbccp32.lib cxcore_64.lib cv_64.lib highgui_64.lib"
OutputFile=".\cvsample_64.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
AdditionalLibraryDirectories="../../lib"
IgnoreDefaultLibraryNames="libmmd.lib"
GenerateDebugInformation="true"
ProgramDatabaseFile="$(IntDir)/$(ProjectName)_64.pdb"
SubSystem="1"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile="$(IntDir)\$(ProjectName).bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath=".\stereo_calib.cpp"
>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@@ -1,242 +0,0 @@
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/legacy.hpp>
#include <stdio.h>
/* Usage banner. Fix: typos in the user-facing text ("demostrates",
 * "traingulation", "tesselation"). */
static void help( void )
{
    printf("\nThis program demonstrates iterative construction of\n"
           "delaunay triangulation and voronoi tessellation.\n"
           "It draws a random set of points in an image and then delaunay triangulates them.\n"
           "Usage: \n"
           "./delaunay \n"
           "\nThis program builds the triangulation interactively, you may stop this process by\n"
           "hitting any key.\n");
}
/* Allocate an empty Delaunay subdivision covering `rect`, backed by `storage`. */
static CvSubdiv2D* init_delaunay( CvMemStorage* storage,
                                  CvRect rect )
{
    CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
                                           sizeof(CvSubdiv2DPoint),
                                           sizeof(CvQuadEdge2D),
                                           storage );
    cvInitSubdivDelaunay2D( subdiv, rect );
    return subdiv;
}
/* Mark one subdivision vertex as a small filled circle. */
static void draw_subdiv_point( IplImage* img, CvPoint2D32f fp, CvScalar color )
{
    CvPoint center = cvPoint( cvRound(fp.x), cvRound(fp.y) );
    cvCircle( img, center, 3, color, CV_FILLED, 8, 0 );
}
/* Draw one subdivision edge as an antialiased line; edges with a missing
 * endpoint (virtual edges at the subdivision boundary) are skipped. */
static void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
{
    CvSubdiv2DPoint* org_pt = cvSubdiv2DEdgeOrg(edge);
    CvSubdiv2DPoint* dst_pt = cvSubdiv2DEdgeDst(edge);

    if( org_pt && dst_pt )
    {
        CvPoint iorg = cvPoint( cvRound( org_pt->pt.x ), cvRound( org_pt->pt.y ));
        CvPoint idst = cvPoint( cvRound( dst_pt->pt.x ), cvRound( dst_pt->pt.y ));
        cvLine( img, iorg, idst, color, 1, CV_AA, 0 );
    }
}
/* Draw the whole subdivision by scanning its internal quad-edge sequence:
 * each occupied entry yields one Delaunay edge and its rotated dual, which
 * is drawn as the corresponding Voronoi edge. */
static void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
CvScalar delaunay_color, CvScalar voronoi_color )
{
CvSeqReader reader;
int i, total = subdiv->edges->total;
int elem_size = subdiv->edges->elem_size;
cvStartReadSeq( (CvSeq*)(subdiv->edges), &reader, 0 );
for( i = 0; i < total; i++ )
{
CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);
/* Skip freed (deleted) slots in the edge set. */
if( CV_IS_SET_ELEM( edge ))
{
/* edge + 1 addresses the rotated edge, i.e. the Voronoi dual. */
draw_subdiv_edge( img, (CvSubdiv2DEdge)edge + 1, voronoi_color );
draw_subdiv_edge( img, (CvSubdiv2DEdge)edge, delaunay_color );
}
CV_NEXT_SEQ_ELEM( elem_size, reader );
}
}
/* Highlight the facet containing `fp`: locate the point in the subdivision,
 * walk the left-face edge ring drawing each edge in `active_color`, then
 * mark the query point itself. */
static void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
CvScalar active_color )
{
CvSubdiv2DEdge e;
CvSubdiv2DEdge e0 = 0;
CvSubdiv2DPoint* p = 0;
cvSubdiv2DLocate( subdiv, fp, &e0, &p );
if( e0 )
{
e = e0;
do
{
/* Traverse the ring of edges around the facet to the left of e0. */
draw_subdiv_edge( img, e, active_color );
e = cvSubdiv2DGetEdge(e,CV_NEXT_AROUND_LEFT);
}
while( e != e0 );
}
draw_subdiv_point( img, fp, active_color );
}
/* Fill the facet to the left of `edge` with a random color and outline it.
 * Two passes over the facet's edge ring: first count the vertices, then
 * gather them into a temporary buffer for cvFillConvexPoly. */
static void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
{
    CvSubdiv2DEdge t = edge;
    int i, count = 0;
    CvPoint* buf = 0;

    // count number of edges in facet
    do
    {
        count++;
        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
    } while (t != edge );

    buf = (CvPoint*)malloc( count * sizeof(buf[0]));
    /* Fix: the original never checked malloc; skip drawing instead of
     * dereferencing NULL under memory pressure. */
    if( !buf )
        return;

    // gather points
    t = edge;
    for( i = 0; i < count; i++ )
    {
        CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
        if( !pt ) break;
        buf[i] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
    }

    /* Only draw when every vertex of the ring was resolved. */
    if( i == count )
    {
        CvSubdiv2DPoint* pt = cvSubdiv2DEdgeDst( cvSubdiv2DRotateEdge( edge, 1 ));
        cvFillConvexPoly( img, buf, count, CV_RGB(rand()&255,rand()&255,rand()&255), CV_AA, 0 );
        cvPolyLine( img, &buf, &count, 1, 1, CV_RGB(0,0,0), 1, CV_AA, 0);
        draw_subdiv_point( img, pt->pt, CV_RGB(0,0,0));
    }
    free( buf );
}
/* Fill every Voronoi cell of the subdivision with a random color. Each
 * quad-edge contributes two cells: the facets left of its two rotated
 * (dual) edges. */
static void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
{
CvSeqReader reader;
int i, total = subdiv->edges->total;
int elem_size = subdiv->edges->elem_size;
/* Make sure the Voronoi diagram is up to date before painting. */
cvCalcSubdivVoronoi2D( subdiv );
cvStartReadSeq( (CvSeq*)(subdiv->edges), &reader, 0 );
for( i = 0; i < total; i++ )
{
CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);
if( CV_IS_SET_ELEM( edge ))
{
CvSubdiv2DEdge e = (CvSubdiv2DEdge)edge;
// left
draw_subdiv_facet( img, cvSubdiv2DRotateEdge( e, 1 ));
// right
draw_subdiv_facet( img, cvSubdiv2DRotateEdge( e, 3 ));
}
CV_NEXT_SEQ_ELEM( elem_size, reader );
}
}
/* Interactive demo driver: insert 200 random points one at a time,
 * redrawing the triangulation/Voronoi overlay after each insertion, then
 * paint the final Voronoi tessellation. Any key press aborts early. */
static void run(void)
{
char win[] = "source";
int i;
CvRect rect = { 0, 0, 600, 600 };
CvMemStorage* storage;
CvSubdiv2D* subdiv;
IplImage* img;
CvScalar active_facet_color, delaunay_color, voronoi_color, bkgnd_color;
active_facet_color = CV_RGB( 255, 0, 0 );
delaunay_color = CV_RGB( 0,0,0);
voronoi_color = CV_RGB(0, 180, 0);
bkgnd_color = CV_RGB(255,255,255);
img = cvCreateImage( cvSize(rect.width,rect.height), 8, 3 );
cvSet( img, bkgnd_color, 0 );
cvNamedWindow( win, 1 );
storage = cvCreateMemStorage(0);
subdiv = init_delaunay( storage, rect );
printf("Delaunay triangulation will be build now interactively.\n"
"To stop the process, press any key\n\n");
for( i = 0; i < 200; i++ )
{
/* Random point strictly inside the bounding rectangle. */
CvPoint2D32f fp = cvPoint2D32f( (float)(rand()%(rect.width-10)+5),
(float)(rand()%(rect.height-10)+5));
/* First show where the point lands... */
locate_point( subdiv, fp, img, active_facet_color );
cvShowImage( win, img );
if( cvWaitKey( 100 ) >= 0 )
break;
/* ...then insert it and redraw the updated diagram. */
cvSubdivDelaunay2DInsert( subdiv, fp );
cvCalcSubdivVoronoi2D( subdiv );
cvSet( img, bkgnd_color, 0 );
draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
cvShowImage( win, img );
if( cvWaitKey( 100 ) >= 0 )
break;
}
cvSet( img, bkgnd_color, 0 );
paint_voronoi( subdiv, img );
cvShowImage( win, img );
cvWaitKey(0);
cvReleaseMemStorage( &storage );
cvReleaseImage(&img);
cvDestroyWindow( win );
}
/* Entry point: print usage, then run the interactive demo. */
int main( int argc, char** argv )
{
    /* The demo takes no command-line arguments. */
    (void)argc;
    (void)argv;

    help();
    run();

    return 0;
}
#ifdef _EiC
main( 1, "delaunay.c" );
#endif

View File

@@ -1,15 +0,0 @@
# Minimal example of building an OpenCV-based executable with CMake.
PROJECT(opencv_example)
CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
# Silence the CMP0003 linker-search-path policy warning on newer CMake.
if(COMMAND cmake_policy)
cmake_policy(SET CMP0003 NEW)
endif(COMMAND cmake_policy)
# Locate an installed (or build-tree) OpenCV via OpenCVConfig.cmake.
FIND_PACKAGE( OpenCV REQUIRED )
# Declare the target (an executable)
ADD_EXECUTABLE(opencv_example minarea.c)
TARGET_LINK_LIBRARIES(opencv_example ${OpenCV_LIBS})
#MESSAGE(STATUS "OpenCV_LIBS: ${OpenCV_LIBS}")

View File

@@ -1,27 +0,0 @@
Example for CMake build system.
Compile OpenCV with cmake, preferably in an out-of-tree build, for example:
$ mkdir opencv-release
$ cd opencv-release
$ cmake <OPENCV_SRC_PATH>
$ make
And, *only optionally*, install it with:
$ sudo make install
Then create the binary directory for the example with:
$ mkdir example-release
$ cd example-release
Then, if "make install" has been executed, directly running
$ cmake <OPENCV_SRC_PATH>/samples/c/example_cmake/
will detect the "OpenCVConfig.cmake" file and the project is ready to compile.
If "make install" has not been executed, you'll have to manually pick the opencv
binary directory (Under Windows CMake may remember the correct directory). Open
the CMake gui with:
$ cmake-gui <OPENCV_SRC_PATH>/samples/c/example_cmake/
And pick the correct value for OpenCV_DIR.

View File

@@ -1,116 +0,0 @@
#ifdef _CH_
#pragma package <opencv>
#endif
#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
#endif
#define ARRAY 1
/* Usage banner. Fix: declared `static ... (void)` to match the file-local
 * help() helpers in the sibling C samples and avoid an implicit
 * external-linkage symbol. */
static void help( void )
{
    printf("\nThis program demonstrates finding the minimum enclosing box or circle of a set\n"
           "of points using functions: minAreaRect() minEnclosingCircle().\n"
           "Random points are generated and then enclosed.\n"
           "Call:\n"
           "./minarea\n");
}
int main( int argc, char** argv )
{
IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY
CvMemStorage* storage = cvCreateMemStorage(0);
#endif
help();
cvNamedWindow( "rect & circle", 1 );
for(;;)
{
char key;
int i, count = rand()%100 + 1;
CvPoint pt0, pt;
CvBox2D box;
CvPoint2D32f box_vtx[4];
CvPoint2D32f center;
CvPoint icenter;
float radius;
#if !ARRAY
CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
sizeof(CvPoint), storage );
for( i = 0; i < count; i++ )
{
pt0.x = rand() % (img->width/2) + img->width/4;
pt0.y = rand() % (img->height/2) + img->height/4;
cvSeqPush( ptseq, &pt0 );
}
#ifndef _EiC /* unfortunately, here EiC crashes */
box = cvMinAreaRect2( ptseq, 0 );
#endif
cvMinEnclosingCircle( ptseq, &center, &radius );
#else
CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
CvMat pointMat = cvMat( 1, count, CV_32SC2, points );
for( i = 0; i < count; i++ )
{
pt0.x = rand() % (img->width/2) + img->width/4;
pt0.y = rand() % (img->height/2) + img->height/4;
points[i] = pt0;
}
#ifndef _EiC
box = cvMinAreaRect2( &pointMat, 0 );
#endif
cvMinEnclosingCircle( &pointMat, &center, &radius );
#endif
cvBoxPoints( box, box_vtx );
cvZero( img );
for( i = 0; i < count; i++ )
{
#if !ARRAY
pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
pt0 = points[i];
#endif
cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
}
#ifndef _EiC
pt0.x = cvRound(box_vtx[3].x);
pt0.y = cvRound(box_vtx[3].y);
for( i = 0; i < 4; i++ )
{
pt.x = cvRound(box_vtx[i].x);
pt.y = cvRound(box_vtx[i].y);
cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0);
pt0 = pt;
}
#endif
icenter.x = cvRound(center.x);
icenter.y = cvRound(center.y);
cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 );
cvShowImage( "rect & circle", img );
key = (char) cvWaitKey(0);
if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
break;
#if !ARRAY
cvClearMemStorage( storage );
#else
free( points );
#endif
}
cvDestroyWindow( "rect & circle" );
return 0;
}
#ifdef _EiC
main(1,"convexhull.c");
#endif

View File

@@ -1,2 +0,0 @@
REM an example of using haar cascade recognition for face and eye detection.
facedetect --cascade="../../data/haarcascades/haarcascade_frontalface_alt.xml" --nested-cascade="../../data/haarcascades/haarcascade_eye.xml" --scale=1.3 %1

View File

@@ -1,75 +0,0 @@
#include "opencv2/video/tracking_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
/* Usage banner. Fixes to the user-facing text: the stray newline and
 * unterminated quote inside "Farneback", "demonstrate" -> "demonstrates",
 * "read" -> "reads", and the "Franeback" misspelling. */
static void help(void)
{
    printf(
        "\n This program demonstrates dense \"Farneback\" optical flow\n"
        "It reads from camera 0, and shows how to use and display dense Farneback optical flow\n"
        "Usage: \n"
        "./fback_c \n");
}
/* Overlay the dense flow field on `cflowmap`: at every `step`-th pixel,
 * draw a line from the pixel to its flow-displaced position plus a dot at
 * the start. `scale` is accepted for API symmetry but ignored. */
static void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
double scale, CvScalar color)
{
int x, y;
(void)scale;
for( y = 0; y < cflowmap->rows; y += step)
for( x = 0; x < cflowmap->cols; x += step)
{
/* `flow` is CV_32FC2: per-pixel (dx, dy) displacement. */
CvPoint2D32f fxy = CV_MAT_ELEM(*flow, CvPoint2D32f, y, x);
cvLine(cflowmap, cvPoint(x,y), cvPoint(cvRound(x+fxy.x), cvRound(y+fxy.y)),
color, 1, 8, 0);
cvCircle(cflowmap, cvPoint(x,y), 2, color, -1, 8, 0);
}
}
/* Grab frames from camera 0, compute Farneback dense optical flow between
 * consecutive grayscale frames and visualize it until a key is pressed.
 * Returns -1 when the camera cannot be opened. */
int main( int argc, char** argv )
{
    CvCapture* capture = cvCreateCameraCapture(0);
    CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;

    (void)argc; (void)argv;
    help();

    if( !capture )
        return -1;

    cvNamedWindow("flow", 1);

    for(;;)
    {
        int firstFrame = gray == 0;   /* no previous frame to compare yet */
        IplImage* frame = cvQueryFrame(capture);
        if(!frame)
            break;
        if(!gray)
        {
            /* Lazily size the work buffers from the first captured frame. */
            gray = cvCreateMat(frame->height, frame->width, CV_8UC1);
            prevgray = cvCreateMat(gray->rows, gray->cols, gray->type);
            flow = cvCreateMat(gray->rows, gray->cols, CV_32FC2);
            cflow = cvCreateMat(gray->rows, gray->cols, CV_8UC3);
        }
        cvCvtColor(frame, gray, CV_BGR2GRAY);
        if( !firstFrame )
        {
            cvCalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
            cvCvtColor(prevgray, cflow, CV_GRAY2BGR);
            drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
            cvShowImage("flow", cflow);
        }
        if(cvWaitKey(30)>=0)
            break;
        {
            /* Current frame becomes the "previous" one for the next pass. */
            CvMat* temp;
            CV_SWAP(prevgray, gray, temp);
        }
    }

    /* Fix: release the matrices and window (leaked in the original). */
    cvReleaseMat(&prevgray);
    cvReleaseMat(&gray);
    cvReleaseMat(&flow);
    cvReleaseMat(&cflow);
    cvDestroyWindow("flow");
    cvReleaseCapture(&capture);
    return 0;
}

View File

@@ -1,322 +0,0 @@
/*
* A Demo to OpenCV Implementation of SURF
* Further Information Refer to "SURF: Speed-Up Robust Feature"
* Author: Liu Liu
* liuliu.1987+opencv@gmail.com
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include <iostream>
#include <vector>
#include <stdio.h>
using namespace std;
static void help()
{
printf(
"This program demonstrated the use of the SURF Detector and Descriptor using\n"
"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
"on planar objects.\n"
"Usage:\n"
"./find_obj <object_filename> <scene_filename>, default is box.png and box_in_scene.png\n\n");
return;
}
// define whether to use approximate nearest-neighbor search
#define USE_FLANN
#ifdef USE_FLANN
/* Match SURF descriptors object->image with FLANN 2-NN search. Appends
 * index pairs (object_idx, image_idx) to `ptpairs` for matches passing the
 * 0.6 distance-ratio test. The two unnamed keypoint parameters are unused
 * and exist only so the signature mirrors findPairs(). */
static void
flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,
const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
{
int length = (int)(objectDescriptors->elem_size/sizeof(float));
cv::Mat m_object(objectDescriptors->total, length, CV_32F);
cv::Mat m_image(imageDescriptors->total, length, CV_32F);
// copy descriptors
CvSeqReader obj_reader;
float* obj_ptr = m_object.ptr<float>(0);
cvStartReadSeq( objectDescriptors, &obj_reader );
for(int i = 0; i < objectDescriptors->total; i++ )
{
const float* descriptor = (const float*)obj_reader.ptr;
CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
memcpy(obj_ptr, descriptor, length*sizeof(float));
obj_ptr += length;
}
CvSeqReader img_reader;
float* img_ptr = m_image.ptr<float>(0);
cvStartReadSeq( imageDescriptors, &img_reader );
for(int i = 0; i < imageDescriptors->total; i++ )
{
const float* descriptor = (const float*)img_reader.ptr;
CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
memcpy(img_ptr, descriptor, length*sizeof(float));
img_ptr += length;
}
// find nearest neighbors using FLANN
cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4)); // using 4 randomized kdtrees
flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
int* indices_ptr = m_indices.ptr<int>(0);
float* dists_ptr = m_dists.ptr<float>(0);
for (int i=0;i<m_indices.rows;++i) {
/* Ratio test: keep only matches clearly better than the runner-up. */
if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
ptpairs.push_back(i);
ptpairs.push_back(indices_ptr[2*i]);
}
}
}
#else
// Squared L2 distance between two SURF descriptors, accumulated four
// components at a time. Stops early once the partial sum already exceeds
// `best` (the caller's best distance so far), since the exact value no
// longer matters. `length` must be a multiple of 4.
static double
compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
{
    double acc = 0;
    assert( length % 4 == 0 );
    for( int k = 0; k < length; k += 4 )
    {
        const double a = d1[k]     - d2[k];
        const double b = d1[k + 1] - d2[k + 1];
        const double c = d1[k + 2] - d2[k + 2];
        const double d = d1[k + 3] - d2[k + 3];
        acc += a*a + b*b + c*c + d*d;
        if( acc > best )
            break;
    }
    return acc;
}
/* Brute-force 1-NN: return the index of the model descriptor closest to
 * `vec` (considering only keypoints with the same Laplacian sign), or -1
 * when the best distance fails the 0.6 ratio test against the second best. */
static int
naiveNearestNeighbor( const float* vec, int laplacian,
const CvSeq* model_keypoints,
const CvSeq* model_descriptors )
{
int length = (int)(model_descriptors->elem_size/sizeof(float));
int i, neighbor = -1;
double d, dist1 = 1e6, dist2 = 1e6; /* best and second-best sq. distances */
CvSeqReader reader, kreader;
cvStartReadSeq( model_keypoints, &kreader, 0 );
cvStartReadSeq( model_descriptors, &reader, 0 );
for( i = 0; i < model_descriptors->total; i++ )
{
const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
const float* mvec = (const float*)reader.ptr;
CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
/* A Laplacian sign mismatch cannot be a match; cheap early reject. */
if( laplacian != kp->laplacian )
continue;
/* Pass dist2 as the cutoff so the comparison can bail out early. */
d = compareSURFDescriptors( vec, mvec, dist2, length );
if( d < dist1 )
{
dist2 = dist1;
dist1 = d;
neighbor = i;
}
else if ( d < dist2 )
dist2 = d;
}
if ( dist1 < 0.6*dist2 )
return neighbor;
return -1;
}
/* Brute-force matching: for every object descriptor, finds its nearest image
 * descriptor via naiveNearestNeighbor(). Accepted matches are appended to
 * `ptpairs` as a flat list of index pairs:
 * (object_idx, image_idx, object_idx, image_idx, ...). */
static void
findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
           const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs )
{
    int i;
    CvSeqReader reader, kreader;
    cvStartReadSeq( objectKeypoints, &kreader );
    cvStartReadSeq( objectDescriptors, &reader );
    ptpairs.clear();
    for( i = 0; i < objectDescriptors->total; i++ )
    {
        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
        const float* descriptor = (const float*)reader.ptr;
        /* keypoints and descriptors are parallel sequences; step both together */
        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
        /* search is restricted to image keypoints with the same Laplacian sign */
        int nearest_neighbor = naiveNearestNeighbor( descriptor, kp->laplacian, imageKeypoints, imageDescriptors );
        if( nearest_neighbor >= 0 )
        {
            ptpairs.push_back(i);
            ptpairs.push_back(nearest_neighbor);
        }
    }
}
#endif
/* a rough implementation for object location */
/* Matches object descriptors against scene descriptors, estimates a 3x3
 * homography with RANSAC, and maps the object's four corners into the scene.
 * Returns 1 on success (dst_corners filled in), 0 if fewer than 4 matches
 * were found or no homography could be estimated. */
static int
locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                    const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                    const CvPoint src_corners[4], CvPoint dst_corners[4] )
{
    double h[9];
    CvMat _h = cvMat(3, 3, CV_64F, h); // homography, row-major in h[]
    vector<int> ptpairs;
    vector<CvPoint2D32f> pt1, pt2;
    CvMat _pt1, _pt2;
    int i, n;
#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
    n = (int)(ptpairs.size()/2); // ptpairs stores flat (object_idx, image_idx) pairs
    if( n < 4 ) // homography estimation needs at least 4 correspondences
        return 0;
    pt1.resize(n);
    pt2.resize(n);
    for( i = 0; i < n; i++ )
    {
        pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
        pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
    }
    _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
    _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );
    /* RANSAC with a 5-pixel reprojection threshold */
    if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))
        return 0;
    for( i = 0; i < 4; i++ )
    {
        /* apply the homography in homogeneous coordinates */
        double x = src_corners[i].x, y = src_corners[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
    }
    return 1;
}
/* Demo entry point: extracts SURF keypoints/descriptors from an object image
 * and a scene image, locates the object in the scene via descriptor matching
 * plus a RANSAC homography, and visualizes the projected outline and the
 * individual keypoint correspondences. */
int main(int argc, char** argv)
{
    const char* object_filename = argc == 3 ? argv[1] : "box.png";
    const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
    cv::initModule_nonfree(); // SURF is implemented in the nonfree module
    help();
    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !object || !image )
    {
        fprintf( stderr, "Can not load %s and/or %s\n",
            object_filename, scene_filename );
        exit(-1);
    }
    CvMemStorage* storage = cvCreateMemStorage(0);
    cvNamedWindow("Object", 1);
    cvNamedWindow("Object Correspond", 1);
    static cv::Scalar colors[] =
    {
        cv::Scalar(0,0,255),
        cv::Scalar(0,128,255),
        cv::Scalar(0,255,255),
        cv::Scalar(0,255,0),
        cv::Scalar(255,128,0),
        cv::Scalar(255,255,0),
        cv::Scalar(255,0,0),
        cv::Scalar(255,0,255),
        cv::Scalar(255,255,255)
    };
    IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
    cvCvtColor( object, object_color, CV_GRAY2BGR );
    CvSeq* objectKeypoints = 0, *objectDescriptors = 0;
    CvSeq* imageKeypoints = 0, *imageDescriptors = 0;
    int i;
    // Hessian threshold 500; second argument 1 requests extended (128-dim) descriptors
    CvSURFParams params = cvSURFParams(500, 1);
    double tt = (double)cvGetTickCount();
    cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
    printf("Object Descriptors: %d\n", objectDescriptors->total);
    cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );
    printf("Image Descriptors: %d\n", imageDescriptors->total);
    tt = (double)cvGetTickCount() - tt;
    printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));
    // object corners, clockwise from top-left
    CvPoint src_corners[4] = {CvPoint(0,0), CvPoint(object->width,0), CvPoint(object->width, object->height), CvPoint(0, object->height)};
    CvPoint dst_corners[4];
    // stacked visualization canvas: object on top, scene below
    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    cvCopy( object, correspond );
    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    cvCopy( image, correspond );
    cvResetImageROI( correspond );
#ifdef USE_FLANN
    printf("Using approximate nearest neighbor search\n");
#endif
    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,
        imageDescriptors, src_corners, dst_corners ))
    {
        // draw the projected object outline in the scene half of the canvas
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),
                cvPoint(r2.x, r2.y+object->height ), colors[8] );
        }
    }
    vector<int> ptpairs;
#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
    // connect each matched object keypoint with its scene counterpart
    for( i = 0; i < (int)ptpairs.size(); i += 2 )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );
        cvLine( correspond, cvPointFrom32f(r1->pt),
            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }
    cvShowImage( "Object Correspond", correspond );
    // draw every detected object keypoint as a circle scaled to its SURF size
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );
    cvWaitKey(0);
    cvDestroyWindow("Object");
    cvDestroyWindow("Object Correspond");
    return 0;
}

View File

@@ -1,166 +0,0 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
// Prints the sample description and the two supported command-line formats.
// NOTE: the first two string literals used to be concatenated without a
// separator, which printed "...classifierSURF is used..."; a '\n' fixes that.
static void help()
{
    cout << "This program shows the use of the Calonder point descriptor classifier\n"
            "SURF is used to detect interest points, Calonder is used to describe/match these points\n"
            "Format:" << endl <<
            " classifier_file(to write) test_image file_with_train_images_filenames(txt)" <<
            " or" << endl <<
            " classifier_file(to read) test_image" << "\n" << endl <<
            "Using OpenCV version " << CV_VERSION << "\n" << endl;
    return;
}
/*
 * Generates random perspective transform of image
 *
 * Fills H (created as 3x3 CV_32FC1) with a random near-identity perspective
 * matrix -- diagonal scales in [0.8, 1.2], small shears, translations up to
 * 10% of the image size, and tiny projective terms -- then warps `src` into
 * `dst` with it. Used to synthesize a ground-truth test pair for matching.
 */
static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
    H.create(3, 3, CV_32FC1);
    H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols; // translation scaled to image width
    H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows; // translation scaled to image height
    H.at<float>(2,0) = rng.uniform( -1e-4f, 1e-4f);
    H.at<float>(2,1) = rng.uniform( -1e-4f, 1e-4f);
    H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);
    warpPerspective( src, dst, H, src.size() );
}
/*
 * Trains Calonder classifier and writes trained classifier in file:
 * imgFilename - name of .txt file which contains list of full filenames of train images,
 * classifierFilename - name of binary file in which classifier will be written.
 *
 * To train the Calonder classifier the RTreeClassifier class needs to be used.
 * Exits the process if no training image can be read.
 */
static void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    // Reads train images
    ifstream is( imgFilename.c_str(), ifstream::in );
    vector<Mat> trainImgs;
    while( !is.eof() )
    {
        string str;
        getline( is, str );
        if (str.empty()) break; // stop at the first blank line (or EOF)
        Mat img = imread( str, IMREAD_GRAYSCALE );
        if( !img.empty() )
            trainImgs.push_back( img ); // unreadable entries are silently skipped
    }
    if( trainImgs.empty() )
    {
        cout << "All train images can not be read." << endl;
        exit(-1);
    }
    cout << trainImgs.size() << " train images were read." << endl;
    // Extracts keypoints from train images
    SurfFeatureDetector detector;
    vector<BaseKeypoint> trainPoints;
    // IplImage headers must outlive trainPoints: BaseKeypoint stores raw pointers into this vector
    vector<IplImage> iplTrainImgs(trainImgs.size());
    for( size_t imgIdx = 0; imgIdx < trainImgs.size(); imgIdx++ )
    {
        iplTrainImgs[imgIdx] = trainImgs[imgIdx];
        vector<KeyPoint> kps; detector.detect( trainImgs[imgIdx], kps );
        for( size_t pointIdx = 0; pointIdx < kps.size(); pointIdx++ )
        {
            Point2f p = kps[pointIdx].pt;
            trainPoints.push_back( BaseKeypoint(cvRound(p.x), cvRound(p.y), &iplTrainImgs[imgIdx]) );
        }
    }
    // Trains Calonder classifier on extracted points
    // (48 trees, depth 9, 100 synthetic views per point)
    RTreeClassifier classifier;
    classifier.train( trainPoints, theRNG(), 48, 9, 100 );
    // Writes classifier
    classifier.write( classifierFilename.c_str() );
}
/*
 * Test Calonder classifier to match keypoints on given image:
 * classifierFilename - name of file from which classifier will be read,
 * imgFilename - test image filename.
 *
 * To calculate keypoint descriptors you may use RTreeClassifier class (as to train),
 * but it is convenient to use CalonderDescriptorExtractor class which is wrapper of
 * RTreeClassifier.
 *
 * A second view is synthesized with a known random homography so that matches
 * can be verified against ground truth; inliers are drawn in a separate color.
 */
static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12;
    if( img1.empty() )
    {
        cout << "Test image can not be read." << endl;
        exit(-1);
    }
    // synthesize the second image with a known ground-truth homography H12
    warpPerspectiveRand( img1, img2, H12, theRNG() );
    // Extract keypoints from test images
    SurfFeatureDetector detector;
    vector<KeyPoint> keypoints1; detector.detect( img1, keypoints1 );
    vector<KeyPoint> keypoints2; detector.detect( img2, keypoints2 );
    // Compute descriptors
    CalonderDescriptorExtractor<float> de( classifierFilename );
    Mat descriptors1; de.compute( img1, keypoints1, descriptors1 );
    Mat descriptors2; de.compute( img2, keypoints2, descriptors2 );
    // Match descriptors
    BFMatcher matcher(de.defaultNorm());
    vector<DMatch> matches;
    matcher.match( descriptors1, descriptors2, matches );
    // Prepare inlier mask: a match is an inlier if the matched point lies
    // within 4 pixels of where the ground-truth homography maps it
    vector<char> matchesMask( matches.size(), 0 );
    vector<Point2f> points1; KeyPoint::convert( keypoints1, points1 );
    vector<Point2f> points2; KeyPoint::convert( keypoints2, points2 );
    Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
    for( size_t mi = 0; mi < matches.size(); mi++ )
    {
        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
            matchesMask[mi] = 1;
    }
    // Draw
    Mat drawImg;
    drawMatches( img1, keypoints1, img2, keypoints2, matches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask );
    string winName = "Matches";
    namedWindow( winName, WINDOW_AUTOSIZE );
    imshow( winName, drawImg );
    waitKey();
}
// Entry point. With 4 arguments a classifier is trained and written first;
// in both modes the classifier file is then used to match keypoints on the
// test image. Any other argument count prints usage and fails.
int main( int argc, char **argv )
{
    const bool wantsTraining = (argc == 4);
    if( !wantsTraining && argc != 3 )
    {
        help();
        return -1;
    }
    if( wantsTraining )
        trainCalonderClassifier( argv[1], argv[3] );
    testCalonderClassifier( argv[1], argv[2] );
    return 0;
}

View File

@@ -1,157 +0,0 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/legacy.hpp"
#include <algorithm>
#include <iostream>
#include <vector>
#include <stdio.h>
using namespace std;
using namespace cv;
// Prints the sample description and usage line.
// (Fixed the "plannar" -> "planar" typo in the user-visible text.)
static void help()
{
    printf( "This program shows the use of the \"fern\" planar PlanarObjectDetector point\n"
            "descriptor classifier\n"
            "Usage:\n"
            "./find_obj_ferns <object_filename> <scene_filename>, default: box.png and box_in_scene.png\n\n");
    return;
}
/* Demo entry point: trains (or loads a cached) ferns-based planar object
 * detector for the object image, detects the object in the scene image,
 * and visualizes the outline, keypoint correspondences and the keypoints
 * detected in both images. */
int main(int argc, char** argv)
{
    int i;
    const char* object_filename = argc > 1 ? argv[1] : "box.png";
    const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
    help();
    Mat object = imread( object_filename, IMREAD_GRAYSCALE );
    Mat scene = imread( scene_filename, IMREAD_GRAYSCALE );
    if( !object.data || !scene.data )
    {
        fprintf( stderr, "Can not load %s and/or %s\n",
            object_filename, scene_filename );
        exit(-1);
    }
    double imgscale = 1; // 1 means the scene is used at full resolution
    Mat image;
    resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
    namedWindow("Object", 1);
    namedWindow("Image", 1);
    namedWindow("Object Correspondence", 1);
    Size patchSize(32, 32);
    LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
    ldetector.setVerbose(true);
    PlanarObjectDetector detector;
    vector<Mat> objpyr, imgpyr;
    int blurKSize = 3;
    double sigma = 0;
    // smooth both images before building the pyramids used by the detector
    GaussianBlur(object, object, Size(blurKSize, blurKSize), sigma, sigma);
    GaussianBlur(image, image, Size(blurKSize, blurKSize), sigma, sigma);
    buildPyramid(object, objpyr, ldetector.nOctaves-1);
    buildPyramid(image, imgpyr, ldetector.nOctaves-1);
    vector<KeyPoint> objKeypoints, imgKeypoints;
    PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
    // the trained model is cached next to the object image to avoid retraining
    string model_filename = format("%s_model.xml.gz", object_filename);
    printf("Trying to load %s ...\n", model_filename.c_str());
    FileStorage fs(model_filename, FileStorage::READ);
    if( fs.isOpened() )
    {
        detector.read(fs.getFirstTopLevelNode());
        printf("Successfully loaded %s.\n", model_filename.c_str());
    }
    else
    {
        printf("The file not found and can not be read. Let's train the model.\n");
        printf("Step 1. Finding the robust keypoints ...\n");
        ldetector.setVerbose(true);
        ldetector.getMostStable2D(object, objKeypoints, 100, gen);
        printf("Done.\nStep 2. Training ferns-based planar object detector ...\n");
        detector.setVerbose(true);
        detector.train(objpyr, objKeypoints, patchSize.width, 100, 11, 10000, ldetector, gen);
        printf("Done.\nStep 3. Saving the model to %s ...\n", model_filename.c_str());
        if( fs.open(model_filename, FileStorage::WRITE) )
            detector.write(fs, "ferns_model");
    }
    printf("Now find the keypoints in the image, try recognize them and compute the homography matrix\n");
    fs.release();
    vector<Point2f> dst_corners;
    // stacked visualization canvas: object on top, scene below
    Mat correspond( object.rows + image.rows, std::max(object.cols, image.cols), CV_8UC3);
    correspond = Scalar(0.);
    Mat part(correspond, Rect(0, 0, object.cols, object.rows));
    cvtColor(object, part, CV_GRAY2BGR);
    part = Mat(correspond, Rect(0, object.rows, image.cols, image.rows));
    cvtColor(image, part, CV_GRAY2BGR);
    vector<int> pairs;
    Mat H;
    double t = (double)getTickCount();
    objKeypoints = detector.getModelPoints();
    ldetector(imgpyr, imgKeypoints, 300);
    std::cout << "Object keypoints: " << objKeypoints.size() << "\n";
    std::cout << "Image keypoints: " << imgKeypoints.size() << "\n";
    bool found = detector(imgpyr, imgKeypoints, H, dst_corners, &pairs);
    t = (double)getTickCount() - t;
    printf("%gms\n", t*1000/getTickFrequency());
    if( found )
    {
        // draw the detected object outline in the scene half of the canvas
        for( i = 0; i < 4; i++ )
        {
            Point r1 = dst_corners[i%4];
            Point r2 = dst_corners[(i+1)%4];
            line( correspond, Point(r1.x, r1.y+object.rows),
                Point(r2.x, r2.y+object.rows), Scalar(0,0,255) );
        }
    }
    // connect each recognized keypoint pair (pairs stores flat index pairs)
    for( i = 0; i < (int)pairs.size(); i += 2 )
    {
        line( correspond, objKeypoints[pairs[i]].pt,
            imgKeypoints[pairs[i+1]].pt + Point2f(0,(float)object.rows),
            Scalar(0,255,0) );
    }
    imshow( "Object Correspondence", correspond );
    Mat objectColor;
    cvtColor(object, objectColor, CV_GRAY2BGR);
    // keypoint dots plus circles whose radius encodes the pyramid octave
    for( i = 0; i < (int)objKeypoints.size(); i++ )
    {
        circle( objectColor, objKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
        circle( objectColor, objKeypoints[i].pt, (1 << objKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
    }
    Mat imageColor;
    cvtColor(image, imageColor, CV_GRAY2BGR);
    for( i = 0; i < (int)imgKeypoints.size(); i++ )
    {
        circle( imageColor, imgKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
        circle( imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
    }
    imwrite("correspond.png", correspond );
    imshow( "Object", objectColor );
    imshow( "Image", imageColor );
    waitKey(0);
    return 0;
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

View File

@@ -1,88 +0,0 @@
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/core/utility.hpp"
#include <stdio.h>
using namespace cv;
// Prints the sample description, usage line and defaults.
// (Fixed the grammar in the user-visible text: "demonstrated" -> "demonstrates".)
static void help()
{
    printf( "This program demonstrates the use of the latentSVM detector.\n"
            "It reads in a trained object model and then uses that to detect the object in an image\n"
            "Call:\n"
            "./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"
            "  The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"
            "  Press any key to quit.\n");
}
// Default model/image paths; both can be overridden on the command line (argv[2]/argv[1]).
const char* model_filename = "cat.xml";
const char* image_filename = "cat.jpg";
// Number of detection threads; -1 keeps OpenCV's default thread count.
int tbbNumThreads = -1;
/* Runs the latent SVM detector on `image` with `numThreads` worker threads
 * (-1 = OpenCV default), prints timing, and draws each detection as a red
 * rectangle whose intensity is proportional to its score. The image is
 * modified in place. */
static void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* detections = 0;
    int i = 0;
    int64 start = 0, finish = 0;
    setNumThreads(numThreads);
    numThreads = getNumThreads(); // query the thread count actually in effect
    printf("Number of threads %i\n", numThreads);
    start = cvGetTickCount();
    // 0.5f is the overlap threshold used for non-maximum suppression
    detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);
    finish = cvGetTickCount();
    printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));
    setNumThreads(-1); // restore the default thread count
    for( i = 0; i < detections->total; i++ )
    {
        CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );
        float score = detection.score;
        CvRect bounding_box = detection.rect;
        cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y),
            cvPoint(bounding_box.x + bounding_box.width,
                bounding_box.y + bounding_box.height),
            CV_RGB(cvRound(255.0f*score),0,0), 3 );
    }
    cvReleaseMemStorage( &storage );
}
/* Entry point: parses optional image/model/thread-count arguments, loads the
 * image and the trained latent SVM model, runs detection, shows the result,
 * and releases all resources. */
int main(int argc, char* argv[])
{
    help();
    if (argc > 2)
    {
        image_filename = argv[1];
        model_filename = argv[2];
        if (argc > 3)
        {
            tbbNumThreads = atoi(argv[3]);
        }
    }
    IplImage* image = cvLoadImage(image_filename);
    if (!image)
    {
        printf( "Unable to load the image\n"
                "Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
        return -1;
    }
    CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);
    if (!detector)
    {
        printf( "Unable to load the model\n"
                "Pass it as the second parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
        cvReleaseImage( &image ); // avoid leaking the already-loaded image
        return -1;
    }
    detect_and_draw_objects( image, detector, tbbNumThreads );
    cvNamedWindow( "test", 0 );
    cvShowImage( "test", image );
    cvWaitKey(0);
    cvReleaseLatentSvmDetector( &detector );
    cvReleaseImage( &image );
    cvDestroyAllWindows();
    return 0;
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 90 KiB

View File

@@ -1,130 +0,0 @@
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
// Source image, working/output image, and the current structuring element
// (recreated on every trackbar callback).
IplImage* src = 0;
IplImage* dst = 0;
IplConvKernel* element = 0;
int element_shape = CV_SHAPE_RECT;
//the address of variable which receives trackbar position update
int max_iters = 10;
// Trackbar positions; the element size is |pos - max_iters|, so the midpoint
// of each trackbar corresponds to "no operation".
int open_close_pos = 0;
int erode_dilate_pos = 0;
// callback function for open/close trackbar
// Positions left of the midpoint perform morphological open (erode then
// dilate), positions right of it perform close (dilate then erode); the
// distance from the midpoint sets the structuring element size.
static void OpenClose(int pos)
{
    int n = open_close_pos - max_iters;
    int an = n > 0 ? n : -n; // absolute value = element radius
    (void)pos;
    element = cvCreateStructuringElementEx( an*2+1, an*2+1, an, an, element_shape, 0 );
    if( n < 0 )
    {
        // open: erode followed by dilate
        cvErode(src,dst,element,1);
        cvDilate(dst,dst,element,1);
    }
    else
    {
        // close: dilate followed by erode
        cvDilate(src,dst,element,1);
        cvErode(dst,dst,element,1);
    }
    cvReleaseStructuringElement(&element);
    cvShowImage("Open/Close",dst);
}
// callback function for erode/dilate trackbar
// Positions left of the midpoint erode, positions right of it dilate; the
// distance from the midpoint sets the structuring element size.
static void ErodeDilate(int pos)
{
    int n = erode_dilate_pos - max_iters;
    int an = n > 0 ? n : -n; // absolute value = element radius
    (void)pos;
    element = cvCreateStructuringElementEx( an*2+1, an*2+1, an, an, element_shape, 0 );
    if( n < 0 )
    {
        cvErode(src,dst,element,1);
    }
    else
    {
        cvDilate(src,dst,element,1);
    }
    cvReleaseStructuringElement(&element);
    cvShowImage("Erode/Dilate",dst);
}
/* Prints the sample description, usage line and hot keys.
 * (Fixed typos in the user-visible text: "demonstrated" -> "demonstrates",
 * "rectangluar" -> "rectangular".) */
static void help(void)
{
    printf( "This program demonstrates the use of the morphology operator, especially open, close, erode, dilate operations\n"
            "Morphology operators are built on max (close) and min (open) operators as measured by pixels covered by small structuring elements.\n"
            "These operators are very efficient.\n"
            "This program also allows you to play with elliptical, rectangular and cross structure elements\n"
            "Usage: \n"
            "./morphologyc [image_name -- Default baboon.jpg]\n"
            "\nHot keys: \n"
            "\tESC - quit the program\n"
            "\tr - use rectangle structuring element\n"
            "\te - use elliptic structuring element\n"
            "\tc - use cross-shaped structuring element\n"
            "\tSPACE - loop through all the options\n" );
}
/* Entry point: loads the input image, creates two trackbar-driven windows
 * (open/close and erode/dilate), and loops on keyboard input to switch the
 * structuring element shape until ESC is pressed. */
int main( int argc, char** argv )
{
    char* filename = 0;
    help();
    filename = argc == 2 ? argv[1] : (char*)"baboon.jpg";
    if( (src = cvLoadImage(filename,1)) == 0 )
    {
        printf("Cannot load file image %s\n", filename);
        help();
        return -1;
    }
    dst = cvCloneImage(src);
    //create windows for output images
    cvNamedWindow("Open/Close",1);
    cvNamedWindow("Erode/Dilate",1);
    // start both trackbars at the midpoint (= identity operation)
    open_close_pos = erode_dilate_pos = max_iters;
    cvCreateTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose);
    cvCreateTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate);
    for(;;)
    {
        int c;
        // re-render with the current positions and element shape
        OpenClose(open_close_pos);
        ErodeDilate(erode_dilate_pos);
        c = cvWaitKey(0);
        if( (char)c == 27 ) // ESC quits
            break;
        if( (char)c == 'e' )
            element_shape = CV_SHAPE_ELLIPSE;
        else if( (char)c == 'r' )
            element_shape = CV_SHAPE_RECT;
        else if( (char)c == 'c' )
            element_shape = CV_SHAPE_CROSS;
        else if( (char)c == ' ' ) // cycle through the three shapes
            element_shape = (element_shape + 1) % 3;
    }
    //release images
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    //destroy windows
    cvDestroyWindow("Open/Close");
    cvDestroyWindow("Erode/Dilate");
    return 0;
}

View File

@@ -1,87 +0,0 @@
/* This sample code was originally provided by Liu Liu
* Copyright (C) 2009, Liu Liu All rights reserved.
*/
#include <opencv2/core/utility.hpp>
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
// Prints a short description of the MSER sample and its command-line format.
static void help()
{
    static const char message[] =
        "\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
        "It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
        "\nCall:\n"
        "./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n";
    std::cout << message;
}
// BGR palette used to paint the detected MSER regions; regions cycle through
// these nine colors via index i % 9.
static const Vec3b bcolors[] =
{
    Vec3b(0,0,255),
    Vec3b(0,128,255),
    Vec3b(0,255,255),
    Vec3b(0,255,0),
    Vec3b(255,128,0),
    Vec3b(255,255,0),
    Vec3b(255,0,0),
    Vec3b(255,0,255),
    Vec3b(255,255,255)
};
/* Entry point: loads an image, runs the MSER detector on its YCrCb version,
 * paints each region with a palette color, fits an ellipse to each region,
 * and shows the original, the painted regions, and the fitted ellipses. */
int main( int argc, char** argv )
{
    string path; // NOTE(review): never used; kept for byte-identical code
    Mat img0, img, yuv, gray, ellipses;
    help();
    img0 = imread( argc != 2 ? "puzzle.png" : argv[1], 1 );
    if( img0.empty() )
    {
        if( argc != 2 )
            cout << "\nUsage: mser_sample <path_to_image>\n";
        else
            cout << "Unable to load image " << argv[1] << endl;
        return 0;
    }
    // detection runs on the YCrCb image; drawing happens on a gray->BGR copy
    cvtColor(img0, yuv, COLOR_BGR2YCrCb);
    cvtColor(img0, gray, COLOR_BGR2GRAY);
    cvtColor(gray, img, COLOR_GRAY2BGR);
    img.copyTo(ellipses);
    vector<vector<Point> > contours;
    double t = (double)getTickCount();
    MSER()(yuv, contours); // default MSER parameters
    t = (double)getTickCount() - t;
    printf( "MSER extracted %d contours in %g ms.\n", (int)contours.size(),
        t*1000./getTickFrequency() );
    // draw mser's with different colors
    for( int i = (int)contours.size()-1; i >= 0; i-- )
    {
        const vector<Point>& r = contours[i];
        for ( int j = 0; j < (int)r.size(); j++ )
        {
            Point pt = r[j];
            img.at<Vec3b>(pt) = bcolors[i%9];
        }
        // find ellipse (it seems cvfitellipse2 have error or sth?)
        RotatedRect box = fitEllipse( r );
        box.angle=(float)CV_PI/2-box.angle; // compensate the angle convention
        ellipse( ellipses, box, Scalar(196,255,255), 2 );
    }
    imshow( "original", img0 );
    imshow( "response", img );
    imshow( "ellipses", ellipses );
    waitKey(0);
}

View File

@@ -1,322 +0,0 @@
#include "opencv2/core/core_c.h"
#include "opencv2/ml/ml.hpp"
#include <stdio.h>
// Prints the sample description, usage line, and the citation for the UCI
// agaricus-lepiota training database.
static void help()
{
    printf("\nThis program demonstrated the use of OpenCV's decision tree function for learning and predicting data\n"
           "Usage :\n"
           "./mushroom <path to agaricus-lepiota.data>\n"
           "\n"
           "The sample demonstrates how to build a decision tree for classifying mushrooms.\n"
           "It uses the sample base agaricus-lepiota.data from UCI Repository, here is the link:\n"
           "\n"
           "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
           "UCI Repository of machine learning databases\n"
           "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
           "Irvine, CA: University of California, Department of Information and Computer Science.\n"
           "\n"
           "// loads the mushroom database, which is a text file, containing\n"
           "// one training sample per row, all the input variables and the output variable are categorical,\n"
           "// the values are encoded by characters.\n\n");
}
/* Loads the agaricus-lepiota CSV database into three newly allocated
 * matrices: *data (features, CV_32F), *missing (mask of '?' entries, CV_8U)
 * and *responses (class label per row, CV_32F). Each input line is expected
 * to be comma-separated single characters ("p,x,s,n,..."); the first field
 * is the response. Returns 1 on success, 0 if the file cannot be read. */
static int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
{
    const int M = 1024; // maximum line length
    FILE* f = fopen( filename, "rt" );
    CvMemStorage* storage;
    CvSeq* seq;
    char buf[M+2], *ptr;
    float* el_ptr;
    CvSeqReader reader;
    int i, j, var_count = 0;
    if( !f )
        return 0;
    // read the first line and determine the number of variables
    if( !fgets( buf, M, f ))
    {
        fclose(f);
        return 0;
    }
    for( ptr = buf; *ptr != '\0'; ptr++ )
        var_count += *ptr == ',';
    // each field is one character followed by ',' (or newline at the end)
    assert( ptr - buf == (var_count+1)*2 );
    // create temporary memory storage to store the whole database
    el_ptr = new float[var_count+1];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), (var_count+1)*sizeof(float), storage );
    for(;;)
    {
        // parse the line currently in buf: characters at even offsets are values
        for( i = 0; i <= var_count; i++ )
        {
            int c = buf[i*2];
            el_ptr[i] = c == '?' ? -1.f : (float)c; // -1 marks a missing value
        }
        if( i != var_count+1 )
            break;
        cvSeqPush( seq, el_ptr );
        if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
            break;
    }
    fclose(f);
    // allocate the output matrices and copy the base there
    *data = cvCreateMat( seq->total, var_count, CV_32F );
    *missing = cvCreateMat( seq->total, var_count, CV_8U );
    *responses = cvCreateMat( seq->total, 1, CV_32F );
    cvStartReadSeq( seq, &reader );
    for( i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr + 1; // skip the response field
        float* ddata = data[0]->data.fl + var_count*i;
        float* dr = responses[0]->data.fl + i;
        uchar* dm = missing[0]->data.ptr + var_count*i;
        for( j = 0; j < var_count; j++ )
        {
            ddata[j] = sdata[j];
            dm[j] = sdata[j] < 0; // negative values came from '?' fields
        }
        *dr = sdata[-1]; // the response is the first field of the row
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }
    cvReleaseMemStorage( &storage );
    delete [] el_ptr;
    return 1;
}
/* Trains a CvDTree on the mushroom data, treating every variable as
 * categorical and weighting the poisonous class by `p_weight`, then prints
 * the hit rate on the training set. Returns the trained tree; the caller
 * owns it and must delete it. */
static CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
                                       const CvMat* responses, float p_weight )
{
    CvDTree* dtree;
    CvMat* var_type;
    int i, hr1 = 0, hr2 = 0, p_total = 0;
    float priors[] = { 1, p_weight };
    var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
    cvSet( var_type, cvScalarAll(CV_VAR_CATEGORICAL) ); // all the variables are categorical
    dtree = new CvDTree;
    dtree->train( data, CV_ROW_SAMPLE, responses, 0, 0, var_type, missing,
                  CvDTreeParams( 8, // max depth
                                 10, // min sample count
                                 0, // regression accuracy: N/A here
                                 true, // compute surrogate split, as we have missing data
                                 15, // max number of categories (use sub-optimal algorithm for larger numbers)
                                 10, // the number of cross-validation folds
                                 true, // use 1SE rule => smaller tree
                                 true, // throw away the pruned tree branches
                                 priors // the array of priors, the bigger p_weight, the more attention
                                        // to the poisonous mushrooms
                                        // (a mushroom will be judged to be poisonous with bigger chance)
                                 ));
    // compute hit-rate on the training database, demonstrates predict usage.
    for( i = 0; i < data->rows; i++ )
    {
        CvMat sample, mask;
        cvGetRow( data, &sample, i );
        cvGetRow( missing, &mask, i );
        double r = dtree->predict( &sample, &mask )->value;
        int d = fabs(r - responses->data.fl[i]) >= FLT_EPSILON; // 1 when misclassified
        if( d )
        {
            if( r != 'p' )
                hr1++; // poisonous mushroom predicted edible (dangerous miss)
            else
                hr2++; // edible mushroom predicted poisonous (false alarm)
        }
        p_total += responses->data.fl[i] == 'p';
    }
    printf( "Results on the training database:\n"
            "\tPoisonous mushrooms mis-predicted: %d (%g%%)\n"
            "\tFalse-alarms: %d (%g%%)\n", hr1, (double)hr1*100/p_total,
            hr2, (double)hr2*100/(data->rows - p_total) );
    cvReleaseMat( &var_type );
    return dtree;
}
// Human-readable descriptions of the 22 categorical attributes of the UCI
// agaricus-lepiota (mushroom) database, in column order; NULL-terminated.
// Fixed to match the UCI attribute documentation: "ibrous" -> "fibrous" in
// the two stalk-surface entries, and the missing comma after "paths=p".
static const char* var_desc[] =
{
    "cap shape (bell=b,conical=c,convex=x,flat=f)",
    "cap surface (fibrous=f,grooves=g,scaly=y,smooth=s)",
    "cap color (brown=n,buff=b,cinnamon=c,gray=g,green=r,\n\tpink=p,purple=u,red=e,white=w,yellow=y)",
    "bruises? (bruises=t,no=f)",
    "odor (almond=a,anise=l,creosote=c,fishy=y,foul=f,\n\tmusty=m,none=n,pungent=p,spicy=s)",
    "gill attachment (attached=a,descending=d,free=f,notched=n)",
    "gill spacing (close=c,crowded=w,distant=d)",
    "gill size (broad=b,narrow=n)",
    "gill color (black=k,brown=n,buff=b,chocolate=h,gray=g,\n\tgreen=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y)",
    "stalk shape (enlarging=e,tapering=t)",
    "stalk root (bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r)",
    "stalk surface above ring (fibrous=f,scaly=y,silky=k,smooth=s)",
    "stalk surface below ring (fibrous=f,scaly=y,silky=k,smooth=s)",
    "stalk color above ring (brown=n,buff=b,cinnamon=c,gray=g,orange=o,\n\tpink=p,red=e,white=w,yellow=y)",
    "stalk color below ring (brown=n,buff=b,cinnamon=c,gray=g,orange=o,\n\tpink=p,red=e,white=w,yellow=y)",
    "veil type (partial=p,universal=u)",
    "veil color (brown=n,orange=o,white=w,yellow=y)",
    "ring number (none=n,one=o,two=t)",
    "ring type (cobwebby=c,evanescent=e,flaring=f,large=l,\n\tnone=n,pendant=p,sheathing=s,zone=z)",
    "spore print color (black=k,brown=n,buff=b,chocolate=h,green=r,\n\torange=o,purple=u,white=w,yellow=y)",
    "population (abundant=a,clustered=c,numerous=n,\n\tscattered=s,several=v,solitary=y)",
    "habitat (grasses=g,leaves=l,meadows=m,paths=p,\n\turban=u,waste=w,woods=d)",
    0
};
/* Interactively (via a y/n prompt on stdin) prints each variable's relative
 * importance as computed by the trained decision tree. Variable names are
 * taken from var_desc[], truncated before the "(...)" value legend. */
static void print_variable_importance( CvDTree* dtree )
{
    const CvMat* var_importance = dtree->get_var_importance();
    int i;
    char input[1000];
    if( !var_importance )
    {
        printf( "Error: Variable importance can not be retrieved\n" );
        return;
    }
    printf( "Print variable importance information? (y/n) " );
    int values_read = scanf( "%1s", input );
    CV_Assert(values_read == 1);
    if( input[0] != 'y' && input[0] != 'Y' )
        return;
    for( i = 0; i < var_importance->cols*var_importance->rows; i++ )
    {
        double val = var_importance->data.db[i];
        char buf[100];
        // strip the "(values legend)" part of the description
        int len = (int)(strchr( var_desc[i], '(' ) - var_desc[i] - 1);
        strncpy( buf, var_desc[i], len );
        buf[len] = '\0';
        printf( "%s", buf );
        printf( ": %g%%\n", val*100. );
    }
}
/* Interactive console classifier: repeatedly walks the trained decision tree
 * from the root, asking the user for the value of each variable a split
 * needs ('?' = unknown, which falls through to the next surrogate split),
 * and prints whether the described mushroom is classified edible or
 * poisonous. This is a hand-rolled version of CvDTree::predict() that makes
 * the traversal visible. */
static void interactive_classification( CvDTree* dtree )
{
    char input[1000];
    const CvDTreeNode* root;
    CvDTreeTrainData* data;
    if( !dtree )
        return;
    root = dtree->get_root();
    data = dtree->get_data();
    for(;;)
    {
        const CvDTreeNode* node;
        printf( "Start/Proceed with interactive mushroom classification (y/n): " );
        int values_read = scanf( "%1s", input );
        CV_Assert(values_read == 1);
        if( input[0] != 'y' && input[0] != 'Y' )
            break;
        printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" );
        // custom version of predict
        node = root;
        for(;;)
        {
            CvDTreeSplit* split = node->split;
            int dir = 0;
            // stop at leaves and at branches pruned away by the 1SE rule
            if( !node->left || node->Tn <= dtree->get_pruned_tree_idx() || !node->split )
                break;
            // try the primary split first, then its surrogates on '?'
            for( ; split != 0; )
            {
                int vi = split->var_idx, j;
                int count = data->cat_count->data.i[vi];
                const int* map = data->cat_map->data.i + data->cat_ofs->data.i[vi];
                printf( "%s: ", var_desc[vi] );
                values_read = scanf( "%1s", input );
                CV_Assert(values_read == 1);
                if( input[0] == '?' )
                {
                    split = split->next; // fall through to the surrogate split
                    continue;
                }
                // convert the input character to the normalized value of the variable
                for( j = 0; j < count; j++ )
                    if( map[j] == input[0] )
                        break;
                if( j < count )
                {
                    // test the category bit to pick the left/right branch
                    dir = (split->subset[j>>5] & (1 << (j&31))) ? -1 : 1;
                    if( split->inversed )
                        dir = -dir;
                    break;
                }
                else
                    printf( "Error: unrecognized value\n" );
            }
            if( !dir )
            {
                printf( "Impossible to classify the sample\n");
                node = 0;
                break;
            }
            node = dir < 0 ? node->left : node->right;
        }
        if( node )
            printf( "Prediction result: the mushroom is %s\n",
                node->class_idx == 0 ? "EDIBLE" : "POISONOUS" );
        printf( "\n-----------------------------\n" );
    }
}
/* Entry point: loads the mushroom database, trains the decision tree with a
 * 10x prior on the poisonous class, then offers variable-importance output
 * and an interactive classification session. */
int main( int argc, char** argv )
{
    CvMat *data = 0, *missing = 0, *responses = 0;
    CvDTree* dtree;
    const char* base_path = argc >= 2 ? argv[1] : "agaricus-lepiota.data";
    help();
    if( !mushroom_read_database( base_path, &data, &missing, &responses ) )
    {
        printf( "\nUnable to load the training database\n\n");
        help();
        return -1;
    }
    dtree = mushroom_create_dtree( data, missing, responses,
        10 // poisonous mushrooms will have 10x higher weight in the decision tree
        );
    // the raw database is no longer needed once the tree is trained
    cvReleaseMat( &data );
    cvReleaseMat( &missing );
    cvReleaseMat( &responses );
    print_variable_importance( dtree );
    interactive_classification( dtree );
    delete dtree;
    return 0;
}

View File

@@ -1,119 +0,0 @@
/*
* one_way_sample.cpp
* outlet_detection
*
* Created by Victor Eruhimov on 8/5/09.
* Copyright 2009 Argus Corp. All rights reserved.
*
*/
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include <string>
#include <stdio.h>
// Prints usage information for the one-way descriptor demo.
static void help()
{
    printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
            "Correspondences are drawn\n");
    printf("Format: \n./one_way_sample <path_to_samples> <image1> <image2>\n");
    printf("For example: ./one_way_sample . ../c/scene_l.bmp ../c/scene_r.bmp\n");
}
using namespace std;
using namespace cv;
Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
const vector<KeyPoint>& features2, const vector<int>& desc_idx);
// Demo entry point: trains one-way descriptors on SURF keypoints from the
// first image and matches each keypoint of the second image against them,
// then displays the correspondences.
// argv: [1] = samples directory, [2]/[3] = image file names relative to it.
int main(int argc, char** argv)
{
    const char images_list[] = "one_way_train_images.txt";
    const CvSize patch_size = cvSize(24, 24);
    const int pose_count = 50;   // number of synthetic poses per descriptor
    if (argc != 4)
    {
        help();
        return 0;
    }
    std::string path_name = argv[1];
    std::string img1_name = path_name + "/" + std::string(argv[2]);
    std::string img2_name = path_name + "/" + std::string(argv[3]);
    printf("Reading the images...\n");
    Mat img1 = imread(img1_name, IMREAD_GRAYSCALE);
    Mat img2 = imread(img2_name, IMREAD_GRAYSCALE);
    // extract keypoints from the first image
    SURF surf_extractor(5.0e3);   // SURF detector with hessian threshold 5e3
    vector<KeyPoint> keypoints1;
    // printf("Extracting keypoints\n");
    surf_extractor(img1, Mat(), keypoints1);
    printf("Extracted %d keypoints...\n", (int)keypoints1.size());
    printf("Training one way descriptors... \n");
    // create descriptors; PCA data and the training image list are read
    // from path_name/images_list
    OneWayDescriptorBase descriptors(patch_size, pose_count, OneWayDescriptorBase::GetPCAFilename(), path_name,
                                     images_list);
    // legacy C image headers sharing pixel data with the Mats above
    IplImage img1_c = img1;
    IplImage img2_c = img2;
    descriptors.CreateDescriptorsFromImage(&img1_c, keypoints1);
    printf("done\n");
    // extract keypoints from the second image
    vector<KeyPoint> keypoints2;
    surf_extractor(img2, Mat(), keypoints2);
    printf("Extracted %d keypoints from the second image...\n", (int)keypoints2.size());
    printf("Finding nearest neighbors...");
    // find NN for each of keypoints2 in keypoints1
    vector<int> desc_idx;
    desc_idx.resize(keypoints2.size());
    for (size_t i = 0; i < keypoints2.size(); i++)
    {
        int pose_idx = 0;
        float distance = 0;
        descriptors.FindDescriptor(&img2_c, keypoints2[i].pt, desc_idx[i], pose_idx, distance);
    }
    printf("done\n");
    Mat img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, desc_idx);
    imshow("correspondences", img_corr);
    waitKey(0);
}
// Renders the two grayscale input images side by side on a color canvas and
// draws the correspondences between their keypoints.
//   img1, features1 - first image and its keypoints (red circles, left)
//   img2, features2 - second image and its keypoints (blue circles, right,
//                     shifted by img1.cols)
//   desc_idx        - for each keypoint of features2, the index of its
//                     matched keypoint in features1 (green match lines)
// Returns the composite CV_8UC3 visualization image.
Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, const Mat& img2,
                        const vector<KeyPoint>& features2, const vector<int>& desc_idx)
{
    Mat part, img_corr(Size(img1.cols + img2.cols, MAX(img1.rows, img2.rows)), CV_8UC3);
    img_corr = Scalar::all(0);
    part = img_corr(Rect(0, 0, img1.cols, img1.rows));
    cvtColor(img1, part, COLOR_GRAY2RGB);
    part = img_corr(Rect(img1.cols, 0, img2.cols, img2.rows));
    // BUGFIX: the right-hand panel must show the second image (was img1,
    // which duplicated the first image and never displayed img2)
    cvtColor(img2, part, COLOR_GRAY2RGB);
    for (size_t i = 0; i < features1.size(); i++)
    {
        circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
    }
    for (size_t i = 0; i < features2.size(); i++)
    {
        Point pt((int)features2[i].pt.x + img1.cols, (int)features2[i].pt.y);
        circle(img_corr, pt, 3, Scalar(0, 0, 255));
        line(img_corr, features1[desc_idx[i]].pt, pt, Scalar(0, 255, 0));
    }
    return img_corr;
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

View File

@@ -1,2 +0,0 @@
one_way_train_0000.jpg
one_way_train_0001.jpg

View File

@@ -1,98 +0,0 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy.hpp"
#include <stdio.h>
// Prints usage information for the pyramid segmentation demo.
// Fixes several typos in the original message ("demonstrated",
// "cvcvPyrSegmentation", "trhesholds", "pyaramid_segmentation").
static void help(void)
{
    printf("\nThis program demonstrates color pyramid segmentation cvPyrSegmentation(), which is controlled\n"
    "by two thresholds which can be manipulated by a trackbar. It can take an image file name or defaults to 'fruits.jpg'\n"
    "Usage :\n"
    "./pyramid_segmentation [image_path_filename -- Defaults to fruits.jpg]\n\n"
    );
}
IplImage* image[2] = { 0, 0 }, *image0 = 0, *image1 = 0;
CvSize size;
int w0, h0,i;
int threshold1, threshold2;
int l,level = 4;
int sthreshold1, sthreshold2;
int l_comp;
int block_size = 1000;
float parameter;
double threshold;
double rezult, min_rezult;
int filter = CV_GAUSSIAN_5x5;
CvConnectedComp *cur_comp, min_comp;
CvSeq *comp;
CvMemStorage *storage;
CvPoint pt1, pt2;
// Trackbar callback: re-runs cvPyrSegmentation on the cropped source copy
// (global image0) into image1 and refreshes the "Segmentation" window.
// The trackbar position argument is ignored; thresholds come from the
// globals threshold1/threshold2 (+1, presumably to keep them non-zero --
// confirm against cvPyrSegmentation's requirements).
static void ON_SEGMENT(int a)
{
    (void)a;
    cvPyrSegmentation(image0, image1, storage, &comp,
                      level, threshold1+1, threshold2+1);
    cvShowImage("Segmentation", image1);
}
// Entry point: loads an image (argv[1] or fruits.jpg), crops its dimensions
// to a multiple of 2^level, runs an initial pyramid segmentation and lets
// the user tune the two thresholds interactively via trackbars.
int main( int argc, char** argv )
{
    char* filename;
    help();
    filename = argc == 2 ? argv[1] : (char*)"fruits.jpg";
    if( (image[0] = cvLoadImage( filename, 1)) == 0 )
    {
        help();
        printf("Cannot load fileimage - %s\n", filename);
        return -1;
    }
    cvNamedWindow("Source", 0);
    cvShowImage("Source", image[0]);
    cvNamedWindow("Segmentation", 0);
    storage = cvCreateMemStorage ( block_size );
    // clear the low bits so width/height are multiples of 2^level
    // (needed so a `level`-deep pyramid can be built from the image)
    image[0]->width &= -(1<<level);
    image[0]->height &= -(1<<level);
    image0 = cvCloneImage( image[0] );
    image1 = cvCloneImage( image[0] );
    // segmentation of the color image
    l = 1;
    threshold1 =255;
    threshold2 =30;
    ON_SEGMENT(1);   // run once before the user touches the trackbars
    sthreshold1 = cvCreateTrackbar("Threshold1", "Segmentation", &threshold1, 255, ON_SEGMENT);
    sthreshold2 = cvCreateTrackbar("Threshold2", "Segmentation", &threshold2, 255, ON_SEGMENT);
    cvShowImage("Segmentation", image1);
    cvWaitKey(0);
    // wait for any key, then release all windows and resources
    cvDestroyWindow("Segmentation");
    cvDestroyWindow("Source");
    cvReleaseMemStorage(&storage );
    cvReleaseImage(&image[0]);
    cvReleaseImage(&image0);
    cvReleaseImage(&image1);
    return 0;
}
#ifdef _EiC
main(1,"pyramid_segmentation.c");
#endif

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

View File

@@ -1,139 +0,0 @@
#include "opencv2/ml/ml.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/utility.hpp"
#include <stdio.h>
#include <map>
// Print a short usage summary for the tree-engine sample.
static void help()
{
    static const char usage_text[] =
        "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
        "CvDTree dtree;\n"
        "CvBoost boost;\n"
        "CvRTrees rtrees;\n"
        "CvERTrees ertrees;\n"
        "CvGBTrees gbtrees;\n"
        "Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
        "where -r <response_column> specified the 0-based index of the response (0 by default)\n"
        "-c specifies that the response is categorical (it's ordered by default) and\n"
        "<csv filename> is the name of training data file in comma-separated value format\n\n";
    printf("%s", usage_text);
}
// Count the distinct integer class labels in the response column of `data`.
// Returns -1 as soon as any response is not an integral value, i.e. the
// problem looks like regression rather than classification.
static int count_classes(CvMLData& data)
{
    const cv::Mat responses = cv::cvarrToMat(data.get_responses());
    std::map<int, int> seen_labels;
    const int total = (int)responses.total();
    for( int idx = 0; idx < total; idx++ )
    {
        const float raw = responses.at<float>(idx);
        const int rounded = cvRound(raw);
        if( rounded != raw )
            return -1;           // non-integer response => not categorical
        seen_labels[rounded] = 1;
    }
    return (int)seen_labels.size();
}
// Prints the train/test error rates of a model and, when available, its
// per-variable importance sorted in descending order.
// _var_imp may be NULL for models that do not compute variable importance.
static void print_result(float train_err, float test_err, const CvMat* _var_imp)
{
    printf( "train error %f\n", train_err );
    printf( "test error %f\n\n", test_err );
    if (_var_imp)
    {
        cv::Mat var_imp = cv::cvarrToMat(_var_imp), sorted_idx;
        // indices of variables ordered from most to least important
        cv::sortIdx(var_imp, sorted_idx, CV_SORT_EVERY_ROW + CV_SORT_DESCENDING);
        printf( "variable importance:\n" );
        int i, n = (int)var_imp.total();
        int type = var_imp.type();
        CV_Assert(type == CV_32F || type == CV_64F);
        for( i = 0; i < n; i++)
        {
            int k = sorted_idx.at<int>(i);
            printf( "%d\t%f\n", k, type == CV_32F ? var_imp.at<float>(k) : var_imp.at<double>(k));
        }
    }
    printf("\n");
}
// Entry point: parses the command line, loads CSV training data and
// trains/evaluates a family of tree-based models (decision tree, boosting,
// random trees, extremely randomized trees, gradient-boosted trees).
// Usage: tree_engine [-r <response_column>] [-c] <csv filename>
int main(int argc, char** argv)
{
    if(argc < 2)
    {
        help();
        return 0;
    }
    const char* filename = 0;
    int response_idx = 0;
    bool categorical_response = false;
    // Parse options: -r <idx> selects the response column, -c marks it as
    // categorical; the first non-option argument is the CSV file name.
    for(int i = 1; i < argc; i++)
    {
        // guard i+1 < argc so a trailing "-r" cannot make sscanf read
        // past argv (it now falls through to the invalid-option branch)
        if(strcmp(argv[i], "-r") == 0 && i + 1 < argc)
            sscanf(argv[++i], "%d", &response_idx);
        else if(strcmp(argv[i], "-c") == 0)
            categorical_response = true;
        else if(argv[i][0] != '-' )
            filename = argv[i];
        else
        {
            printf("Error. Invalid option %s\n", argv[i]);
            help();
            return -1;
        }
    }
    // BUGFIX: if only options were given, filename stayed NULL and was
    // passed to printf("%s") and read_csv() below (undefined behavior)
    if(!filename)
    {
        printf("Error. No input file specified\n");
        help();
        return -1;
    }
    printf("\nReading in %s...\n\n",filename);
    CvDTree dtree;
    CvBoost boost;
    CvRTrees rtrees;
    CvERTrees ertrees;
    CvGBTrees gbtrees;
    CvMLData data;
    CvTrainTestSplit spl( 0.5f );
    if ( data.read_csv( filename ) == 0)
    {
        data.set_response_idx( response_idx );
        if(categorical_response)
            data.change_var_type( response_idx, CV_VAR_CATEGORICAL );
        data.set_train_test_split( &spl );
        printf("======DTREE=====\n");
        dtree.train( &data, CvDTreeParams( 10, 2, 0, false, 16, 0, false, false, 0 ));
        print_result( dtree.calc_error( &data, CV_TRAIN_ERROR), dtree.calc_error( &data, CV_TEST_ERROR ), dtree.get_var_importance() );
        // boosting only supports 2-class classification problems
        if( categorical_response && count_classes(data) == 2 )
        {
            printf("======BOOST=====\n");
            boost.train( &data, CvBoostParams(CvBoost::DISCRETE, 100, 0.95, 2, false, 0));
            print_result( boost.calc_error( &data, CV_TRAIN_ERROR ), boost.calc_error( &data, CV_TEST_ERROR ), 0 ); //doesn't compute importance
        }
        printf("======RTREES=====\n");
        rtrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
        print_result( rtrees.calc_error( &data, CV_TRAIN_ERROR), rtrees.calc_error( &data, CV_TEST_ERROR ), rtrees.get_var_importance() );
        printf("======ERTREES=====\n");
        ertrees.train( &data, CvRTParams( 18, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
        print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR), ertrees.calc_error( &data, CV_TEST_ERROR ), ertrees.get_var_importance() );
        printf("======GBTREES=====\n");
        if (categorical_response)
            gbtrees.train( &data, CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.8f, 5, false));
        else
            gbtrees.train( &data, CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 100, 0.1f, 0.8f, 5, false));
        print_result( gbtrees.calc_error( &data, CV_TRAIN_ERROR), gbtrees.calc_error( &data, CV_TEST_ERROR ), 0 ); //doesn't compute importance
    }
    else
        printf("File can not be read\n");
    return 0;
}

View File

@@ -4,6 +4,7 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>

View File

@@ -3,9 +3,10 @@
#
# ----------------------------------------------------------------------------
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree opencv_softcascade
opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab opencv_shape)
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d
opencv_stitching opencv_videostab opencv_shape)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})

11
samples/cpp/H1to3p.xml Executable file
View File

@@ -0,0 +1,11 @@
<?xml version="1.0"?>
<opencv_storage>
<H13 type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
7.6285898e-01 -2.9922929e-01 2.2567123e+02
3.3443473e-01 1.0143901e+00 -7.6999973e+01
3.4663091e-04 -1.4364524e-05 1.0000000e+00 </data></H13>
</opencv_storage>

View File

@@ -1,12 +0,0 @@
#http://www.cmake.org/cmake/help/cmake2.6docs.html
cmake_minimum_required (VERSION 2.6)
project (OpenGL_Qt_Binding)
FIND_PACKAGE( OpenCV REQUIRED )
find_package (OpenGL REQUIRED)
ADD_EXECUTABLE(OpenGL_Qt_Binding qt_opengl.cpp)
TARGET_LINK_LIBRARIES(OpenGL_Qt_Binding ${OpenCV_LIBS} ${OPENGL_LIBRARIES} )
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cube4.avi ${CMAKE_CURRENT_BINARY_DIR}/cube4.avi COPYONLY)

Binary file not shown.

View File

@@ -1,269 +0,0 @@
// Yannick Verdie 2010
// --- Please read help() below: ---
#include <iostream>
#include <vector>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/calib3d/calib3d_c.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/legacy/compat.hpp>
#ifdef __APPLE__
#include <OpenGL/gl.h>
#else
#include <GL/gl.h>
#endif
using namespace std;
using namespace cv;
// Prints a description of the demo, its input video (cube4.avi) and its
// prerequisites (Qt binding with OpenGL support).
static void help()
{
    cout << "This demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
            "and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
            "It works off of the video: cube4.avi\n"
            "Using OpenCV version " << CV_VERSION << "\n\n"
            " 1) This demo is mainly based on work from Javier Barandiaran Martirena\n"
            "    See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
            " 2) This is a demo to illustrate how to use **OpenGL Callback**.\n"
            " 3) You need Qt binding to compile this sample with OpenGL support enabled.\n"
            " 4) The features' detection is very basic and could highly be improved\n"
            "    (basic thresholding tuned for the specific video) but 2).\n"
            " 5) Thanks to Google Summer of Code 2010 for supporting this work!\n" << endl;
}
#define FOCAL_LENGTH 600
#define CUBE_SIZE 0.5
// Draws an axis-aligned cube with one corner at the origin and edge length
// `size`, as six GL_QUADS faces with outward-facing normals.
static void renderCube(float size)
{
    // Each row: face normal (nx, ny, nz) followed by four vertices given as
    // 0/1 multipliers of `size`, in the same winding order the original
    // immediate-mode sequence used.
    static const float faces[6][3 + 4 * 3] = {
        // normal       v0       v1       v2       v3
        { 0, 0, 1,   0,0,0,  1,0,0,  1,1,0,  0,1,0 },  // front
        { 0, 0,-1,   0,0,1,  0,1,1,  1,1,1,  1,0,1 },  // back
        { 0, 1, 0,   0,1,0,  1,1,0,  1,1,1,  0,1,1 },  // top
        { 0,-1, 0,   0,0,0,  0,0,1,  1,0,1,  1,0,0 },  // bottom
        { 1, 0, 0,   1,0,0,  1,0,1,  1,1,1,  1,1,0 },  // right
        {-1, 0, 0,   0,0,0,  0,1,0,  0,1,1,  0,0,1 }   // left
    };
    glBegin(GL_QUADS);
    for (int f = 0; f < 6; f++)
    {
        glNormal3f(faces[f][0], faces[f][1], faces[f][2]);
        for (int v = 0; v < 4; v++)
            glVertex3f(faces[f][3 + v * 3 + 0] * size,
                       faces[f][3 + v * 3 + 1] * size,
                       faces[f][3 + v * 3 + 2] * size);
    }
    glEnd();
}
// OpenGL draw callback for the "POSIT" window: renders the cube with the
// pose estimated by POSIT.
// `param` points to the 16-float pose matrix filled in by
// createOpenGLMatrixFrom() (registered via setOpenGlDrawCallback in main).
static void on_opengl(void* param)
{
    //Draw the object with the estimated pose
    glLoadIdentity();
    // flip the Z axis, presumably to reconcile the OpenCV and OpenGL
    // camera conventions -- confirm before reusing this transform
    glScalef( 1.0f, 1.0f, -1.0f);
    glMultMatrixf( (float*)param );
    glEnable( GL_LIGHTING );
    glEnable( GL_LIGHT0 );
    glEnable( GL_BLEND );
    glBlendFunc(GL_SRC_ALPHA, GL_ONE);
    renderCube( CUBE_SIZE );
    glDisable(GL_BLEND);
    glDisable( GL_LIGHTING );
}
static void initPOSIT(std::vector<CvPoint3D32f> * modelPoints)
{
// Create the model pointss
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); // The first must be (0, 0, 0)
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
// Detects the four dark markers in `source` and writes their centers into
// `srcImagePoints`, ordered as: 0 = largest y, 1 = largest x, 2 = smallest
// x, 3 = smallest y (the order POSIT's model points were registered in).
// The points are finally re-expressed in a camera-centered frame: origin at
// the image center, y pointing up.
// `grayImage` is a caller-owned scratch buffer reused across frames.
// If the number of detected contours is not exactly 4, srcImagePoints is
// left unchanged (the previous frame's points are reused by the caller).
static void foundCorners(vector<CvPoint2D32f> * srcImagePoints, const Mat & source, Mat & grayImage)
{
    // binarize: gray + blur + normalize, then inverted threshold so the
    // dark markers (pixels <= 26) become white blobs
    cvtColor(source, grayImage, COLOR_RGB2GRAY);
    GaussianBlur(grayImage, grayImage, Size(11, 11), 0, 0);
    normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
    threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
    Mat MgrayImage = grayImage;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    Point p;
    vector<CvPoint2D32f> srcImagePoints_temp(4, cvPoint2D32f(0, 0));
    if (contours.size() == srcImagePoints_temp.size())
    {
        // use the centroid of each contour as the candidate marker position
        for (size_t i = 0; i < contours.size(); i++ )
        {
            p.x = p.y = 0;
            for (size_t j = 0 ; j < contours[i].size(); j++)
                p += contours[i][j];
            srcImagePoints_temp.at(i) = cvPoint2D32f(float(p.x) / contours[i].size(), float(p.y) / contours[i].size());
        }
        // Need to keep the same order
        // > y = 0
        // > x = 1
        // < x = 2
        // < y = 3
        // get point 0;
        size_t index = 0;
        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
            if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
                index = i;
        srcImagePoints->at(0) = srcImagePoints_temp.at(index);
        // get point 1;
        index = 0;
        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
            if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
                index = i;
        srcImagePoints->at(1) = srcImagePoints_temp.at(index);
        // get point 2;
        index = 0;
        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
            if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
                index = i;
        srcImagePoints->at(2) = srcImagePoints_temp.at(index);
        // get point 3;
        index = 0;
        for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
            if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
                index = i;
        srcImagePoints->at(3) = srcImagePoints_temp.at(index);
        // draw the detected markers; Msource is a shallow Mat copy sharing
        // pixel data with `source`, so the overlay shows up in "Original"
        Mat Msource = source;
        stringstream ss;
        for (size_t i = 0; i<srcImagePoints_temp.size(); i++ )
        {
            ss << i;
            circle(Msource, srcImagePoints->at(i), 5, Scalar(0, 0, 255));
            putText(Msource, ss.str(), srcImagePoints->at(i), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255));
            ss.str("");
            // new coordinate system in the middle of the frame and reversed (camera coordinate system)
            srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x - source.cols / 2,
                                                 source.rows / 2 - srcImagePoints_temp.at(i).y);
        }
    }
}
// Packs the 3x3 POSIT rotation matrix and 3-vector translation into the
// 16-float matrix consumed by the OpenGL draw callback (on_opengl).
// The rotation entries are written with swapped flat indexing (see the
// inline comment) and the translation goes to elements 3/7/11; elements
// 12..14 are zeroed and [15] set to 1.
// NOTE(review): whether this reads as row- or column-major under
// glMultMatrixf depends on the glScalef(1,1,-1) applied in on_opengl --
// confirm the convention before reusing this function elsewhere.
static void createOpenGLMatrixFrom(float * posePOSIT, const CvMatr32f & rotationMatrix,
                                   const CvVect32f & translationVector)
{
    // coordinate system returned is relative to the first 3D input point
    for (int f = 0; f < 3; f++)
        for (int c = 0; c < 3; c++)
            posePOSIT[c * 4 + f] = rotationMatrix[f * 3 + c]; // transposed
    posePOSIT[3] = translationVector[0];
    posePOSIT[7] = translationVector[1];
    posePOSIT[11] = translationVector[2];
    posePOSIT[12] = 0.0f;
    posePOSIT[13] = 0.0f;
    posePOSIT[14] = 0.0f;
    posePOSIT[15] = 1.0f;
}
// Demo entry point: tracks the four cube markers in cube4.avi, estimates
// the cube pose with POSIT every frame and renders an OpenGL cube with that
// pose in a second window. Press ESC to exit; the clip loops forever.
int main(void)
{
    help();
    string fileName = "cube4.avi";
    VideoCapture video(fileName);
    if (!video.isOpened())
    {
        cerr << "Video file " << fileName << " could not be opened" << endl;
        return EXIT_FAILURE;
    }
    Mat source, grayImage;
    // grab one frame up front so the window can be sized to the video
    video >> source;
    namedWindow("Original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
    namedWindow("POSIT", WINDOW_OPENGL | WINDOW_FREERATIO);
    resizeWindow("POSIT", source.cols, source.rows);
    displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear).\n"
                   "This demo is only to illustrate how to use OpenGL callback.\n"
                   " -- Press ESC to exit.", 10000);
    // pose matrix shared with the OpenGL draw callback (updated each frame)
    float OpenGLMatrix[] = { 0, 0, 0, 0,
                             0, 0, 0, 0,
                             0, 0, 0, 0,
                             0, 0, 0, 0 };
    setOpenGlContext("POSIT");
    setOpenGlDrawCallback("POSIT", on_opengl, OpenGLMatrix);
    vector<CvPoint3D32f> modelPoints;
    initPOSIT(&modelPoints);
    // Create the POSIT object with the model points
    CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size());
    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1e-4f);
    vector<CvPoint2D32f> srcImagePoints(4, cvPoint2D32f(0, 0));
    // main loop: ~33 ms per frame; ESC (27) quits
    while (waitKey(33) != 27)
    {
        video >> source;
        if (source.empty())
            break;
        imshow("Original", source);
        foundCorners(&srcImagePoints, source, grayImage);
        cvPOSIT(positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector);
        createOpenGLMatrixFrom(OpenGLMatrix, rotation_matrix, translation_vector);
        // triggers on_opengl with the freshly written pose matrix
        updateWindow("POSIT");
        // loop the clip: rewind when playback is near the end
        if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
            video.set(CAP_PROP_POS_AVI_RATIO, 0);
    }
    setOpenGlDrawCallback("POSIT", NULL, NULL);
    destroyAllWindows();
    cvReleasePOSITObject(&positObject);
    delete[]rotation_matrix;
    delete[]translation_vector;
    return EXIT_SUCCESS;
}

View File

Before

Width:  |  Height:  |  Size: 83 KiB

After

Width:  |  Height:  |  Size: 83 KiB

View File

Before

Width:  |  Height:  |  Size: 21 KiB

After

Width:  |  Height:  |  Size: 21 KiB

View File

Before

Width:  |  Height:  |  Size: 11 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View File

@@ -1,13 +1,10 @@
#include "opencv2/opencv_modules.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/ml/ml.hpp"
#ifdef HAVE_OPENCV_OCL
#define _OCL_SVM_ 1 //select whether using ocl::svm method or not, default is using
#include "opencv2/ocl/ocl.hpp"
#endif
#include <fstream>
#include <iostream>
@@ -26,6 +23,7 @@
#define DEBUG_DESC_PROGRESS
using namespace cv;
using namespace cv::ml;
using namespace std;
const string paramsFile = "params.xml";
@@ -680,7 +678,7 @@ void VocData::writeClassifierResultsFile( const string& out_dir, const string& o
result_file.close();
} else {
string err_msg = "could not open classifier results file '" + output_file + "' for writing. Before running for the first time, a 'results' subdirectory should be created within the VOC dataset base directory. e.g. if the VOC data is stored in /VOC/VOC2010 then the path /VOC/results must be created.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
}
@@ -704,9 +702,9 @@ void VocData::writeClassifierResultsFile( const string& out_dir, const string& o
string VocData::getResultsFilename(const string& obj_class, const VocTask task, const ObdDatasetType dataset, const int competition, const int number)
{
if ((competition < 1) && (competition != -1))
CV_Error(CV_StsBadArg,"competition argument should be a positive non-zero number or -1 to accept the default");
CV_Error(Error::StsBadArg,"competition argument should be a positive non-zero number or -1 to accept the default");
if ((number < 1) && (number != -1))
CV_Error(CV_StsBadArg,"number argument should be a positive non-zero number or -1 to accept the default");
CV_Error(Error::StsBadArg,"number argument should be a positive non-zero number or -1 to accept the default");
string dset, task_type;
@@ -818,7 +816,7 @@ void VocData::calcClassifierPrecRecall(const string& input_file, vector<float>&
scoregt_file.close();
} else {
string err_msg = "could not open scoregt file '" + scoregt_file_str + "' for writing.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
}
@@ -977,7 +975,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
if (target_idx_it == output_headers.end())
{
string err_msg = "could not find the target object class '" + obj_class + "' in list of valid classes.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
/* convert iterator to index */
target_idx = (int)std::distance(output_headers.begin(),target_idx_it);
@@ -1040,7 +1038,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
if (class_idx_it == output_headers.end())
{
string err_msg = "could not find object class '" + img_objects[obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
/* convert iterator to index */
int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
@@ -1192,7 +1190,7 @@ void VocData::calcDetectorConfMatRow(const string& obj_class, const ObdDatasetTy
if (class_idx_it == output_headers.end())
{
string err_msg = "could not find object class '" + img_objects[max_gt_obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
/* convert iterator to index */
int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
@@ -1285,7 +1283,7 @@ void VocData::savePrecRecallToGnuplot(const string& output_file, const vector<fl
plot_file.close();
} else {
string err_msg = "could not open plot file '" + output_file_std + "' for writing.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
}
@@ -1449,7 +1447,7 @@ void VocData::readClassifierGroundTruth(const string& filename, vector<string>&
if (!gtfile.is_open())
{
string err_msg = "could not open VOC ground truth textfile '" + filename + "'.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
string line;
@@ -1465,7 +1463,7 @@ void VocData::readClassifierGroundTruth(const string& filename, vector<string>&
image_codes.push_back(image);
object_present.push_back(obj_present == 1);
} else {
if (!gtfile.eof()) CV_Error(CV_StsParseError,"error parsing VOC ground truth textfile.");
if (!gtfile.eof()) CV_Error(Error::StsParseError,"error parsing VOC ground truth textfile.");
}
}
gtfile.close();
@@ -1491,13 +1489,13 @@ void VocData::readClassifierResultsFile(const string& input_file, vector<string>
image_codes.push_back(image);
scores.push_back(score);
} else {
if(!result_file.eof()) CV_Error(CV_StsParseError,"error parsing VOC classifier results file.");
if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC classifier results file.");
}
}
result_file.close();
} else {
string err_msg = "could not open classifier results file '" + input_file + "' for reading.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
}
@@ -1548,13 +1546,13 @@ void VocData::readDetectorResultsFile(const string& input_file, vector<string>&
bounding_boxes[image_idx].push_back(bounding_box);
}
} else {
if(!result_file.eof()) CV_Error(CV_StsParseError,"error parsing VOC detector results file.");
if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC detector results file.");
}
}
result_file.close();
} else {
string err_msg = "could not open detector results file '" + input_file + "' for reading.";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
}
@@ -1598,23 +1596,23 @@ void VocData::extractVocObjects(const string filename, vector<ObdObject>& object
//object class -------------
if (extractXMLBlock(object_contents, "name", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <name> tag in object definition of '" + filename + "'");
if (extractXMLBlock(object_contents, "name", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <name> tag in object definition of '" + filename + "'");
object.object_class.swap(tag_contents);
//object bounding box -------------
int xmax, xmin, ymax, ymin;
if (extractXMLBlock(object_contents, "xmax", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <xmax> tag in object definition of '" + filename + "'");
if (extractXMLBlock(object_contents, "xmax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <xmax> tag in object definition of '" + filename + "'");
xmax = stringToInteger(tag_contents);
if (extractXMLBlock(object_contents, "xmin", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <xmin> tag in object definition of '" + filename + "'");
if (extractXMLBlock(object_contents, "xmin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <xmin> tag in object definition of '" + filename + "'");
xmin = stringToInteger(tag_contents);
if (extractXMLBlock(object_contents, "ymax", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <ymax> tag in object definition of '" + filename + "'");
if (extractXMLBlock(object_contents, "ymax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <ymax> tag in object definition of '" + filename + "'");
ymax = stringToInteger(tag_contents);
if (extractXMLBlock(object_contents, "ymin", 0, tag_contents) == -1) CV_Error(CV_StsError,"missing <ymin> tag in object definition of '" + filename + "'");
if (extractXMLBlock(object_contents, "ymin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <ymin> tag in object definition of '" + filename + "'");
ymin = stringToInteger(tag_contents);
object.boundingBox.x = xmin-1; //convert to 0-based indexing
@@ -1717,11 +1715,11 @@ void VocData::extractDataFromResultsFilename(const string& input_file, string& c
size_t fnameend = input_file_std.rfind(".txt");
if ((fnamestart == input_file_std.npos) || (fnameend == input_file_std.npos))
CV_Error(CV_StsError,"Could not extract filename of results file.");
CV_Error(Error::StsError,"Could not extract filename of results file.");
++fnamestart;
if (fnamestart >= fnameend)
CV_Error(CV_StsError,"Could not extract filename of results file.");
CV_Error(Error::StsError,"Could not extract filename of results file.");
//extract dataset and class names, triggering exception if the filename format is not correct
string filename = input_file_std.substr(fnamestart, fnameend-fnamestart);
@@ -1732,11 +1730,11 @@ void VocData::extractDataFromResultsFilename(const string& input_file, string& c
size_t classend = filename.find("_",classstart+1);
if (classend == filename.npos) classend = filename.size();
if ((datasetstart == filename.npos) || (classstart == filename.npos))
CV_Error(CV_StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
++datasetstart;
++classstart;
if (((datasetstart-classstart) < 1) || ((classend-datasetstart) < 1))
CV_Error(CV_StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
dataset_name = filename.substr(datasetstart,classstart-datasetstart-1);
class_name = filename.substr(classstart,classend-classstart);
@@ -1784,7 +1782,7 @@ bool VocData::getClassifierGroundTruthImage(const string& obj_class, const strin
return m_classifier_gt_all_present[std::distance(m_classifier_gt_all_ids.begin(),it)] != 0;
} else {
string err_msg = "could not find classifier ground truth for image '" + id + "' and class '" + obj_class + "'";
CV_Error(CV_StsError,err_msg.c_str());
CV_Error(Error::StsError,err_msg.c_str());
}
return true;
@@ -1817,7 +1815,7 @@ void VocData::getSortOrder(const vector<float>& values, vector<size_t>& order, b
void VocData::readFileToString(const string filename, string& file_contents)
{
std::ifstream ifs(filename.c_str());
if (!ifs.is_open()) CV_Error(CV_StsError,"could not open text file");
if (!ifs.is_open()) CV_Error(Error::StsError,"could not open text file");
stringstream oss;
oss << ifs.rdbuf();
@@ -1832,7 +1830,7 @@ int VocData::stringToInteger(const string input_str)
stringstream ss(input_str);
if ((ss >> result).fail())
{
CV_Error(CV_StsBadArg,"could not perform string to integer conversion");
CV_Error(Error::StsBadArg,"could not perform string to integer conversion");
}
return result;
}
@@ -1844,7 +1842,7 @@ string VocData::integerToString(const int input_int)
stringstream ss;
if ((ss << input_int).fail())
{
CV_Error(CV_StsBadArg,"could not perform integer to string conversion");
CV_Error(Error::StsBadArg,"could not perform integer to string conversion");
}
result = ss.str();
return result;
@@ -2328,14 +2326,14 @@ static void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<M
CV_Assert( bowImageDescriptors.size() == objectPresent.size() );
}
static void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& responses, bool balanceClasses )
static void setSVMParams( SVM::Params& svmParams, Mat& class_wts_cv, const Mat& responses, bool balanceClasses )
{
int pos_ex = countNonZero(responses == 1);
int neg_ex = countNonZero(responses == -1);
cout << pos_ex << " positive training samples; " << neg_ex << " negative training samples" << endl;
svmParams.svm_type = CvSVM::C_SVC;
svmParams.kernel_type = CvSVM::RBF;
svmParams.svmType = SVM::C_SVC;
svmParams.kernelType = SVM::RBF;
if( balanceClasses )
{
Mat class_wts( 2, 1, CV_32FC1 );
@@ -2353,49 +2351,44 @@ static void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat
class_wts.at<float>(1) = static_cast<float>(pos_ex)/static_cast<float>(pos_ex+neg_ex);
}
class_wts_cv = class_wts;
svmParams.class_weights = &class_wts_cv;
svmParams.classWeights = class_wts_cv;
}
}
static void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
CvParamGrid& p_grid, CvParamGrid& nu_grid,
CvParamGrid& coef_grid, CvParamGrid& degree_grid )
static void setSVMTrainAutoParams( ParamGrid& c_grid, ParamGrid& gamma_grid,
ParamGrid& p_grid, ParamGrid& nu_grid,
ParamGrid& coef_grid, ParamGrid& degree_grid )
{
c_grid = CvSVM::get_default_grid(CvSVM::C);
c_grid = SVM::getDefaultGrid(SVM::C);
gamma_grid = CvSVM::get_default_grid(CvSVM::GAMMA);
gamma_grid = SVM::getDefaultGrid(SVM::GAMMA);
p_grid = CvSVM::get_default_grid(CvSVM::P);
p_grid.step = 0;
p_grid = SVM::getDefaultGrid(SVM::P);
p_grid.logStep = 0;
nu_grid = CvSVM::get_default_grid(CvSVM::NU);
nu_grid.step = 0;
nu_grid = SVM::getDefaultGrid(SVM::NU);
nu_grid.logStep = 0;
coef_grid = CvSVM::get_default_grid(CvSVM::COEF);
coef_grid.step = 0;
coef_grid = SVM::getDefaultGrid(SVM::COEF);
coef_grid.logStep = 0;
degree_grid = CvSVM::get_default_grid(CvSVM::DEGREE);
degree_grid.step = 0;
degree_grid = SVM::getDefaultGrid(SVM::DEGREE);
degree_grid.logStep = 0;
}
#if defined HAVE_OPENCV_OCL && _OCL_SVM_
static void trainSVMClassifier( cv::ocl::CvSVM_OCL& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
#else
static void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
static Ptr<SVM> trainSVMClassifier( const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
#endif
{
/* first check if a previously trained svm for the current class has been saved to file */
string svmFilename = resPath + svmsDir + "/" + objClassName + ".xml.gz";
Ptr<SVM> svm;
FileStorage fs( svmFilename, FileStorage::READ);
if( fs.isOpened() )
{
cout << "*** LOADING SVM CLASSIFIER FOR CLASS " << objClassName << " ***" << endl;
svm.load( svmFilename.c_str() );
svm = StatModel::load<SVM>( svmFilename );
}
else
{
@@ -2446,28 +2439,26 @@ static void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsEx
}
cout << "TRAINING SVM FOR CLASS ..." << objClassName << "..." << endl;
CvSVMParams svmParams;
CvMat class_wts_cv;
SVM::Params svmParams;
Mat class_wts_cv;
setSVMParams( svmParams, class_wts_cv, responses, svmParamsExt.balanceClasses );
CvParamGrid c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid;
svm = SVM::create(svmParams);
ParamGrid c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid;
setSVMTrainAutoParams( c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid );
svm.train_auto( trainData, responses, Mat(), Mat(), svmParams, 10, c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid );
svm->trainAuto(TrainData::create(trainData, ROW_SAMPLE, responses), 10,
c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid);
cout << "SVM TRAINING FOR CLASS " << objClassName << " COMPLETED" << endl;
svm.save( svmFilename.c_str() );
svm->save( svmFilename );
cout << "SAVED CLASSIFIER TO FILE" << endl;
}
return svm;
}
#if defined HAVE_OPENCV_OCL && _OCL_SVM_
static void computeConfidences( cv::ocl::CvSVM_OCL& svm, const string& objClassName, VocData& vocData,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
#else
static void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocData,
static void computeConfidences( const Ptr<SVM>& svm, const string& objClassName, VocData& vocData,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
#endif
{
cout << "*** CALCULATING CONFIDENCES FOR CLASS " << objClassName << " ***" << endl;
cout << "CALCULATING BOW VECTORS FOR TEST SET OF " << objClassName << "..." << endl;
@@ -2491,12 +2482,12 @@ static void computeConfidences( CvSVM& svm, const string& objClassName, VocData&
if( imageIdx == 0 )
{
// In the first iteration, determine the sign of the positive class
float classVal = confidences[imageIdx] = svm.predict( bowImageDescriptors[imageIdx], false );
float scoreVal = confidences[imageIdx] = svm.predict( bowImageDescriptors[imageIdx], true );
float classVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), 0 );
float scoreVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT );
signMul = (classVal < 0) == (scoreVal < 0) ? 1.f : -1.f;
}
// svm output of decision function
confidences[imageIdx] = signMul * svm.predict( bowImageDescriptors[imageIdx], true );
confidences[imageIdx] = signMul * svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT );
}
cout << "WRITING QUERY RESULTS TO VOC RESULTS FILE FOR CLASS " << objClassName << "..." << endl;
@@ -2606,13 +2597,8 @@ int main(int argc, char** argv)
for( size_t classIdx = 0; classIdx < objClasses.size(); ++classIdx )
{
// Train a classifier on train dataset
#if defined HAVE_OPENCV_OCL && _OCL_SVM_
cv::ocl::CvSVM_OCL svm;
#else
CvSVM svm;
#endif
trainSVMClassifier( svm, svmTrainParamsExt, objClasses[classIdx], vocData,
bowExtractor, featureDetector, resPath );
Ptr<SVM> svm = trainSVMClassifier( svmTrainParamsExt, objClasses[classIdx], vocData,
bowExtractor, featureDetector, resPath );
// Now use the classifier over all images on the test dataset and rank according to score order
// also calculating precision-recall etc.

View File

@@ -6,6 +6,7 @@
*/
#include "opencv2/video.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>

View File

@@ -2,6 +2,7 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>

View File

Before

Width:  |  Height:  |  Size: 50 KiB

After

Width:  |  Height:  |  Size: 50 KiB

View File

Before

Width:  |  Height:  |  Size: 120 KiB

After

Width:  |  Height:  |  Size: 120 KiB

View File

@@ -1,131 +0,0 @@
/*
* matching_test.cpp
*
* Created on: Oct 17, 2010
* Author: ethan
*/
#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/calib3d.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <vector>
#include <iostream>
using namespace cv;
using namespace std;
//Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
static void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_train,
const vector<KeyPoint>& kpts_query, vector<Point2f>& pts_train, vector<Point2f>& pts_query)
{
pts_train.clear();
pts_query.clear();
pts_train.reserve(matches.size());
pts_query.reserve(matches.size());
for (size_t i = 0; i < matches.size(); i++)
{
const DMatch& match = matches[i];
pts_query.push_back(kpts_query[match.queryIdx].pt);
pts_train.push_back(kpts_train[match.trainIdx].pt);
}
}
// Run descriptor matching (query against train) and return the wall-clock
// time spent in the match call, in seconds.
static double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*kpts_query*/, DescriptorMatcher& matcher,
                    const Mat& train, const Mat& query, vector<DMatch>& matches)
{
    const double start = (double)getTickCount();
    matcher.match(query, train, matches); //Using features2d
    const double stop = (double)getTickCount();
    return (stop - start) / getTickFrequency();
}
// Print a short usage banner for the BRIEF matching demo.
static void help()
{
    cout << "This program shows how to use BRIEF descriptor to match points in features2d" << endl;
    cout << "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results" << endl;
    cout << "Usage: " << endl;
    cout << "image1 image2 " << endl;
    cout << "Example: " << endl;
    cout << "box.png box_in_scene.png " << endl;
}
// cv::CommandLineParser specification: two positional arguments with
// defaults (box.png / box_in_scene.png) and help text.
const char* keys =
{
"{@first_image | box.png | the first image}"
"{@second_image | box_in_scene.png | the second image}"
};
// Demo entry point: detect FAST keypoints in two images, describe them with
// BRIEF, brute-force match with Hamming distance, estimate a homography from
// the matches with RANSAC, and visualize matches / warp / difference.
int main(int argc, const char ** argv)
{
help();
CommandLineParser parser(argc, argv, keys);
string im1_name = parser.get<string>(0);
string im2_name = parser.get<string>(1);
// Both images are loaded as single-channel grayscale.
Mat im1 = imread(im1_name, IMREAD_GRAYSCALE);
Mat im2 = imread(im2_name, IMREAD_GRAYSCALE);
if (im1.empty() || im2.empty())
{
cout << "could not open one of the images..." << endl;
cout << "the cmd parameters have next current value: " << endl;
parser.printMessage();
return 1;
}
// Time the keypoint detection over both images.
double t = (double)getTickCount();
FastFeatureDetector detector(50);
BriefDescriptorExtractor extractor(32); //this is really 32 x 8 matches since they are binary matches packed into bytes
vector<KeyPoint> kpts_1, kpts_2;
detector.detect(im1, kpts_1);
detector.detect(im2, kpts_2);
t = ((double)getTickCount() - t) / getTickFrequency();
// NOTE: "fount" below is a typo in the original output string, kept as-is.
cout << "found " << kpts_1.size() << " keypoints in " << im1_name << endl << "fount " << kpts_2.size()
<< " keypoints in " << im2_name << endl << "took " << t << " seconds." << endl;
Mat desc_1, desc_2;
cout << "computing descriptors..." << endl;
t = (double)getTickCount();
extractor.compute(im1, kpts_1, desc_1);
extractor.compute(im2, kpts_2, desc_2);
t = ((double)getTickCount() - t) / getTickFrequency();
cout << "done computing descriptors... took " << t << " seconds" << endl;
//Do matching using features2d
cout << "matching with BruteForceMatcher<Hamming>" << endl;
// defaultNorm() for BRIEF is a Hamming norm, so this is binary matching.
BFMatcher matcher_popcount(extractor.defaultNorm());
vector<DMatch> matches_popcount;
double pop_time = match(kpts_1, kpts_2, matcher_popcount, desc_1, desc_2, matches_popcount);
cout << "done BruteForceMatcher<Hamming> matching. took " << pop_time << " seconds" << endl;
vector<Point2f> mpts_1, mpts_2;
matches2points(matches_popcount, kpts_1, kpts_2, mpts_1, mpts_2); //Extract a list of the (x,y) location of the matches
vector<char> outlier_mask;
// Homography maps image-2 points onto image 1; outlier_mask marks RANSAC inliers.
Mat H = findHomography(mpts_2, mpts_1, RANSAC, 1, outlier_mask);
Mat outimg;
drawMatches(im2, kpts_2, im1, kpts_1, matches_popcount, outimg, Scalar::all(-1), Scalar::all(-1), outlier_mask);
imshow("matches - popcount - outliers removed", outimg);
Mat warped;
Mat diff;
// Warp image 2 into image 1's frame and show the absolute difference.
warpPerspective(im2, warped, H, im1.size());
imshow("warped", warped);
absdiff(im1,warped,diff);
imshow("diff", diff);
waitKey();
return 0;
}

View File

@@ -1,778 +0,0 @@
#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include <map>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
// Print a short usage note for this (admittedly incomplete) demo.
static void help()
{
    printf("\nSigh: This program is not complete/will be replaced. \n"
           "So: Use this just to see hints of how to use things like Rodrigues\n"
           " conversions, finding the fundamental matrix, using descriptor\n"
           " finding and matching in features2d and using camera parameters\n");
    printf("Usage: build3dmodel -i <intrinsics_filename>\n"
           "\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n");
}
// Read camera intrinsics, distortion coefficients and the calibrated image
// size from an OpenCV FileStorage file (as written by the calibration tools).
// Matrices are promoted to CV_64F. Returns false if the file cannot be
// opened; previously this fell through and reported success with empty
// outputs, which silently broke downstream computations.
static bool readCameraMatrix(const string& filename,
                             Mat& cameraMatrix, Mat& distCoeffs,
                             Size& calibratedImageSize )
{
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    fs["image_width"] >> calibratedImageSize.width;
    fs["image_height"] >> calibratedImageSize.height;
    fs["distortion_coefficients"] >> distCoeffs;
    fs["camera_matrix"] >> cameraMatrix;

    // Normalize both matrices to double precision for the math below.
    if( distCoeffs.type() != CV_64F )
        distCoeffs = Mat_<double>(distCoeffs);
    if( cameraMatrix.type() != CV_64F )
        cameraMatrix = Mat_<double>(cameraMatrix);

    return true;
}
static bool readModelViews( const string& filename, vector<Point3f>& box,
vector<string>& imagelist,
vector<Rect>& roiList, vector<Vec6f>& poseList )
{
imagelist.resize(0);
roiList.resize(0);
poseList.resize(0);
box.resize(0);
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
fs["box"] >> box;
FileNode all = fs["views"];
if( all.type() != FileNode::SEQ )
return false;
FileNodeIterator it = all.begin(), it_end = all.end();
for(; it != it_end; ++it)
{
FileNode n = *it;
imagelist.push_back((string)n["image"]);
FileNode nr = n["roi"];
roiList.push_back(Rect((int)nr[0], (int)nr[1], (int)nr[2], (int)nr[3]));
FileNode np = n["pose"];
poseList.push_back(Vec6f((float)np[0], (float)np[1], (float)np[2],
(float)np[3], (float)np[4], (float)np[5]));
}
return true;
}
// A sparse 3D point-cloud model built from multiple calibrated views.
struct PointModel
{
// Reconstructed 3D points, one per keypoint cluster.
vector<Point3f> points;
// didx[k] lists the row indices into 'descriptors' that belong to points[k].
vector<vector<int> > didx;
// All keypoint descriptors, one row per descriptor (CV_32F).
Mat descriptors;
// Model name; also used as the prefix of the frame image directory.
string name;
};
// Serialize a PointModel to a FileStorage (YAML/XML) file under the
// top-level key 'modelname'. Points and index lists use the compact
// inline-sequence syntax ("[:").
static void writeModel(const string& modelFileName, const string& modelname,
const PointModel& model)
{
FileStorage fs(modelFileName, FileStorage::WRITE);
fs << modelname << "{" <<
"points" << "[:" << model.points << "]" <<
"idx" << "[:";
for( size_t i = 0; i < model.didx.size(); i++ )
fs << "[:" << model.didx[i] << "]";
fs << "]" << "descriptors" << model.descriptors;
// NOTE(review): the "{" mapping opened above is never explicitly closed
// with "}"; this appears to rely on FileStorage finalizing open
// structures on destruction — confirm against the FileStorage docs.
}
static void unpackPose(const Vec6f& pose, Mat& R, Mat& t)
{
Mat rvec = (Mat_<double>(3,1) << pose[0], pose[1], pose[2]);
t = (Mat_<double>(3,1) << pose[3], pose[4], pose[5]);
Rodrigues(rvec, R);
}
// Compute the fundamental matrix relating two views with known extrinsics
// (R1,t1), (R2,t2) and a shared camera matrix K.
static Mat getFundamentalMat( const Mat& R1, const Mat& t1,
const Mat& R2, const Mat& t2,
const Mat& cameraMatrix )
{
// Relative motion taking view-1 coordinates into view 2.
Mat_<double> R = R2*R1.t(), t = t2 - R*t1;
double tx = t.at<double>(0,0), ty = t.at<double>(1,0), tz = t.at<double>(2,0);
// Essential matrix E = [t]x * R, with [t]x the skew-symmetric cross-product matrix.
Mat E = (Mat_<double>(3,3) << 0, -tz, ty, tz, 0, -tx, -ty, tx, 0)*R;
Mat iK = cameraMatrix.inv();
// Fundamental matrix F = K^-T * E * K^-1.
Mat F = iK.t()*E*iK;
#if 0
// Disabled self-check: project random 3D points into both views and verify
// the epipolar constraint p2^T * F * p1 ~ 0 for every projected pair.
static bool checked = false;
if(!checked)
{
vector<Point3f> objpoints(100);
Mat O(objpoints);
randu(O, Scalar::all(-10), Scalar::all(10));
vector<Point2f> imgpoints1, imgpoints2;
projectPoints(Mat(objpoints), R1, t1, cameraMatrix, Mat(), imgpoints1);
projectPoints(Mat(objpoints), R2, t2, cameraMatrix, Mat(), imgpoints2);
double* f = (double*)F.data;
for( size_t i = 0; i < objpoints.size(); i++ )
{
Point2f p1 = imgpoints1[i], p2 = imgpoints2[i];
double diff = p2.x*(f[0]*p1.x + f[1]*p1.y + f[2]) +
p2.y*(f[3]*p1.x + f[4]*p1.y + f[5]) +
f[6]*p1.x + f[7]*p1.y + f[8];
CV_Assert(fabs(diff) < 1e-3);
}
checked = true;
}
#endif
return F;
}
// Find one-to-one keypoint correspondences between two views, constrained by
// the fundamental matrix _F: only candidate pairs whose epipolar residual is
// below 'eps' are considered; a pair is accepted only if its descriptor
// distance passes the best/second-best 'ratio' test AND no other keypoint in
// image 1 matches the chosen image-2 keypoint almost as well (uniqueness
// back-check). Results are appended to 'matches' as (index1, index2) pairs.
static void findConstrainedCorrespondences(const Mat& _F,
const vector<KeyPoint>& keypoints1,
const vector<KeyPoint>& keypoints2,
const Mat& descriptors1,
const Mat& descriptors2,
vector<Vec2i>& matches,
double eps, double ratio)
{
// Work with a float copy of F in a flat 3x3 array for fast residual evaluation.
float F[9]={0};
int dsize = descriptors1.cols;
Mat Fhdr = Mat(3, 3, CV_32F, F);
_F.convertTo(Fhdr, CV_32F);
matches.clear();
for( int i = 0; i < (int)keypoints1.size(); i++ )
{
Point2f p1 = keypoints1[i].pt;
double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX;
int bestIdx1 = -1;//, bestIdx2 = -1;
const float* d1 = descriptors1.ptr<float>(i);
for( int j = 0; j < (int)keypoints2.size(); j++ )
{
Point2f p2 = keypoints2[j].pt;
// Epipolar residual p2^T * F * p1; skip candidates off the epipolar line.
double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
F[6]*p1.x + F[7]*p1.y + F[8];
if( fabs(e) > eps )
continue;
const float* d2 = descriptors2.ptr<float>(j);
double dist = 0;
int k = 0;
// Accumulate squared L2 distance 8 components at a time, with early
// exit as soon as the partial sum exceeds the current second-best.
for( ; k <= dsize - 8; k += 8 )
{
float t0 = d1[k] - d2[k], t1 = d1[k+1] - d2[k+1];
float t2 = d1[k+2] - d2[k+2], t3 = d1[k+3] - d2[k+3];
float t4 = d1[k+4] - d2[k+4], t5 = d1[k+5] - d2[k+5];
float t6 = d1[k+6] - d2[k+6], t7 = d1[k+7] - d2[k+7];
dist += t0*t0 + t1*t1 + t2*t2 + t3*t3 +
t4*t4 + t5*t5 + t6*t6 + t7*t7;
if( dist >= bestDist2 )
break;
}
if( dist < bestDist2 )
{
// Finish the remaining (dsize % 8) components.
for( ; k < dsize; k++ )
{
float t = d1[k] - d2[k];
dist += t*t;
}
// Maintain the two smallest distances seen so far.
if( dist < bestDist1 )
{
bestDist2 = bestDist1;
//bestIdx2 = bestIdx1;
bestDist1 = dist;
bestIdx1 = (int)j;
}
else if( dist < bestDist2 )
{
bestDist2 = dist;
//bestIdx2 = (int)j;
}
}
}
// Ratio test: accept only a clear winner over the runner-up.
if( bestIdx1 >= 0 && bestDist1 < bestDist2*ratio )
{
// Re-check the winner's epipolar residual with a tighter bound.
Point2f p2 = keypoints1[bestIdx1].pt;
double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
F[6]*p1.x + F[7]*p1.y + F[8];
if( e > eps*0.25 )
continue;
double threshold = bestDist1/ratio;
// Uniqueness back-check: no OTHER image-1 keypoint on the same
// epipolar line may match the chosen descriptor within threshold.
const float* d22 = descriptors2.ptr<float>(bestIdx1);
int i1 = 0;
for( ; i1 < (int)keypoints1.size(); i1++ )
{
if( i1 == i )
continue;
Point2f pt1 = keypoints1[i1].pt;
const float* d11 = descriptors1.ptr<float>(i1);
double dist = 0;
e = p2.x*(F[0]*pt1.x + F[1]*pt1.y + F[2]) +
p2.y*(F[3]*pt1.x + F[4]*pt1.y + F[5]) +
F[6]*pt1.x + F[7]*pt1.y + F[8];
if( fabs(e) > eps )
continue;
for( int k = 0; k < dsize; k++ )
{
float t = d11[k] - d22[k];
dist += t*t;
if( dist >= threshold )
break;
}
if( dist < threshold )
break;
}
// Only record the match if the back-check ran to completion.
if( i1 == (int)keypoints1.size() )
matches.push_back(Vec2i(i,bestIdx1));
}
}
}
// Find the (pseudo-)intersection of two 3D rays p(s) = k*s + b: solve the
// 2x2 least-squares system for the parameters s1, s2 of the closest points
// on each ray, then return the midpoint between those two points.
static Point3f findRayIntersection(Point3f k1, Point3f b1, Point3f k2, Point3f b2)
{
// A, B are the normal equations of min ||(k1*s1+b1) - (k2*s2+b2)||^2.
// The Mat_ headers wrap these stack arrays in place (no copies).
float a[4], b[2], x[2];
a[0] = k1.dot(k1);
a[1] = a[2] = -k1.dot(k2);
a[3] = k2.dot(k2);
b[0] = k1.dot(b2 - b1);
b[1] = k2.dot(b1 - b2);
Mat_<float> A(2, 2, a), B(2, 1, b), X(2, 1, x);
solve(A, B, X);
float s1 = X.at<float>(0, 0);
float s2 = X.at<float>(1, 0);
return (k1*s1 + b1 + k2*s2 + b2)*0.5f;
}
// Triangulate a 3D point from its projections 'ps' in (at least) two views
// with rotations Rs, translations ts and a shared camera matrix. Only the
// two-view ray-intersection path is active; the multi-view DLT variant is
// kept commented out.
static Point3f triangulatePoint(const vector<Point2f>& ps,
const vector<Mat>& Rs,
const vector<Mat>& ts,
const Mat& cameraMatrix)
{
Mat_<double> K(cameraMatrix);
/*if( ps.size() > 2 )
{
Mat_<double> L(ps.size()*3, 4), U, evalues;
Mat_<double> P(3,4), Rt(3,4), Rt_part1=Rt.colRange(0,3), Rt_part2=Rt.colRange(3,4);
for( size_t i = 0; i < ps.size(); i++ )
{
double x = ps[i].x, y = ps[i].y;
Rs[i].convertTo(Rt_part1, Rt_part1.type());
ts[i].convertTo(Rt_part2, Rt_part2.type());
P = K*Rt;
for( int k = 0; k < 4; k++ )
{
L(i*3, k) = x*P(2,k) - P(0,k);
L(i*3+1, k) = y*P(2,k) - P(1,k);
L(i*3+2, k) = x*P(1,k) - y*P(0,k);
}
}
eigen(L.t()*L, evalues, U);
CV_Assert(evalues(0,0) >= evalues(3,0));
double W = fabs(U(3,3)) > FLT_EPSILON ? 1./U(3,3) : 0;
return Point3f((float)(U(3,0)*W), (float)(U(3,1)*W), (float)(U(3,2)*W));
}
else*/
{
// Back-project each image point to a ray in world coordinates:
// direction R^T * K^-1 * m, origin -R^T * t, then intersect the rays.
Mat_<float> iK = K.inv();
Mat_<float> R1t = Mat_<float>(Rs[0]).t();
Mat_<float> R2t = Mat_<float>(Rs[1]).t();
Mat_<float> m1 = (Mat_<float>(3,1) << ps[0].x, ps[0].y, 1);
Mat_<float> m2 = (Mat_<float>(3,1) << ps[1].x, ps[1].y, 1);
Mat_<float> K1 = R1t*(iK*m1), K2 = R2t*(iK*m2);
Mat_<float> B1 = -R1t*Mat_<float>(ts[0]);
Mat_<float> B2 = -R2t*Mat_<float>(ts[1]);
return findRayIntersection(*K1.ptr<Point3f>(), *B1.ptr<Point3f>(),
*K2.ptr<Point3f>(), *B2.ptr<Point3f>());
}
}
// Self-test for triangulatePoint(): generate random poses and 3D points,
// project them (with small per-view perturbations of the points), then
// re-triangulate and require the result to stay close to the originals.
static void triangulatePoint_test(void)
{
int i, n = 100;
vector<Point3f> objpt(n), delta1(n), delta2(n);
Mat rvec1(3,1,CV_32F), tvec1(3,1,CV_64F);
Mat rvec2(3,1,CV_32F), tvec2(3,1,CV_64F);
Mat objptmat(objpt), deltamat1(delta1), deltamat2(delta2);
// Two random camera poses and a random point cloud in [-10,10]^3.
randu(rvec1, Scalar::all(-10), Scalar::all(10));
randu(tvec1, Scalar::all(-10), Scalar::all(10));
randu(rvec2, Scalar::all(-10), Scalar::all(10));
randu(tvec2, Scalar::all(-10), Scalar::all(10));
randu(objptmat, Scalar::all(-10), Scalar::all(10));
// Small independent perturbations applied to each view's points.
double eps = 1e-2;
randu(deltamat1, Scalar::all(-eps), Scalar::all(eps));
randu(deltamat2, Scalar::all(-eps), Scalar::all(eps));
vector<Point2f> imgpt1, imgpt2;
Mat_<float> cameraMatrix(3,3);
double fx = 1000., fy = 1010., cx = 400.5, cy = 300.5;
cameraMatrix << fx, 0, cx, 0, fy, cy, 0, 0, 1;
projectPoints(Mat(objpt)+Mat(delta1), rvec1, tvec1, cameraMatrix, Mat(), imgpt1);
projectPoints(Mat(objpt)+Mat(delta2), rvec2, tvec2, cameraMatrix, Mat(), imgpt2);
vector<Point3f> objptt(n);
vector<Point2f> pts(2);
vector<Mat> Rv(2), tv(2);
Rodrigues(rvec1, Rv[0]);
Rodrigues(rvec2, Rv[1]);
tv[0] = tvec1; tv[1] = tvec2;
for( i = 0; i < n; i++ )
{
pts[0] = imgpt1[i]; pts[1] = imgpt2[i];
objptt[i] = triangulatePoint(pts, Rv, tv, cameraMatrix);
}
// Max coordinate error must stay below 0.1 despite the perturbations.
double err = norm(Mat(objpt), Mat(objptt), NORM_INF);
CV_Assert(err < 1e-1);
}
// (image index, keypoint index) pair; also used as a flat descriptor-index pair.
typedef pair<int, int> Pair2i;
// Set of index pairs implemented as a map (only key presence is used).
typedef map<Pair2i, int> Set2i;
// Equivalence predicate for partition(): two (image, keypoint) entries are
// equivalent when their flattened descriptor indices (dstart offset + local
// keypoint index) were recorded as a matched pair in 'pairs'.
struct EqKeypoints
{
EqKeypoints(const vector<int>* _dstart, const Set2i* _pairs)
: dstart(_dstart), pairs(_pairs) {}
bool operator()(const Pair2i& a, const Pair2i& b) const
{
return pairs->find(Pair2i(dstart->at(a.first) + a.second,
dstart->at(b.first) + b.second)) != pairs->end();
}
const vector<int>* dstart;
const Set2i* pairs;
};
// Partition _vec into equivalence classes under 'predicate' using a
// union-find (disjoint-set) forest with union-by-rank and path compression.
// On return labels[i] holds the zero-based class index of _vec[i]; the
// number of classes is returned. Overall cost is O(N^2) predicate calls.
template<typename _Tp, class _EqPredicate> static
int partition( const std::vector<_Tp>& _vec, std::vector<int>& labels,
_EqPredicate predicate=_EqPredicate())
{
int i, j, N = (int)_vec.size();
const _Tp* vec = &_vec[0];
// Each node stores its parent index (or -1 for a root) and its rank.
const int PARENT=0;
const int RANK=1;
std::vector<int> _nodes(N*2);
int (*nodes)[2] = (int(*)[2])&_nodes[0];
// The first O(N) pass: create N single-vertex trees
for(i = 0; i < N; i++)
{
nodes[i][PARENT]=-1;
nodes[i][RANK] = 0;
}
// The main O(N^2) pass: merge connected components
for( i = 0; i < N; i++ )
{
int root = i;
// find root
while( nodes[root][PARENT] >= 0 )
root = nodes[root][PARENT];
for( j = 0; j < N; j++ )
{
if( i == j || !predicate(vec[i], vec[j]))
continue;
int root2 = j;
while( nodes[root2][PARENT] >= 0 )
root2 = nodes[root2][PARENT];
if( root2 != root )
{
// unite both trees
int rank = nodes[root][RANK], rank2 = nodes[root2][RANK];
if( rank > rank2 )
nodes[root2][PARENT] = root;
else
{
nodes[root][PARENT] = root2;
nodes[root2][RANK] += rank == rank2;
root = root2;
}
CV_Assert( nodes[root][PARENT] < 0 );
int k = j, parent;
// compress the path from node2 to root
while( (parent = nodes[k][PARENT]) >= 0 )
{
nodes[k][PARENT] = root;
k = parent;
}
// compress the path from node to root
k = i;
while( (parent = nodes[k][PARENT]) >= 0 )
{
nodes[k][PARENT] = root;
k = parent;
}
}
}
}
// Final O(N) pass: enumerate classes
labels.resize(N);
int nclasses = 0;
for( i = 0; i < N; i++ )
{
int root = i;
while( nodes[root][PARENT] >= 0 )
root = nodes[root][PARENT];
// re-use the rank as the class label
// (ranks are no longer needed, so a root's rank is overwritten with the
// bitwise complement of its class id the first time the root is seen)
if( nodes[root][RANK] >= 0 )
nodes[root][RANK] = ~nclasses++;
labels[i] = ~nodes[root][RANK];
}
return nclasses;
}
// Build a sparse 3D point model from a set of calibrated views.
// Pipeline:
//   1. detect keypoints + descriptors in every image (offset by each ROI);
//   2. for every image pair, find epipolar-constrained correspondences,
//      triangulate each one and keep pairs with low reprojection error;
//   3. cluster matched keypoints into equivalence classes via partition();
//   4. triangulate one 3D point per cluster and copy its descriptors into
//      the model; finally re-project the cloud onto each frame for visual
//      inspection ('q' quits the preview loop).
// Outputs are written into 'model' (points, didx, descriptors).
static void build3dmodel( const Ptr<FeatureDetector>& detector,
                          const Ptr<DescriptorExtractor>& descriptorExtractor,
                          const vector<Point3f>& /*modelBox*/,
                          const vector<string>& imageList,
                          const vector<Rect>& roiList,
                          const vector<Vec6f>& poseList,
                          const Mat& cameraMatrix,
                          PointModel& model )
{
    int progressBarSize = 10;

    const double Feps = 5;               // max epipolar residual (pixels)
    const double DescriptorRatio = 0.7;  // best/second-best ratio-test bound

    vector<vector<KeyPoint> > allkeypoints;
    vector<int> dstart;                  // prefix sums: first descriptor row per image
    vector<float> alldescriptorsVec;
    vector<Vec2i> pairwiseMatches;
    vector<Mat> Rs, ts;
    int descriptorSize = 0;
    Mat descriptorbuf;
    Set2i pairs, keypointsIdxMap;

    model.points.clear();
    model.didx.clear();

    dstart.push_back(0);

    size_t nimages = imageList.size();
    size_t nimagePairs = (nimages - 1)*nimages/2 - nimages;

    printf("\nComputing descriptors ");

    // 1. find all the keypoints and all the descriptors
    for( size_t i = 0; i < nimages; i++ )
    {
        Mat img = imread(imageList[i], 1), gray;
        cvtColor(img, gray, COLOR_BGR2GRAY);

        vector<KeyPoint> keypoints;
        detector->detect(gray, keypoints);
        descriptorExtractor->compute(gray, keypoints, descriptorbuf);

        // Shift keypoints from ROI-local to full-frame coordinates.
        Point2f roiofs = roiList[i].tl();
        for( size_t k = 0; k < keypoints.size(); k++ )
            keypoints[k].pt += roiofs;
        allkeypoints.push_back(keypoints);

        // Normalize descriptors to a continuous CV_32F buffer before copying.
        Mat buf = descriptorbuf;
        if( !buf.isContinuous() || buf.type() != CV_32F )
        {
            buf.release();
            descriptorbuf.convertTo(buf, CV_32F);
        }
        descriptorSize = buf.cols;

        size_t prev = alldescriptorsVec.size();
        size_t delta = buf.rows*buf.cols;
        alldescriptorsVec.resize(prev + delta);
        std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
                  alldescriptorsVec.begin() + prev);
        dstart.push_back(dstart.back() + (int)keypoints.size());

        Mat R, t;
        unpackPose(poseList[i], R, t);
        Rs.push_back(R);
        ts.push_back(t);

        if( (i+1)*progressBarSize/nimages > i*progressBarSize/nimages )
        {
            putchar('.');
            fflush(stdout);
        }
    }

    // View all collected descriptors as one big matrix (no copy).
    Mat alldescriptors((int)alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
                       &alldescriptorsVec[0]);

    printf("\nOk. total images = %d. total keypoints = %d\n",
           (int)nimages, alldescriptors.rows);

    printf("\nFinding correspondences ");

    int pairsFound = 0;
    vector<Point2f> pts_k(2);
    vector<Mat> Rs_k(2), ts_k(2);

    // 2. find pairwise correspondences
    for( size_t i = 0; i < nimages; i++ )
        for( size_t j = i+1; j < nimages; j++ )
        {
            const vector<KeyPoint>& keypoints1 = allkeypoints[i];
            const vector<KeyPoint>& keypoints2 = allkeypoints[j];
            Mat descriptors1 = alldescriptors.rowRange(dstart[i], dstart[i+1]);
            Mat descriptors2 = alldescriptors.rowRange(dstart[j], dstart[j+1]);

            Mat F = getFundamentalMat(Rs[i], ts[i], Rs[j], ts[j], cameraMatrix);
            findConstrainedCorrespondences( F, keypoints1, keypoints2,
                                            descriptors1, descriptors2,
                                            pairwiseMatches, Feps, DescriptorRatio );

            for( size_t k = 0; k < pairwiseMatches.size(); k++ )
            {
                int i1 = pairwiseMatches[k][0], i2 = pairwiseMatches[k][1];
                pts_k[0] = keypoints1[i1].pt;
                pts_k[1] = keypoints2[i2].pt;
                Rs_k[0] = Rs[i]; Rs_k[1] = Rs[j];
                ts_k[0] = ts[i]; ts_k[1] = ts[j];
                Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);

                // Keep the pair only if the triangulated point re-projects
                // close to both observations (combined error <= 5 px).
                vector<Point3f> objpts;
                objpts.push_back(objpt);
                vector<Point2f> imgpts1, imgpts2;
                projectPoints(Mat(objpts), Rs_k[0], ts_k[0], cameraMatrix, Mat(), imgpts1);
                projectPoints(Mat(objpts), Rs_k[1], ts_k[1], cameraMatrix, Mat(), imgpts2);

                double e1 = norm(imgpts1[0] - keypoints1[i1].pt);
                double e2 = norm(imgpts2[0] - keypoints2[i2].pt);
                if( e1 + e2 > 5 )
                    continue;

                pairsFound++;

                // Record the match (both orientations) in flattened descriptor
                // indices, and remember which keypoints participate at all.
                pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
                pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
                keypointsIdxMap[Pair2i((int)i,i1)] = 1;
                keypointsIdxMap[Pair2i((int)j,i2)] = 1;
            }

            if( (i+1)*progressBarSize/nimagePairs > i*progressBarSize/nimagePairs )
            {
                putchar('.');
                fflush(stdout);
            }
        }

    printf("\nOk. Total pairs = %d\n", pairsFound );

    // 3. build the keypoint clusters
    vector<Pair2i> keypointsIdx;
    Set2i::iterator kpidx_it = keypointsIdxMap.begin(), kpidx_end = keypointsIdxMap.end();

    for( ; kpidx_it != kpidx_end; ++kpidx_it )
        keypointsIdx.push_back(kpidx_it->first);

    printf("\nClustering correspondences ");

    vector<int> labels;
    int nclasses = partition( keypointsIdx, labels, EqKeypoints(&dstart, &pairs) );

    printf("\nOk. Total classes (i.e. 3d points) = %d\n", nclasses );

    model.descriptors.create((int)keypointsIdx.size(), descriptorSize, CV_32F);
    model.didx.resize(nclasses);
    model.points.resize(nclasses);
    vector<vector<Pair2i> > clusters(nclasses);

    for( size_t i = 0; i < keypointsIdx.size(); i++ )
        clusters[labels[i]].push_back(keypointsIdx[i]);

    // 4. now compute 3D points corresponding to each cluster and fill in the model data
    printf("\nComputing 3D coordinates ");

    int globalDIdx = 0;
    for( int k = 0; k < nclasses; k++ )
    {
        int i, n = (int)clusters[k].size();
        pts_k.resize(n);
        Rs_k.resize(n);
        ts_k.resize(n);
        model.didx[k].resize(n);
        for( i = 0; i < n; i++ )
        {
            int imgidx = clusters[k][i].first, ptidx = clusters[k][i].second;
            Mat dstrow = model.descriptors.row(globalDIdx);
            alldescriptors.row(dstart[imgidx] + ptidx).copyTo(dstrow);
            model.didx[k][i] = globalDIdx++;

            pts_k[i] = allkeypoints[imgidx][ptidx].pt;
            Rs_k[i] = Rs[imgidx];
            ts_k[i] = ts[imgidx];
        }
        Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);
        model.points[k] = objpt;

        // BUGFIX: the progress condition previously used 'i', which always
        // equals n here (the inner loop has finished), so it tracked cluster
        // size instead of progress. Use the cluster index 'k'.
        if( (k+1)*progressBarSize/nclasses > k*progressBarSize/nclasses )
        {
            putchar('.');
            fflush(stdout);
        }
    }

    Mat img(768, 1024, CV_8UC3);
    vector<Point2f> imagePoints;
    namedWindow("Test", 1);

    // visualize the cloud: re-project the model onto each frame image
    for( size_t i = 0; i < nimages; i++ )
    {
        img = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
        projectPoints(Mat(model.points), Rs[i], ts[i], cameraMatrix, Mat(), imagePoints);

        for( int k = 0; k < (int)imagePoints.size(); k++ )
            circle(img, imagePoints[k], 2, Scalar(0,255,0), -1, LINE_AA, 0);

        imshow("Test", img);
        int c = waitKey();
        if( c == 'q' || c == 'Q' )
            break;
    }
}
// Entry point: parse command-line options, load camera intrinsics and the
// model view index, run the self-test, build the 3D model and save it.
int main(int argc, char** argv)
{
const char* intrinsicsFilename = 0;
const char* modelName = 0;
// Detector/extractor names are passed to the features2d factory methods.
const char* detectorName = "SURF";
const char* descriptorExtractorName = "SURF";
vector<Point3f> modelBox;
vector<string> imageList;
vector<Rect> roiList;
vector<Vec6f> poseList;
if(argc < 3)
{
help();
return -1;
}
// Minimal hand-rolled option parsing: each flag consumes the next argv.
for( int i = 1; i < argc; i++ )
{
if( strcmp(argv[i], "-i") == 0 )
intrinsicsFilename = argv[++i];
else if( strcmp(argv[i], "-m") == 0 )
modelName = argv[++i];
else if( strcmp(argv[i], "-d") == 0 )
detectorName = argv[++i];
else if( strcmp(argv[i], "-de") == 0 )
descriptorExtractorName = argv[++i];
else
{
help();
printf("Incorrect option\n");
return -1;
}
}
if( !intrinsicsFilename || !modelName )
{
printf("Some of the required parameters are missing\n");
help();
return -1;
}
// Sanity-check the triangulation code before doing any real work.
triangulatePoint_test();
Mat cameraMatrix, distCoeffs;
Size calibratedImageSize;
// NOTE(review): the return value of readCameraMatrix is ignored here.
readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize);
Ptr<FeatureDetector> detector = FeatureDetector::create(detectorName);
Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create(descriptorExtractorName);
// The segmentation step is expected to have produced <model>_segm/frame_index.yml.
string modelIndexFilename = format("%s_segm/frame_index.yml", modelName);
if(!readModelViews( modelIndexFilename, modelBox, imageList, roiList, poseList))
{
printf("Can not read the model. Check the parameters and the working directory\n");
help();
return -1;
}
PointModel model;
model.name = modelName;
build3dmodel( detector, descriptorExtractor, modelBox,
imageList, roiList, poseList, cameraMatrix, model );
string outputModelName = format("%s_model.yml.gz", modelName);
printf("\nDone! Now saving the model ...\n");
writeModel(outputModelName, modelName, model);
return 0;
}

View File

@@ -2,6 +2,8 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <cctype>

View File

@@ -1,334 +0,0 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include <stdio.h>
using namespace cv;
using namespace std;
// Describe what the artificial-calibration demo does and how to invoke it.
static void help()
{
    fputs( "\nThis code generates an artificial camera and artificial chessboard images,\n"
           "and then calibrates. It is basically test code for calibration that shows\n"
           "how to package calibration points and then calibrate the camera.\n"
           "Usage:\n"
           "./calibration_artificial\n\n", stdout );
}
namespace cv
{
/* copy of class defines int tests/cv/chessboardgenerator.h */
// Renders a synthetic chessboard with known corner geometry into a
// background image, as seen through a given camera matrix and distortion.
class ChessBoardGenerator
{
public:
// Virtual sensor dimensions used when placing the board.
double sensorWidth;
double sensorHeight;
// Number of sample points generated along each square edge.
size_t squareEdgePointsNum;
double min_cos;
mutable double cov;
// Board size in squares; corners produced = (w-1) x (h-1).
Size patternSize;
// Supersampling factor used while rendering.
int rendererResolutionMultiplier;
ChessBoardGenerator(const Size& patternSize = Size(8, 6));
// Render the board over 'bg' and return the image; also outputs the
// ground-truth corner positions.
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
Size cornersSize() const;
private:
void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
void generateBasis(Point3f& pb1, Point3f& pb2) const;
Point3f generateChessBoardCenter(const Mat& camMat, const Size& imgSize) const;
// Current board pose used by the renderer.
Mat rvec, tvec;
};
}
// Rendered image resolution, board pattern size (in squares) and the number
// of synthetic chessboard views to generate for calibration.
const Size imgSize(800, 600);
const Size brdSize(8, 7);
const size_t brds_num = 20;
// Stream a Mat_ row by row, every element followed by a single space.
template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
{
    for(int r = 0; r < mat.rows; ++r)
        for(int c = 0; c < mat.cols; ++c)
            out << mat(r, c) << " ";
    return out;
}
// Entry point: render synthetic chessboard views with a known camera, find
// the corners back with findChessboardCorners, calibrate from them, and
// compare the estimated intrinsics against the ground truth.
int main()
{
help();
cout << "Initializing background...";
// Random noisy background, blurred to look camera-like.
Mat background(imgSize, CV_8UC3);
randu(background, Scalar::all(32), Scalar::all(255));
GaussianBlur(background, background, Size(5, 5), 2);
cout << "Done" << endl;
cout << "Initializing chess board generator...";
ChessBoardGenerator cbg(brdSize);
cbg.rendererResolutionMultiplier = 4;
cout << "Done" << endl;
/* camera params */
// Ground-truth intrinsics and (deliberately strong) distortion.
Mat_<double> camMat(3, 3);
camMat << 300., 0., background.cols/2., 0, 300., background.rows/2., 0., 0., 1.;
Mat_<double> distCoeffs(1, 5);
distCoeffs << 1.2, 0.2, 0., 0., 0.;
cout << "Generating chessboards...";
vector<Mat> boards(brds_num);
vector<Point2f> tmp;
for(size_t i = 0; i < brds_num; ++i)
cout << (boards[i] = cbg(background, camMat, distCoeffs, tmp), i) << " ";
cout << "Done" << endl;
// Ideal 3D corner grid at z = 0 (one point per inner corner).
vector<Point3f> chessboard3D;
for(int j = 0; j < cbg.cornersSize().height; ++j)
for(int i = 0; i < cbg.cornersSize().width; ++i)
chessboard3D.push_back(Point3i(i, j, 0));
/* init points */
vector< vector<Point3f> > objectPoints;
vector< vector<Point2f> > imagePoints;
cout << endl << "Finding chessboards' corners...";
// Detect corners in every rendered board; only views where detection
// succeeds contribute to the calibration input.
for(size_t i = 0; i < brds_num; ++i)
{
cout << i;
namedWindow("Current chessboard"); imshow("Current chessboard", boards[i]); waitKey(100);
bool found = findChessboardCorners(boards[i], cbg.cornersSize(), tmp);
if (found)
{
imagePoints.push_back(tmp);
objectPoints.push_back(chessboard3D);
cout<< "-found ";
}
else
cout<< "-not-found ";
drawChessboardCorners(boards[i], cbg.cornersSize(), Mat(tmp), found);
imshow("Current chessboard", boards[i]); waitKey(1000);
}
cout << "Done" << endl;
destroyAllWindows();
Mat camMat_est;
Mat distCoeffs_est;
vector<Mat> rvecs, tvecs;
cout << "Calibrating...";
double rep_err = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs, tvecs);
cout << "Done" << endl;
// Report per-corner average reprojection error plus both parameter sets.
cout << endl << "Average Reprojection error: " << rep_err/brds_num/cbg.cornersSize().area() << endl;
cout << "==================================" << endl;
cout << "Original camera matrix:\n" << camMat << endl;
cout << "Original distCoeffs:\n" << distCoeffs << endl;
cout << "==================================" << endl;
cout << "Estimated camera matrix:\n" << (Mat_<double>&)camMat_est << endl;
cout << "Estimated distCoeffs:\n" << (Mat_<double>&)distCoeffs_est << endl;
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Copy of tests/cv/src/chessboardgenerator code. Just do not want to add dependency.
// Copy of the test-suite chessboard generator (see note above in the file).
// Boards are rendered with the identity rotation and zero translation; the
// per-view pose is randomized inside operator() instead.
ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
    squareEdgePointsNum(200), min_cos(std::sqrt(2.f)*0.5f), cov(0.5),
    patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
{
    // Identity rotation expressed as a (zero) Rodrigues vector.
    Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
}
void cv::ChessBoardGenerator::generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const
{
Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
for(size_t n = 0; n < squareEdgePointsNum; ++n)
out.push_back( p1 + step * (float)n);
}
// The board has one interior corner fewer than squares in each dimension.
Size cv::ChessBoardGenerator::cornersSize() const
{
    const Size interior(patternSize.width - 1, patternSize.height - 1);
    return interior;
}
// Functor that scales a 2D point by an integer factor; used to upscale
// contour coordinates when rendering at a higher resolution.
struct Mult
{
    float m;
    // explicit: an int should never silently convert to a scaling functor.
    // (All call sites in this file construct Mult explicitly.)
    explicit Mult(int mult) : m((float)mult) {}
    Point2f operator()(const Point2f& p)const { return p * m; }
};
// Generates two unit vectors (pb1, pb2) spanning a random board plane.
// The plane normal is drawn uniformly but rejected until its z component is
// large enough (|n.z| > min_cos) that the board is not viewed edge-on.
void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
{
    RNG& rng = theRNG();
    Vec3f n;
    for(;;)
    {
        n[0] = rng.uniform(-1.f, 1.f);
        n[1] = rng.uniform(-1.f, 1.f);
        n[2] = rng.uniform(-1.f, 1.f);
        float len = (float)norm(n);
        n[0]/=len;
        n[1]/=len;
        n[2]/=len;
        if (fabs(n[2]) > min_cos)
            break;
    }

    // Two in-plane directions orthogonal to n (and to each other) via cross
    // products with a perturbed copy of n.
    Vec3f n_temp = n; n_temp[0] += 100;
    Vec3f b1 = n.cross(n_temp);
    Vec3f b2 = n.cross(b1);
    float len_b1 = (float)norm(b1);
    float len_b2 = (float)norm(b2);
    pb1 = Point3f(b1[0]/len_b1, b1[1]/len_b1, b1[2]/len_b1);
    // BUGFIX: the x component was previously divided by len_b1 instead of
    // len_b2, so pb2 was not a unit vector and skewed the generated board.
    pb2 = Point3f(b2[0]/len_b2, b2[1]/len_b2, b2[2]/len_b2);
}
// Renders a chessboard onto `bg`:
//  - `zero` is the board origin in camera space, (pb1, pb2) the in-plane basis,
//  - sqWidth/sqHeight are square dimensions along pb1/pb2,
//  - `whole` holds the 4 corners of the full (white) board quadrilateral,
//  - `corners` receives the projected interior corner positions.
// Rendering projects each square's outline densely, simplifies it with
// approxPolyDP, and fills the polygons (optionally at a higher resolution,
// then downscales with INTER_AREA for antialiasing).
Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
    const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
    float sqWidth, float sqHeight, const vector<Point3f>& whole,
    vector<Point2f>& corners) const
{
    // Outlines of the black squares (checker pattern: i and j have equal parity).
    vector< vector<Point> > squares_black;
    for(int i = 0; i < patternSize.width; ++i)
        for(int j = 0; j < patternSize.height; ++j)
            if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
            {
                vector<Point3f> pts_square3d;
                vector<Point2f> pts_square2d;

                // Square (i, j) corners in 3D, walked as a closed polygon.
                Point3f p1 = zero + (i + 0) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                Point3f p2 = zero + (i + 1) * sqWidth * pb1 + (j + 0) * sqHeight * pb2;
                Point3f p3 = zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                Point3f p4 = zero + (i + 0) * sqWidth * pb1 + (j + 1) * sqHeight * pb2;
                // Dense sampling so distortion bends the edges correctly.
                generateEdge(p1, p2, pts_square3d);
                generateEdge(p2, p3, pts_square3d);
                generateEdge(p3, p4, pts_square3d);
                generateEdge(p4, p1, pts_square3d);

                projectPoints( Mat(pts_square3d), rvec, tvec, camMat, distCoeffs, pts_square2d);
                squares_black.resize(squares_black.size() + 1);
                vector<Point2f> temp;
                approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
                // Scale up into the supersampled render coordinate system.
                transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
            }

    /* calculate corners */
    // Interior corners (the ones findChessboardCorners detects).
    vector<Point3f> corners3d;
    for(int j = 0; j < patternSize.height - 1; ++j)
        for(int i = 0; i < patternSize.width - 1; ++i)
            corners3d.push_back(zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2);
    corners.clear();
    projectPoints( Mat(corners3d), rvec, tvec, camMat, distCoeffs, corners);

    // Outline of the entire board, filled white beneath the black squares.
    vector<Point3f> whole3d;
    vector<Point2f> whole2d;
    generateEdge(whole[0], whole[1], whole3d);
    generateEdge(whole[1], whole[2], whole3d);
    generateEdge(whole[2], whole[3], whole3d);
    generateEdge(whole[3], whole[0], whole3d);
    projectPoints( Mat(whole3d), rvec, tvec, camMat, distCoeffs, whole2d);
    vector<Point2f> temp_whole2d;
    approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);

    vector< vector<Point > > whole_contour(1);
    transform(temp_whole2d.begin(), temp_whole2d.end(),
        back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));

    Mat result;
    if (rendererResolutionMultiplier == 1)
    {
        result = bg.clone();
        drawContours(result, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
        drawContours(result, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
    }
    else
    {
        // Render at N-times resolution, then shrink with INTER_AREA for
        // high-quality antialiased edges.
        Mat tmp;
        resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
        drawContours(tmp, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
        drawContours(tmp, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
        resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
    }
    return result;
}
// Renders one randomly posed chessboard over `bg` and returns the composed
// image; `corners` receives the projected interior corner coordinates.
// The board center direction is drawn inside a `cov` fraction of the field of
// view, then the board is shrunk until all four of its corners project inside
// the image.
Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
{
    // NOTE(review): cov is assigned inside a const method, so it is
    // presumably declared mutable in the class — confirm in the declaration.
    cov = std::min(cov, 0.8);
    double fovx, fovy, focalLen;
    Point2d principalPoint;
    double aspect;
    // Recover the field of view from the intrinsics for the given sensor size.
    calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
        fovx, fovy, focalLen, principalPoint, aspect);

    RNG& rng = theRNG();

    // Random distance and random horizontal/vertical viewing angles, limited
    // to a `cov` fraction of the half field of view (degrees -> radians).
    float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
    float ah = static_cast<float>(rng.uniform(-fovx/2 * cov, fovx/2 * cov) * CV_PI / 180);
    float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);

    // Board center position in camera coordinates.
    Point3f p;
    p.z = cos(ah) * d1;
    p.x = sin(ah) * d1;
    p.y = p.z * tan(av);

    Point3f pb1, pb2;
    generateBasis(pb1, pb2);

    // Initial board half-extent, sized to roughly fill the narrower FOV axis.
    float cbHalfWidth = static_cast<float>(norm(p) * sin( std::min(fovx, fovy) * 0.5 * CV_PI / 180));
    float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;

    vector<Point3f> pts3d(4);
    vector<Point2f> pts2d(4);
    for(;;)
    {
        // Four board corners around center p in the (pb1, pb2) plane.
        pts3d[0] = p + pb1 * cbHalfWidth + cbHalfHeight * pb2;
        pts3d[1] = p + pb1 * cbHalfWidth - cbHalfHeight * pb2;
        pts3d[2] = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
        pts3d[3] = p - pb1 * cbHalfWidth + cbHalfHeight * pb2;
        /* can remake with better perf */
        projectPoints( Mat(pts3d), rvec, tvec, camMat, distCoeffs, pts2d);

        // Shrink the board by 20% until all corners land inside the image.
        bool inrect1 = pts2d[0].x < bg.cols && pts2d[0].y < bg.rows && pts2d[0].x > 0 && pts2d[0].y > 0;
        bool inrect2 = pts2d[1].x < bg.cols && pts2d[1].y < bg.rows && pts2d[1].x > 0 && pts2d[1].y > 0;
        bool inrect3 = pts2d[2].x < bg.cols && pts2d[2].y < bg.rows && pts2d[2].x > 0 && pts2d[2].y > 0;
        bool inrect4 = pts2d[3].x < bg.cols && pts2d[3].y < bg.rows && pts2d[3].x > 0 && pts2d[3].y > 0;

        if ( inrect1 && inrect2 && inrect3 && inrect4)
            break;

        cbHalfWidth*=0.8f;
        cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
    }

    // Leave a one-square white margin around the checker pattern.
    cbHalfWidth *= static_cast<float>(patternSize.width)/(patternSize.width + 1);
    cbHalfHeight *= static_cast<float>(patternSize.height)/(patternSize.height + 1);

    // Board origin (corner of square (0,0)) and square dimensions.
    Point3f zero = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
    float sqWidth = 2 * cbHalfWidth/patternSize.width;
    float sqHeight = 2 * cbHalfHeight/patternSize.height;

    return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
}

View File

@@ -1,6 +1,7 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

View File

@@ -1,75 +0,0 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/contrib.hpp"
#include <iostream>
using namespace cv;
using namespace std;
// Prints usage information for the chamfer matching sample.
static void help()
{
    static const char* message =
        "\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
        "edge template and a query edge image.\n"
        "Usage: \n"
        "./chamfer <image edge map> <template edge map>,"
        " By default the inputs are logo_in_clutter.png logo.png\n";
    cout << message;
}
// CommandLineParser specification: two positional arguments (query edge map
// and template edge map) with default file names.
const char* keys =
{
    "{@logo1 |logo_in_clutter.png |image edge map }"
    "{@logo2 |logo.png |template edge map}"
};
// Loads a query edge map and a template edge map, runs chamfer matching, and
// paints the best matching template contour onto the query image in green.
int main( int argc, const char** argv )
{
    help();
    CommandLineParser parser(argc, argv, keys);
    string image = parser.get<string>(0);
    string templ = parser.get<string>(1);
    // Both inputs are loaded as single-channel (grayscale) edge maps.
    Mat img = imread(image.c_str(), 0);
    Mat tpl = imread(templ.c_str(), 0);
    if (img.empty() || tpl.empty())
    {
        cout << "Could not read image file " << image << " or " << templ << "." << endl;
        return -1;
    }
    // Color copy of the query image, used only for visualization.
    Mat cimg;
    cvtColor(img, cimg, COLOR_GRAY2BGR);
    // if the image and the template are not edge maps but normal grayscale images,
    // you might want to uncomment the lines below to produce the maps. You can also
    // run Sobel instead of Canny.
    // Canny(img, img, 5, 50, 3);
    // Canny(tpl, tpl, 5, 50, 3);
    vector<vector<Point> > results;
    vector<float> costs;
    // Index of the best match in `results`, or a negative value on failure.
    int best = chamerMatching( img, tpl, results, costs );
    if( best < 0 )
    {
        cout << "matching not found" << endl;
        return -1;
    }
    // Color every in-bounds pixel of the best matching contour green.
    size_t i, n = results[best].size();
    for( i = 0; i < n; i++ )
    {
        Point pt = results[best][i];
        if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
            cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
    }
    imshow("result", cimg);
    waitKey();
    return 0;
}

View File

@@ -23,6 +23,7 @@
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>

View File

@@ -33,6 +33,7 @@
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>

View File

@@ -1,5 +1,6 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

View File

@@ -17,8 +17,8 @@ static void help()
<< "\n------------------------------------------------------------------\n"
<< " This program shows the serial out capabilities of cv::Mat\n"
<< "That is, cv::Mat M(...); cout << M; Now works.\n"
<< "Output can be formated to OpenCV, python, numpy, csv and C styles"
<< "Usage:\n"
<< "Output can be formated to OpenCV, matlab, python, numpy, csv and \n"
<< "C styles Usage:\n"
<< "./cvout_sample\n"
<< "------------------------------------------------------------------\n\n"
<< endl;
@@ -36,6 +36,7 @@ int main(int,char**)
randu(r, Scalar::all(0), Scalar::all(255));
cout << "r (default) = \n" << r << ";" << endl << endl;
cout << "r (matlab) = \n" << format(r, Formatter::FMT_MATLAB) << ";" << endl << endl;
cout << "r (python) = \n" << format(r, Formatter::FMT_PYTHON) << ";" << endl << endl;
cout << "r (numpy) = \n" << format(r, Formatter::FMT_NUMPY) << ";" << endl << endl;
cout << "r (csv) = \n" << format(r, Formatter::FMT_CSV) << ";" << endl << endl;

View File

@@ -10,6 +10,7 @@
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>

View File

@@ -2,9 +2,10 @@
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/videoio/videoio.hpp>
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <stdio.h>
#include <string>

View File

@@ -1,5 +1,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

View File

@@ -1,3 +1,4 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

View File

@@ -1,193 +0,0 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect.hpp>
#include "opencv2/contrib/detection_based_tracker.hpp"
#include <vector>
#include <iostream>
#include <stdio.h>
#define DEBUGLOGS 1
#ifdef ANDROID
#include <android/log.h>
#define LOG_TAG "DETECTIONBASEDTRACKER__TEST_APPLICAT"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI0(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
#define LOGW0(...) ((void)__android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__))
#define LOGE0(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
#else
#include <stdio.h>
#define LOGD0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGI0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGW0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGE0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#endif
#if DEBUGLOGS
#define LOGD(_str, ...) LOGD0(_str , ## __VA_ARGS__)
#define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
#define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
#define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
#else
#define LOGD(...) do{} while(0)
#define LOGI(...) do{} while(0)
#define LOGW(...) do{} while(0)
#define LOGE(...) do{} while(0)
#endif
using namespace cv;
using namespace std;
#define ORIGINAL 0
#define SHOULD_USE_EXTERNAL_BUFFERS 1
// Emits the command-line help, one log line per entry. The strings are used
// directly as printf-style format strings (hence the escaped "%%08d").
static void usage()
{
    static const char* const lines[] =
    {
        "usage: filepattern outfilepattern cascadefile",
        "\t where ",
        "\t filepattern --- pattern for the paths to the source images",
        "\t (e.g.\"./Videos/FACESJPG2/Faces2_%%08d.jpg\" ",
        "\t outfilepattern --- pattern for the paths for images which will be generated",
        "\t (e.g.\"./resFaces2_%%08d.jpg\" ",
        "\t cascadefile --- path to the cascade file",
        "\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" "
    };
    for (size_t i = 0; i < sizeof(lines)/sizeof(lines[0]); ++i)
        LOGE0(lines[i]);
}
// Adapts cv::CascadeClassifier to the DetectionBasedTracker::IDetector
// interface, so the same cascade type can serve as both the main and the
// tracking detector.
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
    // Takes shared ownership of a loaded cascade; asserts it is non-null.
    CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
        Detector(detector)
    {
        CV_Assert(detector);
    }

    // Runs multi-scale detection, constrained by the min/max object sizes
    // configured on the IDetector base class.
    void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
    {
        Detector->detectMultiScale(Image, objects, 1.1, 3, 0, minObjSize, maxObjSize);
    }

    virtual ~CascadeDetectorAdapter()
    {}

private:
    CascadeDetectorAdapter(); // no default construction: a cascade is required
    cv::Ptr<cv::CascadeClassifier> Detector;
};
// Driver for the detection-based tracker sample.
// argv: [1] printf pattern for input image paths, [2] printf pattern for
// output image paths, [3] cascade file path.
// Reads numbered input frames until one is missing, runs the tracker on each
// frame, draws the detected rectangles, then writes all frames back out.
// Returns 0 on success, -1 on bad arguments.
static int test_FaceDetector(int argc, char *argv[])
{
    if (argc < 4)
    {
        usage();
        return -1;
    }

    const char* filepattern=argv[1];
    const char* outfilepattern=argv[2];
    const char* cascadefile=argv[3];
    LOGD0("filepattern='%s'", filepattern);
    LOGD0("outfilepattern='%s'", outfilepattern);
    LOGD0("cascadefile='%s'", cascadefile);

    // Load every frame matching the pattern until the first missing index.
    vector<Mat> images;
    {
        char filename[256];
        for(int n=1; ; n++)
        {
            snprintf(filename, sizeof(filename), filepattern, n);
            LOGD("filename='%s'", filename);
            Mat m0;
            m0=imread(filename);
            if (m0.empty())
            {
                LOGI0("Cannot read the file --- break");
                break;
            }
            images.push_back(m0);
        }
        LOGD("read %d images", (int)images.size());
    }

    // Two independent cascade instances: one for full detection, one for the
    // cheaper tracking-time verification.
    std::string cascadeFrontalfilename=cascadefile;
    cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
    cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);

    cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
    cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);

    DetectionBasedTracker::Parameters params;
    DetectionBasedTracker fd(MainDetector, TrackingDetector, params);

    fd.run();

    Mat gray;
    Mat m;

    int64 tprev=getTickCount();
    double freq=getTickFrequency();

    // NOTE(review): implicit size_t -> int narrowing; fine for sample-sized inputs.
    int num_images=images.size();
    for(int n=1; n <= num_images; n++)
    {
        // Per-frame timing (milliseconds since the previous frame).
        int64 tcur=getTickCount();
        int64 dt=tcur-tprev;
        tprev=tcur;
        double t_ms=((double)dt)/freq * 1000.0;
        LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms);
        m=images[n-1];
        CV_Assert(! m.empty());
        cvtColor(m, gray, COLOR_BGR2GRAY);

        fd.process(gray);

        // Draw each tracked rectangle onto the (shared) original frame.
        vector<Rect> result;
        fd.getObjects(result);

        for(size_t i=0; i < result.size(); i++)
        {
            Rect r=result[i];
            CV_Assert(r.area() > 0);
            Point tl=r.tl();
            Point br=r.br();
            Scalar color=Scalar(0, 250, 0);
            rectangle(m, tl, br, color, 3);
        }
    }

    // Write the annotated frames using the output pattern.
    char outfilename[256];
    for(int n=1; n <= num_images; n++)
    {
        snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
        LOGD("outfilename='%s'", outfilename);
        m=images[n-1];
        imwrite(outfilename, m);
    }

    fd.stop();

    return 0;
}
// Entry point: delegate straight to the detector test driver.
int main(int argc, char *argv[])
{
    const int rc = test_FaceDetector(argc, argv);
    return rc;
}
#else // #if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include <stdio.h>
// Stub entry point for unsupported platforms.
int main()
{
    fputs("This sample works for UNIX or ANDROID only\n", stdout);
    return 0;
}
#endif

View File

@@ -1,983 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/legacy.hpp"
#include <limits>
#include <cstdio>
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
// Root directory of the evaluation data; presumably assigned by code outside
// this chunk before BaseQualityEvaluator::run() is called — TODO confirm.
string data_path;
/****************************************************************************************\
* Functions to evaluate affine covariant detectors and descriptors. *
\****************************************************************************************/
// Maps `pt` through the 3x3 homography H. If the point maps to infinity
// (zero homogeneous scale), returns a point with FLT_MAX coordinates.
static inline Point2f applyHomography( const Mat_<double>& H, const Point2f& pt )
{
    const double denom = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2);
    if( denom == 0 )
        return Point2f( numeric_limits<float>::max(), numeric_limits<float>::max() );

    const double w = 1./denom;
    const float px = (float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w);
    const float py = (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w);
    return Point2f( px, py );
}
// Computes A, the 2x2 Jacobian of the homography H at point pt — i.e. the
// local affine approximation of the projective map. If pt maps to infinity,
// A is filled with DBL_MAX.
static inline void linearizeHomographyAt( const Mat_<double>& H, const Point2f& pt, Mat_<double>& A )
{
    A.create(2,2);
    double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2), // h11*x + h12*y + h13
           p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2), // h21*x + h22*y + h23
           p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2), // h31*x + h32*y + h33
           p3_2 = p3*p3;
    if( p3 )
    {
        A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx
        A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy
        A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx
        A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydy
    }
    else
        // Point at infinity: no meaningful linearization exists.
        A.setTo(Scalar::all(numeric_limits<double>::max()));
}
// Projects keypoints through homography H: the position is mapped by the
// homography itself, while size and orientation are transformed by its local
// affine approximation at each point.
static void calcKeyPointProjections( const vector<KeyPoint>& src, const Mat_<double>& H, vector<KeyPoint>& dst )
{
    if( !src.empty() )
    {
        CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
        dst.resize(src.size());
        vector<KeyPoint>::const_iterator srcIt = src.begin();
        vector<KeyPoint>::iterator dstIt = dst.begin();
        for( ; srcIt != src.end(); ++srcIt, ++dstIt )
        {
            Point2f dstPt = applyHomography(H, srcIt->pt);

            // Map the circular support region through the local affinity and
            // take the geometric mean of the resulting ellipse axes as the
            // projected keypoint size.
            float srcSize2 = srcIt->size * srcIt->size;
            Mat_<double> M(2, 2);
            M(0,0) = M(1,1) = 1./srcSize2;
            M(1,0) = M(0,1) = 0;
            Mat_<double> invM; invert(M, invM);
            Mat_<double> Aff; linearizeHomographyAt(H, srcIt->pt, Aff);
            Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
            Mat_<double> eval; eigen( dstM, eval );
            CV_Assert( eval(0,0) && eval(1,0) );
            float dstSize = (float)pow(1./(eval(0,0)*eval(1,0)), 0.25);

            // TODO: check angle projection
            // Rotate the orientation vector by the local affinity.
            float srcAngleRad = (float)(srcIt->angle*CV_PI/180);
            Point2f vec1(cos(srcAngleRad), sin(srcAngleRad)), vec2;
            vec2.x = (float)(Aff(0,0)*vec1.x + Aff(0,1)*vec1.y);
            // BUGFIX: the y component previously used Aff(0,1) instead of
            // Aff(1,1), producing wrong projected orientations.
            vec2.y = (float)(Aff(1,0)*vec1.x + Aff(1,1)*vec1.y);
            float dstAngleGrad = fastAtan2(vec2.y, vec2.x);

            *dstIt = KeyPoint( dstPt, dstSize, dstAngleGrad, srcIt->response, srcIt->octave, srcIt->class_id );
        }
    }
}
// Removes, in place, all keypoints whose centers fall outside the image
// rectangle [0, imgSize.width) x [0, imgSize.height).
static void filterKeyPointsByImageSize( vector<KeyPoint>& keypoints, const Size& imgSize )
{
    if( !keypoints.empty() )
    {
        vector<KeyPoint> filtered;
        filtered.reserve(keypoints.size());
        Rect r(0, 0, imgSize.width, imgSize.height);
        // (an unused index counter was removed from this loop)
        for( vector<KeyPoint>::const_iterator it = keypoints.begin(); it != keypoints.end(); ++it )
            if( r.contains(it->pt) )
                filtered.push_back(*it);
        keypoints.assign(filtered.begin(), filtered.end());
    }
}
/****************************************************************************************\
* Detectors evaluation *
\****************************************************************************************/
// Number of image datasets and of test pairs (img1 vs img2..img6) per dataset.
const int DATASETS_COUNT = 8;
const int TEST_CASE_COUNT = 5;

// Layout of the evaluation data directory, relative to data_path.
const string IMAGE_DATASETS_DIR = "detectors_descriptors_evaluation/images_datasets/";
const string DETECTORS_DIR = "detectors_descriptors_evaluation/detectors/";
const string DESCRIPTORS_DIR = "detectors_descriptors_evaluation/descriptors/";
const string KEYPOINTS_DIR = "detectors_descriptors_evaluation/keypoints_datasets/";
const string PARAMS_POSTFIX = "_params.xml";
const string RES_POSTFIX = "_res.xml";

// Field names used in result files.
const string REPEAT = "repeatability";
const string CORRESP_COUNT = "correspondence_count";

// Names of the affine-covariant-features image datasets.
string DATASET_NAMES[DATASETS_COUNT] = { "bark", "bikes", "boat", "graf", "leuven", "trees", "ubc", "wall"};

// Node names used in the run-params XML files.
string DEFAULT_PARAMS = "default";
string IS_ACTIVE_PARAMS = "isActiveParams";
string IS_SAVE_KEYPOINTS = "isSaveKeypoints";
// Shared driver for detector/descriptor quality evaluation: loads per-dataset
// run parameters, iterates over all datasets, and delegates the actual
// measurements to the pure-virtual hooks implemented by subclasses.
class BaseQualityEvaluator
{
public:
    BaseQualityEvaluator( const char* _algName, const char* _testName ) : algName(_algName), testName(_testName)
    {
        //TODO: change this
        isWriteGraphicsData = true;
    }

    // Runs the full evaluation over all datasets (defined below).
    void run();

    virtual ~BaseQualityEvaluator(){}

protected:
    // Locations of the run-parameter / result / plot files for this algorithm.
    virtual string getRunParamsFilename() const = 0;
    virtual string getResultsFilename() const = 0;
    virtual string getPlotPath() const = 0;

    // Per-dataset result bookkeeping.
    virtual void calcQualityClear( int datasetIdx ) = 0;
    virtual bool isCalcQualityEmpty( int datasetIdx ) const = 0;

    // Run-parameter (de)serialization; the *AllDatasets* helpers loop over
    // the per-dataset virtual hooks.
    void readAllDatasetsRunParams();
    virtual void readDatasetRunParams( FileNode& fn, int datasetIdx ) = 0;
    void writeAllDatasetsRunParams() const;
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const = 0;
    void setDefaultAllDatasetsRunParams();
    virtual void setDefaultDatasetRunParams( int datasetIdx ) = 0;
    virtual void readDefaultRunParams( FileNode& /*fn*/ ) {}
    virtual void writeDefaultRunParams( FileStorage& /*fs*/ ) const {}

    // Loads a dataset's homographies (H1to*p.xml) and images (img*.png).
    bool readDataset( const string& datasetName, vector<Mat>& Hs, vector<Mat>& imgs );

    virtual void readAlgorithm() {}
    virtual void processRunParamsFile() {}
    virtual void runDatasetTest( const vector<Mat>& /*imgs*/, const vector<Mat>& /*Hs*/, int /*di*/, int& /*progress*/ ) {}

    virtual void processResults( int datasetIdx );
    virtual void processResults();
    virtual void writePlotData( int /*datasetIdx*/ ) const {}

    string algName, testName;
    // NOTE(review): isWriteParams is not initialized here; it is assigned in
    // run() and readAllDatasetsRunParams() before being read.
    bool isWriteParams, isWriteGraphicsData;
};
// Loads run parameters for every dataset from the params XML file.
// If the file is missing, all datasets fall back to defaults and
// isWriteParams is set so the defaults get persisted later; individual
// missing dataset nodes also fall back to defaults.
void BaseQualityEvaluator::readAllDatasetsRunParams()
{
    string filename = getRunParamsFilename();
    FileStorage fs( filename, FileStorage::READ );
    if( !fs.isOpened() )
    {
        isWriteParams = true;
        setDefaultAllDatasetsRunParams();
        printf("All runParams are default.\n");
    }
    else
    {
        isWriteParams = false;

        // Shared defaults first, then per-dataset overrides.
        FileNode topfn = fs.getFirstTopLevelNode();

        FileNode pfn = topfn[DEFAULT_PARAMS];
        readDefaultRunParams(pfn);

        for( int i = 0; i < DATASETS_COUNT; i++ )
        {
            FileNode fn = topfn[DATASET_NAMES[i]];
            if( fn.empty() )
            {
                printf( "%d-runParams is default.\n", i);
                setDefaultDatasetRunParams(i);
            }
            else
                readDatasetRunParams(fn, i);
        }
    }
}
// Writes the default run parameters followed by one node per dataset into the
// params XML file. Prints a diagnostic if the file cannot be opened.
void BaseQualityEvaluator::writeAllDatasetsRunParams() const
{
    string filename = getRunParamsFilename();
    FileStorage fs( filename, FileStorage::WRITE );
    if( fs.isOpened() )
    {
        fs << "run_params" << "{"; // top file node
        fs << DEFAULT_PARAMS << "{";
        writeDefaultRunParams(fs);
        fs << "}";
        for( int i = 0; i < DATASETS_COUNT; i++ )
        {
            fs << DATASET_NAMES[i] << "{";
            writeDatasetRunParams(fs, i);
            fs << "}";
        }
        fs << "}";
    }
    else
        printf( "File %s for writing run params can not be opened.\n", filename.c_str() );
}
// Resets the run parameters of every dataset to their defaults.
void BaseQualityEvaluator::setDefaultAllDatasetsRunParams()
{
    for( int di = 0; di < DATASETS_COUNT; ++di )
        setDefaultDatasetRunParams(di);
}
// Loads dataset <datasetName>: TEST_CASE_COUNT homographies (H1to{2..6}p.xml)
// and TEST_CASE_COUNT+1 grayscale images (img{1..6}.png) from
// <data_path>/<IMAGE_DATASETS_DIR>/<datasetName>/.
// Returns false (after printing the offending filename) on the first file
// that cannot be read.
bool BaseQualityEvaluator::readDataset( const string& datasetName, vector<Mat>& Hs, vector<Mat>& imgs )
{
    Hs.resize( TEST_CASE_COUNT );
    imgs.resize( TEST_CASE_COUNT+1 );
    string dirname = data_path + IMAGE_DATASETS_DIR + datasetName + "/";

    for( int i = 0; i < (int)Hs.size(); i++ )
    {
        stringstream filename; filename << "H1to" << i+2 << "p.xml";
        FileStorage fs( dirname + filename.str(), FileStorage::READ );
        if( !fs.isOpened() )
        {
            cout << "filename " << dirname + filename.str() << endl;
            // (removed a leftover second FileStorage open here — it was
            // constructed, never used, and immediately discarded)
            return false;
        }
        fs.getFirstTopLevelNode() >> Hs[i];
    }

    for( int i = 0; i < (int)imgs.size(); i++ )
    {
        stringstream filename; filename << "img" << i+1 << ".png";
        imgs[i] = imread( dirname + filename.str(), 0 );
        if( imgs[i].empty() )
        {
            cout << "filename " << filename.str() << endl;
            return false;
        }
    }
    return true;
}
// Per-dataset post-processing: emits plot data when graphics output is on.
void BaseQualityEvaluator::processResults( int datasetIdx )
{
    if( !isWriteGraphicsData )
        return;
    writePlotData( datasetIdx );
}

// Global post-processing: persists the run parameters when they were
// defaulted because no params file existed.
void BaseQualityEvaluator::processResults()
{
    if( !isWriteParams )
        return;
    writeAllDatasetsRunParams();
}
// Full evaluation loop: loads the algorithm and its run parameters, then for
// each dataset reads the images/homographies, runs the measurement hook and
// the per-dataset post-processing. Exits with -1 if no dataset could be read.
void BaseQualityEvaluator::run()
{
    readAlgorithm ();
    processRunParamsFile ();

    int notReadDatasets = 0;
    int progress = 0;

    FileStorage runParamsFS( getRunParamsFilename(), FileStorage::READ );
    isWriteParams = (! runParamsFS.isOpened());
    // NOTE(review): when the params file is missing, getFirstTopLevelNode()
    // is still called on the unopened storage; the nodes read below are then
    // presumably empty so defaults apply — confirm this is intended.
    FileNode topfn = runParamsFS.getFirstTopLevelNode();

    FileNode defaultParams = topfn[DEFAULT_PARAMS];
    readDefaultRunParams (defaultParams);

    cout << testName << endl;
    for(int di = 0; di < DATASETS_COUNT; di++ )
    {
        cout << "Dataset " << di << " [" << DATASET_NAMES[di] << "] " << flush;
        vector<Mat> imgs, Hs;
        if( !readDataset( DATASET_NAMES[di], Hs, imgs ) )
        {
            calcQualityClear (di);
            printf( "Images or homography matrices of dataset named %s can not be read\n",
                DATASET_NAMES[di].c_str());
            notReadDatasets++;
            continue;
        }
        FileNode fn = topfn[DATASET_NAMES[di]];
        readDatasetRunParams(fn, di);

        runDatasetTest (imgs, Hs, di, progress);
        processResults( di );
        cout << endl;
    }
    if( notReadDatasets == DATASETS_COUNT )
    {
        printf( "All datasets were not be read\n");
        exit(-1);
    }
    else
        processResults();
    runParamsFS.release();
}
// Evaluates feature detectors: measures repeatability and correspondence
// count of detected keypoints across each dataset's image/homography pairs.
class DetectorQualityEvaluator : public BaseQualityEvaluator
{
public:
    DetectorQualityEvaluator( const char* _detectorName, const char* _testName ) : BaseQualityEvaluator( _detectorName, _testName )
    {
        calcQuality.resize(DATASETS_COUNT);
        isSaveKeypoints.resize(DATASETS_COUNT);
        isActiveParams.resize(DATASETS_COUNT);

        isSaveKeypointsDefault = false;
        isActiveParamsDefault = false;
    }

protected:
    // File locations (see definitions below).
    virtual string getRunParamsFilename() const;
    virtual string getResultsFilename() const;
    virtual string getPlotPath() const;

    virtual void calcQualityClear( int datasetIdx );
    virtual bool isCalcQualityEmpty( int datasetIdx ) const;

    virtual void readDatasetRunParams( FileNode& fn, int datasetIdx );
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const;
    virtual void setDefaultDatasetRunParams( int datasetIdx );
    virtual void readDefaultRunParams( FileNode &fn );
    virtual void writeDefaultRunParams( FileStorage &fs ) const;

    virtual void writePlotData( int di ) const;
    void openToWriteKeypointsFile( FileStorage& fs, int datasetIdx );

    virtual void readAlgorithm();
    virtual void processRunParamsFile() {}
    virtual void runDatasetTest( const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress );

    // Detector with dataset-specific params vs. detector with shared defaults.
    Ptr<FeatureDetector> specificDetector;
    Ptr<FeatureDetector> defaultDetector;

    // One Quality entry per test case (image pair) of each dataset.
    struct Quality
    {
        float repeatability;
        int correspondenceCount;
    };
    vector<vector<Quality> > calcQuality;

    vector<bool> isSaveKeypoints;
    vector<bool> isActiveParams;

    bool isSaveKeypointsDefault;
    bool isActiveParamsDefault;
};
// Per-detector file locations under the evaluation data root.
string DetectorQualityEvaluator::getRunParamsFilename() const
{
    const string dir = data_path + DETECTORS_DIR;
    return dir + algName + PARAMS_POSTFIX;
}

string DetectorQualityEvaluator::getResultsFilename() const
{
    const string dir = data_path + DETECTORS_DIR;
    return dir + algName + RES_POSTFIX;
}

string DetectorQualityEvaluator::getPlotPath() const
{
    const string dir = data_path + DETECTORS_DIR;
    return dir + "plots/";
}
// Discards every recorded quality measurement for the given dataset.
void DetectorQualityEvaluator::calcQualityClear( int datasetIdx )
{
    calcQuality[datasetIdx].clear();
}

// True when the given dataset has no recorded quality measurements.
bool DetectorQualityEvaluator::isCalcQualityEmpty( int datasetIdx ) const
{
    return calcQuality[datasetIdx].size() == 0;
}
// Reads the shared default parameters (save-keypoints flag plus the default
// detector's own settings); a missing node leaves the constructor defaults.
void DetectorQualityEvaluator::readDefaultRunParams (FileNode &fn)
{
    if (! fn.empty() )
    {
        isSaveKeypointsDefault = (int)fn[IS_SAVE_KEYPOINTS] != 0;
        defaultDetector->read (fn);
    }
}

// Persists the shared default parameters; write order must match the reader.
void DetectorQualityEvaluator::writeDefaultRunParams (FileStorage &fs) const
{
    fs << IS_SAVE_KEYPOINTS << isSaveKeypointsDefault;
    defaultDetector->write (fs);
}
// Reads dataset-specific parameters; when the node is not marked active, the
// evaluator-wide defaults are used instead.
void DetectorQualityEvaluator::readDatasetRunParams( FileNode& fn, int datasetIdx )
{
    isActiveParams[datasetIdx] = (int)fn[IS_ACTIVE_PARAMS] != 0;
    if (isActiveParams[datasetIdx])
    {
        isSaveKeypoints[datasetIdx] = (int)fn[IS_SAVE_KEYPOINTS] != 0;
        specificDetector->read (fn);
    }
    else
    {
        setDefaultDatasetRunParams(datasetIdx);
    }
}

// Persists per-dataset parameters.
// NOTE(review): this writes defaultDetector's settings even when the dataset
// uses specificDetector — confirm whether specificDetector->write was meant.
void DetectorQualityEvaluator::writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const
{
    fs << IS_ACTIVE_PARAMS << isActiveParams[datasetIdx];
    fs << IS_SAVE_KEYPOINTS << isSaveKeypoints[datasetIdx];

    defaultDetector->write (fs);
}

// Falls back to the evaluator-wide defaults for this dataset.
void DetectorQualityEvaluator::setDefaultDatasetRunParams( int datasetIdx )
{
    isSaveKeypoints[datasetIdx] = isSaveKeypointsDefault;
    isActiveParams[datasetIdx] = isActiveParamsDefault;
}
// Writes two CSV files (repeatability and correspondence count vs. the
// dataset's transformation magnitude) for external plotting. The x-axis
// values depend on what the dataset varies: JPEG quality for "ubc",
// viewpoint angle for "graf"/"wall", image index otherwise.
void DetectorQualityEvaluator::writePlotData(int di ) const
{
    int imgXVals[] = { 2, 3, 4, 5, 6 }; // if scale, blur or light changes
    int viewpointXVals[] = { 20, 30, 40, 50, 60 }; // if viewpoint changes
    int jpegXVals[] = { 60, 80, 90, 95, 98 }; // if jpeg compression

    int* xVals = 0;
    if( !DATASET_NAMES[di].compare("ubc") )
    {
        xVals = jpegXVals;
    }
    else if( !DATASET_NAMES[di].compare("graf") || !DATASET_NAMES[di].compare("wall") )
    {
        xVals = viewpointXVals;
    }
    else
        xVals = imgXVals;

    stringstream rFilename, cFilename;
    rFilename << getPlotPath() << algName << "_" << DATASET_NAMES[di]  << "_repeatability.csv";
    cFilename << getPlotPath() << algName << "_" << DATASET_NAMES[di]  << "_correspondenceCount.csv";
    ofstream rfile(rFilename.str().c_str()), cfile(cFilename.str().c_str());
    for( int ci = 0; ci < TEST_CASE_COUNT; ci++ )
    {
        rfile << xVals[ci] << ", " << calcQuality[di][ci].repeatability << endl;
        cfile << xVals[ci] << ", " << calcQuality[di][ci].correspondenceCount << endl;
    }
}
void DetectorQualityEvaluator::openToWriteKeypointsFile( FileStorage& fs, int datasetIdx )
{
    // Compressed XML storage named after the algorithm and the dataset.
    const string filename = data_path + KEYPOINTS_DIR + algName + "_"+ DATASET_NAMES[datasetIdx] + ".xml.gz" ;
    fs.open(filename, FileStorage::WRITE);
    if( fs.isOpened() )
        return;
    printf( "keypoints can not be written in file %s because this file can not be opened\n", filename.c_str() );
}
inline void writeKeypoints( FileStorage& fs, const vector<KeyPoint>& keypoints, int imgIdx )
{
    // Saving keypoints is optional: silently skip when the storage was
    // never opened.
    if( !fs.isOpened() )
        return;
    stringstream nodeName;
    nodeName << "img" << imgIdx;
    write( fs, nodeName.str(), keypoints );
}
inline void readKeypoints( FileStorage& fs, vector<KeyPoint>& keypoints, int imgIdx )
{
    // Unlike writing, reading keypoints is mandatory, so an unopened
    // storage is a hard error.
    CV_Assert( fs.isOpened() );
    stringstream nodeName;
    nodeName << "img" << imgIdx;
    read( fs[nodeName.str()], keypoints);
}
void DetectorQualityEvaluator::readAlgorithm ()
{
    // Two detector instances are created from the same algorithm name;
    // they diverge later when default vs dataset-specific params are read.
    defaultDetector = FeatureDetector::create( algName );
    specificDetector = FeatureDetector::create( algName );
    if( defaultDetector )
        return;
    printf( "Algorithm can not be read\n" );
    exit(-1);
}
static int update_progress( const string& /*name*/, int progress, int test_case_idx, int count, double dt )
{
    // Print one dot per "width"-th of completed work and return the updated
    // progress value. When the total count is unknown, fall back to the
    // elapsed time 'dt' as the progress measure.
    const int width = 60 /*- (int)name.length()*/;
    int next;
    if( count > 0 )
        next = cvRound( ((double)test_case_idx * width)/count );
    else
        next = cvRound(dt);
    if( next > progress )
    {
        cout << "." << flush;
        return next;
    }
    return progress;
}
void DetectorQualityEvaluator::runDatasetTest (const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress)
{
    // Measure detector repeatability on dataset 'di'. imgs[0] is the
    // reference image, imgs[ci+1] the transformed ones, Hs[ci] the
    // corresponding ground-truth homographies.
    Ptr<FeatureDetector> detector = isActiveParams[di] ? specificDetector : defaultDetector;
    FileStorage keypontsFS;
    if( isSaveKeypoints[di] )
        openToWriteKeypointsFile( keypontsFS, di );
    calcQuality[di].resize(TEST_CASE_COUNT);
    vector<KeyPoint> keypoints1;
    detector->detect( imgs[0], keypoints1 );
    // writeKeypoints is a no-op when the storage was not opened above.
    writeKeypoints( keypontsFS, keypoints1, 0);
    int progressCount = DATASETS_COUNT*TEST_CASE_COUNT;
    for( int ci = 0; ci < TEST_CASE_COUNT; ci++ )
    {
        progress = update_progress( testName, progress, di*TEST_CASE_COUNT + ci + 1, progressCount, 0 );
        vector<KeyPoint> keypoints2;
        float rep;
        evaluateFeatureDetector( imgs[0], imgs[ci+1], Hs[ci], &keypoints1, &keypoints2,
                                 rep, calcQuality[di][ci].correspondenceCount,
                                 detector );
        // -1 marks "repeatability undefined"; otherwise convert the ratio
        // to percent.
        calcQuality[di][ci].repeatability = rep == -1 ? rep : 100.f*rep;
        writeKeypoints( keypontsFS, keypoints2, ci+1);
    }
}
// static void testLog( bool isBadAccuracy )
// {
// if( isBadAccuracy )
// printf(" bad accuracy\n");
// else
// printf("\n");
// }
/****************************************************************************************\
* Descriptors evaluation *
\****************************************************************************************/
// Node names used in the descriptor-evaluation parameter/result storages.
const string RECALL = "recall";
const string PRECISION = "precision";
const string KEYPOINTS_FILENAME = "keypointsFilename";
const string PROJECT_KEYPOINTS_FROM_1IMAGE = "projectKeypointsFrom1Image";
const string MATCH_FILTER = "matchFilter";
const string RUN_PARAMS_IS_IDENTICAL = "runParamsIsIdentical";
// Train-data locations for the one-way descriptor test.
const string ONE_WAY_TRAIN_DIR = "detectors_descriptors_evaluation/one_way_train_images/";
const string ONE_WAY_IMAGES_LIST = "one_way_train_images.txt";
// Evaluates descriptor matching quality (recall/precision curves) on the
// image datasets; the descriptor-side counterpart of DetectorQualityEvaluator.
class DescriptorQualityEvaluator : public BaseQualityEvaluator
{
public:
    enum{ NO_MATCH_FILTER = 0 };
    // _matcherName is used only when algName does not resolve directly to a
    // GenericDescriptorMatcher (see readAlgorithm()).
    DescriptorQualityEvaluator( const char* _descriptorName, const char* _testName, const char* _matcherName = 0 ) :
        BaseQualityEvaluator( _descriptorName, _testName )
    {
        calcQuality.resize(DATASETS_COUNT);
        calcDatasetQuality.resize(DATASETS_COUNT);
        commRunParams.resize(DATASETS_COUNT);

        commRunParamsDefault.projectKeypointsFrom1Image = true;
        commRunParamsDefault.matchFilter = NO_MATCH_FILTER;
        commRunParamsDefault.isActiveParams = false;

        if( _matcherName )
            matcherName = _matcherName;
    }

protected:
    virtual string getRunParamsFilename() const;
    virtual string getResultsFilename() const;
    virtual string getPlotPath() const;

    virtual void calcQualityClear( int datasetIdx );
    virtual bool isCalcQualityEmpty( int datasetIdx ) const;

    virtual void readDatasetRunParams( FileNode& fn, int datasetIdx ); //
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const;
    virtual void setDefaultDatasetRunParams( int datasetIdx );
    virtual void readDefaultRunParams( FileNode &fn );
    virtual void writeDefaultRunParams( FileStorage &fs ) const;

    virtual void readAlgorithm();
    virtual void processRunParamsFile() {}
    virtual void runDatasetTest( const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress );
    virtual void writePlotData( int di ) const;

    void calculatePlotData( vector<vector<DMatch> > &allMatches, vector<vector<uchar> > &allCorrectMatchesMask, int di );

    // Summary recall/precision pair for one test case or curve point.
    struct Quality
    {
        float recall;
        float precision;
    };
    vector<vector<Quality> > calcQuality;        // [dataset][testcase] summary values
    vector<vector<Quality> > calcDatasetQuality; // [dataset][point] full curve (see calculatePlotData)

    struct CommonRunParams
    {
        string keypontsFilename;         // keypoint storage to read (historical misspelling kept for file compat)
        bool projectKeypointsFrom1Image; // project reference keypoints by H instead of re-reading them
        int matchFilter; // not used now
        bool isActiveParams;             // honour this struct instead of the defaults
    };
    vector<CommonRunParams> commRunParams;

    Ptr<GenericDescriptorMatch> specificDescMatcher;
    Ptr<GenericDescriptorMatch> defaultDescMatcher;

    CommonRunParams commRunParamsDefault;
    string matcherName;
};
string DescriptorQualityEvaluator::getRunParamsFilename() const
{
    // Run-parameter file lives in the descriptors data directory.
    return data_path + DESCRIPTORS_DIR + algName + PARAMS_POSTFIX;
}
string DescriptorQualityEvaluator::getResultsFilename() const
{
    // Results file lives next to the run-parameter file.
    return data_path + DESCRIPTORS_DIR + algName + RES_POSTFIX;
}
string DescriptorQualityEvaluator::getPlotPath() const
{
    // CSV plot files are collected in a dedicated plots/ subdirectory.
    return data_path + DESCRIPTORS_DIR + "plots/";
}
void DescriptorQualityEvaluator::calcQualityClear( int datasetIdx )
{
    // Drop previously computed results for this dataset.
    calcQuality[datasetIdx].clear();
}
bool DescriptorQualityEvaluator::isCalcQualityEmpty( int datasetIdx ) const
{
    // True when no results have been computed (or they were cleared).
    return calcQuality[datasetIdx].empty();
}
void DescriptorQualityEvaluator::readDefaultRunParams (FileNode &fn)
{
    // A missing node means "keep the compile-time defaults".
    if( fn.empty() )
        return;
    commRunParamsDefault.projectKeypointsFrom1Image = (int)fn[PROJECT_KEYPOINTS_FROM_1IMAGE] != 0;
    commRunParamsDefault.matchFilter = (int)fn[MATCH_FILTER];
    defaultDescMatcher->read (fn);
}
void DescriptorQualityEvaluator::writeDefaultRunParams (FileStorage &fs) const
{
    // Persist the dataset-independent defaults plus the default matcher's
    // own parameters.
    fs << PROJECT_KEYPOINTS_FROM_1IMAGE << commRunParamsDefault.projectKeypointsFrom1Image;
    fs << MATCH_FILTER << commRunParamsDefault.matchFilter;
    defaultDescMatcher->write (fs);
}
void DescriptorQualityEvaluator::readDatasetRunParams( FileNode& fn, int datasetIdx )
{
    // Dataset-specific settings are honoured only when the "active" flag is
    // set in the file; otherwise the stored defaults are copied in.
    const bool active = (int)fn[IS_ACTIVE_PARAMS] != 0;
    commRunParams[datasetIdx].isActiveParams = active;
    if( !active )
    {
        setDefaultDatasetRunParams(datasetIdx);
        return;
    }
    commRunParams[datasetIdx].keypontsFilename = (string)fn[KEYPOINTS_FILENAME];
    commRunParams[datasetIdx].projectKeypointsFrom1Image = (int)fn[PROJECT_KEYPOINTS_FROM_1IMAGE] != 0;
    commRunParams[datasetIdx].matchFilter = (int)fn[MATCH_FILTER];
    specificDescMatcher->read (fn);
}
void DescriptorQualityEvaluator::writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const
{
    // Store the per-dataset flags and filenames.
    fs << IS_ACTIVE_PARAMS << commRunParams[datasetIdx].isActiveParams;
    fs << KEYPOINTS_FILENAME << commRunParams[datasetIdx].keypontsFilename;
    fs << PROJECT_KEYPOINTS_FROM_1IMAGE << commRunParams[datasetIdx].projectKeypointsFrom1Image;
    fs << MATCH_FILTER << commRunParams[datasetIdx].matchFilter;
    // NOTE(review): the *default* matcher's parameters are written even when
    // dataset-specific parameters are active - confirm this is intentional
    // (readDatasetRunParams reads into specificDescMatcher).
    defaultDescMatcher->write (fs);
}
void DescriptorQualityEvaluator::setDefaultDatasetRunParams( int datasetIdx )
{
    // Copy the defaults, then override the keypoint filename: keypoints are
    // always taken from the precomputed SURF keypoint storage of the dataset.
    commRunParams[datasetIdx] = commRunParamsDefault;
    commRunParams[datasetIdx].keypontsFilename = "SURF_" + DATASET_NAMES[datasetIdx] + ".xml.gz";
}
void DescriptorQualityEvaluator::writePlotData( int di ) const
{
    // Dump dataset 'di's (1-precision, recall) curve as CSV.
    stringstream filename;
    filename << getPlotPath() << algName << "_" << DATASET_NAMES[di] << ".csv";
    FILE *file = fopen (filename.str().c_str(), "w");
    // fopen may fail (e.g. missing plots/ directory); the original code
    // would then crash inside fprintf, so report and bail out instead.
    if( !file )
    {
        printf( "plot data can not be written in file %s because this file can not be opened\n",
                filename.str().c_str() );
        return;
    }
    size_t size = calcDatasetQuality[di].size();
    for (size_t i=0;i<size;i++)
    {
        fprintf( file, "%f, %f\n", 1 - calcDatasetQuality[di][i].precision, calcDatasetQuality[di][i].recall);
    }
    fclose( file );
}
void DescriptorQualityEvaluator::readAlgorithm( )
{
    // First try to resolve the algorithm name as a GenericDescriptorMatcher.
    defaultDescMatcher = GenericDescriptorMatcher::create( algName );
    specificDescMatcher = GenericDescriptorMatcher::create( algName );

    if( !defaultDescMatcher )
    {
        // Fall back to a plain extractor + matcher pipeline.
        Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create( algName );
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( matcherName );
        // Validate the factories BEFORE wrapping them: the original code
        // constructed VectorDescriptorMatch around possibly-null components
        // first and only checked afterwards.
        if( !extractor || !matcher )
        {
            printf("Algorithm can not be read\n");
            exit(-1);
        }
        defaultDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
        specificDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
    }
}
void DescriptorQualityEvaluator::calculatePlotData( vector<vector<DMatch> > &allMatches, vector<vector<uchar> > &allCorrectMatchesMask, int di )
{
    // Build a deduplicated recall/precision curve for dataset 'di' from the
    // accumulated matches of all its images, and record the first point whose
    // precision drops below 0.5 as the dataset's summary quality.
    vector<Point2f> recallPrecisionCurve;
    computeRecallPrecisionCurve( allMatches, allCorrectMatchesMask, recallPrecisionCurve );
    calcDatasetQuality[di].clear();
    const float resultPrecision = 0.5;
    bool isResultCalculated = false;
    const double eps = 1e-2; // points closer than this in both axes are merged
    Quality initQuality;
    initQuality.recall = 0;
    initQuality.precision = 0;
    calcDatasetQuality[di].push_back( initQuality );
    for( size_t i=0;i<recallPrecisionCurve.size();i++ )
    {
        Quality quality;
        // The curve stores (1-precision, recall) in (x, y).
        quality.recall = recallPrecisionCurve[i].y;
        quality.precision = 1 - recallPrecisionCurve[i].x;
        Quality back = calcDatasetQuality[di].back();
        // Skip points (almost) identical to the previously kept one.
        if( fabs( quality.recall - back.recall ) < eps && fabs( quality.precision - back.precision ) < eps )
            continue;
        calcDatasetQuality[di].push_back( quality );
        if( !isResultCalculated && quality.precision < resultPrecision )
        {
            // Replicate the summary value for every test case of the dataset.
            for(int ci=0;ci<TEST_CASE_COUNT;ci++)
            {
                calcQuality[di][ci].recall = quality.recall;
                calcQuality[di][ci].precision = quality.precision;
            }
            isResultCalculated = true;
        }
    }
}
void DescriptorQualityEvaluator::runDatasetTest (const vector<Mat> &imgs, const vector<Mat> &Hs, int di, int &progress)
{
    // Measure matching quality on dataset 'di'. Reference-image keypoints are
    // always read from the precomputed keypoint storage.
    FileStorage keypontsFS( data_path + KEYPOINTS_DIR + commRunParams[di].keypontsFilename, FileStorage::READ );
    if( !keypontsFS.isOpened())
    {
        calcQuality[di].clear();
        printf( "keypoints from file %s can not be read\n", commRunParams[di].keypontsFilename.c_str() );
        return;
    }
    Ptr<GenericDescriptorMatcher> descMatch = commRunParams[di].isActiveParams ? specificDescMatcher : defaultDescMatcher;
    calcQuality[di].resize(TEST_CASE_COUNT);
    vector<KeyPoint> keypoints1;
    readKeypoints( keypontsFS, keypoints1, 0);
    int progressCount = DATASETS_COUNT*TEST_CASE_COUNT;
    vector<vector<DMatch> > allMatches1to2;
    vector<vector<uchar> > allCorrectMatchesMask;
    for( int ci = 0; ci < TEST_CASE_COUNT; ci++ )
    {
        progress = update_progress( testName, progress, di*TEST_CASE_COUNT + ci + 1, progressCount, 0 );
        vector<KeyPoint> keypoints2;
        // Second-image keypoints either come from projecting the reference
        // keypoints through the ground-truth homography, or are re-read from
        // the keypoint storage.
        if( commRunParams[di].projectKeypointsFrom1Image )
        {
            // TODO need to test function calcKeyPointProjections
            calcKeyPointProjections( keypoints1, Hs[ci], keypoints2 );
            filterKeyPointsByImageSize( keypoints2, imgs[ci+1].size() );
        }
        else
            readKeypoints( keypontsFS, keypoints2, ci+1 );
        // TODO if( commRunParams[di].matchFilter )
        // Per-image matches are accumulated; the recall/precision curve is
        // computed once over the whole dataset in calculatePlotData().
        vector<vector<DMatch> > matches1to2;
        vector<vector<uchar> > correctMatchesMask;
        vector<Point2f> recallPrecisionCurve; // not used because we need recallPrecisionCurve for
                                              // all images in dataset
        evaluateGenericDescriptorMatcher( imgs[0], imgs[ci+1], Hs[ci], keypoints1, keypoints2,
                                          &matches1to2, &correctMatchesMask, recallPrecisionCurve,
                                          descMatch );
        allMatches1to2.insert( allMatches1to2.end(), matches1to2.begin(), matches1to2.end() );
        allCorrectMatchesMask.insert( allCorrectMatchesMask.end(), correctMatchesMask.begin(), correctMatchesMask.end() );
    }
    calculatePlotData( allMatches1to2, allCorrectMatchesMask, di );
}
//--------------------------------- Calonder descriptor test --------------------------------------------
// Calonder variant: overrides algorithm creation because the Calonder
// extractor needs a pretrained classifier file instead of a factory name.
class CalonderDescriptorQualityEvaluator : public DescriptorQualityEvaluator
{
public:
    CalonderDescriptorQualityEvaluator() :
        DescriptorQualityEvaluator( "Calonder", "quality-descriptor-calonder") {}
    virtual void readAlgorithm( )
    {
        string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
        Ptr<DescriptorExtractor> extractor = makePtr<CalonderDescriptorExtractor<float> >( classifierFile );
        defaultDescMatcher = makePtr<VectorDescriptorMatch>(
            extractor,
            makePtr<BFMatcher>(extractor->defaultNorm()));
        // Both matchers share the same instance: dataset-specific params are
        // not supported for this evaluator.
        specificDescMatcher = defaultDescMatcher;
    }
};
//--------------------------------- One Way descriptor test --------------------------------------------
// One-way descriptor variant: the matcher is built from a trained
// OneWayDescriptorBase in processRunParamsFile() rather than by factory.
class OneWayDescriptorQualityTest : public DescriptorQualityEvaluator
{
public:
    OneWayDescriptorQualityTest() :
        DescriptorQualityEvaluator("ONEWAY", "quality-descriptor-one-way")
    {
    }
protected:
    virtual void processRunParamsFile ();
    virtual void writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const;
};
void OneWayDescriptorQualityTest::processRunParamsFile ()
{
    // Build the one-way matcher from the "default params" section of the
    // run-params file (PCA data, train image path/list, patch geometry).
    string filename = getRunParamsFilename();
    FileStorage fs = FileStorage (filename, FileStorage::READ);
    FileNode fn = fs.getFirstTopLevelNode();
    fn = fn[DEFAULT_PARAMS];
    string pcaFilename = data_path + (string)fn["pcaFilename"];
    string trainPath = data_path + (string)fn["trainPath"];
    string trainImagesList = (string)fn["trainImagesList"];
    int patch_width = fn["patchWidth"];
    int patch_height = fn["patchHeight"];
    Size patchSize = cvSize (patch_width, patch_height);
    int poseCount = fn["poseCount"];
    // NOTE(review): when the params file is missing or the image list is
    // empty, defaultDescMatcher is left unset here - confirm callers
    // tolerate that.
    if (trainImagesList.length () == 0 )
        return;
    fs.release ();
    readAllDatasetsRunParams();
    Ptr<OneWayDescriptorBase> base(
        new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
                                 trainPath, trainImagesList));
    Ptr<OneWayDescriptorMatch> match = makePtr<OneWayDescriptorMatch>();
    match->initialize( OneWayDescriptorMatch::Params (), base );
    defaultDescMatcher = match;
    writeAllDatasetsRunParams();
}
void OneWayDescriptorQualityTest::writeDatasetRunParams( FileStorage& fs, int datasetIdx ) const
{
    // Same as the base implementation, but the matcher's own parameters are
    // not written (the one-way matcher is configured from the params file in
    // processRunParamsFile instead).
    fs << IS_ACTIVE_PARAMS << commRunParams[datasetIdx].isActiveParams;
    fs << KEYPOINTS_FILENAME << commRunParams[datasetIdx].keypontsFilename;
    fs << PROJECT_KEYPOINTS_FROM_1IMAGE << commRunParams[datasetIdx].projectKeypointsFrom1Image;
    fs << MATCH_FILTER << commRunParams[datasetIdx].matchFilter;
}
int main( int argc, char** argv )
{
    // Entry point: argv[1] must point to the testdata/cv directory; every
    // evaluator then loads its parameters, runs the datasets and stores
    // its results and plot data there.
    if( argc != 2 )
    {
        cout << "Format: " << argv[0] << " testdata path (path to testdata/cv)" << endl;
        return -1;
    }

    data_path = argv[1];
    // Guard against an empty argument: *data_path.rbegin() below would be
    // undefined behaviour on an empty string.
    if( data_path.empty() )
    {
        cout << "Format: " << argv[0] << " testdata path (path to testdata/cv)" << endl;
        return -1;
    }
#ifdef WIN32
    if( *data_path.rbegin() != '\\' )
        data_path = data_path + "\\";
#else
    if( *data_path.rbegin() != '/' )
        data_path = data_path + "/";
#endif

    Ptr<BaseQualityEvaluator> evals[] =
    {
        makePtr<DetectorQualityEvaluator>( "FAST", "quality-detector-fast" ),
        makePtr<DetectorQualityEvaluator>( "GFTT", "quality-detector-gftt" ),
        makePtr<DetectorQualityEvaluator>( "HARRIS", "quality-detector-harris" ),
        makePtr<DetectorQualityEvaluator>( "MSER", "quality-detector-mser" ),
        makePtr<DetectorQualityEvaluator>( "STAR", "quality-detector-star" ),
        makePtr<DetectorQualityEvaluator>( "SIFT", "quality-detector-sift" ),
        makePtr<DetectorQualityEvaluator>( "SURF", "quality-detector-surf" ),

        makePtr<DescriptorQualityEvaluator>( "SIFT", "quality-descriptor-sift", "BruteForce" ),
        makePtr<DescriptorQualityEvaluator>( "SURF", "quality-descriptor-surf", "BruteForce" ),
        makePtr<DescriptorQualityEvaluator>( "FERN", "quality-descriptor-fern"),
        makePtr<CalonderDescriptorQualityEvaluator>()
    };

    for( size_t i = 0; i < sizeof(evals)/sizeof(evals[0]); i++ )
    {
        evals[i]->run();
        cout << endl;
    }
    return 0;
}

View File

@@ -1,346 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <sys/stat.h>
#include <limits>
#include <cstdio>
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
/*
The algorithm:
for each tested combination of detector+descriptor+matcher:
create detector, descriptor and matcher,
load their params if they are there, otherwise use the default ones and save them
for each dataset:
load reference image
detect keypoints in it, compute descriptors
for each transformed image:
load the image
load the transformation matrix
detect keypoints in it too, compute descriptors
find matches
transform keypoints from the first image using the ground-truth matrix
compute the number of matched keypoints, i.e. for each pair (i,j) found by a matcher compare
j-th keypoint from the second image with the transformed i-th keypoint. If they are close, +1.
so, we have:
N - number of keypoints in the first image that are also visible
(after transformation) on the second image
N1 - number of keypoints in the first image that have been matched.
n - number of the correct matches found by the matcher
n/N1 - precision
n/N - recall (?)
we store (N, n/N1, n/N) (where N is stored primarily for tuning the detector's thresholds,
in order to semi-equalize their keypoints counts)
*/
typedef Vec3f TVec; // per-image result triple (N, n/N1, n/N) - see the algorithm description above
static void saveloadDDM( const string& params_filename,
                         Ptr<FeatureDetector>& detector,
                         Ptr<DescriptorExtractor>& descriptor,
                         Ptr<DescriptorMatcher>& matcher )
{
    // Load detector/descriptor/matcher settings from the params file when it
    // exists; otherwise create the file from the current (default) settings.
    FileStorage fs(params_filename, FileStorage::READ);
    if( !fs.isOpened() )
    {
        fs.open(params_filename, FileStorage::WRITE);
        fs << "detector" << "{";
        detector->write(fs);
        fs << "}" << "descriptor" << "{";
        descriptor->write(fs);
        fs << "}" << "matcher" << "{";
        matcher->write(fs);
        fs << "}";
        return;
    }
    detector->read(fs["detector"]);
    descriptor->read(fs["descriptor"]);
    matcher->read(fs["matcher"]);
}
static Mat loadMat(const string& fsname)
{
    // Read the single top-level matrix stored in the given file.
    Mat result;
    FileStorage storage(fsname, FileStorage::READ);
    storage.getFirstTopLevelNode() >> result;
    return result;
}
static void transformKeypoints( const vector<KeyPoint>& kp,
                                vector<vector<Point2f> >& contours,
                                const Mat& H )
{
    // Convert each keypoint into a polygonal contour approximating its
    // support region (an ellipse with equal axes), then map the contour
    // through homography H. Coordinates are scaled up before ellipse2Poly
    // (which produces integer points) and scaled back down afterwards to
    // keep sub-pixel precision.
    const float scale = 256.f;
    size_t i, n = kp.size();
    contours.resize(n);
    vector<Point> temp;
    for( i = 0; i < n; i++ )
    {
        ellipse2Poly(Point2f(kp[i].pt.x*scale, kp[i].pt.y*scale),
                     Size2f(kp[i].size*scale, kp[i].size*scale),
                     0, 0, 360, 12, temp);
        Mat(temp).convertTo(contours[i], CV_32F, 1./scale);
        perspectiveTransform(contours[i], contours[i], H);
    }
}
// Computes the result triple (N, n/N1, n/N) for one transformed image:
//   N  - reference keypoints still (mostly) visible after the transform,
//   N1 - reference keypoints for which the matcher produced a match,
//   n  - matches whose keypoint contours overlap by >= overlapThreshold
//        (intersection over union).
// kp1t_contours are the transformed reference-keypoint contours,
// kp_contours the contours of the keypoints detected in the test image.
static TVec proccessMatches( Size imgsize,
                             const vector<DMatch>& matches,
                             const vector<vector<Point2f> >& kp1t_contours,
                             const vector<vector<Point2f> >& kp_contours,
                             double overlapThreshold )
{
    // Minimum fraction of a keypoint's bounding rect that must lie inside
    // the image for the keypoint to count as visible.
    const double visibilityThreshold = 0.6;
    // 1. [preprocessing] find bounding rect for each element of kp1t_contours and kp_contours.
    // 2. [cross-check] for each DMatch (iK, i1)
    //    update best_match[i1] using DMatch::distance.
    // 3. [compute overlapping] for each i1 (keypoint from the first image) do:
    //    if i1-th keypoint is outside of image, skip it
    //    increment N
    //    if best_match[i1] is initialized, increment N1
    //    if kp_contours[best_match[i1]] and kp1t_contours[i1] overlap by overlapThreshold*100%,
    //    increment n. Use bounding rects to speedup this step
    int i, size1 = (int)kp1t_contours.size(), size = (int)kp_contours.size(), msize = (int)matches.size();
    vector<DMatch> best_match(size1);
    vector<Rect> rects1(size1), rects(size);
    // proprocess
    for( i = 0; i < size1; i++ )
        rects1[i] = boundingRect(kp1t_contours[i]);
    for( i = 0; i < size; i++ )
        rects[i] = boundingRect(kp_contours[i]);
    // cross-check: keep only the lowest-distance match per reference keypoint
    for( i = 0; i < msize; i++ )
    {
        DMatch m = matches[i];
        int i1 = m.trainIdx, iK = m.queryIdx;
        CV_Assert( 0 <= i1 && i1 < size1 && 0 <= iK && iK < size );
        if( best_match[i1].trainIdx < 0 || best_match[i1].distance > m.distance )
            best_match[i1] = m;
    }
    int N = 0, N1 = 0, n = 0;
    // overlapping
    for( i = 0; i < size1; i++ )
    {
        int i1 = i, iK = best_match[i].queryIdx;
        if( iK >= 0 )
            N1++;
        Rect r = rects1[i] & Rect(0, 0, imgsize.width, imgsize.height);
        if( r.area() < visibilityThreshold*rects1[i].area() )
            continue;
        N++;
        // Cheap bounding-rect rejection before the exact contour test.
        if( iK < 0 || (rects1[i1] & rects[iK]).area() == 0 )
            continue;
        double n_area = intersectConvexConvex(kp1t_contours[i1], kp_contours[iK], noArray(), true);
        if( n_area == 0 )
            continue;
        double area1 = contourArea(kp1t_contours[i1], false);
        double area = contourArea(kp_contours[iK], false);
        // Intersection over union of the two contours.
        double ratio = n_area/(area1 + area - n_area);
        n += ratio >= overlapThreshold;
    }
    return TVec((float)N, (float)n/std::max(N1, 1), (float)n/std::max(N, 1));
}
// Writes precision and recall CSV curves for one detector/descriptor/matcher
// combo on one dataset. xvals supplies the X coordinate per test case.
static void saveResults(const string& dir, const string& name, const string& dsname,
                        const vector<TVec>& results, const int* xvals)
{
    string fname1 = format("%s%s_%s_precision.csv", dir.c_str(), name.c_str(), dsname.c_str());
    string fname2 = format("%s%s_%s_recall.csv", dir.c_str(), name.c_str(), dsname.c_str());
    FILE* f1 = fopen(fname1.c_str(), "wt");
    FILE* f2 = fopen(fname2.c_str(), "wt");
    // fopen can fail (e.g. bad output directory); report and bail out
    // instead of crashing inside fprintf on a NULL stream.
    if( !f1 || !f2 )
    {
        printf("can not open %s or %s for writing\n", fname1.c_str(), fname2.c_str());
        if( f1 ) fclose(f1);
        if( f2 ) fclose(f2);
        return;
    }
    for( size_t i = 0; i < results.size(); i++ )
    {
        // results[i] = (N, precision, recall); write the ratios as percent.
        fprintf(f1, "%d, %.1f\n", xvals[i], results[i][1]*100);
        fprintf(f2, "%d, %.1f\n", xvals[i], results[i][2]*100);
    }
    fclose(f1);
    fclose(f2);
}
int main(int argc, char** argv)
{
    // Each row: test name, detector name, descriptor name, matcher name.
    static const char* ddms[] =
    {
        "ORBX_BF", "ORB", "ORB", "BruteForce-Hamming",
        //"ORB_BF", "ORB", "ORB", "BruteForce-Hamming",
        //"ORB3_BF", "ORB", "ORB", "BruteForce-Hamming(2)",
        //"ORB4_BF", "ORB", "ORB", "BruteForce-Hamming(2)",
        //"ORB_LSH", "ORB", "ORB", "LSH"
        //"SURF_BF", "SURF", "SURF", "BruteForce",
        0
    };

    static const char* datasets[] =
    {
        "bark", "bikes", "boat", "graf", "leuven", "trees", "ubc", "wall", 0
    };

    static const int imgXVals[] = { 2, 3, 4, 5, 6 }; // if scale, blur or light changes
    static const int viewpointXVals[] = { 20, 30, 40, 50, 60 }; // if viewpoint changes
    static const int jpegXVals[] = { 60, 80, 90, 95, 98 }; // if jpeg compression

    const double overlapThreshold = 0.6;

    vector<vector<vector<TVec> > > results; // indexed as results[ddm][dataset][testcase]

    // getenv returns NULL when the variable is unset; the original code
    // invoked undefined behaviour constructing std::string from NULL.
    const char* test_data_path = getenv("OPENCV_TEST_DATA_PATH");
    if( !test_data_path )
    {
        printf("OPENCV_TEST_DATA_PATH environment variable is not set\n");
        return -1;
    }
    string dataset_dir = string(test_data_path) +
        "/cv/detectors_descriptors_evaluation/images_datasets";

    string dir=argc > 1 ? argv[1] : ".";
    if( dir.empty() )
        dir = "."; // dir[dir.size()-1] below would be out of bounds on ""
    if( dir[dir.size()-1] != '\\' && dir[dir.size()-1] != '/' )
        dir += "/";

    int result = system(("mkdir " + dir).c_str());
    CV_Assert(result == 0);

    for( int i = 0; ddms[i*4] != 0; i++ )
    {
        const char* name = ddms[i*4];
        const char* detector_name = ddms[i*4+1];
        const char* descriptor_name = ddms[i*4+2];
        const char* matcher_name = ddms[i*4+3];
        string params_filename = dir + string(name) + "_params.yml";

        cout << "Testing " << name << endl;

        Ptr<FeatureDetector> detector = FeatureDetector::create(detector_name);
        Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create(descriptor_name);
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(matcher_name);
        saveloadDDM( params_filename, detector, descriptor, matcher );

        results.push_back(vector<vector<TVec> >());

        for( int j = 0; datasets[j] != 0; j++ )
        {
            const char* dsname = datasets[j];

            cout << "\ton " << dsname << " ";
            cout.flush();

            const int* xvals = strcmp(dsname, "ubc") == 0 ? jpegXVals :
                strcmp(dsname, "graf") == 0 || strcmp(dsname, "wall") == 0 ? viewpointXVals : imgXVals;

            vector<KeyPoint> kp1, kp;
            vector<DMatch> matches;
            vector<vector<Point2f> > kp1t_contours, kp_contours;
            Mat desc1, desc;

            Mat img1 = imread(format("%s/%s/img1.png", dataset_dir.c_str(), dsname), 0);
            CV_Assert( !img1.empty() );

            detector->detect(img1, kp1);
            descriptor->compute(img1, kp1, desc1);

            results[i].push_back(vector<TVec>());

            // Process img2, img3, ... until a dataset image is missing.
            for( int k = 2; ; k++ )
            {
                cout << ".";
                cout.flush();
                Mat imgK = imread(format("%s/%s/img%d.png", dataset_dir.c_str(), dsname, k), 0);
                if( imgK.empty() )
                    break;

                detector->detect(imgK, kp);
                descriptor->compute(imgK, kp, desc);
                matcher->match( desc, desc1, matches );

                Mat H = loadMat(format("%s/%s/H1to%dp.xml", dataset_dir.c_str(), dsname, k));

                transformKeypoints( kp1, kp1t_contours, H );
                transformKeypoints( kp, kp_contours, Mat::eye(3, 3, CV_64F));

                TVec r = proccessMatches( imgK.size(), matches, kp1t_contours, kp_contours, overlapThreshold );
                results[i][j].push_back(r);
            }

            saveResults(dir, name, dsname, results[i][j], xvals);
            cout << endl;
        }
    }
    return 0;
}

View File

@@ -1,6 +1,7 @@
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>

View File

@@ -1,5 +1,6 @@
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>

View File

@@ -1,5 +1,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>

View File

@@ -1,7 +1,8 @@
#include "opencv2/highgui.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/ml.hpp"
using namespace cv;
using namespace cv::ml;
int main( int /*argc*/, char** /*argv*/ )
{
@@ -19,8 +20,6 @@ int main( int /*argc*/, char** /*argv*/ )
Mat labels;
Mat img = Mat::zeros( Size( 500, 500 ), CV_8UC3 );
Mat sample( 1, 2, CV_32FC1 );
CvEM em_model;
CvEMParams params;
samples = samples.reshape(2, 0);
for( i = 0; i < N; i++ )
@@ -35,37 +34,11 @@ int main( int /*argc*/, char** /*argv*/ )
}
samples = samples.reshape(1, 0);
// initialize model parameters
params.covs = NULL;
params.means = NULL;
params.weights = NULL;
params.probs = NULL;
params.nclusters = N;
params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;
params.start_step = CvEM::START_AUTO_STEP;
params.term_crit.max_iter = 300;
params.term_crit.epsilon = 0.1;
params.term_crit.type = TermCriteria::COUNT|TermCriteria::EPS;
// cluster the data
em_model.train( samples, Mat(), params, &labels );
Ptr<EM> em_model = EM::train( samples, noArray(), labels, noArray(),
EM::Params(N, EM::COV_MAT_SPHERICAL,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)));
#if 0
// the piece of code shows how to repeatedly optimize the model
// with less-constrained parameters
//(COV_MAT_DIAGONAL instead of COV_MAT_SPHERICAL)
// when the output of the first stage is used as input for the second one.
CvEM em_model2;
params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
params.start_step = CvEM::START_E_STEP;
params.means = em_model.get_means();
params.covs = em_model.get_covs();
params.weights = em_model.get_weights();
em_model2.train( samples, Mat(), params, &labels );
// to use em_model2, replace em_model.predict()
// with em_model2.predict() below
#endif
// classify every image pixel
for( i = 0; i < img.rows; i++ )
{
@@ -73,7 +46,7 @@ int main( int /*argc*/, char** /*argv*/ )
{
sample.at<float>(0) = (float)j;
sample.at<float>(1) = (float)i;
int response = cvRound(em_model.predict( sample ));
int response = cvRound(em_model->predict2( sample, noArray() )[1]);
Scalar c = colors[response];
circle( img, Point(j, i), 1, c*0.75, FILLED );

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

Some files were not shown because too many files have changed in this diff Show More