Merging itseez

Fedor Morozov
2013-09-18 18:55:12 +04:00
1586 changed files with 85437 additions and 29692 deletions

View File

@@ -2,19 +2,19 @@
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>CFBundleDevelopmentRegion</key>
    <string>English</string>
    <key>CFBundleExecutable</key>
    <string>${EXECUTABLE_NAME}</string>
    <key>CFBundleIdentifier</key>
    <string>de.rwth-aachen.ient.FaceTracker</string>
    <key>CFBundleInfoDictionaryVersion</key>
    <string>6.0</string>
    <key>CFBundlePackageType</key>
    <string>APPL</string>
    <key>CFBundleSignature</key>
    <string>????</string>
    <key>CFBundleVersion</key>
    <string>1.0</string>
</dict>
</plist>

View File

@@ -4,4 +4,4 @@ build.xml
local.properties
proguard-project.txt
project.properties
default.properties

View File

@@ -9,6 +9,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wmissing-declarations)
add_subdirectory(15-puzzle)
add_subdirectory(face-detection)
add_subdirectory(image-manipulations)
add_subdirectory(camera-calibration)
add_subdirectory(color-blob-detection)
add_subdirectory(tutorial-1-camerapreview)
add_subdirectory(tutorial-2-mixedprocessing)

View File

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
    <classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
    <classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
    <classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.DEPENDENCIES"/>
    <classpathentry kind="src" path="src"/>
    <classpathentry kind="src" path="gen"/>
    <classpathentry kind="output" path="bin/classes"/>
</classpath>

View File

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
    <name>OpenCV Sample - camera-calibration</name>
    <comment></comment>
    <projects>
    </projects>
    <buildSpec>
        <buildCommand>
            <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>org.eclipse.jdt.core.javabuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>com.android.ide.eclipse.adt.ApkBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
    </buildSpec>
    <natures>
        <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
        <nature>org.eclipse.jdt.core.javanature</nature>
    </natures>
</projectDescription>

View File

@@ -0,0 +1,4 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
org.eclipse.jdt.core.compiler.compliance=1.6
org.eclipse.jdt.core.compiler.source=1.6

View File

@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
          package="org.opencv.samples.cameracalibration"
          android:versionCode="1"
          android:versionName="1.0">

    <application
        android:label="@string/app_name"
        android:icon="@drawable/icon"
        android:theme="@android:style/Theme.NoTitleBar.Fullscreen">

        <activity android:name="CameraCalibrationActivity"
                  android:label="@string/app_name"
                  android:screenOrientation="landscape"
                  android:configChanges="keyboardHidden|orientation">
            <intent-filter>
                <action android:name="android.intent.action.MAIN" />
                <category android:name="android.intent.category.LAUNCHER" />
            </intent-filter>
        </activity>
    </application>

    <supports-screens android:resizeable="true"
                      android:smallScreens="true"
                      android:normalScreens="true"
                      android:largeScreens="true"
                      android:anyDensity="true" />

    <uses-sdk android:minSdkVersion="8" />

    <uses-permission android:name="android.permission.CAMERA"/>

    <uses-feature android:name="android.hardware.camera" android:required="false"/>
    <uses-feature android:name="android.hardware.camera.autofocus" android:required="false"/>
    <uses-feature android:name="android.hardware.camera.front" android:required="false"/>
    <uses-feature android:name="android.hardware.camera.front.autofocus" android:required="false"/>
</manifest>

View File

@@ -0,0 +1,6 @@
set(sample example-camera-calibration)
add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET})
if(TARGET ${sample})
    add_dependencies(opencv_android_examples ${sample})
endif()

Binary file not shown. (new image added, 2.1 KiB)

View File

@@ -0,0 +1,12 @@
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:tools="http://schemas.android.com/tools"
    xmlns:opencv="http://schemas.android.com/apk/res-auto"
    android:layout_width="match_parent"
    android:layout_height="match_parent" >

    <org.opencv.android.JavaCameraView
        android:layout_width="fill_parent"
        android:layout_height="fill_parent"
        android:id="@+id/camera_calibration_java_surface_view" />

</LinearLayout>

View File

@@ -0,0 +1,22 @@
<?xml version="1.0" encoding="utf-8"?>
<menu xmlns:android="http://schemas.android.com/apk/res/android" >
    <group android:checkableBehavior="single">
        <item android:id="@+id/calibrate"
              android:title="@string/action_calibrate"
              android:showAsAction="ifRoom|withText" />
        <item android:id="@+id/preview_mode"
              android:title="@string/preview_mode">
            <menu>
                <group android:checkableBehavior="single">
                    <item android:id="@+id/calibration"
                          android:title="@string/calibration"
                          android:checked="true" />
                    <item android:id="@+id/undistortion"
                          android:title="@string/undistortion" />
                    <item android:id="@+id/comparison"
                          android:title="@string/comparison" />
                </group>
            </menu>
        </item>
    </group>
</menu>

View File

@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">OCV Camera Calibration</string>
<string name="action_calibrate">Calibrate</string>
<string name="calibration">Calibration</string>
<string name="undistortion">Undistortion</string>
<string name="comparison">Comparison</string>
<string name="preview_mode">Preview mode</string>
<string name="calibration_successful">Successfully calibrated!\nAvg. re-projection error:</string>
<string name="calibration_unsuccessful">Unsuccessful calibration.\nTry again</string>
<string name="more_samples">Please, capture more samples</string>
<string name="calibrating">Calibrating...</string>
<string name="please_wait">Please, wait</string>
<string name="original">Original</string>
<string name="undistorted">Undistorted</string>
</resources>

View File

@@ -0,0 +1,69 @@
package org.opencv.samples.cameracalibration;

import org.opencv.core.Mat;

import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.util.Log;

public abstract class CalibrationResult {
    private static final String TAG = "OCVSample::CalibrationResult";

    private static final int CAMERA_MATRIX_ROWS = 3;
    private static final int CAMERA_MATRIX_COLS = 3;
    private static final int DISTORTION_COEFFICIENTS_SIZE = 5;

    public static void save(Activity activity, Mat cameraMatrix, Mat distortionCoefficients) {
        SharedPreferences sharedPref = activity.getPreferences(Context.MODE_PRIVATE);
        SharedPreferences.Editor editor = sharedPref.edit();

        double[] cameraMatrixArray = new double[CAMERA_MATRIX_ROWS * CAMERA_MATRIX_COLS];
        cameraMatrix.get(0, 0, cameraMatrixArray);
        for (int i = 0; i < CAMERA_MATRIX_ROWS; i++) {
            for (int j = 0; j < CAMERA_MATRIX_COLS; j++) {
                Integer id = i * CAMERA_MATRIX_ROWS + j;
                editor.putFloat(id.toString(), (float)cameraMatrixArray[id]);
            }
        }

        double[] distortionCoefficientsArray = new double[DISTORTION_COEFFICIENTS_SIZE];
        distortionCoefficients.get(0, 0, distortionCoefficientsArray);
        int shift = CAMERA_MATRIX_ROWS * CAMERA_MATRIX_COLS;
        for (Integer i = shift; i < DISTORTION_COEFFICIENTS_SIZE + shift; i++) {
            editor.putFloat(i.toString(), (float)distortionCoefficientsArray[i-shift]);
        }

        editor.commit();
        Log.i(TAG, "Saved camera matrix: " + cameraMatrix.dump());
        Log.i(TAG, "Saved distortion coefficients: " + distortionCoefficients.dump());
    }

    public static boolean tryLoad(Activity activity, Mat cameraMatrix, Mat distortionCoefficients) {
        SharedPreferences sharedPref = activity.getPreferences(Context.MODE_PRIVATE);
        if (sharedPref.getFloat("0", -1) == -1) {
            Log.i(TAG, "No previous calibration results found");
            return false;
        }

        double[] cameraMatrixArray = new double[CAMERA_MATRIX_ROWS * CAMERA_MATRIX_COLS];
        for (int i = 0; i < CAMERA_MATRIX_ROWS; i++) {
            for (int j = 0; j < CAMERA_MATRIX_COLS; j++) {
                Integer id = i * CAMERA_MATRIX_ROWS + j;
                cameraMatrixArray[id] = sharedPref.getFloat(id.toString(), -1);
            }
        }
        cameraMatrix.put(0, 0, cameraMatrixArray);
        Log.i(TAG, "Loaded camera matrix: " + cameraMatrix.dump());

        double[] distortionCoefficientsArray = new double[DISTORTION_COEFFICIENTS_SIZE];
        int shift = CAMERA_MATRIX_ROWS * CAMERA_MATRIX_COLS;
        for (Integer i = shift; i < DISTORTION_COEFFICIENTS_SIZE + shift; i++) {
            distortionCoefficientsArray[i - shift] = sharedPref.getFloat(i.toString(), -1);
        }
        distortionCoefficients.put(0, 0, distortionCoefficientsArray);
        Log.i(TAG, "Loaded distortion coefficients: " + distortionCoefficients.dump());

        return true;
    }
}
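
Note: CalibrationResult stores the nine camera-matrix entries under preference keys "0".."8" and the five distortion coefficients under "9".."13". For comparison only (not part of this commit), a desktop-OpenCV analogue of this persistence step could use cv::FileStorage instead of SharedPreferences; a minimal sketch, with illustrative file and node names:

#include <opencv2/core.hpp>

// Write both calibration outputs to an XML/YAML file.
static void saveCalibration(const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
    cv::FileStorage fs("calibration.xml", cv::FileStorage::WRITE);
    fs << "camera_matrix" << cameraMatrix;            // 3x3, CV_64F
    fs << "distortion_coefficients" << distCoeffs;    // 5x1, CV_64F
}

// Read them back; returns false when no previous calibration exists.
static bool tryLoadCalibration(cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    cv::FileStorage fs("calibration.xml", cv::FileStorage::READ);
    if (!fs.isOpened())
        return false;
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    return !cameraMatrix.empty() && !distCoeffs.empty();
}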

View File

@@ -0,0 +1,216 @@
// This sample is based on the "Camera calibration With OpenCV" tutorial:
// http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
//
// It uses the standard OpenCV asymmetric circles grid pattern, 11x4:
// https://github.com/Itseez/opencv/blob/2.4/doc/acircles_pattern.png.
// The results are the camera matrix and 5 distortion coefficients.
//
// Tap on the highlighted pattern to capture its corners for calibration.
// Move the pattern across the whole screen and capture data.
//
// When you've captured the necessary number of pattern views (usually ~20 are enough),
// press the "Calibrate" button to perform camera calibration.

package org.opencv.samples.cameracalibration;

import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Mat;

import android.app.Activity;
import android.app.ProgressDialog;
import android.content.res.Resources;
import android.os.AsyncTask;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.SurfaceView;
import android.view.View;
import android.view.View.OnTouchListener;
import android.view.WindowManager;
import android.widget.Toast;

public class CameraCalibrationActivity extends Activity implements CvCameraViewListener2, OnTouchListener {
    private static final String TAG = "OCVSample::Activity";

    private CameraBridgeViewBase mOpenCvCameraView;
    private CameraCalibrator mCalibrator;
    private OnCameraFrameRender mOnCameraFrameRender;
    private int mWidth;
    private int mHeight;

    private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
        @Override
        public void onManagerConnected(int status) {
            switch (status) {
                case LoaderCallbackInterface.SUCCESS:
                {
                    Log.i(TAG, "OpenCV loaded successfully");
                    mOpenCvCameraView.enableView();
                    mOpenCvCameraView.setOnTouchListener(CameraCalibrationActivity.this);
                } break;
                default:
                {
                    super.onManagerConnected(status);
                } break;
            }
        }
    };

    public CameraCalibrationActivity() {
        Log.i(TAG, "Instantiated new " + this.getClass());
    }

    @Override
    public void onCreate(Bundle savedInstanceState) {
        Log.i(TAG, "called onCreate");
        super.onCreate(savedInstanceState);
        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

        setContentView(R.layout.camera_calibration_surface_view);

        mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.camera_calibration_java_surface_view);
        mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
        mOpenCvCameraView.setCvCameraViewListener(this);
    }

    @Override
    public void onPause()
    {
        super.onPause();
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
    }

    @Override
    public void onResume()
    {
        super.onResume();
        OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_2, this, mLoaderCallback);
    }

    public void onDestroy() {
        super.onDestroy();
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        super.onCreateOptionsMenu(menu);
        getMenuInflater().inflate(R.menu.calibration, menu);
        return true;
    }

    @Override
    public boolean onPrepareOptionsMenu (Menu menu) {
        super.onPrepareOptionsMenu(menu);
        menu.findItem(R.id.preview_mode).setEnabled(true);
        if (!mCalibrator.isCalibrated())
            menu.findItem(R.id.preview_mode).setEnabled(false);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        switch (item.getItemId()) {
            case R.id.calibration:
                mOnCameraFrameRender =
                        new OnCameraFrameRender(new CalibrationFrameRender(mCalibrator));
                item.setChecked(true);
                return true;
            case R.id.undistortion:
                mOnCameraFrameRender =
                        new OnCameraFrameRender(new UndistortionFrameRender(mCalibrator));
                item.setChecked(true);
                return true;
            case R.id.comparison:
                mOnCameraFrameRender =
                        new OnCameraFrameRender(new ComparisonFrameRender(mCalibrator, mWidth, mHeight, getResources()));
                item.setChecked(true);
                return true;
            case R.id.calibrate:
                final Resources res = getResources();
                if (mCalibrator.getCornersBufferSize() < 2) {
                    (Toast.makeText(this, res.getString(R.string.more_samples), Toast.LENGTH_SHORT)).show();
                    return true;
                }

                mOnCameraFrameRender = new OnCameraFrameRender(new PreviewFrameRender());
                new AsyncTask<Void, Void, Void>() {
                    private ProgressDialog calibrationProgress;

                    @Override
                    protected void onPreExecute() {
                        calibrationProgress = new ProgressDialog(CameraCalibrationActivity.this);
                        calibrationProgress.setTitle(res.getString(R.string.calibrating));
                        calibrationProgress.setMessage(res.getString(R.string.please_wait));
                        calibrationProgress.setCancelable(false);
                        calibrationProgress.setIndeterminate(true);
                        calibrationProgress.show();
                    }

                    @Override
                    protected Void doInBackground(Void... arg0) {
                        mCalibrator.calibrate();
                        return null;
                    }

                    @Override
                    protected void onPostExecute(Void result) {
                        calibrationProgress.dismiss();
                        mCalibrator.clearCorners();
                        mOnCameraFrameRender = new OnCameraFrameRender(new CalibrationFrameRender(mCalibrator));
                        String resultMessage = (mCalibrator.isCalibrated()) ?
                                res.getString(R.string.calibration_successful) + " " + mCalibrator.getAvgReprojectionError() :
                                res.getString(R.string.calibration_unsuccessful);
                        (Toast.makeText(CameraCalibrationActivity.this, resultMessage, Toast.LENGTH_SHORT)).show();

                        if (mCalibrator.isCalibrated()) {
                            CalibrationResult.save(CameraCalibrationActivity.this,
                                    mCalibrator.getCameraMatrix(), mCalibrator.getDistortionCoefficients());
                        }
                    }
                }.execute();
                return true;
            default:
                return super.onOptionsItemSelected(item);
        }
    }

    public void onCameraViewStarted(int width, int height) {
        if (mWidth != width || mHeight != height) {
            mWidth = width;
            mHeight = height;
            mCalibrator = new CameraCalibrator(mWidth, mHeight);
            if (CalibrationResult.tryLoad(this, mCalibrator.getCameraMatrix(), mCalibrator.getDistortionCoefficients())) {
                mCalibrator.setCalibrated();
            }
            mOnCameraFrameRender = new OnCameraFrameRender(new CalibrationFrameRender(mCalibrator));
        }
    }

    public void onCameraViewStopped() {
    }

    public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
        return mOnCameraFrameRender.render(inputFrame);
    }

    @Override
    public boolean onTouch(View v, MotionEvent event) {
        Log.d(TAG, "onTouch invoked");
        mCalibrator.addCorners();
        return false;
    }
}

View File

@@ -0,0 +1,169 @@
package org.opencv.samples.cameracalibration;

import java.util.ArrayList;
import java.util.List;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDouble;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.Size;

import android.util.Log;

public class CameraCalibrator {
    private static final String TAG = "OCVSample::CameraCalibrator";

    private final Size mPatternSize = new Size(4, 11);
    private final int mCornersSize = (int)(mPatternSize.width * mPatternSize.height);
    private boolean mPatternWasFound = false;
    private MatOfPoint2f mCorners = new MatOfPoint2f();
    private List<Mat> mCornersBuffer = new ArrayList<Mat>();
    private boolean mIsCalibrated = false;

    private Mat mCameraMatrix = new Mat();
    private Mat mDistortionCoefficients = new Mat();
    private int mFlags;
    private double mRms;
    private double mSquareSize = 0.0181;
    private Size mImageSize;

    public CameraCalibrator(int width, int height) {
        mImageSize = new Size(width, height);
        mFlags = Calib3d.CALIB_FIX_PRINCIPAL_POINT +
                 Calib3d.CALIB_ZERO_TANGENT_DIST +
                 Calib3d.CALIB_FIX_ASPECT_RATIO +
                 Calib3d.CALIB_FIX_K4 +
                 Calib3d.CALIB_FIX_K5;
        Mat.eye(3, 3, CvType.CV_64FC1).copyTo(mCameraMatrix);
        mCameraMatrix.put(0, 0, 1.0);
        Mat.zeros(5, 1, CvType.CV_64FC1).copyTo(mDistortionCoefficients);
        Log.i(TAG, "Instantiated new " + this.getClass());
    }

    public void processFrame(Mat grayFrame, Mat rgbaFrame) {
        findPattern(grayFrame);
        renderFrame(rgbaFrame);
    }

    public void calibrate() {
        ArrayList<Mat> rvecs = new ArrayList<Mat>();
        ArrayList<Mat> tvecs = new ArrayList<Mat>();
        Mat reprojectionErrors = new Mat();
        ArrayList<Mat> objectPoints = new ArrayList<Mat>();
        objectPoints.add(Mat.zeros(mCornersSize, 1, CvType.CV_32FC3));
        calcBoardCornerPositions(objectPoints.get(0));
        for (int i = 1; i < mCornersBuffer.size(); i++) {
            objectPoints.add(objectPoints.get(0));
        }

        Calib3d.calibrateCamera(objectPoints, mCornersBuffer, mImageSize,
                mCameraMatrix, mDistortionCoefficients, rvecs, tvecs, mFlags);

        mIsCalibrated = Core.checkRange(mCameraMatrix)
                && Core.checkRange(mDistortionCoefficients);

        mRms = computeReprojectionErrors(objectPoints, rvecs, tvecs, reprojectionErrors);
        Log.i(TAG, String.format("Average re-projection error: %f", mRms));
        Log.i(TAG, "Camera matrix: " + mCameraMatrix.dump());
        Log.i(TAG, "Distortion coefficients: " + mDistortionCoefficients.dump());
    }

    public void clearCorners() {
        mCornersBuffer.clear();
    }

    private void calcBoardCornerPositions(Mat corners) {
        final int cn = 3;
        float positions[] = new float[mCornersSize * cn];

        for (int i = 0; i < mPatternSize.height; i++) {
            for (int j = 0; j < mPatternSize.width * cn; j += cn) {
                positions[(int) (i * mPatternSize.width * cn + j + 0)] =
                        (2 * (j / cn) + i % 2) * (float) mSquareSize;
                positions[(int) (i * mPatternSize.width * cn + j + 1)] =
                        i * (float) mSquareSize;
                positions[(int) (i * mPatternSize.width * cn + j + 2)] = 0;
            }
        }
        corners.create(mCornersSize, 1, CvType.CV_32FC3);
        corners.put(0, 0, positions);
    }

    private double computeReprojectionErrors(List<Mat> objectPoints,
            List<Mat> rvecs, List<Mat> tvecs, Mat perViewErrors) {
        MatOfPoint2f cornersProjected = new MatOfPoint2f();
        double totalError = 0;
        double error;
        float viewErrors[] = new float[objectPoints.size()];

        MatOfDouble distortionCoefficients = new MatOfDouble(mDistortionCoefficients);
        int totalPoints = 0;
        for (int i = 0; i < objectPoints.size(); i++) {
            MatOfPoint3f points = new MatOfPoint3f(objectPoints.get(i));
            Calib3d.projectPoints(points, rvecs.get(i), tvecs.get(i),
                    mCameraMatrix, distortionCoefficients, cornersProjected);
            error = Core.norm(mCornersBuffer.get(i), cornersProjected, Core.NORM_L2);

            int n = objectPoints.get(i).rows();
            viewErrors[i] = (float) Math.sqrt(error * error / n);
            totalError += error * error;
            totalPoints += n;
        }
        perViewErrors.create(objectPoints.size(), 1, CvType.CV_32FC1);
        perViewErrors.put(0, 0, viewErrors);

        return Math.sqrt(totalError / totalPoints);
    }

    private void findPattern(Mat grayFrame) {
        mPatternWasFound = Calib3d.findCirclesGrid(grayFrame, mPatternSize,
                mCorners, Calib3d.CALIB_CB_ASYMMETRIC_GRID);
    }

    public void addCorners() {
        if (mPatternWasFound) {
            mCornersBuffer.add(mCorners.clone());
        }
    }

    private void drawPoints(Mat rgbaFrame) {
        Calib3d.drawChessboardCorners(rgbaFrame, mPatternSize, mCorners, mPatternWasFound);
    }

    private void renderFrame(Mat rgbaFrame) {
        drawPoints(rgbaFrame);
        Core.putText(rgbaFrame, "Captured: " + mCornersBuffer.size(),
                new Point(rgbaFrame.cols() / 3 * 2, rgbaFrame.rows() * 0.1),
                Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));
    }

    public Mat getCameraMatrix() {
        return mCameraMatrix;
    }

    public Mat getDistortionCoefficients() {
        return mDistortionCoefficients;
    }

    public int getCornersBufferSize() {
        return mCornersBuffer.size();
    }

    public double getAvgReprojectionError() {
        return mRms;
    }

    public boolean isCalibrated() {
        return mIsCalibrated;
    }

    public void setCalibrated() {
        mIsCalibrated = true;
    }
}
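
Two formulas are implicit in this class (a reading aid, not part of the commit). calcBoardCornerPositions lays out the 11x4 asymmetric circles grid: with square size $s$ (mSquareSize), row $i$, and grid column $c = j/cn$, each object point is

$$x = \bigl(2c + (i \bmod 2)\bigr)\,s, \qquad y = i\,s, \qquad z = 0.$$

computeReprojectionErrors returns the RMS re-projection error: if view $i$ has $n_i$ corners and $e_i$ is the L2 distance between its detected and re-projected corners, then

$$\mathrm{viewErrors}_i = \sqrt{e_i^2 / n_i}, \qquad \mathrm{rms} = \sqrt{\frac{\sum_i e_i^2}{\sum_i n_i}}.$$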

View File

@@ -0,0 +1,102 @@
package org.opencv.samples.cameracalibration;

import java.util.ArrayList;
import java.util.List;

import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Range;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;

import android.content.res.Resources;

abstract class FrameRender {
    protected CameraCalibrator mCalibrator;

    public abstract Mat render(CvCameraViewFrame inputFrame);
}

class PreviewFrameRender extends FrameRender {
    @Override
    public Mat render(CvCameraViewFrame inputFrame) {
        return inputFrame.rgba();
    }
}

class CalibrationFrameRender extends FrameRender {
    public CalibrationFrameRender(CameraCalibrator calibrator) {
        mCalibrator = calibrator;
    }

    @Override
    public Mat render(CvCameraViewFrame inputFrame) {
        Mat rgbaFrame = inputFrame.rgba();
        Mat grayFrame = inputFrame.gray();
        mCalibrator.processFrame(grayFrame, rgbaFrame);
        return rgbaFrame;
    }
}

class UndistortionFrameRender extends FrameRender {
    public UndistortionFrameRender(CameraCalibrator calibrator) {
        mCalibrator = calibrator;
    }

    @Override
    public Mat render(CvCameraViewFrame inputFrame) {
        Mat renderedFrame = new Mat(inputFrame.rgba().size(), inputFrame.rgba().type());
        Imgproc.undistort(inputFrame.rgba(), renderedFrame,
                mCalibrator.getCameraMatrix(), mCalibrator.getDistortionCoefficients());
        return renderedFrame;
    }
}

class ComparisonFrameRender extends FrameRender {
    private int mWidth;
    private int mHeight;
    private Resources mResources;

    public ComparisonFrameRender(CameraCalibrator calibrator, int width, int height, Resources resources) {
        mCalibrator = calibrator;
        mWidth = width;
        mHeight = height;
        mResources = resources;
    }

    @Override
    public Mat render(CvCameraViewFrame inputFrame) {
        Mat undistortedFrame = new Mat(inputFrame.rgba().size(), inputFrame.rgba().type());
        Imgproc.undistort(inputFrame.rgba(), undistortedFrame,
                mCalibrator.getCameraMatrix(), mCalibrator.getDistortionCoefficients());

        Mat comparisonFrame = inputFrame.rgba();
        undistortedFrame.colRange(new Range(0, mWidth / 2)).copyTo(comparisonFrame.colRange(new Range(mWidth / 2, mWidth)));
        List<MatOfPoint> border = new ArrayList<MatOfPoint>();
        final int shift = (int)(mWidth * 0.005);
        border.add(new MatOfPoint(new Point(mWidth / 2 - shift, 0), new Point(mWidth / 2 + shift, 0),
                new Point(mWidth / 2 + shift, mHeight), new Point(mWidth / 2 - shift, mHeight)));
        Core.fillPoly(comparisonFrame, border, new Scalar(255, 255, 255));

        Core.putText(comparisonFrame, mResources.getString(R.string.original), new Point(mWidth * 0.1, mHeight * 0.1),
                Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));
        Core.putText(comparisonFrame, mResources.getString(R.string.undistorted), new Point(mWidth * 0.6, mHeight * 0.1),
                Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));

        return comparisonFrame;
    }
}

class OnCameraFrameRender {
    private FrameRender mFrameRender;

    public OnCameraFrameRender(FrameRender frameRender) {
        mFrameRender = frameRender;
    }

    public Mat render(CvCameraViewFrame inputFrame) {
        return mFrameRender.render(inputFrame);
    }
}
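
Each renderer wraps a single OpenCV call; the comparison view additionally composes half of the undistorted frame over the original using column-range views. A minimal C++ sketch of those same two steps (assuming an already calibrated cameraMatrix and distCoeffs; not part of this commit):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

cv::Mat renderComparison(const cv::Mat& frame, const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
    cv::Mat undistorted;
    cv::undistort(frame, undistorted, cameraMatrix, distCoeffs);

    // Left half: original; right half: undistorted (colRange views share memory).
    cv::Mat comparison = frame.clone();
    int w = frame.cols;
    undistorted.colRange(0, w / 2).copyTo(comparison.colRange(w / 2, w));
    return comparison;
}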

View File

@@ -4,4 +4,3 @@ add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenC
if(TARGET ${sample})
add_dependencies(opencv_android_examples ${sample})
endif()

View File

@@ -187,4 +187,4 @@ public class ColorBlobDetectionActivity extends Activity implements OnTouchListe
return new Scalar(pointMatRgba.get(0, 0));
}
}

View File

@@ -10,4 +10,3 @@ add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenC
if(TARGET ${sample})
add_dependencies(opencv_android_examples ${sample})
endif()

View File

@@ -13,4 +13,4 @@ LOCAL_LDLIBS += -llog -ldl
LOCAL_MODULE := detection_based_tracker
include $(BUILD_SHARED_LIBRARY)

View File

@@ -26,7 +26,7 @@ public:
Detector(detector)
{
LOGD("CascadeDetectorAdapter::Detect::Detect");
CV_Assert(!detector.empty());
CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
@@ -57,11 +57,11 @@ struct DetectorAgregator
mainDetector(_mainDetector),
trackingDetector(_trackingDetector)
{
CV_Assert(!_mainDetector.empty());
CV_Assert(!_trackingDetector.empty());
CV_Assert(_mainDetector);
CV_Assert(_trackingDetector);
DetectionBasedTracker::Parameters DetectorParams;
tracker = new DetectionBasedTracker(mainDetector.ptr<DetectionBasedTracker::IDetector>(), trackingDetector.ptr<DetectionBasedTracker::IDetector>(), DetectorParams);
tracker = makePtr<DetectionBasedTracker>(mainDetector, trackingDetector, DetectorParams);
}
};
@@ -77,8 +77,10 @@ JNIEXPORT jlong JNICALL Java_org_opencv_samples_facedetect_DetectionBasedTracker
try
{
cv::Ptr<CascadeDetectorAdapter> mainDetector = new CascadeDetectorAdapter(new CascadeClassifier(stdFileName));
cv::Ptr<CascadeDetectorAdapter> trackingDetector = new CascadeDetectorAdapter(new CascadeClassifier(stdFileName));
cv::Ptr<CascadeDetectorAdapter> mainDetector = makePtr<CascadeDetectorAdapter>(
makePtr<CascadeClassifier>(stdFileName));
cv::Ptr<CascadeDetectorAdapter> trackingDetector = makePtr<CascadeDetectorAdapter>(
makePtr<CascadeClassifier>(stdFileName));
result = (jlong)new DetectorAgregator(mainDetector, trackingDetector);
if (faceSize > 0)
{
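
This hunk shows the smart-pointer idiom the merge applies throughout: a cv::Ptr is now built with cv::makePtr instead of being assigned a raw new-ed pointer, and emptiness checks like CV_Assert(!p.empty()) become the boolean test CV_Assert(p). A condensed sketch of the new style (the cascade path is illustrative):

#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>

void makePtrStyle()
{
    // Old style: cv::Ptr<cv::CascadeClassifier> c = new cv::CascadeClassifier(path);
    cv::Ptr<cv::CascadeClassifier> cascade =
        cv::makePtr<cv::CascadeClassifier>("lbpcascade_frontalface.xml");
    CV_Assert(cascade);   // replaces CV_Assert(!cascade.empty())
}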

View File

@@ -24,4 +24,3 @@ int main(int argc, char* argv[])
#endif
return 0;
}

View File

@@ -48,17 +48,12 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
private CameraBridgeViewBase mOpenCvCameraView;
private Size mSize0;
private Size mSizeRgba;
private Size mSizeRgbaInner;
private Mat mRgba;
private Mat mGray;
private Mat mIntermediateMat;
private Mat mHist;
private Mat mMat0;
private MatOfInt mChannels[];
private MatOfInt mHistSize;
private int mHistSizeNum;
private int mHistSizeNum = 25;
private MatOfFloat mRanges;
private Scalar mColorsRGB[];
private Scalar mColorsHue[];
@@ -66,10 +61,6 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
private Point mP1;
private Point mP2;
private float mBuff[];
private Mat mRgbaInnerWindow;
private Mat mGrayInnerWindow;
private Mat mZoomWindow;
private Mat mZoomCorner;
private Mat mSepiaKernel;
public static int viewMode = VIEW_MODE_RGBA;
@@ -166,13 +157,9 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
}
public void onCameraViewStarted(int width, int height) {
mGray = new Mat();
mRgba = new Mat();
mIntermediateMat = new Mat();
mSize0 = new Size();
mHist = new Mat();
mChannels = new MatOfInt[] { new MatOfInt(0), new MatOfInt(1), new MatOfInt(2) };
mHistSizeNum = 25;
mBuff = new float[mHistSizeNum];
mHistSize = new MatOfInt(mHistSizeNum);
mRanges = new MatOfFloat(0f, 256f);
@@ -197,14 +184,22 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
mSepiaKernel.put(3, 0, /* A */0.000f, 0.000f, 0.000f, 1f);
}
private void CreateAuxiliaryMats() {
if (mRgba.empty())
return;
public void onCameraViewStopped() {
// Explicitly deallocate Mats
if (mIntermediateMat != null)
mIntermediateMat.release();
mSizeRgba = mRgba.size();
mIntermediateMat = null;
}
int rows = (int) mSizeRgba.height;
int cols = (int) mSizeRgba.width;
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
Mat rgba = inputFrame.rgba();
Size sizeRgba = rgba.size();
Mat rgbaInnerWindow;
int rows = (int) sizeRgba.height;
int cols = (int) sizeRgba.width;
int left = cols / 8;
int top = rows / 8;
@@ -212,151 +207,107 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
int width = cols * 3 / 4;
int height = rows * 3 / 4;
if (mRgbaInnerWindow == null)
mRgbaInnerWindow = mRgba.submat(top, top + height, left, left + width);
mSizeRgbaInner = mRgbaInnerWindow.size();
if (mGrayInnerWindow == null && !mGray.empty())
mGrayInnerWindow = mGray.submat(top, top + height, left, left + width);
if (mZoomCorner == null)
mZoomCorner = mRgba.submat(0, rows / 2 - rows / 10, 0, cols / 2 - cols / 10);
if (mZoomWindow == null)
mZoomWindow = mRgba.submat(rows / 2 - 9 * rows / 100, rows / 2 + 9 * rows / 100, cols / 2 - 9 * cols / 100, cols / 2 + 9 * cols / 100);
}
public void onCameraViewStopped() {
// Explicitly deallocate Mats
if (mZoomWindow != null)
mZoomWindow.release();
if (mZoomCorner != null)
mZoomCorner.release();
if (mGrayInnerWindow != null)
mGrayInnerWindow.release();
if (mRgbaInnerWindow != null)
mRgbaInnerWindow.release();
if (mRgba != null)
mRgba.release();
if (mGray != null)
mGray.release();
if (mIntermediateMat != null)
mIntermediateMat.release();
mRgba = null;
mGray = null;
mIntermediateMat = null;
mRgbaInnerWindow = null;
mGrayInnerWindow = null;
mZoomCorner = null;
mZoomWindow = null;
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
switch (ImageManipulationsActivity.viewMode) {
case ImageManipulationsActivity.VIEW_MODE_RGBA:
break;
case ImageManipulationsActivity.VIEW_MODE_HIST:
if ((mSizeRgba == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
int thikness = (int) (mSizeRgba.width / (mHistSizeNum + 10) / 5);
Mat hist = new Mat();
int thikness = (int) (sizeRgba.width / (mHistSizeNum + 10) / 5);
if(thikness > 5) thikness = 5;
int offset = (int) ((mSizeRgba.width - (5*mHistSizeNum + 4*10)*thikness)/2);
int offset = (int) ((sizeRgba.width - (5*mHistSizeNum + 4*10)*thikness)/2);
// RGB
for(int c=0; c<3; c++) {
Imgproc.calcHist(Arrays.asList(mRgba), mChannels[c], mMat0, mHist, mHistSize, mRanges);
Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
mHist.get(0, 0, mBuff);
Imgproc.calcHist(Arrays.asList(rgba), mChannels[c], mMat0, hist, mHistSize, mRanges);
Core.normalize(hist, hist, sizeRgba.height/2, 0, Core.NORM_INF);
hist.get(0, 0, mBuff);
for(int h=0; h<mHistSizeNum; h++) {
mP1.x = mP2.x = offset + (c * (mHistSizeNum + 10) + h) * thikness;
mP1.y = mSizeRgba.height-1;
mP1.y = sizeRgba.height-1;
mP2.y = mP1.y - 2 - (int)mBuff[h];
Core.line(mRgba, mP1, mP2, mColorsRGB[c], thikness);
Core.line(rgba, mP1, mP2, mColorsRGB[c], thikness);
}
}
// Value and Hue
Imgproc.cvtColor(mRgba, mIntermediateMat, Imgproc.COLOR_RGB2HSV_FULL);
Imgproc.cvtColor(rgba, mIntermediateMat, Imgproc.COLOR_RGB2HSV_FULL);
// Value
Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[2], mMat0, mHist, mHistSize, mRanges);
Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
mHist.get(0, 0, mBuff);
Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[2], mMat0, hist, mHistSize, mRanges);
Core.normalize(hist, hist, sizeRgba.height/2, 0, Core.NORM_INF);
hist.get(0, 0, mBuff);
for(int h=0; h<mHistSizeNum; h++) {
mP1.x = mP2.x = offset + (3 * (mHistSizeNum + 10) + h) * thikness;
mP1.y = mSizeRgba.height-1;
mP1.y = sizeRgba.height-1;
mP2.y = mP1.y - 2 - (int)mBuff[h];
Core.line(mRgba, mP1, mP2, mWhilte, thikness);
Core.line(rgba, mP1, mP2, mWhilte, thikness);
}
// Hue
Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[0], mMat0, mHist, mHistSize, mRanges);
Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
mHist.get(0, 0, mBuff);
Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[0], mMat0, hist, mHistSize, mRanges);
Core.normalize(hist, hist, sizeRgba.height/2, 0, Core.NORM_INF);
hist.get(0, 0, mBuff);
for(int h=0; h<mHistSizeNum; h++) {
mP1.x = mP2.x = offset + (4 * (mHistSizeNum + 10) + h) * thikness;
mP1.y = mSizeRgba.height-1;
mP1.y = sizeRgba.height-1;
mP2.y = mP1.y - 2 - (int)mBuff[h];
Core.line(mRgba, mP1, mP2, mColorsHue[h], thikness);
Core.line(rgba, mP1, mP2, mColorsHue[h], thikness);
}
break;
case ImageManipulationsActivity.VIEW_MODE_CANNY:
if ((mRgbaInnerWindow == null) || (mGrayInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
Imgproc.Canny(mRgbaInnerWindow, mIntermediateMat, 80, 90);
Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
rgbaInnerWindow = rgba.submat(top, top + height, left, left + width);
Imgproc.Canny(rgbaInnerWindow, mIntermediateMat, 80, 90);
Imgproc.cvtColor(mIntermediateMat, rgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
rgbaInnerWindow.release();
break;
case ImageManipulationsActivity.VIEW_MODE_SOBEL:
mGray = inputFrame.gray();
if ((mRgbaInnerWindow == null) || (mGrayInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
Imgproc.Sobel(mGrayInnerWindow, mIntermediateMat, CvType.CV_8U, 1, 1);
Mat gray = inputFrame.gray();
Mat grayInnerWindow = gray.submat(top, top + height, left, left + width);
rgbaInnerWindow = rgba.submat(top, top + height, left, left + width);
Imgproc.Sobel(grayInnerWindow, mIntermediateMat, CvType.CV_8U, 1, 1);
Core.convertScaleAbs(mIntermediateMat, mIntermediateMat, 10, 0);
Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
Imgproc.cvtColor(mIntermediateMat, rgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
grayInnerWindow.release();
rgbaInnerWindow.release();
break;
case ImageManipulationsActivity.VIEW_MODE_SEPIA:
if ((mRgbaInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
Core.transform(mRgbaInnerWindow, mRgbaInnerWindow, mSepiaKernel);
rgbaInnerWindow = rgba.submat(top, top + height, left, left + width);
Core.transform(rgbaInnerWindow, rgbaInnerWindow, mSepiaKernel);
rgbaInnerWindow.release();
break;
case ImageManipulationsActivity.VIEW_MODE_ZOOM:
if ((mZoomCorner == null) || (mZoomWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
Imgproc.resize(mZoomWindow, mZoomCorner, mZoomCorner.size());
Mat zoomCorner = rgba.submat(0, rows / 2 - rows / 10, 0, cols / 2 - cols / 10);
Mat mZoomWindow = rgba.submat(rows / 2 - 9 * rows / 100, rows / 2 + 9 * rows / 100, cols / 2 - 9 * cols / 100, cols / 2 + 9 * cols / 100);
Imgproc.resize(mZoomWindow, zoomCorner, zoomCorner.size());
Size wsize = mZoomWindow.size();
Core.rectangle(mZoomWindow, new Point(1, 1), new Point(wsize.width - 2, wsize.height - 2), new Scalar(255, 0, 0, 255), 2);
zoomCorner.release();
mZoomWindow.release();
break;
case ImageManipulationsActivity.VIEW_MODE_PIXELIZE:
if ((mRgbaInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
Imgproc.resize(mRgbaInnerWindow, mIntermediateMat, mSize0, 0.1, 0.1, Imgproc.INTER_NEAREST);
Imgproc.resize(mIntermediateMat, mRgbaInnerWindow, mSizeRgbaInner, 0., 0., Imgproc.INTER_NEAREST);
rgbaInnerWindow = rgba.submat(top, top + height, left, left + width);
Imgproc.resize(rgbaInnerWindow, mIntermediateMat, mSize0, 0.1, 0.1, Imgproc.INTER_NEAREST);
Imgproc.resize(mIntermediateMat, rgbaInnerWindow, rgbaInnerWindow.size(), 0., 0., Imgproc.INTER_NEAREST);
rgbaInnerWindow.release();
break;
case ImageManipulationsActivity.VIEW_MODE_POSTERIZE:
if ((mRgbaInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
/*
Imgproc.cvtColor(mRgbaInnerWindow, mIntermediateMat, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(rgbaInnerWindow, mIntermediateMat, Imgproc.COLOR_RGBA2RGB);
Imgproc.pyrMeanShiftFiltering(mIntermediateMat, mIntermediateMat, 5, 50);
Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_RGB2RGBA);
Imgproc.cvtColor(mIntermediateMat, rgbaInnerWindow, Imgproc.COLOR_RGB2RGBA);
*/
Imgproc.Canny(mRgbaInnerWindow, mIntermediateMat, 80, 90);
mRgbaInnerWindow.setTo(new Scalar(0, 0, 0, 255), mIntermediateMat);
Core.convertScaleAbs(mRgbaInnerWindow, mIntermediateMat, 1./16, 0);
Core.convertScaleAbs(mIntermediateMat, mRgbaInnerWindow, 16, 0);
rgbaInnerWindow = rgba.submat(top, top + height, left, left + width);
Imgproc.Canny(rgbaInnerWindow, mIntermediateMat, 80, 90);
rgbaInnerWindow.setTo(new Scalar(0, 0, 0, 255), mIntermediateMat);
Core.convertScaleAbs(rgbaInnerWindow, mIntermediateMat, 1./16, 0);
Core.convertScaleAbs(mIntermediateMat, rgbaInnerWindow, 16, 0);
rgbaInnerWindow.release();
break;
}
return mRgba;
return rgba;
}
}
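
The rewrite above works because Mat.submat returns a view that shares pixel data with the parent frame, so filters written into rgbaInnerWindow appear in rgba, and releasing the view only drops the header. A minimal C++ illustration of the ROI-view semantics this relies on (sizes are arbitrary; not part of this commit):

#include <opencv2/core.hpp>

void roiViewDemo()
{
    cv::Mat frame(480, 640, CV_8UC4, cv::Scalar(255, 255, 255, 255));
    cv::Mat inner = frame(cv::Rect(80, 60, 480, 360)); // view: no pixel copy
    inner.setTo(cv::Scalar(0, 0, 0, 255));             // writes through to frame
    CV_Assert(frame.at<cv::Vec4b>(60, 80) == cv::Vec4b(0, 0, 0, 255));
    inner.release();                                   // frame's data stays valid
}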

View File

@@ -20,7 +20,7 @@
<folderInfo id="0.882924228." name="/" resourcePath="">
<toolChain id="org.eclipse.cdt.build.core.prefbase.toolchain.1667980868" name="No ToolChain" resourceTypeBasedDiscovery="false" superClass="org.eclipse.cdt.build.core.prefbase.toolchain">
<targetPlatform id="org.eclipse.cdt.build.core.prefbase.toolchain.1667980868.2108168132" name=""/>
<builder autoBuildTarget="" command="&quot;${NDKROOT}/ndk-build.cmd&quot;" enableAutoBuild="true" enableCleanBuild="false" id="org.eclipse.cdt.build.core.settings.default.builder.328915772" incrementalBuildTarget="" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" superClass="org.eclipse.cdt.build.core.settings.default.builder"/>
<builder autoBuildTarget="" command="${NDKROOT}/ndk-build.cmd" enableAutoBuild="true" enableCleanBuild="false" id="org.eclipse.cdt.build.core.settings.default.builder.328915772" incrementalBuildTarget="" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" superClass="org.eclipse.cdt.build.core.settings.default.builder"/>
<tool id="org.eclipse.cdt.build.core.settings.holder.libs.630148311" name="holder for library settings" superClass="org.eclipse.cdt.build.core.settings.holder.libs"/>
<tool id="org.eclipse.cdt.build.core.settings.holder.525090327" name="Assembly" superClass="org.eclipse.cdt.build.core.settings.holder">
<inputType id="org.eclipse.cdt.build.core.settings.holder.inType.1491216279" languageId="org.eclipse.cdt.core.assembly" languageName="Assembly" sourceContentType="org.eclipse.cdt.core.asmSource" superClass="org.eclipse.cdt.build.core.settings.holder.inType"/>

View File

@@ -10,6 +10,7 @@
<activity android:name="CvNativeActivity"
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="orientation|keyboardHidden">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
@@ -17,7 +18,9 @@
</intent-filter>
</activity>
<activity android:name="android.app.NativeActivity"
android:label="@string/app_name">
android:label="@string/app_name"
android:screenOrientation="landscape"
android:configChanges="keyboardHidden|orientation">
<meta-data android:name="android.app.lib_name"
android:value="native_activity" />
</activity>

View File

@@ -7,7 +7,7 @@ include ../../sdk/native/jni/OpenCV.mk
LOCAL_MODULE := native_activity
LOCAL_SRC_FILES := native.cpp
LOCAL_LDLIBS += -lm -llog -landroid
LOCAL_STATIC_LIBRARIES := android_native_app_glue
LOCAL_STATIC_LIBRARIES += android_native_app_glue
include $(BUILD_SHARED_LIBRARY)

View File

@@ -11,9 +11,10 @@
#include <math.h>
#include <queue>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#define LOG_TAG "OCV:libnative_activity"
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
@@ -78,18 +79,29 @@ static void engine_draw_frame(Engine* engine, const cv::Mat& frame)
return;
}
void* pixels = buffer.bits;
int32_t* pixels = (int32_t*)buffer.bits;
int left_indent = (buffer.width-frame.cols)/2;
int top_indent = (buffer.height-frame.rows)/2;
for (int yy = top_indent; yy < std::min(frame.rows+top_indent, buffer.height); yy++)
if (top_indent > 0)
{
unsigned char* line = (unsigned char*)pixels + left_indent*4*sizeof(unsigned char);
size_t line_size = std::min(frame.cols, buffer.width)*4*sizeof(unsigned char);
memset(pixels, 0, top_indent*buffer.stride*sizeof(int32_t));
pixels += top_indent*buffer.stride;
}
for (int yy = 0; yy < frame.rows; yy++)
{
if (left_indent > 0)
{
memset(pixels, 0, left_indent*sizeof(int32_t));
memset(pixels+left_indent+frame.cols, 0, (buffer.stride-frame.cols-left_indent)*sizeof(int32_t));
}
int32_t* line = pixels + left_indent;
size_t line_size = frame.cols*4*sizeof(unsigned char);
memcpy(line, frame.ptr<unsigned char>(yy), line_size);
// go to next line
pixels = (int32_t*)pixels + buffer.stride;
pixels += buffer.stride;
}
ANativeWindow_unlockAndPost(engine->app->window);
}
@@ -104,10 +116,10 @@ static void engine_handle_cmd(android_app* app, int32_t cmd)
{
LOGI("APP_CMD_INIT_WINDOW");
engine->capture = new cv::VideoCapture(0);
engine->capture = cv::makePtr<cv::VideoCapture>(0);
union {double prop; const char* name;} u;
u.prop = engine->capture->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING);
u.prop = engine->capture->get(cv::CAP_PROP_ANDROID_PREVIEW_SIZES_STRING);
int view_width = ANativeWindow_getWidth(app->window);
int view_height = ANativeWindow_getHeight(app->window);
@@ -124,8 +136,8 @@ static void engine_handle_cmd(android_app* app, int32_t cmd)
if ((camera_resolution.width != 0) && (camera_resolution.height != 0))
{
engine->capture->set(CV_CAP_PROP_FRAME_WIDTH, camera_resolution.width);
engine->capture->set(CV_CAP_PROP_FRAME_HEIGHT, camera_resolution.height);
engine->capture->set(cv::CAP_PROP_FRAME_WIDTH, camera_resolution.width);
engine->capture->set(cv::CAP_PROP_FRAME_HEIGHT, camera_resolution.height);
}
float scale = std::min((float)view_width/camera_resolution.width,
@@ -199,7 +211,7 @@ void android_main(android_app* app)
if (!engine.capture.empty())
{
if (engine.capture->grab())
engine.capture->retrieve(drawing_frame, CV_CAP_ANDROID_COLOR_FRAME_RGBA);
engine.capture->retrieve(drawing_frame, cv::CAP_ANDROID_COLOR_FRAME_RGBA);
char buffer[256];
sprintf(buffer, "Display performance: %dx%d @ %.3f", drawing_frame.cols, drawing_frame.rows, fps);

View File

@@ -40,4 +40,4 @@ public class CvNativeActivity extends Activity {
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
}

View File

@@ -84,4 +84,4 @@ public class Tutorial3View extends JavaCameraView implements PictureCallback {
}
}
}

View File

@@ -57,4 +57,3 @@ if (INSTALL_C_EXAMPLES AND NOT WIN32)
DESTINATION share/OpenCV/samples/c
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif ()

View File

@@ -410,4 +410,3 @@ int main(int argc, char** argv )
return 0;
}

View File

@@ -751,6 +751,3 @@ int main(int argc, char* argv[])
return 0;
} /* main() */

View File

@@ -1,16 +1,16 @@
#!/bin/sh
if [ $# -gt 0 ] ; then
    base=`basename $1 .c`
    echo "compiling $base"
    gcc -ggdb `pkg-config opencv --cflags --libs` $base.c -o $base
else
    for i in *.c; do
        echo "compiling $i"
        gcc -ggdb `pkg-config --cflags opencv` -o `basename $i .c` $i `pkg-config --libs opencv`;
    done
    for i in *.cpp; do
        echo "compiling $i"
        g++ -ggdb `pkg-config --cflags opencv` -o `basename $i .cpp` $i `pkg-config --libs opencv`;
    done
fi

View File

@@ -13,6 +13,3 @@ ADD_EXECUTABLE(opencv_example minarea.c)
TARGET_LINK_LIBRARIES(opencv_example ${OpenCV_LIBS})
#MESSAGE(STATUS "OpenCV_LIBS: ${OpenCV_LIBS}")

View File

@@ -25,8 +25,3 @@ the CMake gui with:
$ cmake-gui <OPENCV_SRC_PATH>/samples/c/example_cmake/
And pick the correct value for OpenCV_DIR.

View File

@@ -114,4 +114,3 @@ int main( int argc, char** argv )
#ifdef _EiC
main(1,"convexhull.c");
#endif

View File

@@ -90,7 +90,7 @@ static int mushroom_read_database( const char* filename, CvMat** data, CvMat** m
}
cvReleaseMemStorage( &storage );
delete el_ptr;
delete [] el_ptr;
return 1;
}
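
The one-line fix here pairs an array new[] with delete[]; using scalar delete on a new[]-allocated buffer is undefined behavior. A minimal illustration (names are generic):

// el_ptr was allocated as an array:
float* el_ptr = new float[1024];
// ... use the buffer ...
delete [] el_ptr;   // correct: array delete matches array new
// delete el_ptr;   // wrong: scalar delete on an array allocation is UB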

View File

@@ -5,7 +5,7 @@
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc
opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree opencv_softcascade
opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab)
opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching opencv_videostab opencv_bioinspired)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
@@ -95,4 +95,3 @@ if (INSTALL_C_EXAMPLES AND NOT WIN32)
DESTINATION share/OpenCV/samples/cpp
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()

View File

@@ -10,8 +10,9 @@
#include <iostream>
#include <cstring>
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/bioinspired.hpp" // retina based algorithms
#include "opencv2/imgproc.hpp" // cvCvtcolor function
#include "opencv2/highgui.hpp" // display
static void help(std::string errorMessage)
{
@@ -127,7 +128,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
normalize(imageInputRescaled, imageInputRescaled, 0.0, 255.0, cv::NORM_MINMAX);
}
cv::Ptr<cv::Retina> retina;
cv::Ptr<cv::bioinspired::Retina> retina;
int retinaHcellsGain;
int localAdaptation_photoreceptors, localAdaptation_Gcells;
static void callBack_updateRetinaParams(int, void*)
@@ -175,6 +176,12 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
}
bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
int chosenMethod=0;
if (!strcmp(argv[argc-1], "fast"))
{
chosenMethod=1;
std::cout<<"Using fast method (no spectral whithning), adaptation of Meylan&al 2008 method"<<std::endl;
}
std::string inputImageName=argv[1];
@@ -210,17 +217,22 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
* -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
*/
if (useLogSampling)
{
retina = cv::createRetina(inputImage.size(),true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
{
retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
retina = cv::createRetina(inputImage.size());
retina = cv::bioinspired::createRetina(inputImage.size());
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
// create a fast retina tone mapper (Meyla&al algorithm)
std::cout<<"Allocating fast tone mapper..."<<std::endl;
//cv::Ptr<cv::RetinaFastToneMapping> fastToneMapper=createRetinaFastToneMapping(inputImage.size());
std::cout<<"Fast tone mapper allocated"<<std::endl;
// desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here
retina->activateMovingContoursProcessing(false);
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
// desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers
cv::Mat retinaOutput_parvo;
@@ -230,20 +242,19 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
histogramClippingValue=0; // default value... updated with interface slider
//inputRescaleMat = inputImage;
//outputRescaleMat = imageInputRescaled;
cv::namedWindow("Retina input image (with cut edges histogram for basic pixels error avoidance)",1);
cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
cv::namedWindow("Processing configuration",1);
cv::createTrackbar("histogram edges clipping limit", "Processing configuration",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
cv::namedWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", 1);
colorSaturationFactor=3;
cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
cv::createTrackbar("Color saturation", "Processing configuration", &colorSaturationFactor,5,callback_saturateColors);
retinaHcellsGain=40;
cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
cv::createTrackbar("Hcells gain", "Processing configuration",&retinaHcellsGain,100,callBack_updateRetinaParams);
localAdaptation_photoreceptors=197;
localAdaptation_Gcells=190;
cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
cv::createTrackbar("Ph sensitivity", "Processing configuration", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
cv::createTrackbar("Gcells sensitivity", "Processing configuration", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
/////////////////////////////////////////////
@@ -257,11 +268,28 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
while(continueProcessing)
{
// run retina filter
retina->run(imageInputRescaled);
// Retrieve and display retina output
retina->getParvo(retinaOutput_parvo);
cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
if (!chosenMethod)
{
retina->run(imageInputRescaled);
// Retrieve and display retina output
retina->getParvo(retinaOutput_parvo);
cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
cv::imwrite("HDRinput.jpg",imageInputRescaled/255.0);
cv::imwrite("RetinaToneMapping.jpg",retinaOutput_parvo);
}
else
{
// apply the simplified hdr tone mapping method
cv::Mat fastToneMappingOutput;
retina->applyFastToneMapping(imageInputRescaled, fastToneMappingOutput);
cv::imshow("Retina fast tone mapping output : 16bit=>8bit image retina tonemapping", fastToneMappingOutput);
}
/*cv::Mat fastToneMappingOutput_specificObject;
fastToneMapper->setup(3.f, 1.5f, 1.f);
fastToneMapper->applyFastToneMapping(imageInputRescaled, fastToneMappingOutput_specificObject);
cv::imshow("### Retina fast tone mapping output : 16bit=>8bit image retina tonemapping", fastToneMappingOutput_specificObject);
*/
cv::waitKey(10);
}
}catch(cv::Exception e)
@@ -274,5 +302,3 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
return 0;
}
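
After the move into the new bioinspired module, the minimal retina setup used by these samples becomes (a sketch distilled from the hunks above):

#include <opencv2/core.hpp>
#include <opencv2/bioinspired.hpp>

void retinaSketch(const cv::Mat& inputImage)
{
    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::createRetina(inputImage.size());
    retina->write("RetinaDefaultParameters.xml"); // dump tunable parameters
    retina->run(inputImage);                      // process one frame
    cv::Mat parvo;
    retina->getParvo(parvo);                      // detail/tone-mapped output
}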

View File

@@ -14,8 +14,9 @@
#include <stdio.h>
#include <cstring>
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/bioinspired.hpp" // retina based algorithms
#include "opencv2/imgproc.hpp" // cvCvtcolor function
#include "opencv2/highgui.hpp" // display
static void help(std::string errorMessage)
{
@@ -160,7 +161,7 @@ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, con
}
cv::Ptr<cv::Retina> retina;
cv::Ptr<cv::bioinspired::Retina> retina;
int retinaHcellsGain;
int localAdaptation_photoreceptors, localAdaptation_Gcells;
static void callBack_updateRetinaParams(int, void*)
@@ -280,10 +281,10 @@ static void loadNewFrame(const std::string filenamePrototype, const int currentF
*/
if (useLogSampling)
{
retina = cv::createRetina(inputImage.size(),true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
retina = cv::createRetina(inputImage.size());
retina = cv::bioinspired::createRetina(inputImage.size());
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
@@ -358,5 +359,3 @@ static void loadNewFrame(const std::string filenamePrototype, const int currentF
return 0;
}

View File

@@ -2563,19 +2563,19 @@ int main(int argc, char** argv)
Ptr<FeatureDetector> featureDetector = FeatureDetector::create( ddmParams.detectorType );
Ptr<DescriptorExtractor> descExtractor = DescriptorExtractor::create( ddmParams.descriptorType );
Ptr<BOWImgDescriptorExtractor> bowExtractor;
if( featureDetector.empty() || descExtractor.empty() )
if( !featureDetector || !descExtractor )
{
cout << "featureDetector or descExtractor was not created" << endl;
return -1;
}
{
Ptr<DescriptorMatcher> descMatcher = DescriptorMatcher::create( ddmParams.matcherType );
if( featureDetector.empty() || descExtractor.empty() || descMatcher.empty() )
if( !featureDetector || !descExtractor || !descMatcher )
{
cout << "descMatcher was not created" << endl;
return -1;
}
bowExtractor = new BOWImgDescriptorExtractor( descExtractor, descMatcher );
bowExtractor = makePtr<BOWImgDescriptorExtractor>( descExtractor, descMatcher );
}
// Print configuration to screen

View File

@@ -35,7 +35,7 @@ int main(int argc, char** argv)
setNumThreads(8);
Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
if (fgbg.empty())
if (!fgbg)
{
std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
return -1;
@@ -78,4 +78,3 @@ int main(int argc, char** argv)
return 0;
}

View File

@@ -332,4 +332,3 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
}

View File

@@ -22,7 +22,7 @@ class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
IDetector(),
Detector(detector)
{
CV_Assert(!detector.empty());
CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
@@ -51,11 +51,11 @@ int main(int , char** )
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);

View File

@@ -153,7 +153,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
{
cout << "< Evaluate descriptor matcher..." << endl;
vector<Point2f> curve;
Ptr<GenericDescriptorMatcher> gdm = new VectorDescriptorMatcher( descriptorExtractor, descriptorMatcher );
Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
Point2f firstPoint = *curve.begin();
@@ -253,7 +253,7 @@ int main(int argc, char** argv)
int mactherFilterType = getMatcherFilterType( argv[4] );
bool eval = !isWarpPerspective ? false : (atoi(argv[6]) == 0 ? false : true);
cout << ">" << endl;
if( detector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty() )
if( !detector || !descriptorExtractor || !descriptorMatcher )
{
cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
return -1;

View File

@@ -67,7 +67,7 @@ class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
CV_Assert(!detector.empty());
CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
@@ -117,11 +117,11 @@ static int test_FaceDetector(int argc, char *argv[])
}
std::string cascadeFrontalfilename=cascadefile;
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker fd(MainDetector, TrackingDetector, params);

View File

@@ -535,7 +535,7 @@ void DetectorQualityEvaluator::readAlgorithm ()
{
defaultDetector = FeatureDetector::create( algName );
specificDetector = FeatureDetector::create( algName );
if( defaultDetector.empty() )
if( !defaultDetector )
{
printf( "Algorithm can not be read\n" );
exit(-1);
@@ -769,14 +769,14 @@ void DescriptorQualityEvaluator::readAlgorithm( )
defaultDescMatcher = GenericDescriptorMatcher::create( algName );
specificDescMatcher = GenericDescriptorMatcher::create( algName );
if( defaultDescMatcher.empty() )
if( !defaultDescMatcher )
{
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create( algName );
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( matcherName );
defaultDescMatcher = new VectorDescriptorMatch( extractor, matcher );
specificDescMatcher = new VectorDescriptorMatch( extractor, matcher );
defaultDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
specificDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
if( extractor.empty() || matcher.empty() )
if( !extractor || !matcher )
{
printf("Algorithm can not be read\n");
exit(-1);
@@ -881,8 +881,9 @@ public:
virtual void readAlgorithm( )
{
string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
defaultDescMatcher = new VectorDescriptorMatch( new CalonderDescriptorExtractor<float>( classifierFile ),
new BFMatcher(NORM_L2) );
defaultDescMatcher = makePtr<VectorDescriptorMatch>(
makePtr<CalonderDescriptorExtractor<float> >( classifierFile ),
makePtr<BFMatcher>(int(NORM_L2)));
specificDescMatcher = defaultDescMatcher;
}
};
@@ -922,10 +923,11 @@ void OneWayDescriptorQualityTest::processRunParamsFile ()
readAllDatasetsRunParams();
OneWayDescriptorBase *base = new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
trainPath, trainImagesList);
Ptr<OneWayDescriptorBase> base(
new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
trainPath, trainImagesList));
OneWayDescriptorMatch *match = new OneWayDescriptorMatch ();
Ptr<OneWayDescriptorMatch> match = makePtr<OneWayDescriptorMatch>();
match->initialize( OneWayDescriptorMatch::Params (), base );
defaultDescMatcher = match;
writeAllDatasetsRunParams();
@@ -958,18 +960,18 @@ int main( int argc, char** argv )
Ptr<BaseQualityEvaluator> evals[] =
{
new DetectorQualityEvaluator( "FAST", "quality-detector-fast" ),
new DetectorQualityEvaluator( "GFTT", "quality-detector-gftt" ),
new DetectorQualityEvaluator( "HARRIS", "quality-detector-harris" ),
new DetectorQualityEvaluator( "MSER", "quality-detector-mser" ),
new DetectorQualityEvaluator( "STAR", "quality-detector-star" ),
new DetectorQualityEvaluator( "SIFT", "quality-detector-sift" ),
new DetectorQualityEvaluator( "SURF", "quality-detector-surf" ),
makePtr<DetectorQualityEvaluator>( "FAST", "quality-detector-fast" ),
makePtr<DetectorQualityEvaluator>( "GFTT", "quality-detector-gftt" ),
makePtr<DetectorQualityEvaluator>( "HARRIS", "quality-detector-harris" ),
makePtr<DetectorQualityEvaluator>( "MSER", "quality-detector-mser" ),
makePtr<DetectorQualityEvaluator>( "STAR", "quality-detector-star" ),
makePtr<DetectorQualityEvaluator>( "SIFT", "quality-detector-sift" ),
makePtr<DetectorQualityEvaluator>( "SURF", "quality-detector-surf" ),
new DescriptorQualityEvaluator( "SIFT", "quality-descriptor-sift", "BruteForce" ),
new DescriptorQualityEvaluator( "SURF", "quality-descriptor-surf", "BruteForce" ),
new DescriptorQualityEvaluator( "FERN", "quality-descriptor-fern"),
new CalonderDescriptorQualityEvaluator()
makePtr<DescriptorQualityEvaluator>( "SIFT", "quality-descriptor-sift", "BruteForce" ),
makePtr<DescriptorQualityEvaluator>( "SURF", "quality-descriptor-surf", "BruteForce" ),
makePtr<DescriptorQualityEvaluator>( "FERN", "quality-descriptor-fern"),
makePtr<CalonderDescriptorQualityEvaluator>()
};
for( size_t i = 0; i < sizeof(evals)/sizeof(evals[0]); i++ )

View File

@@ -80,4 +80,3 @@ int main(int argc, const char ** argv)
waitKey();
return 0;
}

samples/cpp/erfilter.cpp Normal file (120 lines)
View File

@@ -0,0 +1,120 @@
//--------------------------------------------------------------------------------------------------
// A demo program of the Extremal Region Filter algorithm described in
// Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
//--------------------------------------------------------------------------------------------------
#include "opencv2/opencv.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>
#include <iostream>
#include <iomanip>
using namespace std;
using namespace cv;
void er_draw(Mat &src, Mat &dst, ERStat& er);
void er_draw(Mat &src, Mat &dst, ERStat& er)
{
if (er.parent != NULL) // omit the root region
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
floodFill(src,dst,Point(er.pixel%src.cols,er.pixel/src.cols),Scalar(255),0,Scalar(er.level),Scalar(0),flags);
}
}
int main(int argc, const char * argv[])
{
vector<ERStat> regions;
if (argc < 2) {
cout << "Demo program of the Extremal Region Filter algorithm described in " << endl;
cout << "Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012" << endl << endl;
cout << " Usage: " << argv[0] << " input_image <optional_groundtruth_image>" << endl;
cout << " Default classifier files (trained_classifierNM*.xml) should be in ./" << endl;
return -1;
}
Mat original = imread(argv[1]);
Mat gt;
if (argc > 2)
{
gt = imread(argv[2]);
cvtColor(gt, gt, COLOR_RGB2GRAY);
threshold(gt, gt, 254, 255, THRESH_BINARY);
}
Mat grey(original.size(),CV_8UC1);
cvtColor(original,grey,COLOR_RGB2GRAY);
double t = (double)getTickCount();
// Build ER tree and filter with the 1st stage default classifier
Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"));
er_filter1->run(grey, regions);
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t FIRST STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << setw(9) << regions.size()+er_filter1->getNumRejected() << "\t Extremal Regions extracted " << endl;
cout << setw(9) << regions.size() << "\t Extremal Regions selected by the first stage of the sequential classifier." << endl;
cout << "\t \t (saving into out_second_stage.jpg)" << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
er_filter1.release();
// draw regions
Mat mask = Mat::zeros(grey.rows+2,grey.cols+2,CV_8UC1);
for (int r=0; r<(int)regions.size(); r++)
er_draw(grey, mask, regions.at(r));
mask = 255-mask;
imwrite("out_first_stage.jpg", mask);
if (argc > 2)
{
Mat tmp_mask = (255-gt) & (255-mask(Rect(Point(1,1),Size(mask.cols-2,mask.rows-2))));
cout << "Recall for the 1st stage filter = " << (float)countNonZero(tmp_mask) / countNonZero(255-gt) << endl;
}
t = (double)getTickCount();
// Default second stage classifier
Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"));
er_filter2->run(grey, regions);
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t SECOND STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << setw(9) << regions.size() << "\t Extremal Regions selected by the second stage of the sequential classifier." << endl;
cout << "\t \t (saving into out_second_stage.jpg)" << endl;
cout << " --------------------------------------------------------------------------------------------------" << endl;
er_filter2.release();
// draw regions
mask = mask*0;
for (int r=0; r<(int)regions.size(); r++)
er_draw(grey, mask, regions.at(r));
mask = 255-mask;
imwrite("out_second_stage.jpg", mask);
if (argc > 2)
{
Mat tmp_mask = (255-gt) & (255-mask(Rect(Point(1,1),Size(mask.cols-2,mask.rows-2))));
cout << "Recall for the 2nd stage filter = " << (float)countNonZero(tmp_mask) / countNonZero(255-gt) << endl;
}
regions.clear();
}
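
Condensed, the new sample's detection pipeline is two classifier passes over the same region vector (a sketch distilled from the code above; it assumes the trained_classifierNM*.xml files are in the working directory):

    Mat grey = imread("scenetext.jpg", IMREAD_GRAYSCALE);
    vector<ERStat> regions;
    Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"));
    Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"));
    er_filter1->run(grey, regions); // stage 1: prune the full extremal-region tree
    er_filter2->run(grey, regions); // stage 2: re-score the surviving regions in place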

View File

@@ -131,11 +131,11 @@ int main(int argc, char * argv[]) {
//generate test data
cout << "Extracting Test Data from images" << endl <<
endl;
Ptr<FeatureDetector> detector =
Ptr<FeatureDetector> detector(
new DynamicAdaptedFeatureDetector(
AdjusterAdapter::create("STAR"), 130, 150, 5);
Ptr<DescriptorExtractor> extractor =
new SurfDescriptorExtractor(1000, 4, 2, false, true);
AdjusterAdapter::create("STAR"), 130, 150, 5));
Ptr<DescriptorExtractor> extractor(
new SurfDescriptorExtractor(1000, 4, 2, false, true));
Ptr<DescriptorMatcher> matcher =
DescriptorMatcher::create("FlannBased");
@@ -183,8 +183,8 @@ int main(int argc, char * argv[]) {
endl;
Ptr<of2::FabMap> fabmap;
fabmap = new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
of2::FabMap::CHOW_LIU);
fabmap.reset(new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
of2::FabMap::CHOW_LIU));
fabmap->addTraining(trainData);
vector<of2::IMatch> matches;

View File

@@ -56,7 +56,7 @@ static void help( char** argv )
}
int main( int argc, char** argv ) {
// check http://opencv.itseez.com/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
// check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
// for OpenCV general detection/matching framework details
if( argc != 3 ) {

View File

@@ -33,7 +33,7 @@ int main(int argc, char** argv)
std::string params_filename = std::string(argv[4]);
Ptr<GenericDescriptorMatcher> descriptorMatcher = GenericDescriptorMatcher::create(alg_name, params_filename);
if( descriptorMatcher.empty() )
if( !descriptorMatcher )
{
printf ("Cannot create descriptor\n");
return 0;

View File

@@ -61,4 +61,3 @@ int main(int argc, char** argv)
return 0;
}

View File

@@ -31,8 +31,8 @@ int main( int argc, char** argv )
help();
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#if DEMO_MIXED_API_USE
Ptr<IplImage> iplimg = cvLoadImage(imagename); // Ptr<T> is safe ref-conting pointer class
if(iplimg.empty())
Ptr<IplImage> iplimg(cvLoadImage(imagename)); // Ptr<T> is safe ref-counting pointer class
if(!iplimg)
{
fprintf(stderr, "Can not load image %s\n", imagename);
return -1;

View File

@@ -0,0 +1,57 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
static void help(char** argv)
{
cout << "\nThis sample shows you how to read a sequence of images using the VideoCapture interface.\n"
<< "Usage: " << argv[0] << " <image_mask> (example mask: example_%%02d.jpg)\n"
<< "Image mask defines the name variation for the input images that have to be read as a sequence. \n"
<< "Using the mask example_%%02d.jpg will read in images labeled as 'example_00.jpg', 'example_01.jpg', etc."
<< endl;
}
int main(int argc, char** argv)
{
if(argc != 2)
{
help(argv);
return 1;
}
string first_file = argv[1];
VideoCapture sequence(first_file);
if (!sequence.isOpened())
{
cerr << "Failed to open the image sequence!\n" << endl;
return 1;
}
Mat image;
namedWindow("Image sequence | press ESC to close", 1);
for(;;)
{
// Read in image from sequence
sequence >> image;
// If no image was retrieved -> end of sequence
if(image.empty())
{
cout << "End of Sequence" << endl;
break;
}
imshow("Image sequence | press ESC to close", image);
if(waitKey(500) == 27)
break;
}
return 0;
}
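
The same printf-style mask works when a capture is opened programmatically; a short sketch (file names are illustrative):

    cv::VideoCapture seq("frame_%02d.png"); // frame_00.png, frame_01.png, ...
    cv::Mat img;
    while (seq.read(img)) // read() returns false once the pattern stops matching
    {
        cv::imshow("frame", img);
        if (cv::waitKey(100) == 27)
            break;
    }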

View File

@@ -3,7 +3,7 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#ifdef WIN32
#if defined(WIN32) || defined(_WIN32)
#include <io.h>
#else
#include <dirent.h>
@@ -59,7 +59,7 @@ static void readDirectory( const string& directoryName, vector<String>& filename
{
filenames.clear();
#ifdef WIN32
#if defined(WIN32) || defined(_WIN32)
struct _finddata_t s_file;
string str = directoryName + "\\*.*";

View File

@@ -114,7 +114,7 @@ private:
// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
cv::FileStorage fs(filename, cv::FileStorage::READ);
detector->read(fs.root());

samples/cpp/lsd_lines.cpp Normal file (54 lines)
View File

@@ -0,0 +1,54 @@
#include <iostream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
std::string in;
if (argc != 2)
{
std::cout << "Usage: lsd_lines [input image]. Now loading building.jpg" << std::endl;
in = "building.jpg";
}
else
{
in = argv[1];
}
Mat image = imread(in, IMREAD_GRAYSCALE);
#if 0
Canny(image, image, 50, 200, 3); // Apply canny edge
#endif
// Create an LSD detector with standard or no refinement.
#if 1
Ptr<LineSegmentDetector> ls = createLineSegmentDetectorPtr(LSD_REFINE_STD);
#else
Ptr<LineSegmentDetector> ls = createLineSegmentDetectorPtr(LSD_REFINE_NONE);
#endif
double start = double(getTickCount());
vector<Vec4i> lines_std;
// Detect the lines
ls->detect(image, lines_std);
double duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();
std::cout << "It took " << duration_ms << " ms." << std::endl;
// Show found lines
Mat drawnLines(image);
ls->drawSegments(drawnLines, lines_std);
imshow("Standard refinement", drawnLines);
waitKey();
return 0;
}
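
One subtlety above: Mat drawnLines(image) creates a new header over the same pixel buffer, so ls->drawSegments() also writes into image. If the grayscale input must stay untouched, clone it instead (sketch):

    Mat drawnLines = image.clone(); // deep copy; drawing no longer touches 'image'
    ls->drawSegments(drawnLines, lines_std);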

View File

@@ -84,7 +84,7 @@ static bool createDetectorDescriptorMatcher( const string& detectorType, const s
descriptorMatcher = DescriptorMatcher::create( matcherType );
cout << ">" << endl;
bool isCreated = !( featureDetector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty() );
bool isCreated = featureDetector && descriptorExtractor && descriptorMatcher;
if( !isCreated )
cout << "Can not create feature detector or descriptor extractor or descriptor matcher of given types." << endl << ">" << endl;

View File

@@ -43,7 +43,3 @@ int main(int, char* [])
return 0;
}

View File

@@ -9,7 +9,7 @@
#include <iostream>
#include <cstring>
#include "opencv2/contrib.hpp"
#include "opencv2/bioinspired.hpp"
#include "opencv2/highgui.hpp"
static void help(std::string errorMessage)
@@ -106,15 +106,15 @@ int main(int argc, char* argv[]) {
try
{
// create a retina instance with default parameters setup, uncomment the initialisation you wanna test
cv::Ptr<cv::Retina> myRetina;
cv::Ptr<cv::bioinspired::Retina> myRetina;
// if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
if (useLogSampling)
{
myRetina = cv::createRetina(inputFrame.size(), true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
myRetina = cv::bioinspired::createRetina(inputFrame.size(), true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
myRetina = cv::createRetina(inputFrame.size());
myRetina = cv::bioinspired::createRetina(inputFrame.size());
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
myRetina->write("RetinaDefaultParameters.xml");
@@ -143,7 +143,8 @@ int main(int argc, char* argv[]) {
cv::imshow("retina input", inputFrame);
cv::imshow("Retina Parvo", retinaOutput_parvo);
cv::imshow("Retina Magno", retinaOutput_magno);
cv::waitKey(10);
cv::waitKey(5);
}
}catch(cv::Exception e)
{
@@ -155,4 +156,3 @@ int main(int argc, char* argv[]) {
return 0;
}
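
The retina API moved wholesale from cv:: into the new bioinspired module; adapting existing code is mostly a matter of the header and namespace. A sketch of the minimal new-style usage:

    #include "opencv2/bioinspired.hpp"

    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::createRetina(inputFrame.size()); // "classical" allocation
    retina->run(inputFrame);
    cv::Mat parvo, magno;
    retina->getParvo(parvo); // foveal / detail channel
    retina->getMagno(magno); // peripheral / motion channel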

BIN samples/cpp/scenetext.jpg Normal file (new binary image, 83 KiB; preview not shown)

BIN (second new binary file, 3.1 KiB; name not shown in this view)

View File

@@ -41,15 +41,15 @@ namespace {
cout << "press space to save a picture. q or esc to quit" << endl;
namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame;
for (;;) {
capture >> frame;
if (frame.empty())
break;
imshow(window_name, frame);
char key = (char)waitKey(30); //delay N millis, usually long enough to display and capture input
switch (key) {
case 'q':
case 'Q':

View File

@@ -14,15 +14,12 @@
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back):
http://opencvlibrary.sourceforge.net/
* An active user group is at:
http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at:
http://code.opencv.org/projects/opencv/wiki/Meeting_notes
OPENCV WEBSITES:
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
Q&A forum: http://answers.opencv.org
Issue tracker: http://code.opencv.org
GitHub: https://github.com/Itseez/opencv/
************************************************** */
#include "opencv2/calib3d/calib3d.hpp"
@@ -404,4 +401,3 @@ int main(int argc, char** argv)
StereoCalib(imagelist, boardSize, true, showRectified);
return 0;
}

View File

@@ -134,5 +134,3 @@ int parseCmdArgs(int argc, char** argv)
}
return 0;
}

View File

@@ -358,14 +358,14 @@ int main(int argc, char* argv[])
{
#ifdef HAVE_OPENCV_NONFREE
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
finder = new SurfFeaturesFinderGpu();
finder = makePtr<SurfFeaturesFinderGpu>();
else
#endif
finder = new SurfFeaturesFinder();
finder = makePtr<SurfFeaturesFinder>();
}
else if (features_type == "orb")
{
finder = new OrbFeaturesFinder();
finder = makePtr<OrbFeaturesFinder>();
}
else
{
@@ -469,7 +469,11 @@ int main(int argc, char* argv[])
HomographyBasedEstimator estimator;
vector<CameraParams> cameras;
estimator(features, pairwise_matches, cameras);
if (!estimator(features, pairwise_matches, cameras))
{
cout << "Homography estimation failed.\n";
return -1;
}
for (size_t i = 0; i < cameras.size(); ++i)
{
@@ -480,8 +484,8 @@ int main(int argc, char* argv[])
}
Ptr<detail::BundleAdjusterBase> adjuster;
if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
if (ba_cost_func == "reproj") adjuster = makePtr<detail::BundleAdjusterReproj>();
else if (ba_cost_func == "ray") adjuster = makePtr<detail::BundleAdjusterRay>();
else
{
cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
@@ -495,7 +499,11 @@ int main(int argc, char* argv[])
if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
adjuster->setRefinementMask(refine_mask);
(*adjuster)(features, pairwise_matches, cameras);
if (!(*adjuster)(features, pairwise_matches, cameras))
{
cout << "Camera parameters adjusting failed.\n";
return -1;
}
// Find median focal length
@@ -547,31 +555,49 @@ int main(int argc, char* argv[])
#ifdef HAVE_OPENCV_GPUWARPING
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
{
if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarperGpu>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarperGpu>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarperGpu>();
}
else
#endif
{
if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarper>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarper>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarper>();
else if (warp_type == "fisheye")
warper_creator = makePtr<cv::FisheyeWarper>();
else if (warp_type == "stereographic")
warper_creator = makePtr<cv::StereographicWarper>();
else if (warp_type == "compressedPlaneA2B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlaneA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
else if (warp_type == "compressedPlanePortraitA2B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlanePortraitA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "paniniA2B1")
warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
else if (warp_type == "paniniA1.5B1")
warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
else if (warp_type == "paniniPortraitA2B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "paniniPortraitA1.5B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "mercator")
warper_creator = makePtr<cv::MercatorWarper>();
else if (warp_type == "transverseMercator")
warper_creator = makePtr<cv::TransverseMercatorWarper>();
}
if (warper_creator.empty())
if (!warper_creator)
{
cout << "Can't create the following warper '" << warp_type << "'\n";
return 1;
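
Note that the warper arguments also changed from (2, 1) to (2.0f, 1.0f): makePtr forwards its arguments straight to the constructor, and the explicit float literals keep the forwarded types matched to the constructors' float parameters (a reading of the change, not stated in the commit). For example:

    // CompressedRectilinearWarper takes (float A, float B).
    warper_creator = cv::makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);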
@@ -604,32 +630,32 @@ int main(int argc, char* argv[])
Ptr<SeamFinder> seam_finder;
if (seam_find_type == "no")
seam_finder = new detail::NoSeamFinder();
seam_finder = makePtr<detail::NoSeamFinder>();
else if (seam_find_type == "voronoi")
seam_finder = new detail::VoronoiSeamFinder();
seam_finder = makePtr<detail::VoronoiSeamFinder>();
else if (seam_find_type == "gc_color")
{
#ifdef HAVE_OPENCV_GPU
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR);
else
#endif
seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
}
else if (seam_find_type == "gc_colorgrad")
{
#ifdef HAVE_OPENCV_GPU
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
else
#endif
seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
}
else if (seam_find_type == "dp_color")
seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR);
else if (seam_find_type == "dp_colorgrad")
seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
if (seam_finder.empty())
seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR_GRAD);
if (!seam_finder)
{
cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
return 1;
@@ -727,7 +753,7 @@ int main(int argc, char* argv[])
resize(dilated_mask, seam_mask, mask_warped.size());
mask_warped = seam_mask & mask_warped;
if (blender.empty())
if (!blender)
{
blender = Blender::createDefault(blend_type, try_gpu);
Size dst_sz = resultRoi(corners, sizes).size();
@@ -736,13 +762,13 @@ int main(int argc, char* argv[])
blender = Blender::createDefault(Blender::NO, try_gpu);
else if (blend_type == Blender::MULTI_BAND)
{
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
LOGLN("Multi-band blender, number of bands: " << mb->numBands());
}
else if (blend_type == Blender::FEATHER)
{
FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
FeatherBlender* fb = dynamic_cast<FeatherBlender*>(blender.get());
fb->setSharpness(1.f/blend_width);
LOGLN("Feather blender, sharpness: " << fb->sharpness());
}
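
The blender casts change for the same underlying reason: the new Ptr<T> no longer converts to a raw T* via static_cast, so the pointer for a dynamic_cast now comes from get(). Condensed from the hunk above:

    Ptr<Blender> blender = Blender::createDefault(Blender::MULTI_BAND, try_gpu);
    MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
    if (mb)
        mb->setNumBands(5); // 5 is an arbitrary illustrative value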
@@ -763,5 +789,3 @@ int main(int argc, char* argv[])
LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
return 0;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -76,5 +76,3 @@ void Morphology_Operations( int, void* )
morphologyEx( src, dst, operation, element );
imshow( window_name, dst );
}

View File

@@ -66,10 +66,3 @@ int main( void )
return 0;
}

View File

@@ -61,5 +61,3 @@ int main( int, char** argv )
return 0;
}

View File

@@ -15,7 +15,6 @@ using namespace cv;
Mat src, dst;
int top, bottom, left, right;
int borderType;
Scalar value;
const char* window_name = "copyMakeBorder Demo";
RNG rng(12345);
@@ -64,7 +63,7 @@ int main( int, char** argv )
else if( (char)c == 'r' )
{ borderType = BORDER_REPLICATE; }
value = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
Scalar value( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
imshow( window_name, dst );
@@ -72,5 +71,3 @@ int main( int, char** argv )
return 0;
}

View File

@@ -92,4 +92,3 @@ void thresh_callback(int, void* )
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}

View File

@@ -79,5 +79,3 @@ int main( void )
waitKey(0);
return(0);
}

View File

@@ -120,4 +120,3 @@ void myHarris_function( int, void* )
}
imshow( myHarris_window, myHarris_copy );
}

View File

@@ -102,4 +102,3 @@ void goodFeaturesToTrack_Demo( int, void* )
for( size_t i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}

View File

@@ -90,4 +90,3 @@ void goodFeaturesToTrack_Demo( int, void* )
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
}

View File

@@ -9,7 +9,7 @@
#include <iostream>
#include <cstring>
#include "opencv2/contrib.hpp"
#include "opencv2/bioinspired.hpp"
#include "opencv2/highgui.hpp"
static void help(std::string errorMessage)
@@ -95,16 +95,16 @@ int main(int argc, char* argv[]) {
try
{
// create a retina instance with default parameters setup, uncomment the initialisation you wanna test
cv::Ptr<cv::Retina> myRetina;
cv::Ptr<cv::bioinspired::Retina> myRetina;
// if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
if (useLogSampling)
{
myRetina = cv::createRetina(inputFrame.size(), true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
myRetina = cv::bioinspired::createRetina(inputFrame.size(), true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
{
myRetina = cv::createRetina(inputFrame.size());
myRetina = cv::bioinspired::createRetina(inputFrame.size());
}
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"

View File

@@ -168,5 +168,3 @@ void MyLine( Mat img, Point start, Point end )
thickness,
lineType );
}

View File

@@ -75,4 +75,4 @@ int main(int argc, char ** argv)
waitKey();
return 0;
}
}

View File

@@ -151,4 +151,4 @@ int main(int ac, char** av)
<< "Tip: Open up " << filename << " with a text editor to see the serialized data." << endl;
return 0;
}
}

View File

@@ -214,4 +214,4 @@ Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar* const table)
}
return I;
}
}

View File

@@ -32,8 +32,8 @@ int main( int argc, char** argv )
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#ifdef DEMO_MIXED_API_USE
Ptr<IplImage> IplI = cvLoadImage(imagename); // Ptr<T> is safe ref-counting pointer class
if(IplI.empty())
Ptr<IplImage> IplI(cvLoadImage(imagename)); // Ptr<T> is a safe ref-counting pointer class
if(!IplI)
{
cerr << "Can not load image " << imagename << endl;
return -1;

View File

@@ -84,4 +84,4 @@ void Sharpen(const Mat& myImage,Mat& Result)
Result.row(Result.rows-1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
Result.col(Result.cols-1).setTo(Scalar(0));
}
}

View File

@@ -82,4 +82,4 @@ int main(int,char**)
cout << "A vector of 2D Points = " << vPoints << endl << endl;
return 0;
}
}

View File

@@ -430,4 +430,3 @@ Scalar getMSSIM_GPU_optimized( const Mat& i1, const Mat& i2, BufferMSSIM& b)
}
return mssim;
}

View File

@@ -27,4 +27,4 @@ int main( int argc, char** argv )
waitKey(0); // Wait for a keystroke in the window
return 0;
}
}

View File

@@ -203,4 +203,4 @@ Scalar getMSSIM( const Mat& i1, const Mat& i2)
Scalar mssim = mean( ssim_map ); // mssim = average of ssim map
return mssim;
}
}

View File

@@ -127,4 +127,4 @@ int main()
imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user
waitKey(0);
}
}

View File

@@ -1,14 +1,6 @@
/**
* @file objectDetection.cpp
* @author A. Huaman ( based in the classic facedetect.cpp in samples/c )
* @brief A simplified version of facedetect.cpp, show how to load a cascade classifier and how to find objects (Face + eyes) in a video stream
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -20,79 +12,73 @@ using namespace cv;
void detectAndDisplay( Mat frame );
/** Global variables */
//-- Note, either copy these two files from opencv/data/haarcascades to your current folder, or change these locations
string face_cascade_name = "haarcascade_frontalface_alt.xml";
string eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
String window_name = "Capture - Face detection";
/**
* @function main
*/
/** @function main */
int main( void )
{
CvCapture* capture;
Mat frame;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
for(;;)
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
frame = cv::cvarrToMat(cvQueryFrame( capture ));
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
int c = waitKey(10);
if( (char)c == 27 ) { break; } // escape
}
}
return 0;
return 0;
}
/**
* @function detectAndDisplay
*/
/** @function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
for( size_t i = 0; i < faces.size(); i++ )
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
}
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
//-- Show what you got
imshow( window_name, frame );
}
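
The capture-loop rewrite above is the standard C-to-C++ API migration; side by side it reduces to this condensed sketch:

    // old C API:
    //   CvCapture* capture = cvCaptureFromCAM(-1);
    //   Mat frame = cv::cvarrToMat(cvQueryFrame(capture));
    // new C++ API:
    cv::VideoCapture capture(-1); // -1 opens the default camera
    cv::Mat frame;
    while (capture.read(frame))   // read() fails at end of stream
        detectAndDisplay(frame);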

View File

@@ -3,12 +3,9 @@
* @author A. Huaman ( based in the classic facedetect.cpp in samples/c )
* @brief A simplified version of facedetect.cpp, show how to load a cascade classifier and how to find objects (Face + eyes) in a video stream - Using LBP here
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
@@ -20,46 +17,43 @@ using namespace cv;
void detectAndDisplay( Mat frame );
/** Global variables */
string face_cascade_name = "lbpcascade_frontalface.xml";
string eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
String face_cascade_name = "lbpcascade_frontalface.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
String window_name = "Capture - Face detection";
/**
* @function main
*/
int main( void )
{
CvCapture* capture;
Mat frame;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascade
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 1. Load the cascade
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
for(;;)
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
frame = cv::cvarrToMat(cvQueryFrame( capture ));
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
//-- bail out if escape was pressed
int c = waitKey(10);
if( (char)c == 27 ) { break; }
}
}
return 0;
return 0;
}
/**
@@ -67,37 +61,37 @@ int main( void )
*/
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(80, 80) );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(80, 80) );
for( size_t i = 0; i < faces.size(); i++ )
for( size_t i = 0; i < faces.size(); i++ )
{
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
if( eyes.size() == 2)
{
//-- Draw the face
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
if( eyes.size() == 2)
{
//-- Draw the face
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );
for( size_t j = 0; j < eyes.size(); j++ )
{ //-- Draw the eyes
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 255 ), 3, 8, 0 );
}
}
for( size_t j = 0; j < eyes.size(); j++ )
{ //-- Draw the eyes
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 255 ), 3, 8, 0 );
}
}
}
//-- Show what you got
imshow( window_name, frame );
//-- Show what you got
imshow( window_name, frame );
}

View File

@@ -152,7 +152,7 @@ int main(int ac, char ** av)
Mat train_desc, query_desc;
const int DESIRED_FTRS = 500;
GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);
GridAdaptedFeatureDetector detector(makePtr<FastFeatureDetector>(10, true), DESIRED_FTRS, 4, 4);
Mat H_prev = Mat::eye(3, 3, CV_32FC1);
for (;;)

View File

@@ -193,7 +193,7 @@ public:
virtual Ptr<ImageMotionEstimatorBase> build()
{
MotionEstimatorRansacL2 *est = new MotionEstimatorRansacL2(motionModel(arg(prefix + "model")));
Ptr<MotionEstimatorRansacL2> est = makePtr<MotionEstimatorRansacL2>(motionModel(arg(prefix + "model")));
RansacParams ransac = est->ransacParams();
if (arg(prefix + "subset") != "auto")
@@ -205,10 +205,10 @@ public:
est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
Ptr<IOutlierRejector> outlierRejector = makePtr<NullOutlierRejector>();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
Ptr<TranslationBasedLocalOutlierRejector> tblor = makePtr<TranslationBasedLocalOutlierRejector>();
RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
@@ -219,14 +219,14 @@ public:
#if defined(HAVE_OPENCV_GPUIMGPROC) && defined(HAVE_OPENCV_GPU) && defined(HAVE_OPENCV_GPUOPTFLOW)
if (gpu)
{
KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
Ptr<KeypointBasedMotionEstimatorGpu> kbest = makePtr<KeypointBasedMotionEstimatorGpu>(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
#endif
KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
kbest->setDetector(makePtr<GoodFeaturesToTrackDetector>(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
@@ -244,12 +244,12 @@ public:
virtual Ptr<ImageMotionEstimatorBase> build()
{
MotionEstimatorL1 *est = new MotionEstimatorL1(motionModel(arg(prefix + "model")));
Ptr<MotionEstimatorL1> est = makePtr<MotionEstimatorL1>(motionModel(arg(prefix + "model")));
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
Ptr<IOutlierRejector> outlierRejector = makePtr<NullOutlierRejector>();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
Ptr<TranslationBasedLocalOutlierRejector> tblor = makePtr<TranslationBasedLocalOutlierRejector>();
RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
@@ -260,14 +260,14 @@ public:
#if defined(HAVE_OPENCV_GPUIMGPROC) && defined(HAVE_OPENCV_GPU) && defined(HAVE_OPENCV_GPUOPTFLOW)
if (gpu)
{
KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
Ptr<KeypointBasedMotionEstimatorGpu> kbest = makePtr<KeypointBasedMotionEstimatorGpu>(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
#endif
KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
kbest->setDetector(makePtr<GoodFeaturesToTrackDetector>(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
@@ -363,7 +363,7 @@ int main(int argc, const char **argv)
// get source video parameters
VideoFileSource *source = new VideoFileSource(inputPath);
Ptr<VideoFileSource> source = makePtr<VideoFileSource>(inputPath);
cout << "frame count (rough): " << source->count() << endl;
if (arg("fps") == "auto")
outputFps = source->fps();
@@ -374,15 +374,15 @@ int main(int argc, const char **argv)
Ptr<IMotionEstimatorBuilder> motionEstBuilder;
if (arg("lin-prog-motion-est") == "yes")
motionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes");
motionEstBuilder.reset(new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes"));
else
motionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes");
motionEstBuilder.reset(new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes"));
Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder;
if (arg("ws-lp") == "yes")
wsMotionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-");
wsMotionEstBuilder.reset(new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-"));
else
wsMotionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-");
wsMotionEstBuilder.reset(new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-"));
// determine whether we must use one pass or two pass stabilizer
bool isTwoPass =
@@ -400,7 +400,7 @@ int main(int argc, const char **argv)
if (arg("lin-prog-stab") == "yes")
{
LpMotionStabilizer *stab = new LpMotionStabilizer();
Ptr<LpMotionStabilizer> stab = makePtr<LpMotionStabilizer>();
stab->setFrameSize(Size(source->width(), source->height()));
stab->setTrimRatio(arg("lps-trim-ratio") == "auto" ? argf("trim-ratio") : argf("lps-trim-ratio"));
stab->setWeight1(argf("lps-w1"));
@@ -410,18 +410,18 @@ int main(int argc, const char **argv)
twoPassStabilizer->setMotionStabilizer(stab);
}
else if (arg("stdev") == "auto")
twoPassStabilizer->setMotionStabilizer(new GaussianMotionFilter(argi("radius")));
twoPassStabilizer->setMotionStabilizer(makePtr<GaussianMotionFilter>(argi("radius")));
else
twoPassStabilizer->setMotionStabilizer(new GaussianMotionFilter(argi("radius"), argf("stdev")));
twoPassStabilizer->setMotionStabilizer(makePtr<GaussianMotionFilter>(argi("radius"), argf("stdev")));
// init wobble suppressor if necessary
if (arg("wobble-suppress") == "yes")
{
MoreAccurateMotionWobbleSuppressorBase *ws = new MoreAccurateMotionWobbleSuppressor();
Ptr<MoreAccurateMotionWobbleSuppressorBase> ws = makePtr<MoreAccurateMotionWobbleSuppressor>();
if (arg("gpu") == "yes")
#ifdef HAVE_OPENCV_GPU
ws = new MoreAccurateMotionWobbleSuppressorGpu();
ws = makePtr<MoreAccurateMotionWobbleSuppressorGpu>();
#else
throw runtime_error("OpenCV is built without GPU support");
#endif
@@ -433,12 +433,12 @@ int main(int argc, const char **argv)
MotionModel model = ws->motionEstimator()->motionModel();
if (arg("load-motions2") != "no")
{
ws->setMotionEstimator(new FromFileMotionReader(arg("load-motions2")));
ws->setMotionEstimator(makePtr<FromFileMotionReader>(arg("load-motions2")));
ws->motionEstimator()->setMotionModel(model);
}
if (arg("save-motions2") != "no")
{
ws->setMotionEstimator(new ToFileMotionWriter(arg("save-motions2"), ws->motionEstimator()));
ws->setMotionEstimator(makePtr<ToFileMotionWriter>(arg("save-motions2"), ws->motionEstimator()));
ws->motionEstimator()->setMotionModel(model);
}
}
@@ -450,26 +450,26 @@ int main(int argc, const char **argv)
OnePassStabilizer *onePassStabilizer = new OnePassStabilizer();
stabilizer = onePassStabilizer;
if (arg("stdev") == "auto")
onePassStabilizer->setMotionFilter(new GaussianMotionFilter(argi("radius")));
onePassStabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(argi("radius")));
else
onePassStabilizer->setMotionFilter(new GaussianMotionFilter(argi("radius"), argf("stdev")));
onePassStabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(argi("radius"), argf("stdev")));
}
stabilizer->setFrameSource(source);
stabilizer->setMotionEstimator(motionEstBuilder->build());
// cast stabilizer to simple frame source interface to read stabilized frames
stabilizedFrames = dynamic_cast<IFrameSource*>(stabilizer);
stabilizedFrames.reset(dynamic_cast<IFrameSource*>(stabilizer));
MotionModel model = stabilizer->motionEstimator()->motionModel();
if (arg("load-motions") != "no")
{
stabilizer->setMotionEstimator(new FromFileMotionReader(arg("load-motions")));
stabilizer->setMotionEstimator(makePtr<FromFileMotionReader>(arg("load-motions")));
stabilizer->motionEstimator()->setMotionModel(model);
}
if (arg("save-motions") != "no")
{
stabilizer->setMotionEstimator(new ToFileMotionWriter(arg("save-motions"), stabilizer->motionEstimator()));
stabilizer->setMotionEstimator(makePtr<ToFileMotionWriter>(arg("save-motions"), stabilizer->motionEstimator()));
stabilizer->motionEstimator()->setMotionModel(model);
}
@@ -478,7 +478,7 @@ int main(int argc, const char **argv)
// init deblurer
if (arg("deblur") == "yes")
{
WeightingDeblurer *deblurer = new WeightingDeblurer();
Ptr<WeightingDeblurer> deblurer = makePtr<WeightingDeblurer>();
deblurer->setRadius(argi("radius"));
deblurer->setSensitivity(argf("deblur-sens"));
stabilizer->setDeblurer(deblurer);
@@ -503,22 +503,22 @@ int main(int argc, const char **argv)
Ptr<InpainterBase> inpainters_(inpainters);
if (arg("mosaic") == "yes")
{
ConsistentMosaicInpainter *inp = new ConsistentMosaicInpainter();
Ptr<ConsistentMosaicInpainter> inp = makePtr<ConsistentMosaicInpainter>();
inp->setStdevThresh(argf("mosaic-stdev"));
inpainters->pushBack(inp);
}
if (arg("motion-inpaint") == "yes")
{
MotionInpainter *inp = new MotionInpainter();
Ptr<MotionInpainter> inp = makePtr<MotionInpainter>();
inp->setDistThreshold(argf("mi-dist-thresh"));
inpainters->pushBack(inp);
}
if (arg("color-inpaint") == "average")
inpainters->pushBack(new ColorAverageInpainter());
inpainters->pushBack(makePtr<ColorAverageInpainter>());
else if (arg("color-inpaint") == "ns")
inpainters->pushBack(new ColorInpainter(INPAINT_NS, argd("ci-radius")));
inpainters->pushBack(makePtr<ColorInpainter>(int(INPAINT_NS), argd("ci-radius")));
else if (arg("color-inpaint") == "telea")
inpainters->pushBack(new ColorInpainter(INPAINT_TELEA, argd("ci-radius")));
inpainters->pushBack(makePtr<ColorInpainter>(int(INPAINT_TELEA), argd("ci-radius")));
else if (arg("color-inpaint") != "no")
throw runtime_error("unknown color inpainting method: " + arg("color-inpaint"));
if (!inpainters->empty())
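
Throughout this file the raw-pointer assignments become either makePtr<T>(...) or Ptr::reset(new T(...)); both seat the smart pointer, reset() being the spelling used where the Ptr variable already exists. A sketch with the sample's own builder types (cmd is the sample's CommandLineParser; useGpu stands in for arg("gpu") == "yes"):

    Ptr<IMotionEstimatorBuilder> builder;
    builder = makePtr<MotionEstimatorRansacL2Builder>(cmd, useGpu);  // allocate and assign
    builder.reset(new MotionEstimatorL1Builder(cmd, useGpu, "ws-")); // reseat an existing Ptr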

View File

@@ -5,7 +5,6 @@ SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc ope
opencv_gpuarithm opencv_gpufilters opencv_gpuwarping opencv_gpuimgproc
opencv_gpufeatures2d opencv_gpuoptflow opencv_gpubgsegm
opencv_gpustereo opencv_gpulegacy)
ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
@@ -32,6 +31,10 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
ocv_include_directories(${CUDA_INCLUDE_DIRS})
endif()
if(HAVE_OPENCL)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/ocl/include")
endif()
if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function")
endif()
@@ -44,6 +47,11 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
add_executable(${the_target} ${srcs})
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
if(HAVE_CUDA)
target_link_libraries(${the_target} ${CUDA_CUDA_LIBRARY})
endif()
if(HAVE_opencv_nonfree)
target_link_libraries(${the_target} opencv_nonfree)
endif()
@@ -51,6 +59,10 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
target_link_libraries(${the_target} opencv_gpucodec)
endif()
if(HAVE_OPENCL)
target_link_libraries(${the_target} opencv_ocl)
endif()
set_target_properties(${the_target} PROPERTIES
OUTPUT_NAME "${project}-example-${name}"
PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}")
@@ -84,4 +96,3 @@ if (INSTALL_C_EXAMPLES AND NOT WIN32)
DESTINATION share/OpenCV/samples/${project}
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
endif()

View File

@@ -18,10 +18,10 @@ using namespace cv::gpu;
enum Method
{
FGD_STAT,
MOG,
MOG2,
GMG
GMG,
FGD_STAT
};
int main(int argc, const char** argv)
@@ -29,7 +29,7 @@ int main(int argc, const char** argv)
cv::CommandLineParser cmd(argc, argv,
"{ c camera | | use camera }"
"{ f file | 768x576.avi | input video file }"
"{ m method | mog | method (fgd, mog, mog2, gmg) }"
"{ m method | mog | method (mog, mog2, gmg, fgd) }"
"{ h help | | print help message }");
if (cmd.has("help") || !cmd.check())
@@ -43,18 +43,18 @@ int main(int argc, const char** argv)
string file = cmd.get<string>("file");
string method = cmd.get<string>("method");
if (method != "fgd"
&& method != "mog"
if (method != "mog"
&& method != "mog2"
&& method != "gmg")
&& method != "gmg"
&& method != "fgd")
{
cerr << "Incorrect method" << endl;
return -1;
}
Method m = method == "fgd" ? FGD_STAT :
method == "mog" ? MOG :
Method m = method == "mog" ? MOG :
method == "mog2" ? MOG2 :
method == "fgd" ? FGD_STAT :
GMG;
VideoCapture cap;
@@ -75,11 +75,10 @@ int main(int argc, const char** argv)
GpuMat d_frame(frame);
FGDStatModel fgd_stat;
MOG_GPU mog;
MOG2_GPU mog2;
GMG_GPU gmg;
gmg.numInitializationFrames = 40;
Ptr<BackgroundSubtractor> mog = gpu::createBackgroundSubtractorMOG();
Ptr<BackgroundSubtractor> mog2 = gpu::createBackgroundSubtractorMOG2();
Ptr<BackgroundSubtractor> gmg = gpu::createBackgroundSubtractorGMG(40);
Ptr<BackgroundSubtractor> fgd = gpu::createBackgroundSubtractorFGD();
GpuMat d_fgmask;
GpuMat d_fgimg;
@@ -91,20 +90,20 @@ int main(int argc, const char** argv)
switch (m)
{
case FGD_STAT:
fgd_stat.create(d_frame);
break;
case MOG:
mog(d_frame, d_fgmask, 0.01f);
mog->apply(d_frame, d_fgmask, 0.01);
break;
case MOG2:
mog2(d_frame, d_fgmask);
mog2->apply(d_frame, d_fgmask);
break;
case GMG:
gmg.initialize(d_frame.size());
gmg->apply(d_frame, d_fgmask);
break;
case FGD_STAT:
fgd->apply(d_frame, d_fgmask);
break;
}
@@ -128,24 +127,23 @@ int main(int argc, const char** argv)
//update the model
switch (m)
{
case FGD_STAT:
fgd_stat.update(d_frame);
d_fgmask = fgd_stat.foreground;
d_bgimg = fgd_stat.background;
break;
case MOG:
mog(d_frame, d_fgmask, 0.01f);
mog.getBackgroundImage(d_bgimg);
mog->apply(d_frame, d_fgmask, 0.01);
mog->getBackgroundImage(d_bgimg);
break;
case MOG2:
mog2(d_frame, d_fgmask);
mog2.getBackgroundImage(d_bgimg);
mog2->apply(d_frame, d_fgmask);
mog2->getBackgroundImage(d_bgimg);
break;
case GMG:
gmg(d_frame, d_fgmask);
gmg->apply(d_frame, d_fgmask);
break;
case FGD_STAT:
fgd->apply(d_frame, d_fgmask);
fgd->getBackgroundImage(d_bgimg);
break;
}
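
With the factory functions, every GPU background subtractor shares the generic BackgroundSubtractor interface, so the per-algorithm call syntax collapses into apply()/getBackgroundImage(). Condensed from the hunk above:

    Ptr<BackgroundSubtractor> bs = gpu::createBackgroundSubtractorMOG2();
    GpuMat d_frame(frame), d_fgmask, d_bgimg;
    bs->apply(d_frame, d_fgmask);    // update the model and produce the foreground mask
    bs->getBackgroundImage(d_bgimg); // current background estimate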

View File

@@ -85,8 +85,8 @@ int main(int argc, const char* argv[])
Mat frame0Gray, frame1Gray;
cvtColor(frame0Color, frame0Gray, COLOR_BGR2GRAY);
cvtColor(frame1Color, frame1Gray, COLOR_BGR2GRAY);
cv::cvtColor(frame0Color, frame0Gray, COLOR_BGR2GRAY);
cv::cvtColor(frame1Color, frame1Gray, COLOR_BGR2GRAY);
GpuMat d_frame0(frame0Gray);
GpuMat d_frame1(frame1Gray);

View File

@@ -24,12 +24,11 @@ static void help()
}
template<class T>
void convertAndResize(const T& src, T& gray, T& resized, double scale)
static void convertAndResize(const Mat& src, Mat& gray, Mat& resized, double scale)
{
if (src.channels() == 3)
{
cvtColor( src, gray, COLOR_BGR2GRAY );
cv::cvtColor( src, gray, COLOR_BGR2GRAY );
}
else
{
@@ -40,7 +39,30 @@ void convertAndResize(const T& src, T& gray, T& resized, double scale)
if (scale != 1)
{
resize(gray, resized, sz);
cv::resize(gray, resized, sz);
}
else
{
resized = gray;
}
}
static void convertAndResize(const GpuMat& src, GpuMat& gray, GpuMat& resized, double scale)
{
if (src.channels() == 3)
{
cv::gpu::cvtColor( src, gray, COLOR_BGR2GRAY );
}
else
{
gray = src;
}
Size sz(cvRound(gray.cols * scale), cvRound(gray.rows * scale));
if (scale != 1)
{
cv::gpu::resize(gray, resized, sz);
}
else
{
@@ -272,7 +294,7 @@ int main(int argc, const char *argv[])
}
cout << endl;
cvtColor(resized_cpu, frameDisp, COLOR_GRAY2BGR);
cv::cvtColor(resized_cpu, frameDisp, COLOR_GRAY2BGR);
displayState(frameDisp, helpScreen, useGPU, findLargestObject, filterRects, fps);
imshow("result", frameDisp);

View File

@@ -86,8 +86,8 @@ int main()
if (!dev_info.isCompatible())
{
std::cout << "GPU module isn't built for GPU #" << i << " ("
<< dev_info.name() << ", CC " << dev_info.major()
<< dev_info.minor() << "\n";
<< dev_info.name() << ", CC " << dev_info.majorVersion()
<< dev_info.minorVersion() << "\n";
return -1;
}
}

View File

@@ -116,8 +116,8 @@ int main(int argc, char** argv)
if (!dev_info.isCompatible())
{
std::cout << "GPU module isn't built for GPU #" << i << " ("
<< dev_info.name() << ", CC " << dev_info.major()
<< dev_info.minor() << "\n";
<< dev_info.name() << ", CC " << dev_info.majorVersion()
<< dev_info.minorVersion() << "\n";
return -1;
}
}
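
These last hunks track a simple accessor rename on gpu::DeviceInfo (sketch):

    cv::gpu::DeviceInfo dev_info(0);        // query GPU #0
    int cc_major = dev_info.majorVersion(); // was dev_info.major()
    int cc_minor = dev_info.minorVersion(); // was dev_info.minor()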

Some files were not shown because too many files have changed in this diff.