merged 2.4 into trunk

Vadim Pisarevsky
2012-04-30 14:33:52 +00:00
parent 3f1c6d7357
commit d5a0088bbe
194 changed files with 10158 additions and 8225 deletions

View File

@@ -117,7 +117,10 @@ public class puzzle15View extends SampleCvViewBase implements OnTouchListener {
        capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);

        int cols = mRgba.cols();
        int rows = mRgba.rows();
+       rows = rows - rows%4;
+       cols = cols - cols%4;

        if (mCells == null)
            createPuzzle(cols, rows);
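The two added lines trim the camera frame so both dimensions are divisible by 4, letting the 15-puzzle's 4x4 grid tile the frame into identical integer-sized cells with no leftover pixels. A minimal sketch of the rounding rule (trimToGrid is a hypothetical helper, not part of the sample):

    // Round a dimension down to the nearest multiple of the grid size.
    static int trimToGrid(int dim, int gridSize) {
        return dim - dim % gridSize; // e.g. trimToGrid(405, 4) == 404
    }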

View File

@@ -7,6 +7,8 @@ add_custom_target(opencv_android_examples)
add_subdirectory(15-puzzle)
add_subdirectory(face-detection)
add_subdirectory(image-manipulations)
+add_subdirectory(color-blob-detection)

add_subdirectory(tutorial-0-androidcamera)
add_subdirectory(tutorial-1-addopencv)
add_subdirectory(tutorial-2-opencvcamera)

View File

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
    <classpathentry kind="src" path="src"/>
    <classpathentry kind="src" path="gen"/>
    <classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
    <classpathentry kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
    <classpathentry kind="output" path="bin/classes"/>
</classpath>

View File

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
    <name>ColorBlobDetection</name>
    <comment></comment>
    <projects>
    </projects>
    <buildSpec>
        <buildCommand>
            <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>org.eclipse.jdt.core.javabuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>com.android.ide.eclipse.adt.ApkBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
    </buildSpec>
    <natures>
        <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
        <nature>org.eclipse.jdt.core.javanature</nature>
    </natures>
</projectDescription>

View File

@@ -0,0 +1,27 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    package="org.opencv.example.colorblobdetect"
    android:versionCode="1"
    android:versionName="1.0" >

    <uses-sdk android:minSdkVersion="8" />

    <application
        android:icon="@drawable/ic_launcher"
        android:label="@string/app_name" >
        <activity
            android:name="org.opencv.samples.colorblobdetect.ColorBlobDetectionActivity"
            android:label="@string/app_name" >
            <intent-filter>
                <action android:name="android.intent.action.MAIN" />
                <category android:name="android.intent.category.LAUNCHER" />
            </intent-filter>
        </activity>
    </application>

    <uses-permission android:name="android.permission.CAMERA"/>
    <uses-feature android:name="android.hardware.camera" />
    <uses-feature android:name="android.hardware.camera.autofocus" />
</manifest>

View File

@@ -0,0 +1,7 @@
set(sample example-color-blob-detection)

add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET})
if(TARGET ${sample})
  add_dependencies(opencv_android_examples ${sample})
endif()

4 binary image files added (not shown): 9.2 KiB, 2.7 KiB, 5.1 KiB and 14 KiB.

View File

@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="fill_parent"
    android:layout_height="fill_parent"
    android:orientation="vertical" >

    <TextView
        android:layout_width="fill_parent"
        android:layout_height="wrap_content"
        android:text="@string/hello" />

</LinearLayout>

View File

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
    <string name="hello">Hello World, ColorBlobDetectionActivity!</string>
    <string name="app_name">ColorBlobDetection</string>
</resources>

View File

@@ -0,0 +1,27 @@
package org.opencv.samples.colorblobdetect;

import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.Window;

public class ColorBlobDetectionActivity extends Activity {
    private static final String TAG = "Example/ColorBlobDetection";

    private ColorBlobDetectionView mView;

    public ColorBlobDetectionActivity()
    {
        Log.i(TAG, "Instantiated new " + this.getClass());
    }

    /** Called when the activity is first created. */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        Log.i(TAG, "onCreate");
        super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);
        mView = new ColorBlobDetectionView(this);
        setContentView(mView);
    }
}

View File

@@ -0,0 +1,87 @@
package org.opencv.samples.colorblobdetect;

import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.highgui.Highgui;
import org.opencv.highgui.VideoCapture;

import android.content.Context;
import android.graphics.Bitmap;
import android.util.Log;
import android.view.MotionEvent;
import android.view.SurfaceHolder;
import android.view.View;
import android.view.View.OnTouchListener;

public class ColorBlobDetectionView extends SampleCvViewBase implements OnTouchListener {
    private static final String TAG = "Example/ColorBlobDetection";

    private Mat mRgba;

    public ColorBlobDetectionView(Context context)
    {
        super(context);
        setOnTouchListener(this);
    }

    @Override
    public void surfaceChanged(SurfaceHolder _holder, int format, int width, int height) {
        super.surfaceChanged(_holder, format, width, height);
        synchronized (this) {
            // initialize Mat before usage
            mRgba = new Mat();
        }
    }

    @Override
    public boolean onTouch(View v, MotionEvent event)
    {
        int cols = mRgba.cols();
        int rows = mRgba.rows();

        // the frame is drawn centered, so convert view coordinates to frame coordinates
        int xoffset = (getWidth() - cols) / 2;
        int yoffset = (getHeight() - rows) / 2;
        int x = (int)event.getX() - xoffset;
        int y = (int)event.getY() - yoffset;
        if (x < 0 || y < 0 || x >= cols || y >= rows)
            return false; // touch landed outside the camera frame

        // Mat.get() takes (row, col), i.e. (y, x)
        double TouchedColor[] = mRgba.get(y, x);
        Log.i(TAG, "Touch coordinates: (" + x + ", " + y + ")");
        Log.i(TAG, "Touched rgba color: (" + TouchedColor[0] + ", " + TouchedColor[1] +
                ", " + TouchedColor[2] + ", " + TouchedColor[3] + ")");

        return false; // don't need subsequent touch events
    }

    @Override
    protected Bitmap processFrame(VideoCapture capture) {
        capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);

        Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
        try {
            Utils.matToBitmap(mRgba, bmp);
        } catch(Exception e) {
            Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
            bmp.recycle();
            bmp = null;
        }
        return bmp;
    }

    @Override
    public void run() {
        super.run();

        synchronized (this) {
            // explicitly deallocate Mats
            if (mRgba != null)
                mRgba.release();
            mRgba = null;
        }
    }
}
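onTouch() above reads a single pixel, which is noisy and, before the bounds check, could index outside the frame when the touch lands in the black margin around the centered preview. A sturdier variant would average a small patch around the touch point; a sketch under that assumption (averageTouchedColor is hypothetical, not part of the sample):

    // Average a (2r+1)x(2r+1) patch around the touch point, clamped to the frame.
    private double[] averageTouchedColor(Mat rgba, int x, int y) {
        int r = 4; // patch half-size, an arbitrary choice
        int x0 = Math.max(x - r, 0), x1 = Math.min(x + r + 1, rgba.cols());
        int y0 = Math.max(y - r, 0), y1 = Math.min(y + r + 1, rgba.rows());
        if (x0 >= x1 || y0 >= y1)
            return null; // touch outside the camera frame
        // Core.mean() averages each channel over the submatrix (RGBA order here)
        return org.opencv.core.Core.mean(rgba.submat(y0, y1, x0, x1)).val;
    }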

View File

@@ -0,0 +1,110 @@
package org.opencv.samples.colorblobdetect;

import java.util.List;

import org.opencv.core.Size;
import org.opencv.highgui.VideoCapture;
import org.opencv.highgui.Highgui;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;

public abstract class SampleCvViewBase extends SurfaceView implements SurfaceHolder.Callback, Runnable {
    private static final String TAG = "Sample::SurfaceView";

    private SurfaceHolder mHolder;
    private VideoCapture mCamera;

    public SampleCvViewBase(Context context) {
        super(context);
        mHolder = getHolder();
        mHolder.addCallback(this);
        Log.i(TAG, "Instantiated new " + this.getClass());
    }

    public void surfaceChanged(SurfaceHolder _holder, int format, int width, int height) {
        Log.i(TAG, "surfaceChanged");
        synchronized (this) {
            if (mCamera != null && mCamera.isOpened()) {
                Log.i(TAG, "before mCamera.getSupportedPreviewSizes()");
                List<Size> sizes = mCamera.getSupportedPreviewSizes();
                Log.i(TAG, "after mCamera.getSupportedPreviewSizes()");
                int mFrameWidth = width;
                int mFrameHeight = height;

                // selecting optimal camera preview size
                {
                    double minDiff = Double.MAX_VALUE;
                    for (Size size : sizes) {
                        if (Math.abs(size.height - height) < minDiff) {
                            mFrameWidth = (int) size.width;
                            mFrameHeight = (int) size.height;
                            minDiff = Math.abs(size.height - height);
                        }
                    }
                }

                mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, mFrameWidth);
                mCamera.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, mFrameHeight);
            }
        }
    }

    public void surfaceCreated(SurfaceHolder holder) {
        Log.i(TAG, "surfaceCreated");
        mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
        if (mCamera.isOpened()) {
            (new Thread(this)).start();
        } else {
            mCamera.release();
            mCamera = null;
            Log.e(TAG, "Failed to open native camera");
        }
    }

    public void surfaceDestroyed(SurfaceHolder holder) {
        Log.i(TAG, "surfaceDestroyed");
        if (mCamera != null) {
            synchronized (this) {
                mCamera.release();
                mCamera = null;
            }
        }
    }

    protected abstract Bitmap processFrame(VideoCapture capture);

    public void run() {
        Log.i(TAG, "Starting processing thread");
        while (true) {
            Bitmap bmp = null;

            synchronized (this) {
                if (mCamera == null)
                    break;
                if (!mCamera.grab()) {
                    Log.e(TAG, "mCamera.grab() failed");
                    break;
                }
                bmp = processFrame(mCamera);
            }

            if (bmp != null) {
                Canvas canvas = mHolder.lockCanvas();
                if (canvas != null) {
                    canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2, (canvas.getHeight() - bmp.getHeight()) / 2, null);
                    mHolder.unlockCanvasAndPost(canvas);
                }
                bmp.recycle();
            }
        }
        Log.i(TAG, "Finishing processing thread");
    }
}
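The block in surfaceChanged() picks the supported preview size whose height is closest to the surface height. The same rule, factored into a standalone helper for clarity (pickPreviewSize is hypothetical; it assumes the org.opencv.core.Size and java.util.List imports used above):

    // Choose the supported preview size with height nearest the surface height.
    static Size pickPreviewSize(List<Size> supported, int surfaceHeight) {
        Size best = null;
        double minDiff = Double.MAX_VALUE;
        for (Size s : supported) {
            double diff = Math.abs(s.height - surfaceHeight);
            if (diff < minDiff) {
                best = s;
                minDiff = diff;
            }
        }
        return best; // null only if the supported list is empty
    }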

View File

@@ -8,23 +8,27 @@ import android.view.MenuItem;
import android.view.Window;

public class ImageManipulationsActivity extends Activity {
    private static final String TAG = "Sample::Activity";

    public static final int VIEW_MODE_RGBA      = 0;
-   public static final int VIEW_MODE_CANNY     = 1;
-   public static final int VIEW_MODE_SEPIA     = 2;
-   public static final int VIEW_MODE_SOBEL     = 3;
-   public static final int VIEW_MODE_BLUR      = 4;
+   public static final int VIEW_MODE_HIST      = 1;
+   public static final int VIEW_MODE_CANNY     = 2;
+   public static final int VIEW_MODE_SEPIA     = 3;
+   public static final int VIEW_MODE_SOBEL     = 4;
    public static final int VIEW_MODE_ZOOM      = 5;
+   public static final int VIEW_MODE_PIXELIZE  = 6;
+   public static final int VIEW_MODE_POSTERIZE = 7;

    private MenuItem mItemPreviewRGBA;
+   private MenuItem mItemPreviewHist;
    private MenuItem mItemPreviewCanny;
    private MenuItem mItemPreviewSepia;
    private MenuItem mItemPreviewSobel;
-   private MenuItem mItemPreviewBlur;
    private MenuItem mItemPreviewZoom;
+   private MenuItem mItemPreviewPixelize;
+   private MenuItem mItemPreviewPosterize;

    public static int viewMode = VIEW_MODE_RGBA;

    public ImageManipulationsActivity() {
        Log.i(TAG, "Instantiated new " + this.getClass());
@@ -42,12 +46,14 @@ public class ImageManipulationsActivity extends Activity {
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        Log.i(TAG, "onCreateOptionsMenu");
        mItemPreviewRGBA = menu.add("Preview RGBA");
+       mItemPreviewHist = menu.add("Histograms");
        mItemPreviewCanny = menu.add("Canny");
        mItemPreviewSepia = menu.add("Sepia");
        mItemPreviewSobel = menu.add("Sobel");
-       mItemPreviewBlur = menu.add("Blur");
        mItemPreviewZoom = menu.add("Zoom");
+       mItemPreviewPixelize = menu.add("Pixelize");
+       mItemPreviewPosterize = menu.add("Posterize");
        return true;
    }
@@ -56,16 +62,20 @@ public class ImageManipulationsActivity extends Activity {
        Log.i(TAG, "Menu Item selected " + item);
        if (item == mItemPreviewRGBA)
            viewMode = VIEW_MODE_RGBA;
+       else if (item == mItemPreviewHist)
+           viewMode = VIEW_MODE_HIST;
        else if (item == mItemPreviewCanny)
            viewMode = VIEW_MODE_CANNY;
        else if (item == mItemPreviewSepia)
            viewMode = VIEW_MODE_SEPIA;
        else if (item == mItemPreviewSobel)
            viewMode = VIEW_MODE_SOBEL;
-       else if (item == mItemPreviewBlur)
-           viewMode = VIEW_MODE_BLUR;
        else if (item == mItemPreviewZoom)
            viewMode = VIEW_MODE_ZOOM;
+       else if (item == mItemPreviewPixelize)
+           viewMode = VIEW_MODE_PIXELIZE;
+       else if (item == mItemPreviewPosterize)
+           viewMode = VIEW_MODE_POSTERIZE;
        return true;
    }
}

View File

@@ -1,8 +1,12 @@
package org.opencv.samples.imagemanipulations;

+import java.util.Arrays;

import org.opencv.android.Utils;
+import org.opencv.core.Core;
import org.opencv.core.Mat;
+import org.opencv.core.MatOfFloat;
+import org.opencv.core.MatOfInt;
import org.opencv.core.Size;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
@@ -17,9 +21,20 @@ import android.util.Log;
import android.view.SurfaceHolder;

class ImageManipulationsView extends SampleCvViewBase {
+   private Size mSize0;
+   private Size mSizeRgba;
+   private Size mSizeRgbaInner;

    private Mat mRgba;
    private Mat mGray;
    private Mat mIntermediateMat;
+   private Mat mHist, mMat0;
+   private MatOfInt mChannels[], mHistSize;
+   private int mHistSizeNum;
+   private MatOfFloat mRanges;
+   private Scalar mColorsRGB[], mColorsHue[], mWhite;
+   private Point mP1, mP2;
+   float mBuff[];

    private Mat mRgbaInnerWindow;
    private Mat mGrayInnerWindow;
@@ -48,6 +63,25 @@ class ImageManipulationsView extends SampleCvViewBase {
            mGray = new Mat();
            mRgba = new Mat();
            mIntermediateMat = new Mat();
+           mSize0 = new Size();
+           mHist = new Mat();
+           mChannels = new MatOfInt[] { new MatOfInt(0), new MatOfInt(1), new MatOfInt(2) };
+           mHistSizeNum = 25;
+           mBuff = new float[mHistSizeNum];
+           mHistSize = new MatOfInt(mHistSizeNum);
+           mRanges = new MatOfFloat(0f, 256f);
+           mMat0 = new Mat();
+           mColorsRGB = new Scalar[] { new Scalar(200, 0, 0, 255), new Scalar(0, 200, 0, 255), new Scalar(0, 0, 200, 255) };
+           mColorsHue = new Scalar[] {
+               new Scalar(255, 0, 0, 255),   new Scalar(255, 60, 0, 255),  new Scalar(255, 120, 0, 255), new Scalar(255, 180, 0, 255), new Scalar(255, 240, 0, 255),
+               new Scalar(215, 213, 0, 255), new Scalar(150, 255, 0, 255), new Scalar(85, 255, 0, 255),  new Scalar(20, 255, 0, 255),  new Scalar(0, 255, 30, 255),
+               new Scalar(0, 255, 85, 255),  new Scalar(0, 255, 150, 255), new Scalar(0, 255, 215, 255), new Scalar(0, 234, 255, 255), new Scalar(0, 170, 255, 255),
+               new Scalar(0, 120, 255, 255), new Scalar(0, 60, 255, 255),  new Scalar(0, 0, 255, 255),   new Scalar(64, 0, 255, 255),  new Scalar(120, 0, 255, 255),
+               new Scalar(180, 0, 255, 255), new Scalar(255, 0, 255, 255), new Scalar(255, 0, 215, 255), new Scalar(255, 0, 85, 255),  new Scalar(255, 0, 0, 255)
+           };
+           mWhite = Scalar.all(255);
+           mP1 = new Point();
+           mP2 = new Point();
        }
    }
@@ -55,8 +89,10 @@ class ImageManipulationsView extends SampleCvViewBase {
        if (mRgba.empty())
            return;

-       int rows = mRgba.rows();
-       int cols = mRgba.cols();
+       mSizeRgba = mRgba.size();
+
+       int rows = (int) mSizeRgba.height;
+       int cols = (int) mSizeRgba.width;

        int left = cols / 8;
        int top = rows / 8;
@@ -66,6 +102,7 @@ class ImageManipulationsView extends SampleCvViewBase {
        if (mRgbaInnerWindow == null)
            mRgbaInnerWindow = mRgba.submat(top, top + height, left, left + width);
+       mSizeRgbaInner = mRgbaInnerWindow.size();

        if (mGrayInnerWindow == null && !mGray.empty())
            mGrayInnerWindow = mGray.submat(top, top + height, left, left + width);
@@ -88,15 +125,56 @@ class ImageManipulationsView extends SampleCvViewBase {
            capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
            break;

+       case ImageManipulationsActivity.VIEW_MODE_HIST:
+           capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+           if (mSizeRgba == null)
+               CreateAuxiliaryMats();
+           int thickness = (int) (mSizeRgba.width / (mHistSizeNum + 10) / 5);
+           if(thickness > 5) thickness = 5;
+           int offset = (int) ((mSizeRgba.width - (5*mHistSizeNum + 4*10)*thickness)/2);
+
+           // RGB
+           for(int c=0; c<3; c++) {
+               Imgproc.calcHist(Arrays.asList(mRgba), mChannels[c], mMat0, mHist, mHistSize, mRanges);
+               Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
+               mHist.get(0, 0, mBuff);
+               for(int h=0; h<mHistSizeNum; h++) {
+                   mP1.x = mP2.x = offset + (c * (mHistSizeNum + 10) + h) * thickness;
+                   mP1.y = mSizeRgba.height-1;
+                   mP2.y = mP1.y - 2 - (int)mBuff[h];
+                   Core.line(mRgba, mP1, mP2, mColorsRGB[c], thickness);
+               }
+           }
+
+           // Value and Hue
+           Imgproc.cvtColor(mRgba, mIntermediateMat, Imgproc.COLOR_RGB2HSV_FULL);
+
+           // Value
+           Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[2], mMat0, mHist, mHistSize, mRanges);
+           Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
+           mHist.get(0, 0, mBuff);
+           for(int h=0; h<mHistSizeNum; h++) {
+               mP1.x = mP2.x = offset + (3 * (mHistSizeNum + 10) + h) * thickness;
+               mP1.y = mSizeRgba.height-1;
+               mP2.y = mP1.y - 2 - (int)mBuff[h];
+               Core.line(mRgba, mP1, mP2, mWhite, thickness);
+           }
+
+           // Hue
+           Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[0], mMat0, mHist, mHistSize, mRanges);
+           Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
+           mHist.get(0, 0, mBuff);
+           for(int h=0; h<mHistSizeNum; h++) {
+               mP1.x = mP2.x = offset + (4 * (mHistSizeNum + 10) + h) * thickness;
+               mP1.y = mSizeRgba.height-1;
+               mP2.y = mP1.y - 2 - (int)mBuff[h];
+               Core.line(mRgba, mP1, mP2, mColorsHue[h], thickness);
+           }
+           break;

        case ImageManipulationsActivity.VIEW_MODE_CANNY:
            capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
            capture.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME);
            if (mRgbaInnerWindow == null || mGrayInnerWindow == null)
                CreateAuxiliaryMats();
-           Imgproc.Canny(mGrayInnerWindow, mGrayInnerWindow, 80, 90);
-           Imgproc.cvtColor(mGrayInnerWindow, mRgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
+           Imgproc.Canny(mRgbaInnerWindow, mIntermediateMat, 80, 90);
+           Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
            break;
case ImageManipulationsActivity.VIEW_MODE_SOBEL:
@@ -116,13 +194,6 @@ class ImageManipulationsView extends SampleCvViewBase {
            Core.transform(mRgba, mRgba, mSepiaKernel);
            break;

-       case ImageManipulationsActivity.VIEW_MODE_BLUR:
-           capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
-           if (mBlurWindow == null)
-               CreateAuxiliaryMats();
-           Imgproc.blur(mBlurWindow, mBlurWindow, new Size(15, 15));
-           break;

        case ImageManipulationsActivity.VIEW_MODE_ZOOM:
            capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
            if (mZoomCorner == null || mZoomWindow == null)
@@ -132,6 +203,29 @@ class ImageManipulationsView extends SampleCvViewBase {
            Size wsize = mZoomWindow.size();
            Core.rectangle(mZoomWindow, new Point(1, 1), new Point(wsize.width - 2, wsize.height - 2), new Scalar(255, 0, 0, 255), 2);
            break;

+       case ImageManipulationsActivity.VIEW_MODE_PIXELIZE:
+           capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+           if (mRgbaInnerWindow == null)
+               CreateAuxiliaryMats();
+           // downscale 10x with nearest-neighbor, then blow back up: cheap pixelization
+           Imgproc.resize(mRgbaInnerWindow, mIntermediateMat, mSize0, 0.1, 0.1, Imgproc.INTER_NEAREST);
+           Imgproc.resize(mIntermediateMat, mRgbaInnerWindow, mSizeRgbaInner, 0., 0., Imgproc.INTER_NEAREST);
+           break;

+       case ImageManipulationsActivity.VIEW_MODE_POSTERIZE:
+           capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+           if (mRgbaInnerWindow == null)
+               CreateAuxiliaryMats();
+           /*
+           Imgproc.cvtColor(mRgbaInnerWindow, mIntermediateMat, Imgproc.COLOR_RGBA2RGB);
+           Imgproc.pyrMeanShiftFiltering(mIntermediateMat, mIntermediateMat, 5, 50);
+           Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_RGB2RGBA);
+           */
+           // blacken edges, then quantize each channel to 16 levels (divide by 16, scale back)
+           Imgproc.Canny(mRgbaInnerWindow, mIntermediateMat, 80, 90);
+           mRgbaInnerWindow.setTo(new Scalar(0, 0, 0, 255), mIntermediateMat);
+           Core.convertScaleAbs(mRgbaInnerWindow, mIntermediateMat, 1./16, 0);
+           Core.convertScaleAbs(mIntermediateMat, mRgbaInnerWindow, 16, 0);
+           break;
        }

        Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
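The histogram layout arithmetic in the new VIEW_MODE_HIST branch packs five histograms (red, green, blue, value, hue) of mHistSizeNum bars each, separated by 10-bar gaps, into the frame width. A worked example of that arithmetic, assuming a hypothetical 800-pixel-wide frame:

    int histSizeNum = 25;                                   // mHistSizeNum
    int width = 800;                                        // assumed frame width
    int thickness = width / (histSizeNum + 10) / 5;         // 800/35/5 = 4 px per bar
    if (thickness > 5) thickness = 5;                       // bars are capped at 5 px
    // five groups of 25 bars plus four 10-bar gaps, centered horizontally:
    int offset = (width - (5 * histSizeNum + 4 * 10) * thickness) / 2;  // (800 - 660)/2 = 70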

View File

@@ -104,7 +104,7 @@ public abstract class SampleCvViewBase extends SurfaceView implements SurfaceHol
            if (bmp != null) {
                Canvas canvas = mHolder.lockCanvas();
                if (canvas != null) {
-                   canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2, (canvas.getHeight() - bmp.getHeight()) / 2, null);
+                   canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2, (canvas.getHeight() - bmp.getHeight()), null);
                    mFps.draw(canvas, (canvas.getWidth() - bmp.getWidth()) / 2, 0);
                    mHolder.unlockCanvasAndPost(canvas);
                }

View File

@@ -1,4 +1,5 @@
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
@@ -17,7 +18,7 @@ void help()
const char* keys =
{
-   "{c |camera |false | use camera or not}"
+   "{c |camera |true | use camera or not}"
    "{fn|file_name|tree.avi | movie file }"
};
@@ -49,7 +50,8 @@ int main(int argc, const char** argv)
namedWindow("foreground image", CV_WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL);
BackgroundSubtractorMOG2 bg_model;
BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);
Mat img, fgmask, fgimg;
for(;;)
@@ -59,6 +61,8 @@ int main(int argc, const char** argv)
        if( img.empty() )
            break;

+       //cvtColor(_img, img, COLOR_BGR2GRAY);

        if( fgimg.empty() )
            fgimg.create(img.size(), img.type());
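For reference, the same MOG2 loop in the 2.4 Java bindings — a minimal sketch, assuming org.opencv.video.BackgroundSubtractorMOG2 and its apply(image, fgmask, learningRate) wrapper (class and variable names here are illustrative):

    import org.opencv.core.Mat;
    import org.opencv.highgui.VideoCapture;
    import org.opencv.video.BackgroundSubtractorMOG2;

    public class BgFgSketch {
        public static void main(String[] args) {
            VideoCapture cap = new VideoCapture(0);          // camera input, as with {c|camera|true}
            BackgroundSubtractorMOG2 bg = new BackgroundSubtractorMOG2();
            Mat img = new Mat(), fgmask = new Mat();
            while (cap.read(img)) {
                bg.apply(img, fgmask, -1);                   // -1 lets the model choose the learning rate
                // fgmask now holds this frame's foreground mask
            }
            cap.release();
        }
    }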

View File

@@ -442,16 +442,30 @@ void find_decision_boundary_EM()
    Mat trainSamples, trainClasses;
    prepare_train_data( trainSamples, trainClasses );

-   cv::EM em;
-   cv::EM::Params params;
-   params.nclusters = classColors.size();
-   params.covMatType = cv::EM::COV_MAT_GENERIC;
-   params.startStep = cv::EM::START_AUTO_STEP;
-   params.termCrit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.1);
-
-   // learn classifier
-   em.train( trainSamples, Mat(), params, &trainClasses );
+   vector<cv::EM> em_models(classColors.size());
+
+   CV_Assert((int)trainClasses.total() == trainSamples.rows);
+   CV_Assert((int)trainClasses.type() == CV_32SC1);
+
+   for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
+   {
+       const int componentCount = 3;
+       em_models[modelIndex] = EM(componentCount, cv::EM::COV_MAT_DIAGONAL);
+
+       Mat modelSamples;
+       for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
+       {
+           if(trainClasses.at<int>(sampleIndex) == (int)modelIndex)
+               modelSamples.push_back(trainSamples.row(sampleIndex));
+       }
+
+       // learn models
+       if(!modelSamples.empty())
+           em_models[modelIndex].train(modelSamples);
+   }

    // classify coordinate plane points using the Bayes classifier, i.e.
    // y(x) = arg max_i=1..modelsCount likelihoods_i(x)
    Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
@@ -460,7 +474,16 @@ void find_decision_boundary_EM()
            testSample.at<float>(0) = (float)x;
            testSample.at<float>(1) = (float)y;

-           int response = (int)em.predict( testSample );
+           Mat logLikelihoods(1, em_models.size(), CV_64FC1, Scalar(-DBL_MAX));
+           for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
+           {
+               if(em_models[modelIndex].isTrained())
+                   logLikelihoods.at<double>(modelIndex) = em_models[modelIndex].predict(testSample)[0];
+           }
+
+           Point maxLoc;
+           minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc);
+           int response = maxLoc.x;

            circle( imgDst, Point(x,y), 2, classColors[response], 1 );
        }
    }
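The rewrite replaces one multi-class EM with a per-class mixture model: each class gets its own 3-component diagonal-covariance GMM, and a point is assigned to the class whose model scores it highest. In symbols, with K = classColors.size() classes and C = componentCount = 3:

    \hat{y}(x) = \arg\max_{i \in \{0,\dots,K-1\}} \log p_i(x),
    \qquad
    p_i(x) = \sum_{k=1}^{C} \pi_{ik}\, \mathcal{N}\!\left(x \mid \mu_{ik}, \Sigma_{ik}\right)

which is what the logLikelihoods/minMaxLoc pair above computes (EM::predict returns the log-likelihood in its first component).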

View File

@@ -87,7 +87,7 @@ int main(int argc, char** argv)
namedWindow("segmented", 1);
BackgroundSubtractorMOG bgsubtractor;
bgsubtractor.noiseSigma = 10;
bgsubtractor.set("noiseSigma", 10);
for(;;)
{

View File

@@ -21,80 +21,81 @@ using namespace std;
//hide the local functions in an anon namespace
namespace
{
    void help(char** av)
    {
        cout << "\nThis program just gets you started reading images from video\n"
                "Usage:\n./" << av[0] << " <video device number>\n" << "q,Q,esc -- quit\n"
             << "space -- save frame\n\n"
             << "\tThis is a starter sample, to get you up and going in a copy/paste fashion\n"
             << "\tThe program captures frames from a camera connected to your computer.\n"
             << "\tTo find the video device number, try ls /dev/video* \n"
             << "\tYou may also pass a video file, like my_video.avi, instead of a device number"
             << "\n"
             << "DATA:\n"
             << "Generate a datamatrix from http://datamatrix.kaywa.com/ \n"
             << "  NOTE: This only handles strings of length 3 or less\n"
             << "  Resize the screen to be large enough for your camera to see, and it should find and read it.\n\n"
             << endl;
    }

    int process(VideoCapture& capture)
    {
-       std::vector<DataMatrixCode> codes;
        int n = 0;
        char filename[200];
        string window_name = "video | q or esc to quit";
        cout << "press space to save a picture. q or esc to quit" << endl;
        namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window;
        Mat frame;

        for (;;)
        {
            capture >> frame;
            if (frame.empty())
                break;

            cv::Mat gray;
            cv::cvtColor(frame, gray, CV_RGB2GRAY);
-           findDataMatrix(gray, codes);
-           drawDataMatrixCodes(codes, frame);
+           vector<string> codes;
+           Mat corners;
+           findDataMatrix(gray, codes, corners);
+           drawDataMatrixCodes(frame, codes, corners);
            imshow(window_name, frame);

            char key = (char) waitKey(5); //delay N millis, usually long enough to display and capture input
            switch (key)
            {
            case 'q':
            case 'Q':
            case 27: //escape key
                return 0;
            case ' ': //Save an image
                sprintf(filename, "filename%.3d.jpg", n++);
                imwrite(filename, frame);
                cout << "Saved " << filename << endl;
                break;
            default:
                break;
            }
        }
        return 0;
    }
}

int main(int ac, char** av)
{
    if (ac != 2)
    {
        help(av);
        return 1;
    }
    std::string arg = av[1];
    VideoCapture capture(arg); //try to open string, this will attempt to open it as a video file
    if (!capture.isOpened()) //if this fails, try to open as a video camera, through the use of an integer param
        capture.open(atoi(arg.c_str()));
    if (!capture.isOpened())
    {
        cerr << "Failed to open a video device or video file!\n" << endl;
        help(av);
        return 1;
    }
    return process(capture);
}

View File

@@ -215,7 +215,6 @@ int main(int argc, const char* argv[])
        switch (key)
        {
        case 27:
            return 0;
        case 'A':
            if (currentFrame > 0)

View File

@@ -0,0 +1,68 @@
help='''
Data matrix detector sample.
Usage:
   video_dmtx {<video device number>|<video file name>}

Generate a data matrix from http://datamatrix.kaywa.com/ and print it out.
NOTE: this only handles data matrices generated for text strings of at most 3 characters

Resize the screen to be large enough for your camera to see, and it should find and read it.

Keyboard shortcuts:
   q or ESC - exit
   space    - save current image as datamatrix<frame_number>.jpg
'''

import cv2
import numpy as np
import sys

def data_matrix_demo(cap):
    window_name = "Data Matrix Detector"
    frame_number = 0
    need_to_save = False

    while 1:
        ret, frame = cap.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        codes, corners, dmtx = cv2.findDataMatrix(gray)

        cv2.drawDataMatrixCodes(frame, codes, corners)
        cv2.imshow(window_name, frame)

        key = cv2.waitKey(30)
        c = chr(key & 255)
        if c in ['q', 'Q', chr(27)]:
            break

        if c == ' ':
            need_to_save = True

        if need_to_save and codes:
            filename = ("datamatrix%03d.jpg" % frame_number)
            cv2.imwrite(filename, frame)
            print "Saved frame to " + filename
            need_to_save = False

        frame_number += 1


if __name__ == '__main__':
    print help

    if len(sys.argv) == 1:
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(sys.argv[1])
        if not cap.isOpened():
            cap = cv2.VideoCapture(int(sys.argv[1]))

    if not cap.isOpened():
        print 'Cannot initialize video capture'
        sys.exit(-1)

    data_matrix_demo(cap)