Merged the trunk r8852:8880

Author: Andrey Kamaev
Date:   2012-07-02 11:04:43 +00:00
Parent: 304dac7f00
Commit: b368f99d03

60 changed files with 507 additions and 255 deletions


@@ -89,7 +89,7 @@ if(CMAKE_COMPILER_IS_GNUCXX)
   endif()
 endif()

-if(CMAKE_SYSTEM_PROCESSOR MATCHES amd64.*|x86_64.*)
+if(CMAKE_SYSTEM_PROCESSOR MATCHES amd64.*|x86_64.* OR CMAKE_GENERATOR MATCHES "Visual Studio.*Win64")
   set(X86_64 1)
 elseif(CMAKE_SYSTEM_PROCESSOR MATCHES i686.*|i386.*|x86.*)
   set(X86 1)


@@ -2608,7 +2608,7 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
     if( validPixROI )
     {
-        icvGetRectangles( cameraMatrix, distCoeffs, 0, newCameraMatrix, imgSize, inner, outer );
+        icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer );
         cv::Rect r = inner;
         r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
         *validPixROI = r;


@@ -1128,6 +1128,8 @@ void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
 {
     Mat points = _points.getMat(), F = _Fmat.getMat();
     int npoints = points.checkVector(2);
+    if( npoints < 0 )
+        npoints = points.checkVector(3);
     CV_Assert( npoints >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S));

     _lines.create(npoints, 1, CV_32FC3, -1, true);
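
After this change computeCorrespondEpilines also accepts homogeneous, 3-channel input points in addition to the usual 2-channel ones. A minimal usage sketch (the identity F is only a placeholder for a real fundamental matrix, and the random points are illustrative):

    #include <opencv2/core/core.hpp>
    #include <opencv2/calib3d/calib3d.hpp>

    int main()
    {
        cv::Mat F = cv::Mat::eye(3, 3, CV_64F);    // placeholder fundamental matrix
        cv::Mat pts2(10, 1, CV_32FC2), pts3(10, 1, CV_32FC3);
        cv::randu(pts2, 0, 100);                   // 2-channel points, accepted before
        cv::randu(pts3, 0, 100);                   // 3-channel (homogeneous), accepted now
        cv::Mat lines2, lines3;
        cv::computeCorrespondEpilines(pts2, 1, F, lines2);
        cv::computeCorrespondEpilines(pts3, 1, F, lines3);
        return 0;
    }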


@@ -1249,6 +1249,7 @@ public:
     ~Ptr();
     //! copy constructor. Copies the members and calls addref()
     Ptr(const Ptr& ptr);
+    template<typename _Tp2> Ptr(const Ptr<_Tp2>& ptr);
     //! copy operator. Calls ptr.addref() and release() before copying the members
     Ptr& operator = (const Ptr& ptr);
     //! increments the reference counter


@@ -51,6 +51,7 @@
 #if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4714 ) //__forceinline is not inlined
 #pragma warning( disable: 4127 ) //conditional expression is constant
+#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data
 #endif

 namespace cv


@@ -2642,14 +2642,35 @@ template<typename _Tp> inline Ptr<_Tp>::operator const _Tp*() const { return obj
 template<typename _Tp> inline bool Ptr<_Tp>::empty() const { return obj == 0; }

+template<typename _Tp> template<typename _Tp2> Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p)
+    : obj(0), refcount(0)
+{
+    if (p.empty())
+        return;
+
+    _Tp* p_casted = dynamic_cast<_Tp*>(p.obj);
+    if (!p_casted)
+        return;
+
+    obj = p_casted;
+    refcount = p.refcount;
+    addref();
+}
+
 template<typename _Tp> template<typename _Tp2> inline Ptr<_Tp2> Ptr<_Tp>::ptr()
 {
     Ptr<_Tp2> p;
     if( !obj )
         return p;
+
+    _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
+    if (!obj_casted)
+        return p;
+
     if( refcount )
         CV_XADD(refcount, 1);
-    p.obj = dynamic_cast<_Tp2*>(obj);
+    p.obj = obj_casted;
     p.refcount = refcount;
     return p;
 }
@@ -2659,9 +2680,15 @@ template<typename _Tp> template<typename _Tp2> inline const Ptr<_Tp2> Ptr<_Tp>::
     Ptr<_Tp2> p;
     if( !obj )
         return p;
+
+    _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
+    if (!obj_casted)
+        return p;
+
     if( refcount )
         CV_XADD(refcount, 1);
-    p.obj = dynamic_cast<_Tp2*>(obj);
+    p.obj = obj_casted;
     p.refcount = refcount;
     return p;
 }
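
These dynamic_cast-based helpers fail softly: converting to an unrelated type yields an empty Ptr instead of a dangling one, and a successful conversion shares the reference count. A minimal sketch of the intended behavior (Base and Derived are illustrative types, not part of the commit):

    #include <opencv2/core/core.hpp>

    struct Base    { virtual ~Base() {} };
    struct Derived : Base {};

    int main()
    {
        cv::Ptr<Base> b(new Derived);
        cv::Ptr<Derived> d = b.ptr<Derived>();  // dynamic_cast succeeds, refcount shared
        cv::Ptr<Base> b2(d);                    // new template copy constructor, addref()
        cv::Ptr<Derived> bad = cv::Ptr<Base>(new Base).ptr<Derived>();
        return bad.empty() ? 0 : 1;             // failed cast -> empty Ptr, returns 0
    }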


@@ -321,7 +321,8 @@ AlgorithmInfo::AlgorithmInfo(const string& _name, Algorithm::Constructor create)
 {
     data = new AlgorithmInfoData;
     data->_name = _name;
-    alglist().add(_name, create);
+    if (!alglist().find(_name, create))
+        alglist().add(_name, create);
 }

 AlgorithmInfo::~AlgorithmInfo()
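
With this guard, constructing a second AlgorithmInfo under an already-registered name becomes a no-op instead of adding a duplicate entry to the global algorithm registry.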


@@ -917,6 +917,25 @@ bool CV_OperationsTest::operations1()
         if (!v10dzero[ii] == 0.0)
             throw test_excep();
     }

+    Mat A(1, 32, CV_32F), B;
+    for( int i = 0; i < A.cols; i++ )
+        A.at<float>(i) = (float)(i <= 12 ? i : 24 - i);
+    transpose(A, B);
+
+    int minidx[2] = {0, 0}, maxidx[2] = {0, 0};
+    double minval = 0, maxval = 0;
+    minMaxIdx(A, &minval, &maxval, minidx, maxidx);
+
+    if( !(minidx[0] == 0 && minidx[1] == 31 && maxidx[0] == 0 && maxidx[1] == 12 &&
+          minval == -7 && maxval == 12))
+        throw test_excep();
+
+    minMaxIdx(B, &minval, &maxval, minidx, maxidx);
+    if( !(minidx[0] == 31 && minidx[1] == 0 && maxidx[0] == 12 && maxidx[1] == 0 &&
+          minval == -7 && maxval == 12))
+        throw test_excep();
 }
 catch(const test_excep&)
 {
@@ -946,13 +965,13 @@ bool CV_OperationsTest::TestSVD()
     Mat Q(3,3,CV_32FC1);
     Mat U,Vt,R,T,W;

-    Dp.at<float>(0,0)=0.86483884; Dp.at<float>(0,1)= -0.3077251; Dp.at<float>(0,2)=-0.55711365;
-    Dp.at<float>(1,0)=0.49294353; Dp.at<float>(1,1)=-0.24209651; Dp.at<float>(1,2)=-0.25084701;
-    Dp.at<float>(2,0)=0; Dp.at<float>(2,1)=0; Dp.at<float>(2,2)=0;
+    Dp.at<float>(0,0)=0.86483884f; Dp.at<float>(0,1)= -0.3077251f; Dp.at<float>(0,2)=-0.55711365f;
+    Dp.at<float>(1,0)=0.49294353f; Dp.at<float>(1,1)=-0.24209651f; Dp.at<float>(1,2)=-0.25084701f;
+    Dp.at<float>(2,0)=0; Dp.at<float>(2,1)=0; Dp.at<float>(2,2)=0;

-    Dc.at<float>(0,0)=0.75632739; Dc.at<float>(0,1)= -0.38859656; Dc.at<float>(0,2)=-0.36773083;
-    Dc.at<float>(1,0)=0.9699229; Dc.at<float>(1,1)=-0.49858192; Dc.at<float>(1,2)=-0.47134098;
-    Dc.at<float>(2,0)=0.10566688; Dc.at<float>(2,1)=-0.060333252; Dc.at<float>(2,2)=-0.045333147;
+    Dc.at<float>(0,0)=0.75632739f; Dc.at<float>(0,1)= -0.38859656f; Dc.at<float>(0,2)=-0.36773083f;
+    Dc.at<float>(1,0)=0.9699229f; Dc.at<float>(1,1)=-0.49858192f; Dc.at<float>(1,2)=-0.47134098f;
+    Dc.at<float>(2,0)=0.10566688f; Dc.at<float>(2,1)=-0.060333252f; Dc.at<float>(2,2)=-0.045333147f;

     Q=Dp*Dc.t();
     SVD decomp;


@@ -134,49 +134,43 @@ static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat
     // Calculate the channels of the opponent color space
     {
-        // (R - G) / sqrt(2)
+        // (R - G)/sqrt(2), but converted to the destination data type
         MatConstIterator_<signed char> rIt = bgrChannels[2].begin<signed char>();
         MatConstIterator_<signed char> gIt = bgrChannels[1].begin<signed char>();
         MatIterator_<unsigned char> dstIt = opponentChannels[0].begin<unsigned char>();
-        float factor = 1.f / sqrt(2.f);
         for( ; dstIt != opponentChannels[0].end<unsigned char>(); ++rIt, ++gIt, ++dstIt )
         {
-            int value = static_cast<int>( static_cast<float>(static_cast<int>(*gIt)-static_cast<int>(*rIt)) * factor );
-            if( value < 0 ) value = 0;
-            if( value > 255 ) value = 255;
-            (*dstIt) = static_cast<unsigned char>(value);
+            float value = 0.5f * (static_cast<int>(*gIt) -
+                                  static_cast<int>(*rIt) + 255);
+            (*dstIt) = static_cast<unsigned char>(value + 0.5f);
         }
     }
     {
-        // (R + G - 2B)/sqrt(6)
+        // (R + G - 2B)/sqrt(6), but converted to the destination data type
         MatConstIterator_<signed char> rIt = bgrChannels[2].begin<signed char>();
         MatConstIterator_<signed char> gIt = bgrChannels[1].begin<signed char>();
         MatConstIterator_<signed char> bIt = bgrChannels[0].begin<signed char>();
         MatIterator_<unsigned char> dstIt = opponentChannels[1].begin<unsigned char>();
-        float factor = 1.f / sqrt(6.f);
         for( ; dstIt != opponentChannels[1].end<unsigned char>(); ++rIt, ++gIt, ++bIt, ++dstIt )
        {
-            int value = static_cast<int>( static_cast<float>(static_cast<int>(*rIt) + static_cast<int>(*gIt) - 2*static_cast<int>(*bIt)) *
-                                          factor );
-            if( value < 0 ) value = 0;
-            if( value > 255 ) value = 255;
-            (*dstIt) = static_cast<unsigned char>(value);
+            float value = 0.25f * (static_cast<int>(*rIt) + static_cast<int>(*gIt) -
+                                   2*static_cast<int>(*bIt) + 510);
+            (*dstIt) = static_cast<unsigned char>(value + 0.5f);
         }
     }
     {
-        // (R + G + B)/sqrt(3)
+        // (R + G + B)/sqrt(3), but converted to the destination data type
         MatConstIterator_<signed char> rIt = bgrChannels[2].begin<signed char>();
         MatConstIterator_<signed char> gIt = bgrChannels[1].begin<signed char>();
         MatConstIterator_<signed char> bIt = bgrChannels[0].begin<signed char>();
         MatIterator_<unsigned char> dstIt = opponentChannels[2].begin<unsigned char>();
-        float factor = 1.f / sqrt(3.f);
+        float factor = 1.f/3.f;
         for( ; dstIt != opponentChannels[2].end<unsigned char>(); ++rIt, ++gIt, ++bIt, ++dstIt )
         {
-            int value = static_cast<int>( static_cast<float>(static_cast<int>(*rIt) + static_cast<int>(*gIt) + static_cast<int>(*bIt)) *
-                                          factor );
-            if( value < 0 ) value = 0;
-            if( value > 255 ) value = 255;
-            (*dstIt) = static_cast<unsigned char>(value);
+            float value = factor * (static_cast<int>(*rIt) +
+                                    static_cast<int>(*gIt) +
+                                    static_cast<int>(*bIt));
+            (*dstIt) = static_cast<unsigned char>(value + 0.5f);
         }
     }
 }
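
The rewritten conversion replaces the 1/sqrt(c) scaling and explicit clamping with an affine mapping onto the full 8-bit range plus round-to-nearest (the + 0.5f before truncation). Since G - R spans [-255, 255], (G - R + 255)/2 lands exactly in [0, 255]; likewise R + G - 2B spans [-510, 510], so (R + G - 2B + 510)/4 fits, and the third channel is a plain mean. A standalone sketch of the per-pixel math (8-bit inputs assumed):

    // Per-pixel mapping used by the new code above
    static inline unsigned char opp1(int r, int g)        // (G - R) channel
    { return (unsigned char)(0.5f  * (g - r + 255) + 0.5f); }

    static inline unsigned char opp2(int r, int g, int b) // (R + G - 2B) channel
    { return (unsigned char)(0.25f * (r + g - 2*b + 510) + 0.5f); }

    static inline unsigned char opp3(int r, int g, int b) // (R + G + B)/3 channel
    { return (unsigned char)((r + g + b) / 3.0f + 0.5f); }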


@@ -274,14 +274,14 @@ void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat
     // allocate descriptor memory, estimate orientations, extract descriptors
     if( !extAll ) {
         // extract the best comparisons only
-        descriptors = cv::Mat::zeros(keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
+        descriptors = cv::Mat::zeros((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
 #if CV_SSE2
         __m128i* ptr= (__m128i*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
         // binary: 10000000 => char: 128 or hex: 0x80
-        const __m128i binMask = _mm_set_epi8(0x80, 0x80, 0x80, 0x80,
-                                             0x80, 0x80, 0x80, 0x80,
-                                             0x80, 0x80, 0x80, 0x80,
-                                             0x80, 0x80, 0x80, 0x80);
+        const __m128i binMask = _mm_set_epi8('\x80', '\x80', '\x80', '\x80',
+                                             '\x80', '\x80', '\x80', '\x80',
+                                             '\x80', '\x80', '\x80', '\x80',
+                                             '\x80', '\x80', '\x80', '\x80');
 #else
         std::bitset<FREAK_NB_PAIRS>* ptr = (std::bitset<FREAK_NB_PAIRS>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
 #endif
@@ -390,7 +390,7 @@ void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat
         }
     }
     else { // extract all possible comparisons for selection
-        descriptors = cv::Mat::zeros(keypoints.size(), 128, CV_8U);
+        descriptors = cv::Mat::zeros((int)keypoints.size(), 128, CV_8U);
         std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);

         for( size_t k = keypoints.size(); k--; ) {
@@ -522,16 +522,16 @@ vector<int> FREAK::selectPairs(const std::vector<Mat>& images
     Mat descriptorsFloat = Mat::zeros(descriptors.rows, 903, CV_32F);

     std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(descriptors.rows-1)*descriptors.step[0]);
-    for( size_t m = descriptors.rows; m--; ) {
-        for( size_t n = 903; n--; ) {
+    for( int m = descriptors.rows; m--; ) {
+        for( int n = 903; n--; ) {
             if( ptr->test(n) == true )
-                descriptorsFloat.at<float>(m,n)=1.0;
+                descriptorsFloat.at<float>(m,n)=1.0f;
         }
         --ptr;
     }

     std::vector<PairStat> pairStat;
-    for( size_t n = 903; n--; ) {
+    for( int n = 903; n--; ) {
         // the higher the variance, the better --> mean = 0.5
         PairStat tmp = { fabs( mean(descriptorsFloat.col(n))[0]-0.5 ) ,n};
         pairStat.push_back(tmp);

@@ -997,7 +997,7 @@ TEST( Features2d_Detector_Harris, regression )
     test.safe_run();
 }

-TEST( Features2d_Detector_MSER, regression )
+TEST( Features2d_Detector_MSER, DISABLED_regression )
 {
     CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") );
     test.safe_run();


@@ -203,5 +203,5 @@ void CV_MserTest::run(int)
     }
 }

-TEST(Features2d_MSER, regression) { CV_MserTest test; test.safe_run(); }
+TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }
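
Both MSER regression tests now carry the DISABLED_ prefix, which Google Test honors by skipping them unless --gtest_also_run_disabled_tests is passed.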


@@ -3278,7 +3278,7 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
     case CV_CAP_PROP_FPS:
         int fps = cvRound(value);
-        if (fps != VI.getFPS(0))
+        if (fps != VI.getFPS(index))
         {
             VI.stopDevice(index);
             VI.setIdealFramerate(index,fps);


@@ -172,10 +172,12 @@ CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc,
 void cvSetModeWindow_W32(const char* name, double prop_value);
 void cvSetModeWindow_GTK(const char* name, double prop_value);
 void cvSetModeWindow_CARBON(const char* name, double prop_value);
+void cvSetModeWindow_COCOA(const char* name, double prop_value);

 double cvGetModeWindow_W32(const char* name);
 double cvGetModeWindow_GTK(const char* name);
 double cvGetModeWindow_CARBON(const char* name);
+double cvGetModeWindow_COCOA(const char* name);

 double cvGetPropWindowAutoSize_W32(const char* name);
 double cvGetPropWindowAutoSize_GTK(const char* name);


@@ -62,6 +62,8 @@ CV_IMPL void cvSetWindowProperty(const char* name, int prop_id, double prop_valu
         cvSetModeWindow_GTK(name,prop_value);
     #elif defined (HAVE_CARBON)
         cvSetModeWindow_CARBON(name,prop_value);
+    #elif defined (HAVE_COCOA)
+        cvSetModeWindow_COCOA(name,prop_value);
     #endif
     break;
@@ -99,6 +101,8 @@ CV_IMPL double cvGetWindowProperty(const char* name, int prop_id)
         return cvGetModeWindow_GTK(name);
     #elif defined (HAVE_CARBON)
         return cvGetModeWindow_CARBON(name);
+    #elif defined (HAVE_COCOA)
+        return cvGetModeWindow_COCOA(name);
     #else
         return -1;
     #endif


@@ -109,13 +109,15 @@ static bool wasInitialized = false;
     CvMouseCallback mouseCallback;
     void *mouseParam;
     BOOL autosize;
-    BOOL firstContent;
+    BOOL firstContent;
+    int status;
 }

 @property(assign) CvMouseCallback mouseCallback;
 @property(assign) void *mouseParam;
 @property(assign) BOOL autosize;
 @property(assign) BOOL firstContent;
 @property(retain) NSMutableDictionary *sliders;
+@property(readwrite) int status;
 - (CVView *)contentView;
 - (void)cvSendMouseEvent:(NSEvent *)event type:(int)type flags:(int)flags;
 - (void)cvMouseEvent:(NSEvent *)event;
@@ -533,6 +535,75 @@ CV_IMPL int cvWaitKey (int maxWait)
     return returnCode;
 }

+double cvGetModeWindow_COCOA( const char* name )
+{
+    double result = -1;
+    CVWindow *window = nil;
+
+    CV_FUNCNAME( "cvGetModeWindow_COCOA" );
+
+    __BEGIN__;
+    if( name == NULL )
+    {
+        CV_ERROR( CV_StsNullPtr, "NULL name string" );
+    }
+
+    window = cvGetWindow( name );
+    if ( window == NULL )
+    {
+        CV_ERROR( CV_StsNullPtr, "NULL window" );
+    }
+
+    result = window.status;
+
+    __END__;
+    return result;
+}
+
+void cvSetModeWindow_COCOA( const char* name, double prop_value )
+{
+    CVWindow *window = nil;
+    NSDictionary *fullscreenOptions = nil;
+    NSAutoreleasePool* localpool = nil;
+
+    CV_FUNCNAME( "cvSetModeWindow_COCOA" );
+
+    __BEGIN__;
+    if( name == NULL )
+    {
+        CV_ERROR( CV_StsNullPtr, "NULL name string" );
+    }
+
+    window = cvGetWindow(name);
+    if ( window == NULL )
+    {
+        CV_ERROR( CV_StsNullPtr, "NULL window" );
+    }
+
+    if ( [window autosize] )
+    {
+        return;
+    }
+
+    localpool = [[NSAutoreleasePool alloc] init];
+
+    fullscreenOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:NSFullScreenModeSetting];
+    if ( [[window contentView] isInFullScreenMode] && prop_value==CV_WINDOW_NORMAL )
+    {
+        [[window contentView] exitFullScreenModeWithOptions:fullscreenOptions];
+        window.status=CV_WINDOW_NORMAL;
+    }
+    else if( ![[window contentView] isInFullScreenMode] && prop_value==CV_WINDOW_FULLSCREEN )
+    {
+        [[window contentView] enterFullScreenMode:[NSScreen mainScreen] withOptions:fullscreenOptions];
+        window.status=CV_WINDOW_FULLSCREEN;
+    }
+
+    [localpool drain];
+
+    __END__;
+}
+
 @implementation CVWindow

 @synthesize mouseCallback;
@@ -540,6 +611,7 @@ CV_IMPL int cvWaitKey (int maxWait)
 @synthesize autosize;
 @synthesize firstContent;
 @synthesize sliders;
+@synthesize status;

 - (void)cvSendMouseEvent:(NSEvent *)event type:(int)type flags:(int)flags {
     //cout << "cvSendMouseEvent" << endl;
@@ -787,9 +859,11 @@ CV_IMPL int cvWaitKey (int maxWait)
     NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
     CVWindow *cvwindow = (CVWindow *)[self window];
     int height = 0;
-    for(NSString *key in [cvwindow sliders]) {
-        height += [[[cvwindow sliders] valueForKey:key] frame].size.height;
-    }
+    if ([cvwindow respondsToSelector:@selector(sliders)]) {
+        for(NSString *key in [cvwindow sliders]) {
+            height += [[[cvwindow sliders] valueForKey:key] frame].size.height;
+        }
+    }

     NSRect imageRect = {{0,0}, {[image size].width, [image size].height}};


@@ -151,14 +151,13 @@ add_custom_target(${api_target} DEPENDS ${java_files} ${documented_java_files} $
 add_library(${the_module} SHARED ${handwrittren_h_sources} ${handwrittren_cpp_sources} ${generated_cpp_sources})

 if(BUILD_FAT_JAVA_LIB)
   set(__deps ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_MODULES_BUILD})
-  list(REMOVE_ITEM __deps ${the_module})
+  list(REMOVE_ITEM __deps ${the_module} opencv_ts)
   ocv_list_unique(__deps)
   set(__extradeps ${__deps})
   ocv_list_filterout(__extradeps "^opencv_")
   if(__extradeps)
     list(REMOVE_ITEM __deps ${__extradeps})
   endif()
   target_link_libraries(${the_module} -Wl,-whole-archive ${__deps} -Wl,-no-whole-archive ${__extradeps} ${OPENCV_LINKER_LIBS})
 else()
   target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_LINKER_LIBS})


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>OpenCV-2.4.2</name>
+  <name>OpenCV Library-2.4.2</name>
   <comment></comment>
   <projects>
   </projects>


@@ -1896,7 +1896,7 @@ void CvBoost::write_params( CvFileStorage* fs ) const
     else
         cvWriteInt( fs, "splitting_criteria", params.split_criteria );

-    cvWriteInt( fs, "ntrees", params.weak_count );
+    cvWriteInt( fs, "ntrees", weak->total );
     cvWriteReal( fs, "weight_trimming_rate", params.weight_trim_rate );

     data->write_params( fs );
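
Writing weak->total instead of params.weak_count stores the number of weak classifiers actually trained, so a boosted model whose training stopped early is saved with an ntrees value that matches its real ensemble size.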


@@ -273,14 +273,14 @@ namespace cv {
 namespace detail {

 void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features)
-{
+{
     find(image, features);
     features.img_size = image.size();
 }

 void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, const vector<Rect> &rois)
-{
+{
     vector<ImageFeatures> roi_features(rois.size());
     size_t total_kps_count = 0;
     int total_descriptors_height = 0;
@@ -294,8 +294,8 @@ void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, cons
     features.img_size = image.size();
     features.keypoints.resize(total_kps_count);
-    features.descriptors.create(total_descriptors_height,
-                                roi_features[0].descriptors.cols,
+    features.descriptors.create(total_descriptors_height,
+                                roi_features[0].descriptors.cols,
                                 roi_features[0].descriptors.type());

     int kp_idx = 0;
@@ -332,14 +332,14 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int
 {
     detector_ = Algorithm::create<FeatureDetector>("Feature2D.SURF");
     extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");

+    if( detector_.empty() || extractor_.empty() )
+        CV_Error( CV_StsNotImplemented, "OpenCV was built without SURF support" );

     detector_->set("hessianThreshold", hess_thresh);
     detector_->set("nOctaves", num_octaves);
     detector_->set("nOctaveLayers", num_layers);

     extractor_->set("nOctaves", num_octaves_descr);
     extractor_->set("nOctaveLayers", num_layers_descr);
 }
@@ -403,17 +403,17 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
             int xr = (c+1) * gray_image.cols / grid_size.width;
             int yr = (r+1) * gray_image.rows / grid_size.height;

-            LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "
-                << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "
-                << " yl=" << yl << ", yr=" << yr << ", "
-                << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "
-                << "gray_image.dims=" << gray_image.dims << "\n");
+            // LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "
+            //     << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "
+            //     << " yl=" << yl << ", yr=" << yr << ", "
+            //     << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "
+            //     << "gray_image.dims=" << gray_image.dims << "\n");

             Mat gray_image_part=gray_image(Range(yl, yr), Range(xl, xr));
-            LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "
-                << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "
-                << " gray_image_part.dims=" << gray_image_part.dims << ", "
-                << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");
+            // LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "
+            //     << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "
+            //     << " gray_image_part.dims=" << gray_image_part.dims << ", "
+            //     << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");

             (*orb)(gray_image_part, Mat(), points, descriptors);
@@ -583,11 +583,11 @@ void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFea
         if (matches_info.inliers_mask[i])
             matches_info.num_inliers++;

-    // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
+    // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
     // using Invariant Features"
     matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

-    // Set zero confidence to remove matches between too close images, as they don't provide
+    // Set zero confidence to remove matches between too close images, as they don't provide
     // additional information anyway. The threshold was set experimentally.
     matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;


@@ -52,6 +52,8 @@
 #include "opencv2/imgproc/imgproc_c.h"
 #include "opencv2/core/internal.hpp"

 #include <list>

+#ifdef HAVE_TEGRA_OPTIMIZATION
+#include "opencv2/video/video_tegra.hpp"
+#endif


@@ -1,33 +1,33 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Sample - 15-puzzle</name>
+  <name>OpenCV Sample - 15-puzzle</name>
   <comment></comment>
   <projects>
   </projects>
   <buildSpec>
     <buildCommand>
       <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
     <buildCommand>
       <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
     <buildCommand>
       <name>org.eclipse.jdt.core.javabuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
     <buildCommand>
       <name>com.android.ide.eclipse.adt.ApkBuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
   </buildSpec>
   <natures>
     <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
     <nature>org.eclipse.jdt.core.javanature</nature>
   </natures>
 </projectDescription>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">OpenCV Sample - 15-puzzle</string>
+    <string name="app_name">OCV 15 Puzzle</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>ColorBlobDetection</name>
+  <name>OpenCV Sample - color-blob-detection</name>
   <comment></comment>
   <projects>
   </projects>


@@ -7,7 +7,7 @@
     <uses-sdk android:minSdkVersion="8" />

     <application
-        android:icon="@drawable/ic_launcher"
+        android:icon="@drawable/icon"
         android:label="@string/app_name" >
         <activity
             android:name="org.opencv.samples.colorblobdetect.ColorBlobDetectionActivity"

Binary file removed, not shown (was 9.2 KiB).

Binary file removed, not shown (was 2.7 KiB).

Binary file removed, not shown (was 5.1 KiB).

Binary file removed, not shown (was 14 KiB).

Binary file added, not shown (2.0 KiB).


@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
-    android:layout_width="fill_parent"
-    android:layout_height="fill_parent"
-    android:orientation="vertical" >
-
-    <TextView
-        android:layout_width="fill_parent"
-        android:layout_height="wrap_content"
-        android:text="@string/hello" />
-
-</LinearLayout>


@@ -1,7 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="hello">Hello World, ColorBlobDetectionActivity!</string>
-    <string name="app_name">ColorBlobDetection</string>
+    <string name="app_name">OCV Color Blob Detection</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Sample - face-detection</name>
+  <name>OpenCV Sample - face-detection</name>
   <comment></comment>
   <projects>
   </projects>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">OpenCV Sample - face-detection</string>
+    <string name="app_name">OCV Face Detection</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Sample - image-manipulations</name>
+  <name>OpenCV Sample - image-manipulations</name>
   <comment></comment>
   <projects>
   </projects>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">OpenCV Sample - image-manipulations</string>
+    <string name="app_name">OCV Image Manipulations</string>
 </resources>


@@ -1,33 +1,33 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Tutorial 0 (Basic) - Android Camera</name>
+  <name>OpenCV Tutorial 0 - Android Camera</name>
   <comment></comment>
   <projects>
   </projects>
   <buildSpec>
     <buildCommand>
       <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
     <buildCommand>
       <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
     <buildCommand>
       <name>org.eclipse.jdt.core.javabuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
     <buildCommand>
       <name>com.android.ide.eclipse.adt.ApkBuilder</name>
       <arguments>
       </arguments>
     </buildCommand>
   </buildSpec>
   <natures>
     <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
     <nature>org.eclipse.jdt.core.javanature</nature>
   </natures>
 </projectDescription>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">Tutorial 0 (Basic) - Android Camera</string>
+    <string name="app_name">OCV T0 Android Camera</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Tutorial 1 (Basic) - Add OpenCV</name>
+  <name>OpenCV Tutorial 1 - Add OpenCV</name>
   <comment></comment>
   <projects>
   </projects>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">Tutorial 1 (Basic) - Add OpenCV</string>
+    <string name="app_name">OCV T1 Add OpenCV</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Tutorial 2 (Basic) - Use OpenCV Camera</name>
+  <name>OpenCV Tutorial 2 - Use OpenCV Camera</name>
   <comment></comment>
   <projects>
   </projects>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">Tutorial 2 (Basic) - Use OpenCV Camera</string>
+    <string name="app_name">OCV T2 Use OpenCV Camera</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Tutorial 3 (Advanced) - Add Native OpenCV</name>
+  <name>OpenCV Tutorial 3 - Add Native OpenCV</name>
   <comment></comment>
   <projects>
   </projects>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">Tutorial 3 (Advanced) - Add Native OpenCV</string>
+    <string name="app_name">OCV T3 Add Native OpenCV</string>
 </resources>


@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-  <name>Tutorial 4 (Advanced) - Mix Java+Native OpenCV</name>
+  <name>OpenCV Tutorial 4 - Mix Java+Native OpenCV</name>
   <comment></comment>
   <projects>
   </projects>

Binary file changed, not shown (5.6 KiB → 2.0 KiB).


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
-    <string name="app_name">Tutorial 4 (Advanced) - Mix Java+Native OpenCV</string>
+    <string name="app_name">OCV T4 Mix Java+Native OpenCV</string>
 </resources>


@@ -2014,7 +2014,7 @@ struct VocabTrainParams
     // It shouldn't matter which object class is specified here - visual vocab will still be the same.
     int vocabSize; //number of visual words in vocabulary to train
     int memoryUse; // Memory to preallocate (in MB) when training vocab.
-    // Change this depending on the size of the dataset/available memory.
+    // Change this depending on the size of the dataset/available memory.
     float descProportion; // Specifies the number of descriptors to use from each image as a proportion of the total num descs.
 };
@@ -2126,8 +2126,10 @@ static Mat trainVocabulary( const string& filename, VocData& vocData, const Voca
     if( !readVocabulary( filename, vocabulary) )
     {
         CV_Assert( dextractor->descriptorType() == CV_32FC1 );
-        const int descByteSize = dextractor->descriptorSize()*4;
-        const int maxDescCount = (trainParams.memoryUse * 1048576) / descByteSize; // Total number of descs to use for training.
+        const int elemSize = CV_ELEM_SIZE(dextractor->descriptorType());
+        const int descByteSize = dextractor->descriptorSize() * elemSize;
+        const int bytesInMB = 1048576;
+        const int maxDescCount = (trainParams.memoryUse * bytesInMB) / descByteSize; // Total number of descs to use for training.

         cout << "Extracting VOC data..." << endl;
         vector<ObdImage> images;
@@ -2142,9 +2144,8 @@ static Mat trainVocabulary( const string& filename, VocData& vocData, const Voca
         while( images.size() > 0 )
         {
-            if( bowTrainer.descripotorsCount() >= maxDescCount )
+            if( bowTrainer.descripotorsCount() > maxDescCount )
             {
-                assert( bowTrainer.descripotorsCount() == maxDescCount );
 #ifdef DEBUG_DESC_PROGRESS
                 cout << "Breaking due to full memory ( descriptors count = " << bowTrainer.descripotorsCount()
                      << "; descriptor size in bytes = " << descByteSize << "; all used memory = "

@@ -17,6 +17,7 @@ from common import clock, mosaic

 SZ = 20 # size of each digit is SZ x SZ
 CLASS_N = 10
+DIGITS_FN = 'digits.png'

 def load_digits(fn):
     print 'loading "%s" ...' % fn
@@ -95,7 +96,7 @@ def evaluate_model(model, digits, samples, labels):
 if __name__ == '__main__':
     print __doc__

-    digits, labels = load_digits('digits.png')
+    digits, labels = load_digits(DIGITS_FN)

     print 'preprocessing...'
     # shuffle digits


@@ -11,11 +11,10 @@ Usage:
 digits_adjust.py [--model {svm|knearest}] [--cloud] [--env <PiCloud environment>]

 --model {svm|knearest} - select the classifier (SVM is the default)
---cloud - use PiCloud computing platform (for SVM only)
+--cloud - use PiCloud computing platform
 --env - cloud environment name
 '''

-# TODO dataset preprocessing in cloud
 # TODO cloud env setup tutorial

 import numpy as np
@@ -24,6 +23,14 @@ from multiprocessing.pool import ThreadPool
 from digits import *

+try:
+    import cloud
+    have_cloud = True
+except ImportError:
+    have_cloud = False
+
 def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):
     n = len(samples)
     folds = np.array_split(np.arange(n), kfold)
@@ -46,66 +53,88 @@ def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None)
         scores = pool.map(f, xrange(kfold))
     return np.mean(scores)

-def adjust_KNearest(samples, labels):
-    print 'adjusting KNearest ...'
-    best_err, best_k = np.inf, -1
-    for k in xrange(1, 9):
-        err = cross_validate(KNearest, dict(k=k), samples, labels)
-        if err < best_err:
-            best_err, best_k = err, k
-        print 'k = %d, error: %.2f %%' % (k, err*100)
-    best_params = dict(k=best_k)
-    print 'best params:', best_params
-    return best_params
-
-def adjust_SVM(samples, labels, usecloud=False, cloud_env=''):
-    Cs = np.logspace(0, 5, 10, base=2)
-    gammas = np.logspace(-7, -2, 10, base=2)
-    scores = np.zeros((len(Cs), len(gammas)))
-    scores[:] = np.nan
-
-    if usecloud:
-        try:
-            import cloud
-        except ImportError:
-            print 'cloud module is not installed'
+class App(object):
+    def __init__(self, usecloud=False, cloud_env=''):
+        if usecloud and not have_cloud:
+            print 'warning: cloud module is not installed, running locally'
             usecloud = False
-    if usecloud:
-        print 'uploading dataset to cloud...'
-        np.savez('train.npz', samples=samples, labels=labels)
-        cloud.files.put('train.npz')
+        self.usecloud = usecloud
+        self.cloud_env = cloud_env

-    print 'adjusting SVM (may take a long time) ...'
-    def f(job):
-        i, j = job
-        params = dict(C = Cs[i], gamma=gammas[j])
-        score = cross_validate(SVM, params, samples, labels)
-        return i, j, score
-    def fcloud(job):
-        i, j = job
-        cloud.files.get('train.npz')
-        npz = np.load('train.npz')
-        params = dict(C = Cs[i], gamma=gammas[j])
-        score = cross_validate(SVM, params, npz['samples'], npz['labels'])
-        return i, j, score
-
-    if usecloud:
-        jids = cloud.map(fcloud, np.ndindex(*scores.shape), _env=cloud_env, _profile=True)
-        ires = cloud.iresult(jids)
-    else:
-        pool = ThreadPool(processes=cv2.getNumberOfCPUs())
-        ires = pool.imap_unordered(f, np.ndindex(*scores.shape))
+        if self.usecloud:
+            print 'uploading dataset to cloud...'
+            cloud.files.put(DIGITS_FN)
+            self.preprocess_job = cloud.call(self.preprocess, _env=self.cloud_env)
+        else:
+            self._samples, self._labels = self.preprocess()

-    for count, (i, j, score) in enumerate(ires):
-        scores[i, j] = score
-        print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)
-    print scores
+    def preprocess(self):
+        if self.usecloud:
+            cloud.files.get(DIGITS_FN)
+        digits, labels = load_digits(DIGITS_FN)
+        shuffle = np.random.permutation(len(digits))
+        digits, labels = digits[shuffle], labels[shuffle]
+        digits2 = map(deskew, digits)
+        samples = np.float32(digits2).reshape(-1, SZ*SZ) / 255.0
+        return samples, labels
+
+    def get_dataset(self):
+        if self.usecloud:
+            return cloud.result(self.preprocess_job)
+        else:
+            return self._samples, self._labels
+
+    def run_jobs(self, f, jobs):
+        if self.usecloud:
+            jids = cloud.map(f, jobs, _env=self.cloud_env, _profile=True, _depends_on=self.preprocess_job)
+            ires = cloud.iresult(jids)
+        else:
+            pool = ThreadPool(processes=cv2.getNumberOfCPUs())
+            ires = pool.imap_unordered(f, jobs)
+        return ires
+
+    def adjust_SVM(self):
+        Cs = np.logspace(0, 5, 10, base=2)
+        gammas = np.logspace(-7, -2, 10, base=2)
+        scores = np.zeros((len(Cs), len(gammas)))
+        scores[:] = np.nan
+
+        print 'adjusting SVM (may take a long time) ...'
+        def f(job):
+            i, j = job
+            samples, labels = self.get_dataset()
+            params = dict(C = Cs[i], gamma=gammas[j])
+            score = cross_validate(SVM, params, samples, labels)
+            return i, j, score
+
+        ires = self.run_jobs(f, np.ndindex(*scores.shape))
+        for count, (i, j, score) in enumerate(ires):
+            scores[i, j] = score
+            print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)
+        print scores
+
+        i, j = np.unravel_index(scores.argmin(), scores.shape)
+        best_params = dict(C = Cs[i], gamma=gammas[j])
+        print 'best params:', best_params
+        print 'best error: %.2f %%' % (scores.min()*100)
+        return best_params
+
+    def adjust_KNearest(self):
+        print 'adjusting KNearest ...'
+        def f(k):
+            samples, labels = self.get_dataset()
+            err = cross_validate(KNearest, dict(k=k), samples, labels)
+            return k, err
+        best_err, best_k = np.inf, -1
+        for k, err in self.run_jobs(f, xrange(1, 9)):
+            if err < best_err:
+                best_err, best_k = err, k
+            print 'k = %d, error: %.2f %%' % (k, err*100)
+        best_params = dict(k=best_k)
+        print 'best params:', best_params, 'err: %.2f' % (best_err*100)
+        return best_params

-    i, j = np.unravel_index(scores.argmin(), scores.shape)
-    best_params = dict(C = Cs[i], gamma=gammas[j])
-    print 'best params:', best_params
-    print 'best error: %.2f %%' % (scores.min()*100)
-    return best_params

 if __name__ == '__main__':
     import getopt
@@ -113,6 +142,7 @@ if __name__ == '__main__':
     print __doc__

     args, _ = getopt.getopt(sys.argv[1:], '', ['model=', 'cloud', 'env='])
     args = dict(args)
     args.setdefault('--model', 'svm')
@@ -121,16 +151,10 @@ if __name__ == '__main__':
         print 'unknown model "%s"' % args['--model']
         sys.exit(1)

-    digits, labels = load_digits('digits.png')
-    shuffle = np.random.permutation(len(digits))
-    digits, labels = digits[shuffle], labels[shuffle]
-    digits2 = map(deskew, digits)
-    samples = np.float32(digits2).reshape(-1, SZ*SZ) / 255.0
-
     t = clock()
+    app = App(usecloud='--cloud' in args, cloud_env = args['--env'])
     if args['--model'] == 'knearest':
-        adjust_KNearest(samples, labels)
+        app.adjust_KNearest()
     else:
-        adjust_SVM(samples, labels, usecloud='--cloud' in args, cloud_env = args['--env'])
+        app.adjust_SVM()
     print 'work time: %f s' % (clock() - t)


@@ -0,0 +1,78 @@
+'''
+Robust line fitting.
+==================
+
+Example of using cv2.fitLine function for fitting line to points in presence of outliers.
+
+Usage
+-----
+fitline.py
+
+Switch through different M-estimator functions and see how well the robust functions
+fit the line even in case of ~50% of outliers.
+
+Keys
+----
+SPACE - generate random points
+f     - change distance function
+ESC   - exit
+'''
+
+import numpy as np
+import cv2
+import itertools as it
+from common import draw_str
+
+w, h = 512, 256
+
+def toint(p):
+    return tuple(map(int, p))
+
+def sample_line(p1, p2, n, noise=0.0):
+    p1 = np.float32(p1)
+    t = np.random.rand(n,1)
+    return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise
+
+dist_func_names = it.cycle('CV_DIST_L2 CV_DIST_L1 CV_DIST_L12 CV_DIST_FAIR CV_DIST_WELSCH CV_DIST_HUBER'.split())
+cur_func_name = dist_func_names.next()
+
+def update(_=None):
+    noise = cv2.getTrackbarPos('noise', 'fit line')
+    n = cv2.getTrackbarPos('point n', 'fit line')
+    r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0
+    outn = int(n*r)
+
+    p0, p1 = (90, 80), (w-90, h-80)
+    img = np.zeros((h, w, 3), np.uint8)
+    cv2.line(img, toint(p0), toint(p1), (0, 255, 0))
+
+    if n > 0:
+        line_points = sample_line(p0, p1, n-outn, noise)
+        outliers = np.random.rand(outn, 2) * (w, h)
+        points = np.vstack([line_points, outliers])
+        for p in line_points:
+            cv2.circle(img, toint(p), 2, (255, 255, 255), -1)
+        for p in outliers:
+            cv2.circle(img, toint(p), 2, (64, 64, 255), -1)
+        func = getattr(cv2.cv, cur_func_name)
+        vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
+        cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))
+    draw_str(img, (20, 20), cur_func_name)
+    cv2.imshow('fit line', img)
+
+if __name__ == '__main__':
+    print __doc__
+
+    cv2.namedWindow('fit line')
+    cv2.createTrackbar('noise', 'fit line', 3, 50, update)
+    cv2.createTrackbar('point n', 'fit line', 100, 500, update)
+    cv2.createTrackbar('outlier %', 'fit line', 30, 100, update)
+    while True:
+        update()
+        ch = cv2.waitKey(0)
+        if ch == ord('f'):
+            cur_func_name = dist_func_names.next()
+        if ch == 27:
+            break


@@ -1,3 +1,32 @@
+'''
+Video capture sample.
+
+Sample shows how VideoCapture class can be used to acquire video
+frames from a camera or a movie file. Also the sample provides
+an example of procedural video generation by an object, mimicking
+the VideoCapture interface (see Chess class).
+
+'create_capture' is a convenience function for capture creation,
+falling back to procedural video in case of error.
+
+Usage:
+    video.py [--shotdir <shot path>] [source0] [source1] ...
+
+    sourceN is an
+     - integer number for camera capture
+     - name of video file
+     - synth:<params> for procedural video
+
+Synth examples:
+    synth:bg=../cpp/lena.jpg:noise=0.1
+    synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480
+
+Keys:
+    ESC    - exit
+    SPACE  - save current frame to <shot path> directory
+'''
+
 import numpy as np
 import cv2
 from time import clock
@@ -100,8 +129,7 @@ presets = dict(
 def create_capture(source = 0, fallback = presets['chess']):
-    '''
-    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
+    '''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
     '''
     source = str(source).strip()
     chunks = source.split(':')
@@ -136,9 +164,7 @@ if __name__ == '__main__':
     import sys
     import getopt

-    print 'USAGE: video.py [--shotdir <dir>] [source0] [source1] ...'
-    print "source: '<int>' or '<filename>' or 'synth:<params>'"
-    print
+    print __doc__

     args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
     args = dict(args)
@@ -146,8 +172,6 @@ if __name__ == '__main__':
     if len(sources) == 0:
         sources = [ 0 ]

-    print 'Press SPACE to save current frame'
-
     caps = map(create_capture, sources)
     shot_idx = 0
     while True:


@@ -1,18 +1,31 @@
-import numpy as np
-import cv2
-from common import Sketcher
+'''
+Watershed segmentation
+=========

-help_message = '''
-USAGE: watershed.py [<image>]
 This program demonstrates the watershed segmentation algorithm
 in OpenCV: watershed().

-Use keys 1 - 7 to switch marker color
+Usage
+-----
+watershed.py [image filename]
+
+Keys
+----
+1-7   - switch marker color
 SPACE - update segmentation
 r     - reset
-a     - switch autoupdate
+a     - toggle autoupdate
 ESC   - exit
 '''

+import numpy as np
+import cv2
+from common import Sketcher
+
 class App:
     def __init__(self, fn):
         self.img = cv2.imread(fn)
@@ -60,5 +73,5 @@ if __name__ == '__main__':
     import sys
     try: fn = sys.argv[1]
     except: fn = '../cpp/fruits.jpg'
-    print help_message
+    print __doc__
     App(fn).run()