diff --git a/modules/python/CMakeLists.txt b/modules/python/CMakeLists.txt
index 91246563e..b8ec50470 100644
--- a/modules/python/CMakeLists.txt
+++ b/modules/python/CMakeLists.txt
@@ -5,7 +5,8 @@ project(opencv_python)
 
 include_directories(${PYTHON_INCLUDE_PATH})
 include_directories(
-    "${CMAKE_CURRENT_SOURCE_DIR}/src"
+    "${CMAKE_CURRENT_SOURCE_DIR}/src1"
+    "${CMAKE_CURRENT_SOURCE_DIR}/src2"
     "${CMAKE_SOURCE_DIR}/modules/core/include"
     "${CMAKE_SOURCE_DIR}/modules/imgproc/include"
     "${CMAKE_SOURCE_DIR}/modules/video/include"
@@ -30,64 +31,75 @@ set(opencv_hdrs "${CMAKE_SOURCE_DIR}/modules/core/include/opencv2/core/core.hpp"
     "${CMAKE_SOURCE_DIR}/modules/features2d/include/opencv2/features2d/features2d.hpp"
     "${CMAKE_SOURCE_DIR}/modules/calib3d/include/opencv2/calib3d/calib3d.hpp"
     "${CMAKE_SOURCE_DIR}/modules/objdetect/include/opencv2/objdetect/objdetect.hpp"
-    "${CMAKE_SOURCE_DIR}/modules/python/src/opencv_extra_api.hpp")
-
-set(generated_hdrs
-        "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_funcs.h"
-        "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_func_tab.h"
-        "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_types.h"
-        "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_type_reg.h"
-        "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_const_reg.h")
+    "${CMAKE_SOURCE_DIR}/modules/python/src2/opencv_extra_api.hpp")
 
 if(MSVC)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
 endif()
 
-file(GLOB lib_srcs "src/*.cpp")
-file(GLOB lib_hdrs "src/*.h")
-
 add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated0.i
-   COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/gen.py" "${CMAKE_CURRENT_SOURCE_DIR}/src"
-   DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/api
-   DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/defs
-   DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/gen.py
+   COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src1/gen.py" "${CMAKE_CURRENT_SOURCE_DIR}/src1"
+   DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src1/api
+   DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src1/defs
+   DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src1/gen.py
    )
 
-add_custom_command(
-  OUTPUT ${generated_hdrs}
-  COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/gen2.py" ${CMAKE_CURRENT_BINARY_DIR} ${opencv_hdrs}
-  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/gen2.py
-  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/hdr_parser.py
-  DEPENDS ${opencv_hdrs}
-  )
 
-set(the_target "opencv_python")
-add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${CMAKE_CURRENT_BINARY_DIR}/generated0.i src/opencv2x.h src/opencv_extra_api.hpp ${generated_hdrs})
-target_link_libraries(${the_target} ${PYTHON_LIBRARIES} opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
+set(cv_target "opencv_python")
+add_library(${cv_target} src1/cv.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i)
+target_link_libraries(${cv_target} ${PYTHON_LIBRARIES} opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
     
-set_target_properties(${the_target} PROPERTIES PREFIX "")
-set_target_properties(${the_target} PROPERTIES OUTPUT_NAME "cv")
+set_target_properties(${cv_target} PROPERTIES PREFIX "")
+set_target_properties(${cv_target} PROPERTIES OUTPUT_NAME "cv")
 
 execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.sysconfig; print distutils.sysconfig.get_config_var('SO')"
                 RESULT_VARIABLE PYTHON_CVPY_PROCESS
                 OUTPUT_VARIABLE CVPY_SUFFIX
                 OUTPUT_STRIP_TRAILING_WHITESPACE)
 
-set_target_properties(${the_target} PROPERTIES SUFFIX ${CVPY_SUFFIX})
+set_target_properties(${cv_target} PROPERTIES SUFFIX ${CVPY_SUFFIX})
 
-set(cvpy_files cv${CVPY_SUFFIX})
+set(cvpymodules ${cv_target})
+
+if(PYTHON_USE_NUMPY)
+
+set(cv2_generated_hdrs
+    "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_funcs.h"
+    "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_func_tab.h"
+    "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_types.h"
+    "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_type_reg.h"
+    "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_const_reg.h")
+
+add_custom_command(
+      OUTPUT ${cv2_generated_hdrs}
+      COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src2/gen2.py" ${CMAKE_CURRENT_BINARY_DIR} ${opencv_hdrs}
+      DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/gen2.py
+      DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/hdr_parser.py
+      DEPENDS ${opencv_hdrs})
+
+set(cv2_target "opencv2_python")
+add_library(${cv2_target} src2/cv2.cpp src2/opencv_extra_api.hpp ${cv2_generated_hdrs})
+target_link_libraries(${cv2_target} ${PYTHON_LIBRARIES} opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
+
+set_target_properties(${cv2_target} PROPERTIES PREFIX "")
+set_target_properties(${cv2_target} PROPERTIES OUTPUT_NAME "cv2")
+set_target_properties(${cv2_target} PROPERTIES SUFFIX ${CVPY_SUFFIX})
+
+set(cvpymodules ${cvpymodules} ${cv2_target})
+
+endif()
 
 if(WIN32)
-    install(TARGETS ${the_target}
-            RUNTIME DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
-            LIBRARY DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
-            ARCHIVE DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
-            )
+  install(TARGETS ${cvpymodules}
+          RUNTIME DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
+          LIBRARY DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
+          ARCHIVE DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
+          )
 else()
-    #install(FILES ${LIBRARY_OUTPUT_PATH}/cv${CVPY_SUFFIX} DESTINATION ${PYTHON_PACKAGES_PATH})
-    install(TARGETS ${the_target} 
-            RUNTIME DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
-            LIBRARY DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
-            ARCHIVE DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main)
+  #install(FILES ${LIBRARY_OUTPUT_PATH}/cv${CVPY_SUFFIX} DESTINATION ${PYTHON_PACKAGES_PATH})
+  install(TARGETS ${cvpymodules} 
+          RUNTIME DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
+          LIBRARY DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
+          ARCHIVE DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main)
 endif()
diff --git a/modules/python/src/api b/modules/python/src/api
deleted file mode 100644
index 993640770..000000000
--- a/modules/python/src/api
+++ /dev/null
@@ -1,1804 +0,0 @@
-# Macros
-CV_RGB CvScalar
-  double red
-  double grn
-  double blu
-CV_MAT_CN int
-  int i
-CV_MAT_DEPTH int
-  int i
-Scalar CvScalar
-  double val0
-  double val1 0
-  double val2 0
-  double val3 0
-ScalarAll CvScalar
-  double val0123
-RealScalar CvScalar
-  double val0
-CV_IABS int
-  int a
-CV_CMP int
-  int a
-  int b
-CV_SIGN int
-  int a
-CV_FOURCC int
-  char c1
-  char c2
-  char c3
-  char c4
-CV_MAKETYPE int
-  int depth
-  int cn
-CV_8UC int
-  int n
-CV_8SC int
-  int n
-CV_16UC int
-  int n
-CV_16SC int
-  int n
-CV_32SC int
-  int n
-CV_32FC int
-  int n
-CV_64FC int
-  int n
-
-# Initialization
-CloneImage IplImage*
-  IplImage image
-SetImageCOI
-  IplImage image
-  int coi
-GetImageCOI int
-  IplImage image
-SetImageROI
-  IplImage image
-  CvRect rect
-ResetImageROI
-  IplImage image
-GetImageROI CvRect
-  IplImage image
-CloneMat CvMat*
-  CvMat mat
-CloneMatND CvMatND*
-  CvMatND mat
-
-# Accessing Elements and sub-Arrays
-
-Get1D CvScalar
-  CvArr arr
-  int idx
-Get2D CvScalar
-  CvArr arr
-  int idx0
-  int idx1
-Get3D CvScalar
-  CvArr arr
-  int idx0
-  int idx1
-  int idx2
-GetND CvScalar
-  CvArr arr
-  ints indices
-GetReal1D double
-  CvArr arr
-  int idx0
-GetReal2D double
-  CvArr arr
-  int idx0
-  int idx1
-GetReal3D double
-  CvArr arr
-  int idx0
-  int idx1
-  int idx2
-GetRealND double
-  CvArr arr
-  ints idx
-mGet double
-  CvMat mat
-  int row
-  int col
-Set1D
-  CvArr arr
-  int idx
-  CvScalar value
-Set2D
-  CvArr arr
-  int idx0
-  int idx1
-  CvScalar value
-Set3D
-  CvArr arr
-  int idx0
-  int idx1
-  int idx2
-  CvScalar value
-SetND
-  CvArr arr
-  ints indices
-  CvScalar value
-SetReal1D
-  CvArr arr
-  int idx
-  double value
-SetReal2D
-  CvArr arr
-  int idx0
-  int idx1
-  double value
-SetReal3D
-  CvArr arr
-  int idx0
-  int idx1
-  int idx2
-  double value
-SetRealND
-  CvArr arr
-  ints indices
-  double value
-mSet
-  CvMat mat
-  int row
-  int col
-  double value
-ClearND
-  CvArr arr
-  ints idx
-
-# Sequences
-CV_IS_SEQ_INDEX int
-  CvSeq s
-CV_IS_SEQ_CURVE int
-  CvSeq s
-CV_IS_SEQ_CLOSED int
-  CvSeq s
-CV_IS_SEQ_CONVEX int
-  CvSeq s
-CV_IS_SEQ_HOLE int
-  CvSeq s
-CV_IS_SEQ_SIMPLE int
-  CvSeq s
-
-
-# Curves and Shapes
-Line
-  CvArr img
-  CvPoint pt1
-  CvPoint pt2
-  CvScalar color
-  int thickness 1
-  int lineType 8
-  int shift 0
-Rectangle
-  CvArr img
-  CvPoint pt1
-  CvPoint pt2
-  CvScalar color
-  int thickness 1
-  int lineType 8
-  int shift 0
-Circle
-  CvArr img
-  CvPoint center
-  int radius
-  CvScalar color
-  int thickness 1
-  int lineType 8
-  int shift 0
-Ellipse
-  CvArr img
-  CvPoint center
-  CvSize axes
-  double angle
-  double start_angle
-  double end_angle
-  CvScalar color
-  int thickness 1
-  int lineType 8
-  int shift 0
-EllipseBox
-  CvArr img
-  CvBox2D box
-  CvScalar color
-  int thickness 1
-  int lineType 8
-  int shift 0
-FillPoly
-  CvArr img
-  pts_npts_contours polys
-  CvScalar color
-  int lineType 8
-  int shift 0
-FillConvexPoly
-  CvArr img
-  CvPoints pn
-  CvScalar color
-  int lineType 8
-  int shift 0
-PolyLine
-  CvArr img
-  pts_npts_contours polys
-  int is_closed
-  CvScalar color
-  int thickness 1
-  int lineType 8
-  int shift 0
-
-#Text
-InitFont font
-  CvFont font /O
-  int fontFace
-  double hscale
-  double vscale
-  double shear 0
-  int thickness 1
-  int lineType 8
-PutText
-  CvArr img
-  char* text
-  CvPoint org
-  CvFont* font
-  CvScalar color
-GetTextSize textSize,baseline
-  char* textString
-  CvFont* font
-  CvSize textSize /O
-  int baseline /O
-
-# Point Sets and Contours
-DrawContours
-  CvArr img
-  CvSeq contour
-  CvScalar external_color
-  CvScalar hole_color
-  int max_level
-  int thickness 1
-  int lineType 8
-  CvPoint offset cvPoint(0,0)
-
-# RTTI and Generic Functions
-Save
-  char* filename
-  generic structPtr
-  char* name NULL
-  char* comment NULL
-Load generic
-  char* filename
-  CvMemStorage storage NULL
-  char* name NULL
-
-# Accessing Elements and sub-Arrays
-GetRow submat
-  CvArr arr
-  CvMat submat /J:arr,O,A
-  int row
-GetRows submat
-  CvArr arr
-  CvMat submat /J:arr,O,A
-  int startRow
-  int endRow
-  int deltaRow 1
-GetCol submat
-  CvArr arr
-  CvMat submat /J:arr,O,A
-  int col
-GetCols submat
-  CvArr arr
-  CvMat submat /J:arr,O,A
-  int startCol
-  int endCol
-GetDiag submat
-  CvArr arr
-  CvMat submat /J:arr,O,A
-  int diag 0
-GetSubRect submat
-  CvArr arr
-  CvMat submat /J:arr,O,A
-  CvRect rect
-GetSize CvSize
-  CvArr arr
-GetElemType int
-  CvArr arr
-
-# Copying and Filling
-Copy
-  CvArr src
-  CvArr dst
-  CvArr mask NULL
-Set
-  CvArr arr
-  CvScalar value
-  CvArr mask NULL
-SetZero
-  CvArr arr
-Zero
-  CvArr arr
-SetIdentity
-  CvArr mat
-  CvScalar value cvRealScalar(1)
-Range
-  CvArr mat
-  double start
-  double end
-
-# Transforms and Permutations
-# Reshape, ReshapeND - requires special data refcount code
-Repeat
-  CvArr src
-  CvArr dst
-Flip
-  CvArr src
-  CvArr dst NULL
-  int flipMode 0
-Split
-  CvArr src
-  CvArr dst0
-  CvArr dst1
-  CvArr dst2
-  CvArr dst3
-CvtPixToPlane
-  CvArr src
-  CvArr dst0
-  CvArr dst1
-  CvArr dst2
-  CvArr dst3
-Merge
-  CvArr src0
-  CvArr src1
-  CvArr src2
-  CvArr src3
-  CvArr dst
-MixChannels
-  cvarr_count src /K
-  cvarr_count dst
-  intpair fromTo 
-RandShuffle
-  CvArr mat
-  CvRNG* rng
-  double iter_factor 1.0
-Sort
-  CvArr src
-  CvArr dst
-  CvArr idxmat
-  int flags 0
-
-# Arithmetic, Logic and Comparison
-LUT
-  CvArr src
-  CvArr dst
-  CvArr lut
-ConvertScale
-  CvArr src
-  CvArr dst
-  double scale 1.0
-  double shift 0.0
-CvtScale
-  CvArr src
-  CvArr dst
-  double scale 1.0
-  double shift 0.0
-Scale
-  CvArr src
-  CvArr dst
-  double scale 1.0
-  double shift 0.0
-Convert
-  CvArr src
-  CvArr dst
-ConvertScaleAbs
-  CvArr src
-  CvArr dst
-  double scale 1.0
-  double shift 0.0
-Add
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  CvArr mask NULL
-AddS
-  CvArr src
-  CvScalar value
-  CvArr dst
-  CvArr mask NULL
-AddWeighted
-  CvArr src1
-  double alpha
-  CvArr src2
-  double beta
-  double gamma
-  CvArr dst
-Sub
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  CvArr mask NULL
-SubS
-  CvArr src
-  CvScalar value
-  CvArr dst
-  CvArr mask NULL
-SubRS
-  CvArr src
-  CvScalar value
-  CvArr dst
-  CvArr mask NULL
-Mul
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  double scale 1.0
-Div
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  double scale 1.0
-And
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  CvArr mask NULL
-AndS
-  CvArr src
-  CvScalar value
-  CvArr dst
-  CvArr mask NULL
-Or
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  CvArr mask NULL
-OrS
-  CvArr src
-  CvScalar value
-  CvArr dst
-  CvArr mask NULL
-Xor
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  CvArr mask NULL
-XorS
-  CvArr src
-  CvScalar value
-  CvArr dst
-  CvArr mask NULL
-Not
-  CvArr src
-  CvArr dst
-Cmp
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  int cmpOp
-CmpS
-  CvArr src
-  double value
-  CvArr dst
-  int cmpOp
-InRange
-  CvArr src
-  CvArr lower
-  CvArr upper
-  CvArr dst
-InRangeS
-  CvArr src
-  CvScalar lower
-  CvScalar upper
-  CvArr dst
-Max
-  CvArr src1
-  CvArr src2
-  CvArr dst
-MaxS
-  CvArr src
-  double value
-  CvArr dst
-Min
-  CvArr src1
-  CvArr src2
-  CvArr dst
-MinS
-  CvArr src
-  double value
-  CvArr dst
-AbsDiff
-  CvArr src1
-  CvArr src2
-  CvArr dst
-AbsDiffS
-  CvArr src
-  CvArr dst
-  CvScalar value
-Abs
-  CvArr src
-  CvArr dst
-
-# Statistics
-CountNonZero int
-  CvArr arr
-Sum CvScalar
-  CvArr arr
-Avg CvScalar
-  CvArr arr
-  CvArr mask NULL
-AvgSdv mean,stdDev
-  CvArr arr
-  CvScalar mean /O
-  CvScalar stdDev /O
-  CvArr mask NULL
-MinMaxLoc minVal,maxVal,minLoc,maxLoc
-  CvArr arr
-  double minVal /O
-  double maxVal /O
-  CvPoint minLoc /O
-  CvPoint maxLoc /O
-  CvArr mask NULL
-Norm double
-  CvArr arr1
-  CvArr arr2
-  int normType CV_L2
-  CvArr mask NULL
-Reduce
-  CvArr src
-  CvArr dst
-  int dim -1
-  int op CV_REDUCE_SUM
-
-# Linear Algebra
-DotProduct double
-  CvArr src1
-  CvArr src2
-Normalize
-  CvArr src
-  CvArr dst
-  double a 1.0
-  double b 0.0
-  int norm_type CV_L2
-  CvArr mask NULL
-CrossProduct
-  CvArr src1
-  CvArr src2
-  CvArr dst
-ScaleAdd
-  CvArr src1
-  CvScalar scale
-  CvArr src2
-  CvArr dst
-GEMM
-  CvArr src1
-  CvArr src2
-  double alpha
-  CvArr src3
-  double beta
-  CvArr dst
-  int tABC 0
-MatMulAdd
-  CvArr src1
-  CvArr src2
-  CvArr src3
-  CvArr dst
-MatMul
-  CvArr src1
-  CvArr src2
-  CvArr dst
-Transform
-  CvArr src
-  CvArr dst
-  CvMat transmat
-  CvMat shiftvec NULL
-PerspectiveTransform
-  CvArr src
-  CvArr dst
-  CvMat mat
-MulTransposed
-  CvArr src
-  CvArr dst
-  int order
-  CvArr delta NULL
-  double scale 1.0
-Trace CvScalar
-  CvArr mat
-Transpose
-  CvArr src
-  CvArr dst
-Det double
-  CvArr mat
-Invert double
-  CvArr src
-  CvArr dst
-  int method CV_LU
-Solve
-  CvArr A
-  CvArr B
-  CvArr X
-  int method CV_LU
-SVD
-  CvArr A
-  CvArr W
-  CvArr U NULL
-  CvArr V NULL
-  int flags 0
-SVBkSb
-  CvArr W
-  CvArr U
-  CvArr V
-  CvArr B
-  CvArr X
-  int flags
-EigenVV
-  CvArr mat
-  CvArr evects
-  CvArr evals
-  double eps
-  int lowindex 0
-  int highindex 0
-CalcCovarMatrix
-  cvarr_count vects /K
-  CvArr covMat
-  CvArr avg
-  int flags
-Mahalonobis
-  CvArr vec1
-  CvArr vec2
-  CvArr mat
-CalcPCA
-  CvArr data
-  CvArr avg
-  CvArr eigenvalues
-  CvArr eigenvectors
-  int flags
-ProjectPCA
-  CvArr data
-  CvArr avg
-  CvArr eigenvectors
-  CvArr result
-BackProjectPCA
-  CvArr proj
-  CvArr avg
-  CvArr eigenvects
-  CvArr result
-
-# Math Functions
-Round int
-  double value
-Floor int
-  double value
-Ceil int
-  double value
-Sqrt float
-  float value
-InvSqrt float
-  float value
-Cbrt float
-  float value
-FastArctan float
-  float y
-  float x
-IsNaN int
-  double value
-IsInf int
-  double value
-CartToPolar
-  CvArr x
-  CvArr y
-  CvArr magnitude
-  CvArr angle NULL
-  int angleInDegrees 0
-PolarToCart
-  CvArr magnitude
-  CvArr angle
-  CvArr x
-  CvArr y
-  int angleInDegrees 0
-Pow
-  CvArr src
-  CvArr dst
-  double power
-Exp
-  CvArr src
-  CvArr dst
-Log
-  CvArr src
-  CvArr dst
-SolveCubic
-  CvMat coeffs
-  CvMat roots
-SolvePoly
-  CvMat coeffs
-  CvMat roots
-  int maxiter 10
-  int fig 10
-  
-# Random Number Generation
-RNG CvRNG
-  int64 seed -1LL
-RandArr
-  CvRNG* rng
-  CvArr arr
-  int distType
-  CvScalar param1
-  CvScalar param2
-RandInt unsigned
-  CvRNG* rng
-RandReal double
-  CvRNG* rng
-
-# Discrete Transforms
-DFT
-  CvArr src
-  CvArr dst
-  int flags
-  int nonzeroRows 0
-GetOptimalDFTSize int
-  int size0
-MulSpectrums
-  CvArr src1
-  CvArr src2
-  CvArr dst
-  int flags
-DCT
-  CvArr src
-  CvArr dst
-  int flags
-
-# Sequences
-SeqRemove
-  CvSeq seq
-  int index
-ClearSeq
-  CvSeq seq
-CloneSeq
-  CvSeq seq
-  CvMemStorage storage
-SeqRemoveSlice
-  CvSeq seq
-  CvSlice slice
-SeqInvert
-  CvSeq seq
-
-# Miscellaneous Functions
-CheckArr int
-  CvArr arr
-  int flags 0
-  double min_val 0
-  double max_val 0
-KMeans2 double
-  CvArr samples
-  int nclusters
-  CvArr labels
-  CvTermCriteria termcrit
-  int attempts 1
-  int flags 0
-  CvArr centers NULL
-
-# Gradients, Edges, Corners and Features
-Sobel
-  CvArr src
-  CvArr dst
-  int xorder
-  int yorder
-  int apertureSize 3
-Laplace
-  CvArr src
-  CvArr dst
-  int apertureSize 3
-Canny
-  CvArr image
-  CvArr edges
-  double threshold1
-  double threshold2
-  int aperture_size 3
-PreCornerDetect
-  CvArr image
-  CvArr corners
-  int apertureSize 3
-CornerEigenValsAndVecs
-  CvArr image
-  CvArr eigenvv
-  int blockSize
-  int aperture_size 3
-CornerMinEigenVal
-  CvArr image
-  CvArr eigenval
-  int blockSize
-  int aperture_size 3
-CornerHarris
-  CvArr image
-  CvArr harris_dst
-  int blockSize
-  int aperture_size 3
-  double k 0.04
-FindCornerSubPix corners
-  CvArr image
-  CvPoint2D32fs corners
-  CvSize win
-  CvSize zero_zone
-  CvTermCriteria criteria
-GoodFeaturesToTrack cornerCount
-  CvArr image
-  CvArr eigImage
-  CvArr tempImage
-  cvpoint2d32f_count cornerCount
-  double qualityLevel
-  double minDistance
-  CvArr mask NULL
-  int blockSize 3
-  int useHarris 0
-  double k 0.04
-ExtractSURF keypoints,descriptors
-  CvArr image
-  CvArr mask
-  CvSeqOfCvSURFPoint* keypoints /O
-  CvSeqOfCvSURFDescriptor* descriptors /O
-  CvMemStorage storage
-  CvSURFParams params
-GetStarKeypoints CvSeqOfCvStarKeypoint*
-  CvArr image
-  CvMemStorage storage
-  CvStarDetectorParams params cvStarDetectorParams()
-
-# Sampling, Interpolation and Geometrical Transforms
-GetRectSubPix
-  CvArr src
-  CvArr dst
-  CvPoint2D32f center
-GetQuadrangleSubPix
-  CvArr src
-  CvArr dst
-  CvMat mapMatrix
-Resize
-  CvArr src
-  CvArr dst
-  int interpolation CV_INTER_LINEAR
-WarpAffine
-  CvArr src
-  CvArr dst
-  CvMat mapMatrix
-  int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
-  CvScalar fillval cvScalarAll(0)
-GetAffineTransform
-  CvPoint2D32f* src
-  CvPoint2D32f* dst
-  CvMat mapMatrix
-GetRotationMatrix2D
-  CvPoint2D32f center
-  double angle
-  double scale
-  CvMat mapMatrix
-WarpPerspective
-  CvArr src
-  CvArr dst
-  CvMat mapMatrix
-  int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
-  CvScalar fillval cvScalarAll(0)
-GetPerspectiveTransform
-  CvPoint2D32f* src
-  CvPoint2D32f* dst
-  CvMat mapMatrix
-Remap
-  CvArr src
-  CvArr dst
-  CvArr mapx
-  CvArr mapy
-  int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
-  CvScalar fillval cvScalarAll(0)
-ConvertMaps
-  CvArr mapx
-  CvArr mapy
-  CvArr mapxy
-  CvArr mapalpha
-LogPolar
-  CvArr src
-  CvArr dst
-  CvPoint2D32f center
-  double M
-  int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
-
-# Morphological Operations
-CreateStructuringElementEx IplConvKernel*
-  int cols
-  int rows
-  int anchorX
-  int anchorY
-  int shape
-  ints values {NULL,0}
-Erode
-  CvArr src
-  CvArr dst
-  IplConvKernel* element NULL
-  int iterations 1
-Dilate
-  CvArr src
-  CvArr dst
-  IplConvKernel* element NULL
-  int iterations 1
-MorphologyEx
-  CvArr src
-  CvArr dst
-  CvArr temp
-  IplConvKernel* element
-  int operation
-  int iterations 1
-
-# Filters and Color Conversion
-Smooth
-  CvArr src
-  CvArr dst
-  int smoothtype CV_GAUSSIAN
-  int param1 3
-  int param2 0
-  double param3 0
-  double param4 0
-Filter2D
-  CvArr src
-  CvArr dst
-  CvMat kernel
-  CvPoint anchor cvPoint(-1,-1)
-CopyMakeBorder
-  CvArr src
-  CvArr dst
-  CvPoint offset
-  int bordertype
-  CvScalar value cvScalarAll(0)
-Integral
-  CvArr image
-  CvArr sum
-  CvArr sqsum NULL
-  CvArr tiltedSum NULL
-CvtColor
-  CvArr src
-  CvArr dst
-  int code
-Threshold
-  CvArr src
-  CvArr dst
-  double threshold
-  double maxValue
-  int thresholdType
-AdaptiveThreshold
-  CvArr src
-  CvArr dst
-  double maxValue
-  int adaptive_method CV_ADAPTIVE_THRESH_MEAN_C /ch_adaptive_method
-  int thresholdType CV_THRESH_BINARY /ch_threshold_type
-  int blockSize 3
-  double param1 5
-
-# Pyramids and the Applications
-PyrDown
-  CvArr src
-  CvArr dst
-  int filter CV_GAUSSIAN_5x5
-PyrUp
-  CvArr src
-  CvArr dst
-  int filter CV_GAUSSIAN_5x5
-PyrSegmentation comp
-  IplImage src
-  IplImage dst
-  CvMemStorage storage
-  CvSeq* comp /O
-  int level
-  double threshold1
-  double threshold2
-PyrMeanShiftFiltering
-  CvArr src
-  CvArr dst
-  double sp
-  double sr
-  int max_level 1
-  CvTermCriteria termcrit cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1)
-
-# Image Segmentation, Connected Components and Contour Retrieval
-FloodFill comp
-  CvArr image
-  CvPoint seed_point
-  CvScalar new_val
-  CvScalar lo_diff cvScalarAll(0)
-  CvScalar up_diff cvScalarAll(0)
-  CvConnectedComp comp /O
-  int flags 4
-  CvArr mask NULL
-Watershed
-  CvArr image
-  CvArr markers
-
-# Image and Contour Moments
-Moments moments
-  cvarrseq arr
-  CvMoments moments /O
-  int binary 0
-GetSpatialMoment double
-  CvMoments* moments
-  int x_order
-  int y_order
-GetCentralMoment double
-  CvMoments* moments
-  int x_order
-  int y_order
-GetNormalizedCentralMoment double
-  CvMoments* moments
-  int x_order
-  int y_order
-
-# Special Image Transforms
-HoughLines2 CvSeq*
-  CvArr image
-  CvMemStorage storage
-  int method
-  double rho
-  double theta
-  int threshold
-  double param1 0
-  double param2 0
-HoughCircles
-  CvArr image
-  CvMat circle_storage
-  int method
-  double dp
-  double min_dist
-  double param1 100
-  double param2 100
-  int min_radius 0
-  int max_radius 0
-DistTransform
-  CvArr src
-  CvArr dst
-  int distance_type CV_DIST_L2
-  int mask_size 3
-  floats mask {NULL,0}
-  CvArr labels NULL
-Inpaint
-  CvArr src
-  CvArr mask
-  CvArr dst
-  double inpaintRadius
-  int flags
-
-# Histograms
-ClearHist
-  CvHistogram hist
-CalcArrHist
-  CvArrs image
-  CvHistogram hist
-  int accumulate 0
-  CvArr mask NULL
-CalcHist
-  IplImages image
-  CvHistogram hist
-  int accumulate 0
-  CvArr mask NULL
-NormalizeHist
-  CvHistogram hist
-  double factor
-ThreshHist
-  CvHistogram hist
-  double threshold
-CompareHist double
-  CvHistogram hist1
-  CvHistogram hist2
-  int method
-# CopyHist
-CalcBackProject
-  IplImages image
-  CvArr back_project
-  CvHistogram hist
-CalcArrBackProject
-  CvArrs image
-  CvArr back_project
-  CvHistogram hist
-CalcBackProjectPatch
-  IplImages images
-  CvArr dst
-  CvSize patch_size
-  CvHistogram hist
-  int method
-  float factor
-CalcProbDensity
-  CvHistogram hist1
-  CvHistogram hist2
-  CvHistogram dst_hist
-  double scale 255
-EqualizeHist
-  CvArr src
-  CvArr dst
-QueryHistValue_1D double
-  CvHistogram hist
-  int idx0
-QueryHistValue_2D double
-  CvHistogram hist
-  int idx0
-  int idx1
-QueryHistValue_3D double
-  CvHistogram hist
-  int idx0
-  int idx1
-  int idx2
-QueryHistValue_nD double
-  CvHistogram hist
-  ints idx
-
-# Matching
-MatchTemplate
-  CvArr image
-  CvArr templ
-  CvArr result
-  int method
-MatchShapes double
-  CvSeq object1
-  CvSeq object2
-  int method
-  double parameter 0
-
-# Contour Processing Functions
-ApproxChains CvSeq*
-  CvSeq src_seq
-  CvMemStorage storage
-  int method CV_CHAIN_APPROX_SIMPLE
-  double parameter 0
-  int minimal_perimeter 0
-  int recursive 0
-BoundingRect CvRect
-  cvarrseq points
-  int update 0
-ContourArea double
-  cvarrseq contour
-  CvSlice slice CV_WHOLE_SEQ
-ArcLength double
-  cvarrseq curve
-  CvSlice slice CV_WHOLE_SEQ
-  int isClosed -1
-
-# Computational Geometry
-MaxRect CvRect
-  CvRect* rect1
-  CvRect* rect2
-# TODO PointSeqFromMat
-BoxPoints points
-  CvBox2D box
-  CvPoint2D32f_4 points /O,A
-FitEllipse2 CvBox2D
-  CvArr points
-ConvexHull2 CvSeq*
-  cvarrseq points
-  CvMemStorage storage
-  int orientation CV_CLOCKWISE
-  int return_points 0
-CheckContourConvexity int
-  cvarrseq contour
-ConvexityDefects CvSeqOfCvConvexityDefect*
-  cvarrseq contour
-  CvSeq convexhull
-  CvMemStorage storage
-PointPolygonTest double
-  cvarrseq contour
-  CvPoint2D32f pt
-  int measure_dist
-MinAreaRect2 CvBox2D
-  cvarrseq points
-  CvMemStorage storage NULL
-MinEnclosingCircle int,center,radius
-  cvarrseq points
-  CvPoint2D32f center /O
-  float radius /O
-
-# Planar Subdivisions
-
-Subdiv2DGetEdge CvSubdiv2DEdge
-  CvSubdiv2DEdge edge
-  CvNextEdgeType type
-Subdiv2DNextEdge CvSubdiv2DEdge
-  CvSubdiv2DEdge edge
-Subdiv2DRotateEdge CvSubdiv2DEdge
-  CvSubdiv2DEdge edge
-  int rotate
-Subdiv2DEdgeOrg CvSubdiv2DPoint*
-  CvSubdiv2DEdge edge
-Subdiv2DEdgeDst CvSubdiv2DPoint*
-  CvSubdiv2DEdge edge
-CreateSubdivDelaunay2D CvSubdiv2D*
-  CvRect rect
-  CvMemStorage storage
-SubdivDelaunay2DInsert CvSubdiv2DPoint*
-  CvSubdiv2D* subdiv
-  CvPoint2D32f pt
-CalcSubdivVoronoi2D
-  CvSubdiv2D* subdiv
-ClearSubdivVoronoi2D
-  CvSubdiv2D* subdiv
-FindNearestPoint2D CvSubdiv2DPoint*
-  CvSubdiv2D* subdiv
-  CvPoint2D32f pt
-
-# Object Detection
-HaarDetectObjects CvSeqOfCvAvgComp*
-  CvArr image
-  CvHaarClassifierCascade* cascade
-  CvMemStorage storage
-  double scale_factor 1.1   /ch_doubleAbove1
-  int min_neighbors 3
-  int flags 0
-  CvSize min_size cvSize(0,0)
-
-ComputeCorrespondEpilines
-  CvMat points
-  int whichImage
-  CvMat F
-  CvMat lines
-ConvertPointsHomogeneous
-  CvMat src
-  CvMat dst
-ProjectPoints2
-  CvMat objectPoints
-  CvMat rvec
-  CvMat tvec
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat imagePoints
-  CvMat dpdrot NULL
-  CvMat dpdt NULL
-  CvMat dpdf NULL
-  CvMat dpdc NULL
-  CvMat dpddist NULL
-ReprojectImageTo3D
-  CvArr disparity
-  CvArr _3dImage
-  CvMat Q
-  int handleMissingValues 0
-RQDecomp3x3 eulerAngles
-  CvMat M
-  CvMat R
-  CvMat Q
-  CvMat Qx NULL
-  CvMat Qy NULL
-  CvMat Qz NULL
-  CvPoint3D64f eulerAngles /O
-FindHomography
-  CvMat srcPoints
-  CvMat dstPoints
-  CvMat H
-  int method 0
-  double ransacReprojThreshold 3.0
-  CvMat status NULL
-CreateStereoBMState CvStereoBMState*
-  int preset CV_STEREO_BM_BASIC
-  int numberOfDisparities 0
-CreateStereoGCState CvStereoGCState*
-  int numberOfDisparities
-  int maxIters
-FindStereoCorrespondenceBM
-  CvArr left
-  CvArr right
-  CvArr disparity
-  CvStereoBMState* state
-FindStereoCorrespondenceGC
-  CvArr left
-  CvArr right
-  CvArr dispLeft
-  CvArr dispRight
-  CvStereoGCState* state
-  int useDisparityGuess 0
-CalibrateCamera2 
-  CvMat objectPoints
-  CvMat imagePoints
-  CvMat pointCounts
-  CvSize imageSize
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat rvecs
-  CvMat tvecs
-  int flags 0
-CalibrationMatrixValues fovx,fovy,focalLength,principalPoint,pixelAspectRatio
-  CvMat calibMatr
-  CvSize image_size
-  double apertureWidth 0
-  double apertureHeight 0
-  double fovx /O
-  double fovy /O
-  double focalLength /O
-  CvPoint2D64f principalPoint /O
-  double pixelAspectRatio /O
-FindExtrinsicCameraParams2
-  CvMat objectPoints
-  CvMat imagePoints
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat rvec
-  CvMat tvec
-  int useExtrinsicGuess 0
-FindFundamentalMat int
-  CvMat points1
-  CvMat points2
-  CvMat fundamentalMatrix
-  int method CV_FM_RANSAC
-  double param1 1.
-  double param2 0.99
-  CvMat status NULL
-StereoCalibrate
-  CvMat objectPoints
-  CvMat imagePoints1
-  CvMat imagePoints2
-  CvMat pointCounts
-  CvMat cameraMatrix1
-  CvMat distCoeffs1
-  CvMat cameraMatrix2
-  CvMat distCoeffs2
-  CvSize imageSize
-  CvMat R
-  CvMat T
-  CvMat E NULL
-  CvMat F NULL
-  CvTermCriteria term_crit cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)
-  int flags CV_CALIB_FIX_INTRINSIC
-GetOptimalNewCameraMatrix
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvSize imageSize
-  double alpha
-  CvMat newCameraMatrix
-  CvSize newImageSize cvSize(0,0)
-  CvRect* validPixROI NULL
-InitIntrinsicParams2D
-  CvMat objectPoints
-  CvMat imagePoints
-  CvMat npoints
-  CvSize imageSize
-  CvMat cameraMatrix
-  double aspectRatio 1.
-StereoRectify roi1,roi2
-  CvMat cameraMatrix1
-  CvMat cameraMatrix2
-  CvMat distCoeffs1
-  CvMat distCoeffs2
-  CvSize imageSize
-  CvMat R
-  CvMat T
-  CvMat R1
-  CvMat R2
-  CvMat P1
-  CvMat P2
-  CvMat Q NULL
-  int flags CV_CALIB_ZERO_DISPARITY
-  double alpha -1
-  CvSize newImageSize cvSize(0,0)
-  CvRect roi1 /O
-  CvRect roi2 /O
-StereoRectifyUncalibrated
-  CvMat points1
-  CvMat points2
-  CvMat F
-  CvSize imageSize
-  CvMat H1
-  CvMat H2
-  double threshold 5
-Rodrigues2
-  CvMat src
-  CvMat dst
-  CvMat jacobian 0
-Undistort2
-  CvArr src
-  CvArr dst
-  CvMat cameraMatrix
-  CvMat distCoeffs
-InitUndistortMap
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvArr map1
-  CvArr map2
-InitUndistortRectifyMap
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat R
-  CvMat newCameraMatrix
-  CvArr map1
-  CvArr map2
-UndistortPoints
-  CvMat src
-  CvMat dst
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat R NULL
-  CvMat P NULL
-DecomposeProjectionMatrix eulerAngles
-  CvMat projMatrix
-  CvMat cameraMatrix
-  CvMat rotMatrix
-  CvMat transVect
-  CvMat rotMatrX NULL
-  CvMat rotMatrY NULL
-  CvMat rotMatrZ NULL
-  CvPoint3D64f eulerAngles /O
-DrawChessboardCorners
-  CvArr image
-  CvSize patternSize
-  CvPoint2D32fs corners
-  int patternWasFound
-
-CreatePOSITObject CvPOSITObject*
-  CvPoint3D32fs points
-POSIT rotationMatrix,translation_vector
-  CvPOSITObject* posit_object
-  CvPoint2D32f* imagePoints
-  double focal_length
-  CvTermCriteria criteria
-  CvMatr32f_i rotationMatrix /O,A
-  CvVect32f_i translation_vector /O,A
-
-EstimateRigidTransform
-  CvArr A
-  CvArr B
-  CvMat M
-  int full_affine
-
-# Accumulation of Background Statistics
-Acc
-  CvArr image
-  CvArr sum
-  CvArr mask NULL
-SquareAcc
-  CvArr image
-  CvArr sqsum
-  CvArr mask NULL
-MultiplyAcc
-  CvArr image1
-  CvArr image2
-  CvArr acc
-  CvArr mask NULL
-RunningAvg
-  CvArr image
-  CvArr acc
-  double alpha
-  CvArr mask NULL
-
-# Motion Templates
-UpdateMotionHistory
-  CvArr silhouette
-  CvArr mhi
-  double timestamp
-  double duration
-CalcMotionGradient
-  CvArr mhi          /ch_matF
-  CvArr mask
-  CvArr orientation  /ch_matF
-  double delta1
-  double delta2
-  int apertureSize 3 /ch_aperture
-CalcGlobalOrientation double
-  CvArr orientation
-  CvArr mask
-  CvArr mhi
-  double timestamp
-  double duration
-SegmentMotion CvSeq*
-  CvArr mhi
-  CvArr seg_mask
-  CvMemStorage storage
-  double timestamp
-  double seg_thresh
-
-# Object Tracking
-MeanShift comp
-  CvArr prob_image
-  CvRect window
-  CvTermCriteria criteria
-  CvConnectedComp comp /O
-CamShift int,comp,box
-  CvArr prob_image
-  CvRect window
-  CvTermCriteria criteria
-  CvConnectedComp comp /O
-  CvBox2D box /O
-CreateKalman CvKalman*
-  int dynam_params
-  int measure_params
-  int control_params 0
-KalmanCorrect ROCvMat*
-  CvKalman* kalman
-  CvMat measurement
-KalmanPredict ROCvMat*
-  CvKalman* kalman
-  CvMat control NULL
-SnakeImage points
-  IplImage image
-  CvPoints points
-  floats alpha
-  floats beta
-  floats gamma
-  CvSize win
-  CvTermCriteria criteria
-  int calc_gradient 1
-
-# Optical Flow
-CalcOpticalFlowLK
-  CvArr prev
-  CvArr curr
-  CvSize winSize
-  CvArr velx
-  CvArr vely
-CalcOpticalFlowBM
-  CvArr prev    /ch_image8
-  CvArr curr    /ch_image8
-  CvSize blockSize
-  CvSize shiftSize
-  CvSize max_range
-  int usePrevious
-  CvArr velx        /ch_vel
-  CvArr vely        /ch_vel
-CalcOpticalFlowHS
-  CvArr prev    /ch_image8
-  CvArr curr    /ch_image8
-  int usePrevious
-  CvArr velx        /ch_vel_64
-  CvArr vely        /ch_vel_64
-  double lambda
-  CvTermCriteria criteria
-CalcOpticalFlowFarneback
-  CvArr prev /ch_image8
-  CvArr curr /ch_image8
-  CvArr flow
-  double pyr_scale 0.5
-  int levels 3
-  int winsize 15
-  int iterations 3
-  int poly_n 7
-  double poly_sigma 1.5
-  int flags 0
-
-# Highgui
-ConvertImage
-  CvArr src
-  CvArr dst
-  int flags 0
-NamedWindow
-  char* name
-  int flags CV_WINDOW_AUTOSIZE
-DestroyWindow
-  char* name
-DestroyAllWindows
-ResizeWindow
-  char* name
-  int width
-  int height
-MoveWindow
-  char* name
-  int x
-  int y
-ShowImage
-  char* name
-  CvArr image
-GetTrackbarPos int
-  char* trackbarName
-  char* windowName
-SetTrackbarPos
-  char* trackbarName
-  char* windowName
-  int pos
-#WaitKey int
-#  int delay 0
-SaveImage
-  char* filename
-  CvArr image
-CaptureFromFile CvCapture*
-  char* filename
-CreateFileCapture CvCapture*
-  char* filename
-CaptureFromCAM CvCapture*
-  int index
-CreateCameraCapture CvCapture*
-  int index
-GrabFrame int
-  CvCapture* capture
-RetrieveFrame ROIplImage*
-  CvCapture* capture
-QueryFrame ROIplImage*
-  CvCapture* capture
-GetCaptureProperty double
-  CvCapture* capture
-  int property_id
-SetCaptureProperty int
-  CvCapture* capture
-  int property_id
-  double value
-CreateVideoWriter CvVideoWriter*
-  char* filename
-  int fourcc
-  double fps
-  CvSize frame_size
-  int is_color 1
-WriteFrame int
-  CvVideoWriter* writer
-  IplImage image
-EncodeImage CvMat*
-  char* ext
-  CvArr image
-  ints0 params {&zero,1}
-DecodeImage IplImage*
-  CvMat buf
-  int iscolor CV_LOAD_IMAGE_COLOR
-DecodeImageM CvMat*
-  CvMat buf
-  int iscolor CV_LOAD_IMAGE_COLOR
-StartWindowThread
-SetWindowProperty
-  char* name
-  int prop_id
-  double prop_value
-GetWindowProperty double
-  char* name
-  int prop_id
-
-GetTickCount int64
-GetTickFrequency int64
-
-# cvaux stuff
-HOGDetectMultiScale CvSeq*
-  CvArr image
-  CvMemStorage storage
-  CvArr svm_classifier NULL
-  CvSize win_stride cvSize(0,0)
-  double hit_threshold 0
-  double scale 1.05
-  int group_threshold 2
-  CvSize padding cvSize(0,0)
-  CvSize win_size cvSize(64,128)
-  CvSize block_size cvSize(16,16)
-  CvSize block_stride cvSize(8,8)
-  CvSize cell_size cvSize(8,8)
-  int nbins 9
-  int gammaCorrection 1
-
-GrabCut
-  CvArr image
-  CvArr mask
-  CvRect rect
-  CvArr bgdModel
-  CvArr fgdModel
-  int iterCount
-  int mode
-
-# These functions are handwritten in cv.cpp; they appear here as 'doconly' declarations
-# so that their documentation can be auto-generated
-ApproxPoly  /doconly
-  cvarrseq src_seq
-  CvMemStorage storage
-  int method
-  double parameter 0.0
-  int parameter2 0
-CalcEMD2  /doconly
-  CvArr signature1
-  CvArr signature2
-  int distance_type
-  PyCallableObject* distance_func NULL
-  CvArr cost_matrix NULL
-  CvArr flow NULL
-  float lower_bound 0.0
-  PyObject* userdata NULL
-CalcOpticalFlowPyrLK currFeatures,status,track_error /doconly
-  CvArr prev
-  CvArr curr
-  CvArr prevPyr
-  CvArr currPyr
-  CvPoint2D32f* prevFeatures
-  CvSize winSize
-  int level
-  CvTermCriteria criteria
-  int flags
-  CvPoint2D32f* guesses
-  CvPoint2D32f currFeatures /O
-  char status /O
-  float track_error /O
-ClipLine point1,point2 /doconly
-  CvSize imgSize
-  CvPoint pt1
-  CvPoint pt2
-CreateData /doconly
-  CvArr arr
-CreateHist CvHistogram /doconly
-  ints dims
-  int type
-  ranges ranges None
-  int uniform 1
-CreateImageHeader IplImage* /doconly
-  CvSize size
-  int depth
-  int channels
-CreateImage IplImage* /doconly
-  CvSize size
-  int depth
-  int channels
-CreateMatHeader CvMat /doconly
-  int rows
-  int cols
-  int type
-CreateMat CvMat /doconly
-  int rows
-  int cols
-  int type
-CreateMatNDHeader CvMatND /doconly
-  ints dims
-  int type
-CreateMatND CvMatND /doconly
-  ints dims
-  int type
-CreateMemStorage CvMemStorage /doconly
-  int blockSize
-CreateTrackbar /doconly
-  char* trackbarName
-  char* windowName
-  int value
-  int count
-  PyCallableObject* onChange
-FindChessboardCorners corners /doconly
-  CvArr image
-  CvSize patternSize
-  CvPoint2D32fs corners /O
-  int flags CV_CALIB_CB_ADAPTIVE_THRESH
-FindContours /doconly
-  CvArr image
-  CvMemStorage storage
-  int mode CV_RETR_LIST
-  int method CV_CHAIN_APPROX_SIMPLE
-  CvPoint offset (0,0)
-FitLine line /doconly
-  CvArr points
-  int dist_type
-  double param
-  double reps
-  double aeps
-  PyObject* line /O
-GetDims /doconly
-  CvArr arr
-GetHuMoments hu /doconly
-  CvMoments moments
-  PyObject* hu /O
-GetImage /doconly
-  CvMat arr
-GetMat /doconly
-  IplImage arr
-  int allowND 0
-GetMinMaxHistValue min_value,max_value,min_idx,max_idx /doconly
-  CvHistogram hist
-  CvScalar min_value /O
-  CvScalar max_value /O
-  ints min_idx /O
-  ints max_idx /O
-InitLineIterator line_iterator /doconly
-  CvArr image
-  CvPoint pt1
-  CvPoint pt2
-  iter line_iterator /O
-  int connectivity 8
-  int left_to_right 0
-LoadImageM /doconly
-  char* filename
-  int iscolor CV_LOAD_IMAGE_COLOR
-LoadImage /doconly
-  char* filename
-  int iscolor CV_LOAD_IMAGE_COLOR
-ReshapeMatND /doconly
-  CvMat arr
-  int newCn
-  ints newDims
-Reshape /doconly
-  CvArr arr
-  int newCn
-  int newRows
-SetData /doconly
-  CvArr arr
-  PyObject* data
-  int step
-SetMouseCallback /doconly
-  char* windowName
-  PyCallableObject* onMouse
-  PyObject* param None
-Subdiv2DLocate loc,where /doconly
-  CvSubdiv2D* subdiv
-  CvPoint2D32f pt
-  int loc /O
-  edgeorpoint where /O
-WaitKey /doconly
-  int delay 0
diff --git a/modules/python/src/cv.cpp b/modules/python/src/cv.cpp
deleted file mode 100644
index c4909491c..000000000
--- a/modules/python/src/cv.cpp
+++ /dev/null
@@ -1,4177 +0,0 @@
-#include <Python.h>
-
-#include <assert.h>
-
-#include <opencv/cxcore.h>
-#include <opencv/cv.h>
-#include <opencv/cvaux.h>
-#include <opencv/cvwimage.h>
-#include <opencv/highgui.h>
-
-#define MODULESTR "cv"
-
-static PyObject *opencv_error;
-
-struct memtrack_t {
-  PyObject_HEAD
-  int owner;
-  void *ptr;
-  int freeptr;
-  Py_ssize_t size;
-  PyObject *backing;
-  CvArr *backingmat;
-};
-
-struct iplimage_t {
-  PyObject_HEAD
-  IplImage *a;
-  PyObject *data;
-  size_t offset;
-};
-
-struct cvmat_t {
-  PyObject_HEAD
-  CvMat *a;
-  PyObject *data;
-  size_t offset;
-};
-
-struct cvmatnd_t {
-  PyObject_HEAD
-  CvMatND *a;
-  PyObject *data;
-  size_t offset;
-};
-
-struct cvhistogram_t {
-  PyObject_HEAD
-  CvHistogram h;
-  PyObject *bins;
-};
-
-struct cvmemstorage_t {
-  PyObject_HEAD
-  CvMemStorage *a;
-};
-
-struct cvseq_t {
-  PyObject_HEAD
-  CvSeq *a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvset_t {
-  PyObject_HEAD
-  CvSet *a;
-  PyObject *container;  // Containing cvmemstorage_t
-  int i;
-};
-
-struct cvsubdiv2d_t {
-  PyObject_HEAD
-  CvSubdiv2D *a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvsubdiv2dpoint_t {
-  PyObject_HEAD
-  CvSubdiv2DPoint *a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvsubdiv2dedge_t {
-  PyObject_HEAD
-  CvSubdiv2DEdge a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvlineiterator_t {
-  PyObject_HEAD
-  CvLineIterator iter;
-  int count;
-  int type;
-};
-
-typedef IplImage ROIplImage;
-typedef const CvMat ROCvMat;
-typedef PyObject PyCallableObject;
-
-struct cvfont_t {
-  PyObject_HEAD
-  CvFont a;
-};
-
-struct cvcontourtree_t {
-  PyObject_HEAD
-  CvContourTree *a;
-};
-
-struct cvrng_t {
-  PyObject_HEAD
-  CvRNG a;
-};
-
-static int is_iplimage(PyObject *o);
-static int is_cvmat(PyObject *o);
-static int is_cvmatnd(PyObject *o);
-static int convert_to_CvArr(PyObject *o, CvArr **dst, const char *name = "no_name");
-static int convert_to_IplImage(PyObject *o, IplImage **dst, const char *name = "no_name");
-static int convert_to_CvMat(PyObject *o, CvMat **dst, const char *name = "no_name");
-static int convert_to_CvMatND(PyObject *o, CvMatND **dst, const char *name = "no_name");
-static PyObject *what_data(PyObject *o);
-static PyObject *FROM_CvMat(CvMat *r);
-static PyObject *FROM_ROCvMatPTR(ROCvMat *r);
-static PyObject *shareDataND(PyObject *donor, CvMatND *pdonor, CvMatND *precipient);
-
-#define FROM_double(r)  PyFloat_FromDouble(r)
-#define FROM_float(r)  PyFloat_FromDouble(r)
-#define FROM_int(r)  PyInt_FromLong(r)
-#define FROM_int64(r)  PyLong_FromLongLong(r)
-#define FROM_unsigned(r)  PyLong_FromUnsignedLong(r)
-#define FROM_CvBox2D(r) Py_BuildValue("(ff)(ff)f", r.center.x, r.center.y, r.size.width, r.size.height, r.angle)
-#define FROM_CvScalar(r)  Py_BuildValue("(ffff)", r.val[0], r.val[1], r.val[2], r.val[3])
-#define FROM_CvPoint(r)  Py_BuildValue("(ii)", r.x, r.y)
-#define FROM_CvPoint2D32f(r) Py_BuildValue("(ff)", r.x, r.y)
-#define FROM_CvPoint3D64f(r) Py_BuildValue("(fff)", r.x, r.y, r.z)
-#define FROM_CvSize(r) Py_BuildValue("(ii)", r.width, r.height)
-#define FROM_CvRect(r) Py_BuildValue("(iiii)", r.x, r.y, r.width, r.height)
-#define FROM_CvSeqPTR(r) _FROM_CvSeqPTR(r, pyobj_storage)
-#define FROM_CvSubdiv2DPTR(r) _FROM_CvSubdiv2DPTR(r, pyobj_storage)
-#define FROM_CvPoint2D64f(r) Py_BuildValue("(ff)", r.x, r.y)
-#define FROM_CvConnectedComp(r) Py_BuildValue("(fNN)", (r).area, FROM_CvScalar((r).value), FROM_CvRect((r).rect))
-
-#if PYTHON_USE_NUMPY
-static PyObject *fromarray(PyObject *o, int allowND);
-#endif
-
-static void translate_error_to_exception(void)
-{
-  PyErr_SetString(opencv_error, cvErrorStr(cvGetErrStatus()));
-  cvSetErrStatus(0);
-}
-
-#define ERRCHK do { if (cvGetErrStatus() != 0) { translate_error_to_exception(); return NULL; } } while (0)
-#define ERRWRAPN(F, N) \
-    do { \
-        try \
-        { \
-            F; \
-        } \
-        catch (const cv::Exception &e) \
-        { \
-           PyErr_SetString(opencv_error, e.err.c_str()); \
-           return N; \
-        } \
-        ERRCHK; \
-    } while(0)
-#define ERRWRAP(F) ERRWRAPN(F, NULL) // for most functions, exception -> NULL return
-
-/************************************************************************/
-
-static int failmsg(const char *fmt, ...)
-{
-  char str[1000];
-
-  va_list ap;
-  va_start(ap, fmt);
-  vsnprintf(str, sizeof(str), fmt, ap);
-  va_end(ap);
-
-  PyErr_SetString(PyExc_TypeError, str);
-  return 0;
-}
-
-/************************************************************************/
-
-/* These get/setters are polymorphic, used in both iplimage and cvmat */
-
-static PyObject *PyObject_FromCvScalar(CvScalar s, int type)
-{
-  int i, spe = CV_MAT_CN(type);
-  PyObject *r;
-  if (spe > 1) {
-    r = PyTuple_New(spe);
-    for (i = 0; i < spe; i++)
-      PyTuple_SET_ITEM(r, i, PyFloat_FromDouble(s.val[i]));
-  } else {
-    r = PyFloat_FromDouble(s.val[0]);
-  }
-  return r;
-}
-
-static PyObject *cvarr_GetItem(PyObject *o, PyObject *key);
-static int cvarr_SetItem(PyObject *o, PyObject *key, PyObject *v);
-
-// o is a Python string or buffer object.  Return its size.
-
-static Py_ssize_t what_size(PyObject *o)
-{
-  void *buffer;
-  Py_ssize_t buffer_len;
-
-  if (PyString_Check(o)) {
-    return PyString_Size(o);
-  } else if (PyObject_AsWriteBuffer(o, &buffer, &buffer_len) == 0) {
-    return buffer_len;
-  } else {
-    assert(0);  // argument must be string or buffer.
-    return 0;
-  }
-}
-
-
-/************************************************************************/
-
-CvMat *PyCvMat_AsCvMat(PyObject *o)
-{
-  assert(0); // not yet implemented: reference counting for CvMat in Kalman is unclear...
-  return NULL;
-}
-
-#define cvReleaseIplConvKernel(x) cvReleaseStructuringElement(x)
-#include "generated3.i"
-
-/* iplimage */
-
-static void iplimage_dealloc(PyObject *self)
-{
-  iplimage_t *pc = (iplimage_t*)self;
-  cvReleaseImageHeader((IplImage**)&pc->a);
-  Py_DECREF(pc->data);
-  PyObject_Del(self);
-}
-
-static PyObject *iplimage_repr(PyObject *self)
-{
-  iplimage_t *cva = (iplimage_t*)self;
-  IplImage* ipl = (IplImage*)(cva->a);
-  char str[1000];
-  sprintf(str, "<iplimage(");
-  char *d = str + strlen(str);
-  sprintf(d, "nChannels=%d ", ipl->nChannels);
-  d += strlen(d);
-  sprintf(d, "width=%d ", ipl->width);
-  d += strlen(d);
-  sprintf(d, "height=%d ", ipl->height);
-  d += strlen(d);
-  sprintf(d, "widthStep=%d ", ipl->widthStep);
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static PyObject *iplimage_tostring(PyObject *self, PyObject *args)
-{
-  iplimage_t *pc = (iplimage_t*)self;
-  IplImage *i;
-  if (!convert_to_IplImage(self, &i, "self"))
-    return NULL;
-  if (i == NULL)
-    return NULL;
-  int bps;
-  switch (i->depth) {
-  case IPL_DEPTH_8U:
-  case IPL_DEPTH_8S:
-    bps = 1;
-    break;
-  case IPL_DEPTH_16U:
-  case IPL_DEPTH_16S:
-    bps = 2;
-    break;
-  case IPL_DEPTH_32S:
-  case IPL_DEPTH_32F:
-    bps = 4;
-    break;
-  case IPL_DEPTH_64F:
-    bps = 8;
-    break;
-  default:
-    return (PyObject*)failmsg("Unrecognised depth %d", i->depth);
-  }
-  int bpl = i->width * i->nChannels * bps;
-  if (PyString_Check(pc->data) && bpl == i->widthStep && pc->offset == 0 && ((bpl * i->height) == what_size(pc->data))) {
-    Py_INCREF(pc->data);
-    return pc->data;
-  } else {
-    int l = bpl * i->height;
-    char *s = new char[l];
-    int y;
-    for (y = 0; y < i->height; y++) {
-      memcpy(s + y * bpl, i->imageData + y * i->widthStep, bpl);
-    }
-    PyObject *r = PyString_FromStringAndSize(s, l);
-    delete s;
-    return r;
-  }
-}
-
-static struct PyMethodDef iplimage_methods[] =
-{
-  {"tostring", iplimage_tostring, METH_VARARGS},
-  {NULL,          NULL}
-};
-
-static PyObject *iplimage_getnChannels(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->nChannels);
-}
-static PyObject *iplimage_getwidth(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->width);
-}
-static PyObject *iplimage_getheight(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->height);
-}
-static PyObject *iplimage_getdepth(iplimage_t *cva)
-{
-  return PyLong_FromUnsignedLong((unsigned)((IplImage*)(cva->a))->depth);
-}
-static PyObject *iplimage_getorigin(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->origin);
-}
-static void iplimage_setorigin(iplimage_t *cva, PyObject *v)
-{
-  ((IplImage*)(cva->a))->origin = PyInt_AsLong(v);
-}
-
-static PyGetSetDef iplimage_getseters[] = {
-  {(char*)"nChannels", (getter)iplimage_getnChannels, (setter)NULL, (char*)"nChannels", NULL},
-  {(char*)"channels", (getter)iplimage_getnChannels, (setter)NULL, (char*)"nChannels", NULL},
-  {(char*)"width", (getter)iplimage_getwidth, (setter)NULL, (char*)"width", NULL},
-  {(char*)"height", (getter)iplimage_getheight, (setter)NULL, (char*)"height", NULL},
-  {(char*)"depth", (getter)iplimage_getdepth, (setter)NULL, (char*)"depth", NULL},
-  {(char*)"origin", (getter)iplimage_getorigin, (setter)iplimage_setorigin, (char*)"origin", NULL},
-  {NULL}  /* Sentinel */
-};
-
-static PyMappingMethods iplimage_as_map = {
-  NULL,
-  &cvarr_GetItem,
-  &cvarr_SetItem,
-};
-
-static PyTypeObject iplimage_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".iplimage",                          /*name*/
-  sizeof(iplimage_t),                        /*basicsize*/
-};
-
-static void iplimage_specials(void)
-{
-  iplimage_Type.tp_dealloc = iplimage_dealloc;
-  iplimage_Type.tp_as_mapping = &iplimage_as_map;
-  iplimage_Type.tp_repr = iplimage_repr;
-  iplimage_Type.tp_methods = iplimage_methods;
-  iplimage_Type.tp_getset = iplimage_getseters;
-}
-
-static int is_iplimage(PyObject *o)
-{
-  return PyType_IsSubtype(o->ob_type, &iplimage_Type);
-}
-
-/************************************************************************/
-
-/* cvmat */
-
-static void cvmat_dealloc(PyObject *self)
-{
-  cvmat_t *pc = (cvmat_t*)self;
-  if (pc->data) {
-    Py_DECREF(pc->data);
-  }
-  cvFree(&pc->a);
-  PyObject_Del(self);
-}
-
-static PyObject *cvmat_repr(PyObject *self)
-{
-  CvMat *m = ((cvmat_t*)self)->a;
-  char str[1000];
-  sprintf(str, "<cvmat(");
-  char *d = str + strlen(str);
-  sprintf(d, "type=%08x ", m->type);
-  d += strlen(d);
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U: strcpy(d, "8U"); break;
-  case CV_8S: strcpy(d, "8S"); break;
-  case CV_16U: strcpy(d, "16U"); break;
-  case CV_16S: strcpy(d, "16S"); break;
-  case CV_32S: strcpy(d, "32S"); break;
-  case CV_32F: strcpy(d, "32F"); break;
-  case CV_64F: strcpy(d, "64F"); break;
-  }
-  d += strlen(d);
-  sprintf(d, "C%d ", CV_MAT_CN(m->type));
-  d += strlen(d);
-  sprintf(d, "rows=%d ", m->rows);
-  d += strlen(d);
-  sprintf(d, "cols=%d ", m->cols);
-  d += strlen(d);
-  sprintf(d, "step=%d ", m->step);
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static PyObject *cvmat_tostring(PyObject *self, PyObject *args)
-{
-  CvMat *m;
-  if (!convert_to_CvMat(self, &m, "self"))
-    return NULL;
-
-  int bps;                     // bytes per sample
-
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U:
-  case CV_8S:
-    bps = CV_MAT_CN(m->type) * 1;
-    break;
-  case CV_16U:
-  case CV_16S:
-    bps = CV_MAT_CN(m->type) * 2;
-    break;
-  case CV_32S:
-  case CV_32F:
-    bps = CV_MAT_CN(m->type) * 4;
-    break;
-  case CV_64F:
-    bps = CV_MAT_CN(m->type) * 8;
-    break;
-  default:
-    return (PyObject*)failmsg("Unrecognised depth %d", CV_MAT_DEPTH(m->type));
-  }
-
-  int bpl = m->cols * bps; // bytes per line
-  cvmat_t *pc = (cvmat_t*)self;
-  if (PyString_Check(pc->data) && bpl == m->step && pc->offset == 0 && ((bpl * m->rows) == what_size(pc->data))) {
-    Py_INCREF(pc->data);
-    return pc->data;
-  } else {
-    int l = bpl * m->rows;
-    char *s = new char[l];
-    int y;
-    for (y = 0; y < m->rows; y++) {
-      memcpy(s + y * bpl, m->data.ptr + y * m->step, bpl);
-    }
-    PyObject *r = PyString_FromStringAndSize(s, l);
-    delete s;
-    return r;
-  }
-}
-
-static struct PyMethodDef cvmat_methods[] =
-{
-  {"tostring", cvmat_tostring, METH_VARARGS},
-  {NULL,          NULL}
-};
-
-static PyObject *cvmat_gettype(cvmat_t *cva)
-{
-  return PyInt_FromLong(cvGetElemType(cva->a));
-}
-
-static PyObject *cvmat_getstep(cvmat_t *cva)
-{
-  return PyInt_FromLong(cva->a->step);
-}
-
-static PyObject *cvmat_getrows(cvmat_t *cva)
-{
-  return PyInt_FromLong(cva->a->rows);
-}
-
-static PyObject *cvmat_getcols(cvmat_t *cva)
-{
-  return PyInt_FromLong(cva->a->cols);
-}
-
-static PyObject *cvmat_getchannels(cvmat_t *cva)
-{
-  return PyInt_FromLong(CV_MAT_CN(cva->a->type));
-}
-
-#if PYTHON_USE_NUMPY
-#include "numpy/ndarrayobject.h"
-
-// A PyArrayInterface, with an associated python object that should be DECREF'ed on release
-struct arrayTrack {
-  PyArrayInterface s;
-  PyObject *o;
-};
-
-static void arrayTrackDtor(void *p)
-{
-  struct arrayTrack *at = (struct arrayTrack *)p;
-  delete at->s.shape;
-  delete at->s.strides;
-  if (at->s.descr)
-    Py_DECREF(at->s.descr);
-  Py_DECREF(at->o);
-}
-
-// Fill in fields of PyArrayInterface s using mtype.  This code is common
-// to cvmat and cvmatnd
-
-static void arrayinterface_common(PyArrayInterface *s, int mtype)
-{
-  s->two = 2;
-
-  switch (CV_MAT_DEPTH(mtype)) {
-  case CV_8U:
-    s->typekind = 'u';
-    s->itemsize = 1;
-    break;
-  case CV_8S:
-    s->typekind = 'i';
-    s->itemsize = 1;
-    break;
-  case CV_16U:
-    s->typekind = 'u';
-    s->itemsize = 2;
-    break;
-  case CV_16S:
-    s->typekind = 'i';
-    s->itemsize = 2;
-    break;
-  case CV_32S:
-    s->typekind = 'i';
-    s->itemsize = 4;
-    break;
-  case CV_32F:
-    s->typekind = 'f';
-    s->itemsize = 4;
-    break;
-  case CV_64F:
-    s->typekind = 'f';
-    s->itemsize = 8;
-    break;
-  default:
-    assert(0);
-  }
-
-  s->flags = NPY_WRITEABLE | NPY_NOTSWAPPED;
-}
-
-static PyObject *cvmat_array_struct(cvmat_t *cva)
-{
-  CvMat *m;
-  convert_to_CvMat((PyObject *)cva, &m, "");
-
-  arrayTrack *at = new arrayTrack;
-  PyArrayInterface *s = &at->s;
-
-  at->o = cva->data;
-  Py_INCREF(at->o);
-
-  arrayinterface_common(s, m->type);
-
-  if (CV_MAT_CN(m->type) == 1) {
-    s->nd = 2;
-    s->shape = new npy_intp[2];
-    s->shape[0] = m->rows;
-    s->shape[1] = m->cols;
-    s->strides = new npy_intp[2];
-    s->strides[0] = m->step;
-    s->strides[1] = s->itemsize;
-  } else {
-    s->nd = 3;
-    s->shape = new npy_intp[3];
-    s->shape[0] = m->rows;
-    s->shape[1] = m->cols;
-    s->shape[2] = CV_MAT_CN(m->type);
-    s->strides = new npy_intp[3];
-    s->strides[0] = m->step;
-    s->strides[1] = s->itemsize * CV_MAT_CN(m->type);
-    s->strides[2] = s->itemsize;
-  }
-  s->data = (void*)(m->data.ptr);
-  s->descr = PyList_New(1);
-  char typestr[10];
-  sprintf(typestr, "<%c%d", s->typekind, s->itemsize);
-  PyList_SetItem(s->descr, 0, Py_BuildValue("(ss)", "x", typestr));
-
-  return PyCObject_FromVoidPtr(s, arrayTrackDtor);
-}
-
-static PyObject *cvmatnd_array_struct(cvmatnd_t *cva)
-{
-  CvMatND *m;
-  convert_to_CvMatND((PyObject *)cva, &m, "");
-
-  arrayTrack *at = new arrayTrack;
-  PyArrayInterface *s = &at->s;
-
-  at->o = cva->data;
-  Py_INCREF(at->o);
-
-  arrayinterface_common(s, m->type);
-
-  int i;
-  if (CV_MAT_CN(m->type) == 1) {
-    s->nd = m->dims;
-    s->shape = new npy_intp[s->nd];
-    for (i = 0; i < s->nd; i++)
-      s->shape[i] = m->dim[i].size;
-    s->strides = new npy_intp[s->nd];
-    for (i = 0; i < (s->nd - 1); i++)
-      s->strides[i] = m->dim[i].step;
-    s->strides[s->nd - 1] = s->itemsize;
-  } else {
-    s->nd = m->dims + 1;
-    s->shape = new npy_intp[s->nd];
-    for (i = 0; i < (s->nd - 1); i++)
-      s->shape[i] = m->dim[i].size;
-    s->shape[s->nd - 1] = CV_MAT_CN(m->type);
-
-    s->strides = new npy_intp[s->nd];
-    for (i = 0; i < (s->nd - 2); i++)
-      s->strides[i] = m->dim[i].step;
-    s->strides[s->nd - 2] = s->itemsize * CV_MAT_CN(m->type);
-    s->strides[s->nd - 1] = s->itemsize;
-  }
-  s->data = (void*)(m->data.ptr);
-  s->descr = PyList_New(1);
-  char typestr[10];
-  sprintf(typestr, "<%c%d", s->typekind, s->itemsize);
-  PyList_SetItem(s->descr, 0, Py_BuildValue("(ss)", "x", typestr));
-
-  return PyCObject_FromVoidPtr(s, arrayTrackDtor);
-}
-#endif
-
-static PyGetSetDef cvmat_getseters[] = {
-  {(char*)"type",   (getter)cvmat_gettype, (setter)NULL, (char*)"type",   NULL},
-  {(char*)"step",   (getter)cvmat_getstep, (setter)NULL, (char*)"step",   NULL},
-  {(char*)"rows",   (getter)cvmat_getrows, (setter)NULL, (char*)"rows",   NULL},
-  {(char*)"cols",   (getter)cvmat_getcols, (setter)NULL, (char*)"cols",   NULL},
-  {(char*)"channels",(getter)cvmat_getchannels, (setter)NULL, (char*)"channels",   NULL},
-  {(char*)"width",  (getter)cvmat_getcols, (setter)NULL, (char*)"width",  NULL},
-  {(char*)"height", (getter)cvmat_getrows, (setter)NULL, (char*)"height", NULL},
-#if PYTHON_USE_NUMPY
-  {(char*)"__array_struct__", (getter)cvmat_array_struct, (setter)NULL, (char*)"__array_struct__", NULL},
-#endif
-  {NULL}  /* Sentinel */
-};
-
-static PyMappingMethods cvmat_as_map = {
-  NULL,
-  &cvarr_GetItem,
-  &cvarr_SetItem,
-};
-
-static PyTypeObject cvmat_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvmat",                      /*name*/
-  sizeof(cvmat_t),                        /*basicsize*/
-};
-
-static int illegal_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-  PyErr_SetString(opencv_error, "Cannot create cvmat directly; use CreateMat() instead");
-  return -1;
-}
-
-static void cvmat_specials(void)
-{
-  cvmat_Type.tp_dealloc = cvmat_dealloc;
-  cvmat_Type.tp_as_mapping = &cvmat_as_map;
-  cvmat_Type.tp_repr = cvmat_repr;
-  cvmat_Type.tp_methods = cvmat_methods;
-  cvmat_Type.tp_getset = cvmat_getseters;
-  cvmat_Type.tp_init = illegal_init;
-}
-
-static int is_cvmat(PyObject *o)
-{
-  return PyType_IsSubtype(o->ob_type, &cvmat_Type);
-}
-
-/************************************************************************/
-
-/* cvmatnd */
-
-static void cvmatnd_dealloc(PyObject *self)
-{
-  cvmatnd_t *pc = (cvmatnd_t*)self;
-  Py_DECREF(pc->data);
-  cvFree(&pc->a);
-  PyObject_Del(self);
-}
-
-static PyObject *cvmatnd_repr(PyObject *self)
-{
-  CvMatND *m = ((cvmatnd_t*)self)->a;
-  char str[1000];
-  sprintf(str, "<cvmatnd(");
-  char *d = str + strlen(str);
-  sprintf(d, "type=%08x ", m->type);
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static size_t cvmatnd_size(CvMatND *m)
-{
-  int bps = 1;
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U:
-  case CV_8S:
-    bps = CV_MAT_CN(m->type) * 1;
-    break;
-  case CV_16U:
-  case CV_16S:
-    bps = CV_MAT_CN(m->type) * 2;
-    break;
-  case CV_32S:
-  case CV_32F:
-    bps = CV_MAT_CN(m->type) * 4;
-    break;
-  case CV_64F:
-    bps = CV_MAT_CN(m->type) * 8;
-    break;
-  default:
-    assert(0);
-  }
-  size_t l = bps;
-  for (int d = 0; d < m->dims; d++) {
-    l *= m->dim[d].size;
-  }
-  return l;
-}
-
-static PyObject *cvmatnd_tostring(PyObject *self, PyObject *args)
-{
-  CvMatND *m;
-  if (!convert_to_CvMatND(self, &m, "self"))
-    return NULL;
-
-  int bps;
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U:
-  case CV_8S:
-    bps = CV_MAT_CN(m->type) * 1;
-    break;
-  case CV_16U:
-  case CV_16S:
-    bps = CV_MAT_CN(m->type) * 2;
-    break;
-  case CV_32S:
-  case CV_32F:
-    bps = CV_MAT_CN(m->type) * 4;
-    break;
-  case CV_64F:
-    bps = CV_MAT_CN(m->type) * 8;
-    break;
-  default:
-    return (PyObject*)failmsg("Unrecognised depth %d", CV_MAT_DEPTH(m->type));
-  }
-
-  int l = bps;
-  for (int d = 0; d < m->dims; d++) {
-    l *= m->dim[d].size;
-  }
-  int i[CV_MAX_DIM];
-  int d;
-  for (d = 0; d < m->dims; d++) {
-    i[d] = 0;
-  }
-  int rowsize = m->dim[m->dims-1].size * bps;
-  char *s = new char[l];
-  char *ps = s;
-
-  int finished = 0;
-  while (!finished) {
-    memcpy(ps, cvPtrND(m, i), rowsize);
-    ps += rowsize;
-    for (d = m->dims - 2; 0 <= d; d--) {
-      if (++i[d] < cvGetDimSize(m, d)) {
-        break;
-      } else {
-        i[d] = 0;
-      }
-    }
-    if (d < 0)
-      finished = 1;
-  }
-
-  return PyString_FromStringAndSize(s, ps - s);
-}
-
-static struct PyMethodDef cvmatnd_methods[] =
-{
-  {"tostring", cvmatnd_tostring, METH_VARARGS},
-  {NULL,          NULL}
-};
-
-static PyObject *cvmatnd_getchannels(cvmatnd_t *cva)
-{
-  return PyInt_FromLong(CV_MAT_CN(cva->a->type));
-}
-
-static PyGetSetDef cvmatnd_getseters[] = {
-#if PYTHON_USE_NUMPY
-  {(char*)"__array_struct__", (getter)cvmatnd_array_struct, (setter)NULL, (char*)"__array_struct__", NULL},
-#endif
-  {(char*)"channels",(getter)cvmatnd_getchannels, (setter)NULL, (char*)"channels",   NULL},
-  {NULL}  /* Sentinel */
-};
-
-static PyMappingMethods cvmatnd_as_map = {
-  NULL,
-  &cvarr_GetItem,
-  &cvarr_SetItem,
-};
-
-static PyTypeObject cvmatnd_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvmatnd",                          /*name*/
-  sizeof(cvmatnd_t),                        /*basicsize*/
-};
-
-static void cvmatnd_specials(void)
-{
-  cvmatnd_Type.tp_dealloc = cvmatnd_dealloc;
-  cvmatnd_Type.tp_as_mapping = &cvmatnd_as_map;
-  cvmatnd_Type.tp_repr = cvmatnd_repr;
-  cvmatnd_Type.tp_methods = cvmatnd_methods;
-  cvmatnd_Type.tp_getset = cvmatnd_getseters;
-}
-
-static int is_cvmatnd(PyObject *o)
-{
-  return PyType_IsSubtype(o->ob_type, &cvmatnd_Type);
-}
-
-/************************************************************************/
-
-/* cvhistogram */
-
-static void cvhistogram_dealloc(PyObject *self)
-{
-  cvhistogram_t *cvh = (cvhistogram_t*)self;
-  Py_DECREF(cvh->bins);
-  PyObject_Del(self);
-}
-
-static PyTypeObject cvhistogram_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvhistogram",                /*name*/
-  sizeof(cvhistogram_t),                  /*basicsize*/
-};
-
-static PyObject *cvhistogram_getbins(cvhistogram_t *cvh)
-{
-  Py_INCREF(cvh->bins);
-  return cvh->bins;
-}
-
-static PyGetSetDef cvhistogram_getseters[] = {
-  {(char*)"bins", (getter)cvhistogram_getbins, (setter)NULL, (char*)"bins", NULL},
-  {NULL}  /* Sentinel */
-};
-
-static void cvhistogram_specials(void)
-{
-  cvhistogram_Type.tp_dealloc = cvhistogram_dealloc;
-  cvhistogram_Type.tp_getset = cvhistogram_getseters;
-}
-
-/************************************************************************/
-
-/* cvlineiterator */
-
-static PyObject *cvlineiterator_iter(PyObject *o)
-{
-  Py_INCREF(o);
-  return o;
-}
-
-static PyObject *cvlineiterator_next(PyObject *o)
-{
-  cvlineiterator_t *pi = (cvlineiterator_t*)o;
-
-  if (pi->count) {
-      pi->count--;
-
-      CvScalar r;
-      cvRawDataToScalar( (void*)(pi->iter.ptr), pi->type, &r);
-      PyObject *pr = PyObject_FromCvScalar(r, pi->type);
-
-      CV_NEXT_LINE_POINT(pi->iter);
-
-      return pr;
-  } else {
-    return NULL;
-  }
-}
-
-static PyTypeObject cvlineiterator_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvlineiterator",             /*name*/
-  sizeof(cvlineiterator_t),               /*basicsize*/
-};
-
-static void cvlineiterator_specials(void)
-{
-  cvlineiterator_Type.tp_iter = cvlineiterator_iter;
-  cvlineiterator_Type.tp_iternext = cvlineiterator_next;
-}
-
-/************************************************************************/
-
-/* memtrack */
-
-/* Motivation for memtrack is when the storage for a Mat is an array or buffer
-object.  By setting 'data' to be a memtrack, can deallocate the storage at
-object destruction.
-
-For array objects, 'backing' is the actual storage object.  memtrack holds the reference,
-then DECREF's it at dealloc.
-
-For MatND's, we need to cvDecRefData() on release, and this is what field 'backingmat' is for.
-
-If freeptr is true, then a straight cvFree() of ptr happens.
-
-*/
-
-
-static void memtrack_dealloc(PyObject *self)
-{
-  memtrack_t *pi = (memtrack_t*)self;
-  if (pi->backing)
-    Py_DECREF(pi->backing);
-  if (pi->backingmat)
-    cvDecRefData(pi->backingmat);
-  if (pi->freeptr)
-    cvFree(&pi->ptr);
-  PyObject_Del(self);
-}
-
-static PyTypeObject memtrack_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".memtrack",                          /*name*/
-  sizeof(memtrack_t),                        /*basicsize*/
-};
-
-Py_ssize_t memtrack_getreadbuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
-{
-  *ptrptr = &((memtrack_t*)self)->ptr;
-  return ((memtrack_t*)self)->size;
-}
-
-Py_ssize_t memtrack_getwritebuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
-{
-  *ptrptr = ((memtrack_t*)self)->ptr;
-  return ((memtrack_t*)self)->size;
-}
-
-Py_ssize_t memtrack_getsegcount(PyObject *self, Py_ssize_t *lenp)
-{
-  return (Py_ssize_t)1;
-}
-
-PyBufferProcs memtrack_as_buffer = {
-  memtrack_getreadbuffer,
-  memtrack_getwritebuffer,
-  memtrack_getsegcount
-};
-
-static void memtrack_specials(void)
-{
-  memtrack_Type.tp_dealloc = memtrack_dealloc;
-  memtrack_Type.tp_as_buffer = &memtrack_as_buffer;
-}
-
-/************************************************************************/
-
-/* cvmemstorage */
-
-static void cvmemstorage_dealloc(PyObject *self)
-{
-  cvmemstorage_t *ps = (cvmemstorage_t*)self;
-  cvReleaseMemStorage(&(ps->a));
-  PyObject_Del(self);
-}
-
-static PyTypeObject cvmemstorage_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvmemstorage",               /*name*/
-  sizeof(cvmemstorage_t),                 /*basicsize*/
-};
-
-static void cvmemstorage_specials(void)
-{
-  cvmemstorage_Type.tp_dealloc = cvmemstorage_dealloc;
-}
-
-/************************************************************************/
-
-/* cvfont */
-
-static PyTypeObject cvfont_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvfont",                     /*name*/
-  sizeof(cvfont_t),                       /*basicsize*/
-};
-
-static void cvfont_specials(void) { }
-
-/************************************************************************/
-
-/* cvrng */
-
-static PyTypeObject cvrng_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvrng",                     /*name*/
-  sizeof(cvrng_t),                       /*basicsize*/
-};
-
-static void cvrng_specials(void)
-{
-}
-
-/************************************************************************/
-
-/* cvcontourtree */
-
-static PyTypeObject cvcontourtree_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvcontourtree",                     /*name*/
-  sizeof(cvcontourtree_t),                       /*basicsize*/
-};
-
-static void cvcontourtree_specials(void) { }
-
-
-/************************************************************************/
-
-/* cvsubdiv2dedge */
-
-static PyTypeObject cvsubdiv2dedge_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvsubdiv2dedge",                     /*name*/
-  sizeof(cvsubdiv2dedge_t),                       /*basicsize*/
-};
-
-static int cvsubdiv2dedge_compare(PyObject *o1, PyObject *o2)
-{
-  cvsubdiv2dedge_t *e1 = (cvsubdiv2dedge_t*)o1;
-  cvsubdiv2dedge_t *e2 = (cvsubdiv2dedge_t*)o2;
-  if (e1->a < e2->a)
-    return -1;
-  else if (e1->a > e2->a)
-    return 1;
-  else
-    return 0;
-}
-
-static PyObject *cvquadedge_repr(PyObject *self)
-{
-  CvSubdiv2DEdge m = ((cvsubdiv2dedge_t*)self)->a;
-  char str[1000];
-  sprintf(str, "<cvsubdiv2dedge(");
-  char *d = str + strlen(str);
-  sprintf(d, "%zx.%d", m & ~3, (int)(m & 3));
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static void cvsubdiv2dedge_specials(void) {
-  cvsubdiv2dedge_Type.tp_compare = cvsubdiv2dedge_compare;
-  cvsubdiv2dedge_Type.tp_repr = cvquadedge_repr;
-}
-
-/************************************************************************/
-
-/* cvseq */
-
-static void cvseq_dealloc(PyObject *self)
-{
-  cvseq_t *ps = (cvseq_t*)self;
-  Py_DECREF(ps->container);
-  PyObject_Del(self);
-}
-
-static PyObject *cvseq_h_next(PyObject *self, PyObject *args);
-static PyObject *cvseq_h_prev(PyObject *self, PyObject *args);
-static PyObject *cvseq_v_next(PyObject *self, PyObject *args);
-static PyObject *cvseq_v_prev(PyObject *self, PyObject *args);
-
-static struct PyMethodDef cvseq_methods[] =
-{
-  {"h_next", cvseq_h_next, METH_VARARGS},
-  {"h_prev", cvseq_h_prev, METH_VARARGS},
-  {"v_next", cvseq_v_next, METH_VARARGS},
-  {"v_prev", cvseq_v_prev, METH_VARARGS},
-  {NULL,          NULL}
-};
-
-static Py_ssize_t cvseq_seq_length(PyObject *o)
-{
-  cvseq_t *ps = (cvseq_t*)o;
-  if (ps->a == NULL)
-    return (Py_ssize_t)0;
-  else
-    return (Py_ssize_t)(ps->a->total);
-}
-
-static PyObject* cvseq_seq_getitem(PyObject *o, Py_ssize_t i)
-{
-  cvseq_t *ps = (cvseq_t*)o;
-  CvPoint *pt;
-  struct pointpair{
-    CvPoint a, b;
-  } *pp;
-  CvPoint2D32f *pt2;
-  CvPoint3D32f *pt3;
-
-  if (i < cvseq_seq_length(o)) {
-    switch (CV_SEQ_ELTYPE(ps->a)) {
-
-    case CV_SEQ_ELTYPE_POINT:
-      pt = CV_GET_SEQ_ELEM(CvPoint, ps->a, i);
-      return Py_BuildValue("ii", pt->x, pt->y);
-
-    case CV_SEQ_ELTYPE_GENERIC:
-      switch (ps->a->elem_size) {
-      case sizeof(CvQuadEdge2D):
-        {
-          cvsubdiv2dedge_t *r = PyObject_NEW(cvsubdiv2dedge_t, &cvsubdiv2dedge_Type);
-          r->a = (CvSubdiv2DEdge)CV_GET_SEQ_ELEM(CvQuadEdge2D, ps->a, i);
-          r->container = ps->container;
-          Py_INCREF(r->container);
-          return (PyObject*)r;
-        }
-      case sizeof(CvConnectedComp):
-        {
-          CvConnectedComp *cc = CV_GET_SEQ_ELEM(CvConnectedComp, ps->a, i);
-          return FROM_CvConnectedComp(*cc);
-        }
-      default:
-        printf("seq elem size is %d\n", ps->a->elem_size);
-        printf("KIND %d\n", CV_SEQ_KIND(ps->a));
-        assert(0);
-      }
-      return PyInt_FromLong(*CV_GET_SEQ_ELEM(unsigned char, ps->a, i));
-
-    case CV_SEQ_ELTYPE_PTR:
-    case CV_SEQ_ELTYPE_INDEX:
-      return PyInt_FromLong(*CV_GET_SEQ_ELEM(int, ps->a, i));
-
-    case CV_32SC4:
-      pp = CV_GET_SEQ_ELEM(pointpair, ps->a, i);
-      return Py_BuildValue("(ii),(ii)", pp->a.x, pp->a.y, pp->b.x, pp->b.y);
-
-    case CV_32FC2:
-      pt2 = CV_GET_SEQ_ELEM(CvPoint2D32f, ps->a, i);
-      return Py_BuildValue("ff", pt2->x, pt2->y);
-
-    case CV_SEQ_ELTYPE_POINT3D:
-      pt3 = CV_GET_SEQ_ELEM(CvPoint3D32f, ps->a, i);
-      return Py_BuildValue("fff", pt3->x, pt3->y, pt3->z);
-
-    default:
-      printf("Unknown element type %08x\n", CV_SEQ_ELTYPE(ps->a));
-      assert(0);
-      return NULL;
-    }
-  } else
-    return NULL;
-}
-
-static PyObject* cvseq_map_getitem(PyObject *o, PyObject *item)
-{
-  if (PyInt_Check(item)) {
-    long i = PyInt_AS_LONG(item);
-    if (i < 0)
-      i += cvseq_seq_length(o);
-    return cvseq_seq_getitem(o, i);
-  } else if (PySlice_Check(item)) {
-    Py_ssize_t start, stop, step, slicelength, cur, i;
-    PyObject* result;
-
-    if (PySlice_GetIndicesEx((PySliceObject*)item, cvseq_seq_length(o),
-         &start, &stop, &step, &slicelength) < 0) {
-      return NULL;
-    }
-
-    if (slicelength <= 0) {
-      return PyList_New(0);
-    } else {
-      result = PyList_New(slicelength);
-      if (!result) return NULL;
-
-      for (cur = start, i = 0; i < slicelength;
-           cur += step, i++) {
-        PyList_SET_ITEM(result, i, cvseq_seq_getitem(o, cur));
-      }
-
-      return result;
-    }
-  } else {
-    PyErr_SetString(PyExc_TypeError, "CvSeq indices must be integers");
-    return NULL;
-  }
-}
-
-static 
-PySequenceMethods cvseq_sequence = {
-  cvseq_seq_length,
-  NULL,
-  NULL,
-  cvseq_seq_getitem
-};
-
-static PyMappingMethods cvseq_mapping = {
-  cvseq_seq_length,
-  cvseq_map_getitem,
-  NULL,
-};
-
-static PyTypeObject cvseq_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvseq",                          /*name*/
-  sizeof(cvseq_t),                        /*basicsize*/
-};
-
-static void cvseq_specials(void)
-{
-  cvseq_Type.tp_dealloc = cvseq_dealloc;
-  cvseq_Type.tp_as_sequence = &cvseq_sequence;
-  cvseq_Type.tp_as_mapping = &cvseq_mapping;
-  cvseq_Type.tp_methods = cvseq_methods;
-}
-
-#define MK_ACCESSOR(FIELD) \
-static PyObject *cvseq_##FIELD(PyObject *self, PyObject *args) \
-{ \
-  cvseq_t *ps = (cvseq_t*)self; \
-  CvSeq *s = ps->a; \
-  if (s->FIELD == NULL) { \
-    Py_RETURN_NONE; \
-  } else { \
-    cvseq_t *r = PyObject_NEW(cvseq_t, &cvseq_Type); \
-    r->a = s->FIELD; \
-    r->container = ps->container; \
-    Py_INCREF(r->container); \
-    return (PyObject*)r; \
-  } \
-}
-
-MK_ACCESSOR(h_next)
-MK_ACCESSOR(h_prev)
-MK_ACCESSOR(v_next)
-MK_ACCESSOR(v_prev)
-#undef MK_ACCESSOR
-
-/************************************************************************/
-
-/* cvset */
-
-static void cvset_dealloc(PyObject *self)
-{
-  cvset_t *ps = (cvset_t*)self;
-  Py_DECREF(ps->container);
-  PyObject_Del(self);
-}
-
-static PyTypeObject cvset_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvset",                          /*name*/
-  sizeof(cvset_t),                        /*basicsize*/
-};
-
-static PyObject *cvset_iter(PyObject *o)
-{
-  Py_INCREF(o);
-  cvset_t *ps = (cvset_t*)o;
-  ps->i = 0;
-  return o;
-}
-
-static PyObject *cvset_next(PyObject *o)
-{
-  cvset_t *ps = (cvset_t*)o;
-
-  while (ps->i < ps->a->total) {
-    CvSetElem *e = cvGetSetElem(ps->a, ps->i);
-    int prev_i = ps->i++;
-    if (e != NULL) {
-      return cvseq_seq_getitem(o, prev_i);
-    }
-  }
-  return NULL;
-}
-
-static void cvset_specials(void)
-{
-  cvset_Type.tp_dealloc = cvset_dealloc;
-  cvset_Type.tp_iter = cvset_iter;
-  cvset_Type.tp_iternext = cvset_next;
-}
-
-/************************************************************************/
-
-/* cvsubdiv2d */
-
-static PyTypeObject cvsubdiv2d_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                          /*size*/
-  MODULESTR".cvsubdiv2d",                     /*name*/
-  sizeof(cvsubdiv2d_t),                       /*basicsize*/
-};
-
-static PyObject *cvsubdiv2d_getattro(PyObject *o, PyObject *name)
-{
-  cvsubdiv2d_t *p = (cvsubdiv2d_t*)o;
-  if (strcmp(PyString_AsString(name), "edges") == 0) {
-    cvset_t *r = PyObject_NEW(cvset_t, &cvset_Type);
-    r->a = p->a->edges;
-    r->container = p->container;
-    Py_INCREF(r->container);
-    return (PyObject*)r;
-  } else {
-    PyErr_SetString(PyExc_TypeError, "cvsubdiv2d has no such attribute");
-    return NULL;
-  }
-}
-
-static void cvsubdiv2d_specials(void)
-{
-  cvsubdiv2d_Type.tp_getattro = cvsubdiv2d_getattro;
-}
-
-/************************************************************************/
-
-/* cvsubdiv2dpoint */
-
-static PyTypeObject cvsubdiv2dpoint_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".cvsubdiv2dpoint",                     /*name*/
-  sizeof(cvsubdiv2dpoint_t),                       /*basicsize*/
-};
-
-static PyObject *cvsubdiv2dpoint_getattro(PyObject *o, PyObject *name)
-{
-  cvsubdiv2dpoint_t *p = (cvsubdiv2dpoint_t*)o;
-  if (strcmp(PyString_AsString(name), "first") == 0) {
-    cvsubdiv2dedge_t *r = PyObject_NEW(cvsubdiv2dedge_t, &cvsubdiv2dedge_Type);
-    r->a = p->a->first;
-    r->container = p->container;
-    Py_INCREF(r->container);
-    return (PyObject*)r;
-  } else if (strcmp(PyString_AsString(name), "pt") == 0) {
-    return Py_BuildValue("(ff)", p->a->pt.x, p->a->pt.y);
-  } else {
-    PyErr_SetString(PyExc_TypeError, "cvsubdiv2dpoint has no such attribute");
-    return NULL;
-  }
-}
-
-static void cvsubdiv2dpoint_specials(void)
-{
-  cvsubdiv2dpoint_Type.tp_getattro = cvsubdiv2dpoint_getattro;
-}
-
-/************************************************************************/
-/* convert_to_X: used after PyArg_ParseTuple in the generated code  */
-
-/*static int convert_to_PyObjectPTR(PyObject *o, PyObject **dst, const char *name = "no_name")
-{
-  *dst = o;
-  return 1;
-}
-
-static int convert_to_PyCallableObjectPTR(PyObject *o, PyObject **dst, const char *name = "no_name")
-{
-  *dst = o;
-  return 1;
-}*/
-
-static int convert_to_char(PyObject *o, char *dst, const char *name = "no_name")
-{
-  if (PyString_Check(o) && PyString_Size(o) == 1) {
-    *dst = PyString_AsString(o)[0];
-    return 1;
-  } else {
-    (*dst) = 0;
-    return failmsg("Expected single character string for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvMemStorage(PyObject *o, CvMemStorage **dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvmemstorage_Type)) {
-    (*dst) = (((cvmemstorage_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvMemStorage*)NULL;
-    return failmsg("Expected CvMemStorage for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvSeq(PyObject *o, CvSeq **dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvseq_Type)) {
-    (*dst) = (((cvseq_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvSeq*)NULL;
-    return failmsg("Expected CvSeq for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvSize(PyObject *o, CvSize *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ii", &dst->width, &dst->height))
-    return failmsg("CvSize argument '%s' expects two integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvScalar(PyObject *o, CvScalar *s, const char *name = "no_name")
-{
-  if (PySequence_Check(o)) {
-    PyObject *fi = PySequence_Fast(o, name);
-    if (fi == NULL)
-      return 0;
-    if (4 < PySequence_Fast_GET_SIZE(fi))
-        return failmsg("CvScalar value for argument '%s' is longer than 4", name);
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-      PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-      if (PyFloat_Check(item) || PyInt_Check(item)) {
-        s->val[i] = PyFloat_AsDouble(item);
-      } else {
-        return failmsg("CvScalar value for argument '%s' is not numeric", name);
-      }
-    }
-    Py_DECREF(fi);
-  } else {
-    if (PyFloat_Check(o) || PyInt_Check(o)) {
-      s->val[0] = PyFloat_AsDouble(o);
-    } else {
-      return failmsg("CvScalar value for argument '%s' is not numeric", name);
-    }
-  }
-  return 1;
-}
-
-static int convert_to_CvPointPTR(PyObject *o, CvPoint **p, const char *name = "no_name")
-{
-  if (!PySequence_Check(o))
-    return failmsg("Expected sequence for point list argument '%s'", name);
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  *p = new CvPoint[PySequence_Fast_GET_SIZE(fi)];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyTuple_Check(item))
-      return failmsg("Expected tuple for element in point list argument '%s'", name);
-    if (!PyArg_ParseTuple(item, "ii", &((*p)[i].x), &((*p)[i].y))) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-static int convert_to_CvPoint2D32fPTR(PyObject *o, CvPoint2D32f **p, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  *p = new CvPoint2D32f[PySequence_Fast_GET_SIZE(fi)];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyTuple_Check(item))
-      return failmsg("Expected tuple for CvPoint2D32f argument '%s'", name);
-    if (!PyArg_ParseTuple(item, "ff", &((*p)[i].x), &((*p)[i].y))) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-#if 0 // not used
-static int convert_to_CvPoint3D32fPTR(PyObject *o, CvPoint3D32f **p, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  *p = new CvPoint3D32f[PySequence_Fast_GET_SIZE(fi)];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyTuple_Check(item))
-      return failmsg("Expected tuple for CvPoint3D32f argument '%s'", name);
-    if (!PyArg_ParseTuple(item, "fff", &((*p)[i].x), &((*p)[i].y), &((*p)[i].z))) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-#endif
-
-static int convert_to_CvStarDetectorParams(PyObject *o, CvStarDetectorParams *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o,
-                        "iiiii",
-                        &dst->maxSize,
-                        &dst->responseThreshold,
-                        &dst->lineThresholdProjected,
-                        &dst->lineThresholdBinarized,
-                        &dst->suppressNonmaxSize))
-    return failmsg("CvRect argument '%s' expects four integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvRect(PyObject *o, CvRect *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "iiii", &dst->x, &dst->y, &dst->width, &dst->height))
-    return failmsg("CvRect argument '%s' expects four integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvRectPTR(PyObject *o, CvRect **dst, const char *name = "no_name")
-{
-  *dst = new CvRect;
-  if (!PyArg_ParseTuple(o, "iiii", &(*dst)->x, &(*dst)->y, &(*dst)->width, &(*dst)->height))
-    return failmsg("CvRect argument '%s' expects four integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvSlice(PyObject *o, CvSlice *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ii", &dst->start_index, &dst->end_index))
-    return failmsg("CvSlice argument '%s' expects two integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvPoint(PyObject *o, CvPoint *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ii", &dst->x, &dst->y))
-    return failmsg("CvPoint argument '%s' expects two integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvPoint2D32f(PyObject *o, CvPoint2D32f *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ff", &dst->x, &dst->y))
-    return failmsg("CvPoint2D32f argument '%s' expects two floats", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvPoint3D32f(PyObject *o, CvPoint3D32f *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "fff", &dst->x, &dst->y, &dst->z))
-    return failmsg("CvPoint3D32f argument '%s' expects three floats", name);
-  else
-    return 1;
-}
-
-static int convert_to_IplImage(PyObject *o, IplImage **dst, const char *name)
-{
-  iplimage_t *ipl = (iplimage_t*)o;
-  void *buffer;
-  Py_ssize_t buffer_len;
-
-  if (!is_iplimage(o)) {
-    return failmsg("Argument '%s' must be IplImage", name);
-  } else if (PyString_Check(ipl->data)) {
-    cvSetData(ipl->a, PyString_AsString(ipl->data) + ipl->offset, ipl->a->widthStep);
-    assert(cvGetErrStatus() == 0);
-    *dst = ipl->a;
-    return 1;
-  } else if (ipl->data && PyObject_AsWriteBuffer(ipl->data, &buffer, &buffer_len) == 0) {
-    cvSetData(ipl->a, (void*)((char*)buffer + ipl->offset), ipl->a->widthStep);
-    assert(cvGetErrStatus() == 0);
-    *dst = ipl->a;
-    return 1;
-  } else {
-    return failmsg("IplImage argument '%s' has no data", name);
-  }
-}
-
-static int convert_to_CvMat(PyObject *o, CvMat **dst, const char *name)
-{
-  cvmat_t *m = (cvmat_t*)o;
-  void *buffer;
-  Py_ssize_t buffer_len;
-
-  if (!is_cvmat(o)) {
-#if !PYTHON_USE_NUMPY
-    return failmsg("Argument '%s' must be CvMat", name);
-#else
-    PyObject *asmat = fromarray(o, 0);
-    if (asmat == NULL)
-      return failmsg("Argument '%s' must be CvMat", name);
-    // now have the array obect as a cvmat, can use regular conversion
-    return convert_to_CvMat(asmat, dst, name);
-#endif
-  } else {
-    m->a->refcount = NULL;
-    if (m->data && PyString_Check(m->data)) {
-      assert(cvGetErrStatus() == 0);
-      char *ptr = PyString_AsString(m->data) + m->offset;
-      cvSetData(m->a, ptr, m->a->step);
-      assert(cvGetErrStatus() == 0);
-      *dst = m->a;
-      return 1;
-    } else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0) {
-      cvSetData(m->a, (void*)((char*)buffer + m->offset), m->a->step);
-      assert(cvGetErrStatus() == 0);
-      *dst = m->a;
-      return 1;
-    } else {
-      return failmsg("CvMat argument '%s' has no data", name);
-    }
-  }
-}
-
-static int convert_to_CvMatND(PyObject *o, CvMatND **dst, const char *name)
-{
-  cvmatnd_t *m = (cvmatnd_t*)o;
-  void *buffer;
-  Py_ssize_t buffer_len;
-
-  if (!is_cvmatnd(o)) {
-    return failmsg("Argument '%s' must be CvMatND", name);
-  } else if (m->data && PyString_Check(m->data)) {
-    m->a->data.ptr = ((uchar*)PyString_AsString(m->data)) + m->offset;
-    *dst = m->a;
-    return 1;
-  } else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0) {
-    m->a->data.ptr = ((uchar*)buffer + m->offset);
-    *dst = m->a;
-    return 1;
-  } else {
-    return failmsg("CvMatND argument '%s' has no data", name);
-  }
-}
-
-static int convert_to_CvArr(PyObject *o, CvArr **dst, const char *name)
-{
-  if (o == Py_None) {
-    *dst = (void*)NULL;
-    return 1;
-  } else if (is_iplimage(o)) {
-    return convert_to_IplImage(o, (IplImage**)dst, name);
-  } else if (is_cvmat(o)) {
-    return convert_to_CvMat(o, (CvMat**)dst, name);
-  } else if (is_cvmatnd(o)) {
-    return convert_to_CvMatND(o, (CvMatND**)dst, name);
-  } else {
-#if !PYTHON_USE_NUMPY
-    return failmsg("CvArr argument '%s' must be IplImage, CvMat or CvMatND", name);
-#else
-    PyObject *asmat = fromarray(o, 0);
-    if (asmat == NULL)
-      return failmsg("CvArr argument '%s' must be IplImage, CvMat, CvMatND, or support the array interface", name);
-    // now have the array obect as a cvmat, can use regular conversion
-    return convert_to_CvArr(asmat, dst, name);
-#endif
-  }
-}
-
-static int convert_to_CvHistogram(PyObject *o, CvHistogram **dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvhistogram_Type)) {
-    cvhistogram_t *ht = (cvhistogram_t*)o;
-    *dst = &ht->h;
-    return convert_to_CvArr(ht->bins, &(ht->h.bins), "bins");
-  } else {
-    *dst = (CvHistogram *)NULL;
-    return failmsg("Expected CvHistogram for argument '%s'", name);
-  }
-}
-
-// Used by FillPoly, FillConvexPoly, PolyLine
-struct pts_npts_contours {
-  CvPoint** pts;
-  int* npts;
-  int contours;
-};
-
-static int convert_to_pts_npts_contours(PyObject *o, pts_npts_contours *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->contours = PySequence_Fast_GET_SIZE(fi);
-  dst->pts = new CvPoint*[dst->contours];
-  dst->npts = new int[dst->contours];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    if (!convert_to_CvPointPTR(PySequence_Fast_GET_ITEM(fi, i), &dst->pts[i], name))
-      return 0;
-    dst->npts[i] = PySequence_Size(PySequence_Fast_GET_ITEM(fi, i)); // safe because convert_ just succeeded
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-class cvarrseq {
-public:
-  union {
-    CvSeq *seq;
-    CvArr *mat;
-  };
-  int freemat;
-  cvarrseq() {
-    freemat = false;
-  }
-  ~cvarrseq() {
-    if (freemat) {
-      cvReleaseMat((CvMat**)&mat);
-    }
-  }
-};
-
-static int is_convertible_to_mat(PyObject *o)
-{
-#if PYTHON_USE_NUMPY
-  if (PyObject_HasAttrString(o, "__array_struct__")) {
-    PyObject *ao = PyObject_GetAttrString(o, "__array_struct__");
-    if (ao != NULL &&
-        PyCObject_Check(ao) &&
-        ((PyArrayInterface*)PyCObject_AsVoidPtr(ao))->two == 2) {
-      return 1;
-    }
-  }
-#endif
-  return is_iplimage(o) && is_cvmat(o) && is_cvmatnd(o);
-}
-
-static int convert_to_cvarrseq(PyObject *o, cvarrseq *dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvseq_Type)) {
-    return convert_to_CvSeq(o, &(dst->seq), name);
-  } else if (is_convertible_to_mat(o)) {
-    int r = convert_to_CvArr(o, &(dst->mat), name);
-    return r;
-  } else if (PySequence_Check(o)) {
-    PyObject *fi = PySequence_Fast(o, name);
-    if (fi == NULL)
-      return 0;
-    Py_ssize_t size = -1;
-    // Make a pass through the sequence, checking that each element is
-    // a sequence and that they are all the same size
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-      PyObject *e = PySequence_Fast_GET_ITEM(fi, i);
-
-      if (!PySequence_Check(e))
-        return failmsg("Sequence '%s' must contain sequences", name);
-      if (i == 0)
-        size = (int)PySequence_Size(e);
-      else if (size != PySequence_Size(e))
-        return failmsg("All elements of sequence '%s' must be same size", name);
-    }
-    assert(size != -1);
-    CvMat *mt = cvCreateMat((int)PySequence_Fast_GET_SIZE(fi), 1, CV_32SC(size));
-    dst->freemat = true; // dealloc this mat when done
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-      PyObject *e = PySequence_Fast_GET_ITEM(fi, i);
-      PyObject *fe = PySequence_Fast(e, name);
-      assert(fe != NULL);
-      int *pdst = (int*)cvPtr2D(mt, i, 0);
-      for (Py_ssize_t j = 0; j < size; j++) {
-        PyObject *num = PySequence_Fast_GET_ITEM(fe, j);
-        if (!PyNumber_Check(num)) {
-          return failmsg("Sequence must contain numbers", name);
-        }
-        *pdst++ = PyInt_AsLong(num);
-      }
-      Py_DECREF(fe);
-    }
-    Py_DECREF(fi);
-    dst->mat = mt;
-    return 1;
-  } else {
-    return failmsg("Argument '%s' must be CvSeq, CvArr, or a sequence of numbers");
-  }
-}
-
-struct cvarr_count {
-  CvArr **cvarr;
-  int count;
-};
-
-static int convert_to_cvarr_count(PyObject *o, cvarr_count *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->cvarr = new CvArr*[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    if (!convert_to_CvArr(PySequence_Fast_GET_ITEM(fi, i), &dst->cvarr[i], name))
-      return 0;
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct intpair
-{
-  int *pairs;
-  int count;
-};
-
-static int convert_to_intpair(PyObject *o, intpair *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->pairs = new int[2 * dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyArg_ParseTuple(item, "ii", &dst->pairs[2 * i], &dst->pairs[2 * i + 1])) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct cvpoint2d32f_count {
-  CvPoint2D32f* points;
-  int count;
-};
-
-static int convert_to_cvpoint2d32f_count(PyObject *o, cvpoint2d32f_count *dst, const char *name = "no_name")
-{
-  if (PyInt_Check(o)) {
-    dst->count = PyInt_AsLong(o);
-    dst->points = new CvPoint2D32f[dst->count];
-    return 1;
-  } else {
-    return failmsg("Expected integer for CvPoint2D32f count");
-  }
-}
-
-struct floats {
-  float *f;
-  int count;
-};
-static int convert_to_floats(PyObject *o, floats *dst, const char *name = "no_name")
-{
-  if (PySequence_Check(o)) {
-    PyObject *fi = PySequence_Fast(o, name);
-    if (fi == NULL)
-      return 0;
-    dst->count = PySequence_Fast_GET_SIZE(fi);
-    dst->f = new float[dst->count];
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-      PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-      dst->f[i] = (float)PyFloat_AsDouble(item);
-    }
-    Py_DECREF(fi);
-  } else if (PyNumber_Check(o)) {
-    dst->count = 1;
-    dst->f = new float[1];
-    dst->f[0] = (float)PyFloat_AsDouble(o);
-  } else {
-    return failmsg("Expected list of floats, or float for argument '%s'", name);
-  }
-  return 1;
-}
-
-struct chars {
-  char *f;
-  int count;
-};
-/// convert_to_chars not used
-
-struct CvPoints {
-  CvPoint *p;
-  int count;
-};
-static int convert_to_CvPoints(PyObject *o, CvPoints *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->p = new CvPoint[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    convert_to_CvPoint(item, &dst->p[i], name);
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct CvPoint3D32fs {
-  CvPoint3D32f *p;
-  int count;
-};
-static int convert_to_CvPoint3D32fs(PyObject *o, CvPoint3D32fs *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->p = new CvPoint3D32f[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    convert_to_CvPoint3D32f(item, &dst->p[i], name);
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct CvPoint2D32fs {
-  CvPoint2D32f *p;
-  int count;
-};
-static int convert_to_CvPoint2D32fs(PyObject *o, CvPoint2D32fs *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->p = new CvPoint2D32f[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    convert_to_CvPoint2D32f(item, &dst->p[i], name);
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct ints {
-  int *i;
-  int count;
-};
-static int convert_to_ints(PyObject *o, ints *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->i = new int[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    dst->i[i] = PyInt_AsLong(item);
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct ints0 {
-  int *i;
-  int count;
-};
-static int convert_to_ints0(PyObject *o, ints0 *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->i = new int[dst->count + 1];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    dst->i[i] = PyInt_AsLong(item);
-  }
-  dst->i[dst->count] = 0;
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct dims
-{
-  int count;
-  int i[CV_MAX_DIM];
-  int step[CV_MAX_DIM];
-  int length[CV_MAX_DIM];
-};
-
-static int convert_to_dim(PyObject *item, int i, dims *dst, CvArr *cva, const char *name = "no_name")
-{
-  if (PySlice_Check(item)) {
-    Py_ssize_t start, stop, step, slicelength;
-    PySlice_GetIndicesEx((PySliceObject*)item, cvGetDimSize(cva, i), &start, &stop, &step, &slicelength);
-    dst->i[i] = start;
-    dst->step[i] = step;
-    dst->length[i] = slicelength;
-  } else {
-    int index = PyInt_AsLong(item);
-    if (0 <= index)
-      dst->i[i] = index;
-    else
-      dst->i[i] = cvGetDimSize(cva, i) + index;
-    dst->step[i] = 0;
-    dst->length[i] = 1;
-  }
-  return 1;
-}
-
-static int convert_to_dims(PyObject *o, dims *dst, CvArr *cva, const char *name = "no_name")
-{
-  if (!PyTuple_Check(o)) {
-    dst->count = 1;
-    return convert_to_dim(o, 0, dst, cva, name);
-  } else {
-    PyObject *fi = PySequence_Fast(o, name);
-    if (fi == NULL) {
-      PyErr_SetString(PyExc_TypeError, "Expected tuple for index");
-      return 0;
-    }
-    dst->count = PySequence_Fast_GET_SIZE(fi);
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-      if (i >= cvGetDims(cva)) {
-        return failmsg("Access specifies %d dimensions, but array only has %d", PySequence_Fast_GET_SIZE(fi), cvGetDims(cva));
-      }
-      PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-      if (!convert_to_dim(item, i, dst, cva, name))
-        return 0;
-    }
-    Py_DECREF(fi);
-    return 1;
-  }
-}
-
-struct IplImages {
-  IplImage **ims;
-  int count;
-};
-static int convert_to_IplImages(PyObject *o, IplImages *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->ims = new IplImage*[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!convert_to_IplImage(item, &dst->ims[i]))
-      return 0;
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-struct CvArrs {
-  CvArr **ims;
-  int count;
-};
-static int convert_to_CvArrs(PyObject *o, CvArrs *dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  dst->count = PySequence_Fast_GET_SIZE(fi);
-  dst->ims = new CvArr*[dst->count];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!convert_to_CvArr(item, &dst->ims[i]))
-      return 0;
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-/*static int convert_to_floatPTRPTR(PyObject *o, float*** dst, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  Py_ssize_t sz = PySequence_Fast_GET_SIZE(fi);
-  float **r = new float*[sz];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    floats ff;
-    if (!convert_to_floats(item, &ff))
-      return 0;
-    r[i] = ff.f;
-  }
-  *dst = r;
-  return 1;
-}*/
-
-static int convert_to_CvFontPTR(PyObject *o, CvFont** dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvfont_Type)) {
-    (*dst) = &(((cvfont_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvFont*)NULL;
-    return failmsg("Expected CvFont for argument '%s'", name);
-  }
-}
-
-/*static int convert_to_CvContourTreePTR(PyObject *o, CvContourTree** dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvcontourtree_Type)) {
-    (*dst) = ((cvcontourtree_t*)o)->a;
-    return 1;
-  } else {
-    (*dst) = NULL;
-    return failmsg("Expected CvContourTree for argument '%s'", name);
-  }
-}*/
-
-static int convert_to_CvRNGPTR(PyObject *o, CvRNG** dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvrng_Type)) {
-    (*dst) = &(((cvrng_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvRNG*)NULL;
-    return failmsg("Expected CvRNG for argument '%s'", name);
-  }
-}
-
-typedef void* generic;
-static int convert_to_generic(PyObject *o, generic *dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &iplimage_Type))
-    return convert_to_IplImage(o, (IplImage**)dst, name);
-  else if (PyType_IsSubtype(o->ob_type, &cvmat_Type))
-    return convert_to_CvMat(o, (CvMat**)dst, name);
-  else if (PyType_IsSubtype(o->ob_type, &cvmatnd_Type))
-    return convert_to_CvMatND(o, (CvMatND**)dst, name);
-  else {
-    return failmsg("Cannot identify type of '%s'", name);
-  }
-}
-
-static int convert_to_CvTermCriteria(PyObject *o, CvTermCriteria* dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "iid", &dst->type, &dst->max_iter, &dst->epsilon))
-    return 0;
-  return 1;
-}
-
-static int convert_to_CvBox2D(PyObject *o, CvBox2D* dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "(ff)(ff)f", &dst->center.x, &dst->center.y, &dst->size.width, &dst->size.height, &dst->angle))
-    return 0;
-  return 1;
-}
-
-static int convert_to_CvSubdiv2DPTR(PyObject *o, CvSubdiv2D** dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvsubdiv2d_Type)) {
-    (*dst) = (((cvsubdiv2d_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvSubdiv2D*)NULL;
-    return failmsg("Expected CvSubdiv2D for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvNextEdgeType(PyObject *o, CvNextEdgeType *dst, const char *name = "no_name")
-{
-  if (!PyInt_Check(o)) {
-    *dst = (CvNextEdgeType)NULL;
-    return failmsg("Expected number for CvNextEdgeType argument '%s'", name);
-  } else {
-    *dst = (CvNextEdgeType)PyInt_AsLong(o);
-    return 1;
-  }
-}
-
-static int convert_to_CvSubdiv2DEdge(PyObject *o, CvSubdiv2DEdge *dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvsubdiv2dedge_Type)) {
-    (*dst) = (((cvsubdiv2dedge_t*)o)->a);
-    return 1;
-  } else {
-    *dst = 0L;
-    return failmsg("Expected CvSubdiv2DEdge for argument '%s'", name);
-  }
-}
-
-/************************************************************************/
-
-static PyObject *pythonize_CvMat(cvmat_t *m)
-{
-  // Need to make this CvMat look like any other, with a Python 
-  // buffer object as its data.
-  CvMat *mat = m->a;
-  assert(mat->step != 0);
-#if 0
-  PyObject *data = PyString_FromStringAndSize((char*)(mat->data.ptr), mat->rows * mat->step);
-#else
-  memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type);
-  size_t gap = mat->data.ptr - (uchar*)mat->refcount;
-  o->ptr = mat->refcount;
-  o->owner = __LINE__;
-  o->freeptr = true;
-  o->size = gap + mat->rows * mat->step;
-  o->backing = NULL;
-  o->backingmat = NULL;
-  PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)gap, mat->rows * mat->step);
-  if (data == NULL)
-    return NULL;
-#endif
-  m->data = data;
-  m->offset = 0;
-  Py_DECREF(o);
-
-  // Now m has a reference to data, which has a reference to o.
-
-  return (PyObject*)m;
-}
-
-static PyObject *pythonize_foreign_CvMat(cvmat_t *m)
-{
-  // Need to make this CvMat look like any other, with a Python 
-  // buffer object as its data.
-  // Difference here is that the buffer is 'foreign' (from NumPy, for example)
-  CvMat *mat = m->a;
-  assert(mat->step != 0);
-#if 0
-  PyObject *data = PyString_FromStringAndSize((char*)(mat->data.ptr), mat->rows * mat->step);
-#else
-  memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type);
-  o->ptr = mat->data.ptr;
-  o->owner = __LINE__;
-  o->freeptr = false;
-  o->size = mat->rows * mat->step;
-  o->backing = NULL;
-  o->backingmat = mat;
-  PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)0, mat->rows * mat->step);
-  if (data == NULL)
-    return NULL;
-#endif
-  m->data = data;
-  m->offset = 0;
-  Py_DECREF(o);
-
-  // Now m has a reference to data, which has a reference to o.
-
-  return (PyObject*)m;
-}
-
-static PyObject *pythonize_IplImage(iplimage_t *cva)
-{
-  // Need to make this iplimage look like any other, with a Python 
-  // string as its data.
-  // So copy the image data into a Python string object, then release 
-  // it.
-
-  IplImage *ipl = (IplImage*)(cva->a);
-  // PyObject *data = PyString_FromStringAndSize(ipl->imageData, ipl->imageSize);
-
-  memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type);
-  assert(ipl->imageDataOrigin == ipl->imageData);
-  o->ptr = ipl->imageDataOrigin;
-  o->owner = __LINE__;
-  o->freeptr = true;
-  o->size = ipl->height * ipl->widthStep;
-  o->backing = NULL;
-  o->backingmat = NULL;
-  PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)0, o->size);
-  if (data == NULL)
-    return NULL;
-  Py_DECREF(o);
-  cva->data = data;
-  cva->offset = 0;
-
-  return (PyObject*)cva;
-}
-
-static PyObject *pythonize_CvMatND(cvmatnd_t *m, PyObject *backing = NULL)
-{
-  //
-  // Need to make this CvMatND look like any other, with a Python 
-  // buffer object as its data.
-  //
-
-  CvMatND *mat = m->a;
-  assert(mat->dim[0].step != 0);
-#if 0
-  PyObject *data = PyString_FromStringAndSize((char*)(mat->data.ptr), mat->dim[0].size * mat->dim[0].step);
-#else
-  memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type);
-  o->ptr = mat->data.ptr;
-  o->owner = __LINE__;
-  o->freeptr = false;
-  o->size = cvmatnd_size(mat);
-  Py_XINCREF(backing);
-  o->backing = backing;
-  o->backingmat = mat;
-  PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)0, o->size);
-  Py_DECREF(o); // Now 'data' holds the only reference to 'o'
-  if (data == NULL)
-    return NULL;
-#endif
-  m->data = data;
-  m->offset = 0;
-
-  return (PyObject*)m;
-}
-
-/************************************************************************/
-/* FROM_xxx:   C -> Python converters.
- *
- * Turn various OpenCV types (and some aggregate types above)
- * into Python objects.  Used by the generated code.
- *
- * All these functions and macros return a new reference.
- */
-
-
-static PyObject *_FROM_CvSeqPTR(CvSeq *s, PyObject *storage)
-{
-  cvseq_t *ps = PyObject_NEW(cvseq_t, &cvseq_Type);
-  ps->a = s;
-  ps->container = storage;
-  Py_INCREF(ps->container);
-  return (PyObject*)ps;
-}
-
-static PyObject *_FROM_CvSubdiv2DPTR(CvSubdiv2D *s, PyObject *storage)
-{
-  cvsubdiv2d_t *ps = PyObject_NEW(cvsubdiv2d_t, &cvsubdiv2d_Type);
-  ps->a = s;
-  ps->container = storage;
-  Py_INCREF(ps->container);
-  return (PyObject*)ps;
-}
-
-static PyObject *FROM_floats(floats r)
-{
-  PyObject *pr;
-
-  pr = PyList_New(r.count);
-  for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) {
-    PyList_SetItem(pr, i, PyFloat_FromDouble(r.f[i]));
-  }
-  return pr;
-}
-
-static PyObject *FROM_chars(chars r)
-{
-  PyObject *pr;
-
-  pr = PyList_New(r.count);
-  for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) {
-    PyList_SetItem(pr, i, PyInt_FromLong(r.f[i]));
-  }
-  return pr;
-}
-
-static PyObject *FROM_cvpoint2d32f_count(cvpoint2d32f_count r)
-{
-  PyObject *pr;
-
-  pr = PyList_New(r.count);
-  for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) {
-    PyList_SetItem(pr, i, FROM_CvPoint2D32f(r.points[i]));
-  }
-  return pr;
-}
-
-static PyObject *FROM_CvPoint2D32fs(CvPoint2D32fs r)
-{
-  PyObject *pr;
-
-  pr = PyList_New(r.count);
-  for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) {
-    PyList_SetItem(pr, i, FROM_CvPoint2D32f(r.p[i]));
-  }
-  return pr;
-}
-
-typedef CvSeq CvSeqOfCvConvexityDefect;
-static PyObject *FROM_CvSeqOfCvConvexityDefectPTR(CvSeqOfCvConvexityDefect *r)
-{
-  PyObject *pr;
-  pr = PyList_New(r->total);
-  for (int i = 0; i < r->total; i++) {
-    CvConvexityDefect *pd = CV_GET_SEQ_ELEM(CvConvexityDefect, r, i);
-    PyList_SetItem(pr, i, Py_BuildValue("(ii)(ii)(ii)f",
-                                        pd->start->x, pd->start->y, 
-                                        pd->end->x, pd->end->y, 
-                                        pd->depth_point->x, pd->depth_point->y, 
-                                        pd->depth));
-  }
-  // This function has copied the CvSeq data into a list.  Hence the
-  // CvSeq is not being returned to the caller.  Hence, no reference
-  // count increase for the storage, unlike _FROM_CvSeqPTR.
-  return pr;
-}
-
-typedef CvSeq CvSeqOfCvAvgComp;
-static PyObject *FROM_CvSeqOfCvAvgCompPTR(CvSeqOfCvAvgComp *r)
-{
-  PyObject *pr;
-  pr = PyList_New(r->total);
-  for (int i = 0; i < r->total; i++) {
-    CvAvgComp *pd = CV_GET_SEQ_ELEM(CvAvgComp, r, i);
-    PyList_SetItem(pr, i, Py_BuildValue("(iiii)i",
-                                        pd->rect.x, pd->rect.y, 
-                                        pd->rect.width, pd->rect.height, 
-                                        pd->neighbors));
-  }
-  // This function has copied the CvSeq data into a list.  Hence the
-  // CvSeq is not being returned to the caller.  Hence, no reference
-  // count increase for the storage, unlike _FROM_CvSeqPTR.
-  return pr;
-}
-
-typedef CvSeq CvSeqOfCvStarKeypoint;
-static PyObject *FROM_CvSeqOfCvStarKeypointPTR(CvSeqOfCvStarKeypoint *r)
-{
-  PyObject *pr;
-  pr = PyList_New(r->total);
-  for (int i = 0; i < r->total; i++) {
-    CvStarKeypoint *pd = CV_GET_SEQ_ELEM(CvStarKeypoint, r, i);
-    PyList_SetItem(pr, i, Py_BuildValue("(ii)if",
-                                        pd->pt.x, pd->pt.y, 
-                                        pd->size,
-                                        pd->response));
-  }
-  // This function has copied the CvSeq data into a list.  Hence the
-  // CvSeq is not being returned to the caller.  Hence, no reference
-  // count increase for the storage, unlike _FROM_CvSeqPTR.
-  return pr;
-}
-
-typedef CvSeq CvSeqOfCvSURFPoint;
-static PyObject *FROM_CvSeqOfCvSURFPointPTR(CvSeqOfCvSURFPoint *r)
-{
-  PyObject *pr;
-  pr = PyList_New(r->total);
-  for (int i = 0; i < r->total; i++) {
-    CvSURFPoint *pd = CV_GET_SEQ_ELEM(CvSURFPoint, r, i);
-    PyList_SetItem(pr, i, Py_BuildValue("(ff)iiff",
-                                        pd->pt.x, pd->pt.y, 
-                                        pd->laplacian,
-                                        pd->size,
-                                        pd->dir,
-                                        pd->hessian));
-  }
-  // This function has copied the CvSeq data into a list.  Hence the
-  // CvSeq is not being returned to the caller.  Hence, no reference
-  // count increase for the storage, unlike _FROM_CvSeqPTR.
-  return pr;
-}
-
-typedef CvSeq CvSeqOfCvSURFDescriptor;
-static PyObject *FROM_CvSeqOfCvSURFDescriptorPTR(CvSeqOfCvSURFDescriptor *r)
-{
-  PyObject *pr;
-  pr = PyList_New(r->total);
-  for (int i = 0; i < r->total; i++) {
-    float *pd = (float*)cvGetSeqElem(r, i);
-    int count = r->elem_size / sizeof(float);
-    PyObject *oi = PyList_New(count);
-    for (int j = 0; j < count; j++) {
-      PyList_SetItem(oi, j, PyFloat_FromDouble(pd[j]));
-    }
-    PyList_SetItem(pr, i, oi);
-  }
-  // This function has copied the CvSeq data into a list.  Hence the
-  // CvSeq is not being returned to the caller.  Hence, no reference
-  // count increase for the storage, unlike _FROM_CvSeqPTR.
-  return pr;
-}
-
-typedef CvPoint2D32f CvPoint2D32f_4[4];
-static PyObject *FROM_CvPoint2D32f_4(CvPoint2D32f* r)
-{
-  return Py_BuildValue("(ff)(ff)(ff)(ff)",
-                       r[0].x, r[0].y,
-                       r[1].x, r[1].y,
-                       r[2].x, r[2].y,
-                       r[3].x, r[3].y);
-}
-
-typedef float CvMatr32f_i[9];
-
-static PyObject *FROM_CvMatr32f_i(CvMatr32f_i r)
-{
-  return Py_BuildValue("(fff)(fff)(fff)",
-    r[0], r[1], r[2],
-    r[3], r[4], r[5],
-    r[6], r[7], r[8]);
-}
-
-typedef float CvVect32f_i[3];
-static PyObject *FROM_CvVect32f_i(CvVect32f_i r)
-{
-  return Py_BuildValue("fff",
-    r[0], r[1], r[2]);
-}
-
-static PyObject *FROM_CvFont(CvFont r)
-{
-  cvfont_t *cf = PyObject_NEW(cvfont_t, &cvfont_Type);
-  cf->a = r;
-  return (PyObject*)cf;
-}
-
-static PyObject *FROM_CvSubdiv2DPointPTR(CvSubdiv2DPoint* r)
-{
-  if (r != NULL) {
-    cvsubdiv2dpoint_t *cf = PyObject_NEW(cvsubdiv2dpoint_t, &cvsubdiv2dpoint_Type);
-    cf->a = r;
-    return (PyObject*)cf;
-  } else {
-    Py_INCREF(Py_None);
-    return Py_None;
-  }
-}
-
-static PyObject *FROM_IplImagePTR(IplImage *r)
-{
-  iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type);
-  cva->a = r;
-  return pythonize_IplImage(cva);
-}
-
-static PyObject *FROM_ROIplImagePTR(ROIplImage *r)
-{
-  if (r != NULL) {
-    iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type);
-    cva->a = cvCreateImageHeader(cvSize(100,100), 8, 1);
-    *(cva->a) = *r;
-    cva->data = PyBuffer_FromReadWriteMemory(r->imageData, r->height * r->widthStep);
-    cva->offset = 0;
-    return (PyObject*)cva;
-  } else {
-    Py_RETURN_NONE;
-  }
-}
-
-static PyObject *FROM_ROCvMatPTR(ROCvMat *r)
-{
-  if (r != NULL) {
-    cvmat_t *cva = PyObject_NEW(cvmat_t, &cvmat_Type);
-    cva->a = cvCreateMatHeader(100, 100, CV_8U);
-    *(cva->a) = *r;
-    cva->data = PyBuffer_FromReadWriteMemory(r->data.ptr, r->rows * r->step);
-    cva->offset = 0;
-    return (PyObject*)cva;
-  } else {
-    Py_RETURN_NONE;
-  }
-}
-
-static PyObject *FROM_CvMatPTR(CvMat *r)
-{
-  cvmat_t *cvm = PyObject_NEW(cvmat_t, &cvmat_Type);
-  cvm->a = r;
-
-  return pythonize_CvMat(cvm);
-}
-
-static PyObject *FROM_CvMat(CvMat *r)
-{
-  cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type);
-  m->a = r;
-  return pythonize_CvMat(m);
-}
-
-static PyObject *FROM_CvMatNDPTR(CvMatND *r)
-{
-  cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type);
-  m->a = r;
-  return pythonize_CvMatND(m);
-}
-
-static PyObject *FROM_CvRNG(CvRNG r)
-{
-  cvrng_t *m = PyObject_NEW(cvrng_t, &cvrng_Type);
-  m->a = r;
-  return (PyObject*)m;
-}
-
-/*static PyObject *FROM_CvContourTreePTR(CvContourTree *r)
-{
-  cvcontourtree_t *m = PyObject_NEW(cvcontourtree_t, &cvcontourtree_Type);
-  m->a = r;
-  return (PyObject*)m;
-}*/
-
-static PyObject *FROM_generic(generic r)
-{
-  CvTypeInfo* t = cvTypeOf(r);
-  if (r == NULL) {
-    failmsg("OpenCV returned NULL");
-    return NULL;
-  } if (strcmp(t->type_name, "opencv-image") == 0)
-    return FROM_IplImagePTR((IplImage*)r);
-  else if (strcmp(t->type_name, "opencv-matrix") == 0)
-    return FROM_CvMat((CvMat*)r);
-  else if (strcmp(t->type_name, "opencv-nd-matrix") == 0)
-    return FROM_CvMatNDPTR((CvMatND*)r);
-  else if (strcmp(t->type_name, "opencv-haar-classifier") == 0)
-    return FROM_CvHaarClassifierCascadePTR((CvHaarClassifierCascade*)r);
-  else {
-    failmsg("Unknown OpenCV type '%s'", t->type_name);
-    return NULL;
-  }
-}
-
-static PyObject *FROM_CvSubdiv2DEdge(CvSubdiv2DEdge r)
-{
-  cvsubdiv2dedge_t *m = PyObject_NEW(cvsubdiv2dedge_t, &cvsubdiv2dedge_Type);
-  m->a = r;
-  m->container = Py_None; // XXX
-  Py_INCREF(m->container);
-  return (PyObject*)m;
-}
-
-static PyObject *FROM_CvPoints(CvPoints src)
-{
-  PyObject *pr;
-  pr = PyList_New(src.count);
-  for (int i = 0; i < src.count; i++) {
-    PyList_SetItem(pr, i, FROM_CvPoint(src.p[i]));
-  }
-  return pr;
-}
-
-/************************************************************************/
-
-/* A few functions are too odd to be generated, 
- * so are handwritten here */
-
-static PyObject *pycvWaitKey(PyObject *self, PyObject *args, PyObject *kw)
-{
-  int delay = 0;
-
-  const char *keywords[] = { "delay", NULL };
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "|i", (char**)keywords, &delay))
-    return NULL;
-  int r;
-  Py_BEGIN_ALLOW_THREADS
-  r = cvWaitKey(delay);
-  Py_END_ALLOW_THREADS
-  return FROM_int(r);
-}
-
-static PyObject *pycvLoadImage(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "filename", "iscolor", NULL };
-  char *filename;
-  int iscolor = CV_LOAD_IMAGE_COLOR;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "s|i", (char**)keywords, &filename, &iscolor))
-    return NULL;
-
-  // Inside ALLOW_THREADS, must not reference 'filename' because it might move.
-  // So make a local copy 'filename_copy'.
-  char filename_copy[2048];
-  strncpy(filename_copy, filename, sizeof(filename_copy));
-
-  IplImage *r;
-  Py_BEGIN_ALLOW_THREADS
-  r = cvLoadImage(filename_copy, iscolor);
-  Py_END_ALLOW_THREADS
-
-  if (r == NULL) {
-    PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename);
-    return NULL;
-  } else {
-    return FROM_IplImagePTR(r);
-  }
-}
-
-static PyObject *pycvLoadImageM(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "filename", "iscolor", NULL };
-  char *filename;
-  int iscolor = CV_LOAD_IMAGE_COLOR;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "s|i", (char**)keywords, &filename, &iscolor))
-    return NULL;
-
-  // Inside ALLOW_THREADS, must not reference 'filename' because it might move.
-  // So make a local copy 'filename_copy'.
-  char filename_copy[2048];
-  strncpy(filename_copy, filename, sizeof(filename_copy));
-
-  CvMat *r;
-  Py_BEGIN_ALLOW_THREADS
-  r = cvLoadImageM(filename_copy, iscolor);
-  Py_END_ALLOW_THREADS
-
-  if (r == NULL) {
-    PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename);
-    return NULL;
-  } else {
-    return FROM_CvMatPTR(r);
-  }
-}
-
-static PyObject *pycvCreateImageHeader(PyObject *self, PyObject *args)
-{
-  int w, h, depth, channels;
-  if (!PyArg_ParseTuple(args, "(ii)Ii", &w, &h, &depth, &channels))
-    return NULL;
-  iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type);
-  cva->a = cvCreateImageHeader(cvSize(w, h), depth, channels);
-  if (cva->a == NULL) {
-    PyErr_SetString(PyExc_TypeError, "CreateImage failed");
-    return NULL;
-  } else {
-    cva->data = Py_None;
-    Py_INCREF(cva->data);
-    cva->offset = 0;
-
-    return (PyObject*)cva;
-  }
-}
-
-static PyObject *pycvCreateImage(PyObject *self, PyObject *args)
-{
-  int w, h, depth, channels;
-  if (!PyArg_ParseTuple(args, "(ii)Ii:CreateImage", &w, &h, &depth, &channels))
-    return NULL;
-  iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type);
-  ERRWRAP(cva->a = cvCreateImage(cvSize(w, h), depth, channels));
-  if (cva->a == NULL) {
-    PyErr_SetString(PyExc_TypeError, "CreateImage failed");
-    return NULL;
-  } else {
-    return pythonize_IplImage(cva);
-  }
-}
-
-static PyObject *pycvCreateMatHeader(PyObject *self, PyObject *args)
-{
-  int rows, cols, type;
-  if (!PyArg_ParseTuple(args, "iii", &rows, &cols, &type))
-    return NULL;
-  cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type);
-  ERRWRAP(m->a = cvCreateMatHeader(rows, cols, type));
-  if (m->a == NULL) {
-    PyErr_SetString(PyExc_TypeError, "CreateMat failed");
-    return NULL;
-  } else {
-    m->data = Py_None;
-    Py_INCREF(m->data);
-    m->offset = 0;
-    return (PyObject*)m;
-  }
-}
-
-static PyObject *pycvCreateMat(PyObject *self, PyObject *args)
-{
-  int rows, cols, type;
-  if (!PyArg_ParseTuple(args, "iii", &rows, &cols, &type))
-    return NULL;
-  cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type);
-  ERRWRAP(m->a = cvCreateMat(rows, cols, type));
-  if (m->a == NULL) {
-    PyErr_SetString(PyExc_TypeError, "CreateMat failed");
-    return NULL;
-  } else {
-    return pythonize_CvMat(m);
-  }
-}
-
-static PyObject *pycvCreateMatNDHeader(PyObject *self, PyObject *args)
-{
-  ints dims;
-  int type;
-
-  if (!PyArg_ParseTuple(args, "O&i", convert_to_ints, (void*)&dims, &type))
-    return NULL;
-  cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type);
-  ERRWRAP(m->a = cvCreateMatNDHeader(dims.count, dims.i, type));
-
-  m->data = Py_None;
-  Py_INCREF(m->data);
-  delete [] dims.i;
-  return (PyObject*)m;
-}
-
-
-static PyObject *pycvCreateMatND(PyObject *self, PyObject *args)
-{
-  ints dims;
-  int type;
-
-  if (!PyArg_ParseTuple(args, "O&i", convert_to_ints, (void*)&dims, &type))
-    return NULL;
-  cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type);
-  ERRWRAP(m->a = cvCreateMatND(dims.count, dims.i, type));
-  delete [] dims.i;
-  return pythonize_CvMatND(m);
-}
-
-#if PYTHON_USE_NUMPY
-static PyObject *pycvfromarray(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "arr", "allowND", NULL };
-  PyObject *o;
-  int allowND = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "O|i", (char**)keywords, &o, &allowND))
-    return NULL;
-  return fromarray(o, allowND);
-}
-
-static PyObject *fromarray(PyObject *o, int allowND)
-{
-  PyObject *ao = PyObject_GetAttrString(o, "__array_struct__");
-  PyObject *retval;
-
-  if ((ao == NULL) || !PyCObject_Check(ao)) {
-    PyErr_SetString(PyExc_TypeError, "object does not have array interface");
-    return NULL;
-  }
-  PyArrayInterface *pai = (PyArrayInterface*)PyCObject_AsVoidPtr(ao);
-  if (pai->two != 2) {
-    PyErr_SetString(PyExc_TypeError, "object does not have array interface");
-    return NULL;
-  }
-
-  int type = -1;
-
-  switch (pai->typekind) {
-  case 'i':
-    if (pai->itemsize == 1)
-      type = CV_8SC1;
-    else if (pai->itemsize == 2)
-      type = CV_16SC1;
-    else if (pai->itemsize == 4)
-      type = CV_32SC1;
-    else if (pai->itemsize == 8) {
-      PyErr_SetString(PyExc_TypeError, "OpenCV cannot handle 64-bit integer arrays");
-      return NULL;
-    }
-    break;
-
-  case 'u':
-    if (pai->itemsize == 1)
-      type = CV_8UC1;
-    else if (pai->itemsize == 2)
-      type = CV_16UC1;
-    break;
-
-  case 'f':
-    if (pai->itemsize == 4)
-      type = CV_32FC1;
-    else if (pai->itemsize == 8)
-      type = CV_64FC1;
-    break;
-
-  }
-  assert(type != -1);
-
-  if (!allowND) {
-    cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type);
-    if (pai->nd == 2) {
-      if (pai->strides[1] != pai->itemsize) {
-        return (PyObject*)failmsg("cv.fromarray array can only accept arrays with contiguous data");
-      }
-      ERRWRAP(m->a = cvCreateMatHeader(pai->shape[0], pai->shape[1], type));
-      m->a->step = pai->strides[0];
-    } else if (pai->nd == 3) {
-      if (pai->shape[2] > CV_CN_MAX)
-        return (PyObject*)failmsg("cv.fromarray too many channels, see allowND argument");
-      ERRWRAP(m->a = cvCreateMatHeader(pai->shape[0], pai->shape[1], type + ((pai->shape[2] - 1) << CV_CN_SHIFT)));
-      m->a->step = pai->strides[0];
-    } else {
-      return (PyObject*)failmsg("cv.fromarray array can be 2D or 3D only, see allowND argument");
-    }
-    m->a->data.ptr = (uchar*)pai->data;
-    retval = pythonize_foreign_CvMat(m);
-  } else {
-    int dims[CV_MAX_DIM];
-    int i;
-    for (i = 0; i < pai->nd; i++)
-      dims[i] = pai->shape[i];
-    cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type);
-    ERRWRAP(m->a = cvCreateMatND(pai->nd, dims, type));
-    m->a->data.ptr = (uchar*)pai->data;
-    
-    retval = pythonize_CvMatND(m, ao);
-  }
-  Py_DECREF(ao);
-  return retval;
-}
-#endif
-
-class ranges {
-public:
-  Py_ssize_t len;
-  float **rr;
-  ranges() {
-    len = 0;
-    rr = NULL;
-  }
-  int fromobj(PyObject *o, const char *name = "no_name") {
-    PyObject *fi = PySequence_Fast(o, name);
-    if (fi == NULL)
-      return 0;
-    len = PySequence_Fast_GET_SIZE(fi);
-    rr = new float*[len];
-    for (Py_ssize_t i = 0; i < len; i++) {
-      PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-      floats ff;
-      if (!convert_to_floats(item, &ff))
-        return 0;
-      rr[i] = ff.f;
-    }
-    Py_DECREF(fi);
-    return 1;
-  }
-  ~ranges() {
-    for (Py_ssize_t i = 0; i < len; i++)
-      delete rr[i];
-   delete rr;
-  }
-};
-
-static int ranges_converter(PyObject *o, ranges* dst)
-{
-  return dst->fromobj(o);
-}
-
-static PyObject *pycvCreateHist(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "dims", "type", "ranges", "uniform", NULL };
-  PyObject *dims;
-  int type;
-  int uniform = 1;
-  ranges r;
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "Oi|O&i", (char**)keywords, &dims, &type, ranges_converter, (void*)&r, &uniform)) {
-    return NULL;
-  }
-  cvhistogram_t *h = PyObject_NEW(cvhistogram_t, &cvhistogram_Type);
-  args = Py_BuildValue("Oi", dims, CV_32FC1);
-  h->bins = pycvCreateMatND(self, args);
-  Py_DECREF(args);
-  if (h->bins == NULL) {
-    return NULL;
-  }
-  h->h.type = CV_HIST_MAGIC_VAL;
-  if (!convert_to_CvArr(h->bins, &(h->h.bins), "bins"))
-    return NULL;
-
-  ERRWRAP(cvSetHistBinRanges(&(h->h), r.rr, uniform));
-
-  return (PyObject*)h;
-}
-
-static PyObject *pycvInitLineIterator(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "image", "pt1", "pt2", "connectivity", "left_to_right", NULL };
-  CvArr *image;
-  CvPoint pt1;
-  CvPoint pt2;
-  int connectivity = 8;
-  int left_to_right = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "O&O&O&|ii", (char**)keywords,
-                        convert_to_CvArr, &image,
-                        convert_to_CvPoint, &pt1,
-                        convert_to_CvPoint, &pt2,
-                        &connectivity,
-                        &left_to_right))
-    return NULL;
-
-  cvlineiterator_t *pi = PyObject_NEW(cvlineiterator_t, &cvlineiterator_Type);
-  pi->count = cvInitLineIterator(image, pt1, pt2, &pi->iter, connectivity, left_to_right);
-  ERRWRAP(pi->type = cvGetElemType(image));
-  return (PyObject*)pi;
-}
-
-static PyObject *pycvCreateMemStorage(PyObject *self, PyObject *args)
-{
-  int block_size = 0;
-  if (!PyArg_ParseTuple(args, "|i", &block_size))
-    return NULL;
-  cvmemstorage_t *pm = PyObject_NEW(cvmemstorage_t, &cvmemstorage_Type);
-  pm->a = cvCreateMemStorage(block_size);
-  return (PyObject*)pm;
-}
-
-// single index: return row
-// 2 indices: row, column
-// both row and column can be slices.  column slice must have a step of 1.
-//
-// returns a scalar when all dimensions are specified and all are integers.  Otherwise returns a CvMat.
-//
-static PyObject *cvarr_GetItem(PyObject *o, PyObject *key)
-{
-  dims dd;
-
-  CvArr *cva;
-  if (!convert_to_CvArr(o, &cva, "src"))
-    return NULL;
-
-  if (!convert_to_dims(key, &dd, cva, "key")) {
-    return NULL;
-  }
-
-  // Figure out if all supplied indices have a stride of zero - means they are not slices
-  // and if all indices are positive
-  int all0 = 1;
-  for (int i = 0; i < dd.count; i++) {
-    all0 &= (dd.step[i] == 0) && (0 <= dd.i[i]);
-  }
-
-  // if every dimension supplied, and none are slices, return the scalar
-  if ((cvGetDims(cva) == dd.count) && all0) {
-    CvScalar s;
-    ERRWRAP(s = cvGetND(cva, dd.i));
-    return PyObject_FromCvScalar(s, cvGetElemType(cva));
-  } else {
-    // pad missing dimensions
-    for (int i = dd.count; i < cvGetDims(cva); i++) {
-      dd.i[i] = 0;
-      dd.step[i] = 1;
-      dd.length[i] = cvGetDimSize(cva, i);
-    }
-    dd.count = cvGetDims(cva);
-
-    // negative steps are illegal for OpenCV
-    for (int i = 0; i < dd.count; i++) {
-      if (dd.step[i] < 0)
-        return (PyObject*)failmsg("Negative step is illegal");
-    }
-
-    // zero length illegal for OpenCV
-    for (int i = 0; i < dd.count; i++) {
-      if (dd.length[i] == 0)
-        return (PyObject*)failmsg("Zero sized dimension is illegal");
-    }
-
-    // column step can only be 0 or 1
-    if ((dd.step[dd.count-1] != 0) && (dd.step[dd.count-1] != 1))
-        return (PyObject*)failmsg("Column step is illegal");
-
-    if (is_cvmat(o) || is_iplimage(o)) {
-      cvmat_t *sub = PyObject_NEW(cvmat_t, &cvmat_Type);
-      sub->a = cvCreateMatHeader(dd.length[0], dd.length[1], cvGetElemType(cva));
-      uchar *old0;  // pointer to first element in old mat
-      int oldstep;
-      cvGetRawData(cva, &old0, &oldstep);
-      uchar *new0;  // pointer to first element in new mat
-      ERRWRAP(new0 = cvPtrND(cva, dd.i));
-
-      sub->a->step = oldstep * dd.step[0];
-      sub->data = what_data(o);
-      Py_INCREF(sub->data);
-      sub->offset = new0 - old0;
-      return (PyObject*)sub;
-    } else {
-      cvmatnd_t *sub = PyObject_NEW(cvmatnd_t, &cvmatnd_Type);
-      sub->a = cvCreateMatNDHeader(dd.count, dd.length, cvGetElemType(cva));
-      uchar *old0;  // pointer to first element in old mat
-      cvGetRawData(cva, &old0);
-      uchar *new0;  // pointer to first element in new mat
-      ERRWRAP(new0 = cvPtrND(cva, dd.i));
-
-      for (int d = 0; d < dd.count; d++) {
-        int stp = dd.step[d];
-        sub->a->dim[d].step = ((CvMatND*)cva)->dim[d].step * ((stp == 0) ? 1 : stp);
-        sub->a->dim[d].size = dd.length[d];
-      }
-      sub->data = what_data(o);
-      Py_INCREF(sub->data);
-      sub->offset = new0 - old0;
-      return (PyObject*)sub;
-    }
-  }
-}
-
-static int cvarr_SetItem(PyObject *o, PyObject *key, PyObject *v)
-{
-  dims dd;
-
-  CvArr *cva;
-  if (!convert_to_CvArr(o, &cva, "src"))
-    return -1;
-
-  if (!convert_to_dims(key, &dd, cva, "key")) {
-    return -1;
-  }
-
-  if (cvGetDims(cva) != dd.count) {
-    PyErr_SetString(PyExc_TypeError, "key length does not match array dimension");
-    return -1;
-  }
-
-  CvScalar s;
-  if (PySequence_Check(v)) {
-    PyObject *fi = PySequence_Fast(v, "v");
-    if (fi == NULL)
-      return -1;
-    if (PySequence_Fast_GET_SIZE(fi) != CV_MAT_CN(cvGetElemType(cva))) {
-      PyErr_SetString(PyExc_TypeError, "sequence size must be same as channel count");
-      return -1;
-    }
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++)
-      s.val[i] = PyFloat_AsDouble(PySequence_Fast_GET_ITEM(fi, i));
-    Py_DECREF(fi);
-  } else {
-    if (1 != CV_MAT_CN(cvGetElemType(cva))) {
-      PyErr_SetString(PyExc_TypeError, "scalar supplied but channel count does not equal 1");
-      return -1;
-    }
-    s.val[0] = PyFloat_AsDouble(v);
-  }
-  switch (dd.count) {
-  case 1:
-    ERRWRAPN(cvSet1D(cva, dd.i[0], s), -1);
-    break;
-  case 2:
-    ERRWRAPN(cvSet2D(cva, dd.i[0], dd.i[1], s), -1);
-    break;
-  case 3:
-    ERRWRAPN(cvSet3D(cva, dd.i[0], dd.i[1], dd.i[2], s), -1);
-    break;
-  default:
-    ERRWRAPN(cvSetND(cva, dd.i, s), -1);
-    // XXX - OpenCV bug? - seems as if an error in cvSetND does not set error status?
-    break;
-  }
-  if (cvGetErrStatus() != 0) {
-    translate_error_to_exception();
-    return -1;
-  }
-
-  return 0;
-}
-
-
-static PyObject *pycvSetData(PyObject *self, PyObject *args)
-{
-  PyObject *o, *s;
-  int step = CV_AUTO_STEP;
-
-  if (!PyArg_ParseTuple(args, "OO|i", &o, &s, &step))
-    return NULL;
-  if (is_iplimage(o)) {
-    iplimage_t *ipl = (iplimage_t*)o;
-    ipl->a->widthStep = step;
-    Py_DECREF(ipl->data);
-    ipl->data = s;
-    Py_INCREF(ipl->data);
-  } else if (is_cvmat(o)) {
-    cvmat_t *m = (cvmat_t*)o;
-    m->a->step = step;
-    Py_DECREF(m->data);
-    m->data = s;
-    Py_INCREF(m->data);
-  } else if (is_cvmatnd(o)) {
-    cvmatnd_t *m = (cvmatnd_t*)o;
-    Py_DECREF(m->data);
-    m->data = s;
-    Py_INCREF(m->data);
-  } else {
-    PyErr_SetString(PyExc_TypeError, "SetData argument must be either IplImage, CvMat or CvMatND");
-    return NULL;
-  }
-
-  Py_RETURN_NONE;
-}
-
-static PyObject *what_data(PyObject *o)
-{
-  if (is_iplimage(o)) {
-    iplimage_t *ipl = (iplimage_t*)o;
-    return ipl->data;
-  } else if (is_cvmat(o)) {
-    cvmat_t *m = (cvmat_t*)o;
-    return m->data;
-  } else if (is_cvmatnd(o)) {
-    cvmatnd_t *m = (cvmatnd_t*)o;
-    return m->data;
-  } else {
-    assert(0);
-    return NULL;
-  }
-}
-
-static PyObject *pycvCreateData(PyObject *self, PyObject *args)
-{
-  PyObject *o;
-
-  if (!PyArg_ParseTuple(args, "O", &o))
-    return NULL;
-
-  CvArr *a;
-  if (!convert_to_CvArr(o, &a, "arr"))
-    return NULL;
-  ERRWRAP(cvCreateData(a));
-
-  Py_DECREF(what_data(o));
-  if (is_iplimage(o)) {
-    iplimage_t *ipl = (iplimage_t*)o;
-    pythonize_IplImage(ipl);
-  } else if (is_cvmat(o)) {
-    cvmat_t *m = (cvmat_t*)o;
-    pythonize_CvMat(m);
-  } else if (is_cvmatnd(o)) {
-    cvmatnd_t *m = (cvmatnd_t*)o;
-    pythonize_CvMatND(m);
-  } else {
-    PyErr_SetString(PyExc_TypeError, "CreateData argument must be either IplImage, CvMat or CvMatND");
-    return NULL;
-  }
-
-  Py_RETURN_NONE;
-}
-
-static PyObject *pycvGetDims(PyObject *self, PyObject *args)
-{
-  PyObject *o;
-
-  if (!PyArg_ParseTuple(args, "O", &o))
-    return NULL;
-  CvArr *cva;
-  if (!convert_to_CvArr(o, &cva, "src"))
-    return NULL;
-
-  int i, nd;
-  ERRWRAP(nd = cvGetDims(cva));
-  PyObject *r = PyTuple_New(nd);
-  for (i = 0; i < nd; i++)
-    PyTuple_SetItem(r, i, PyInt_FromLong(cvGetDimSize(cva, i)));
-  return r;
-}
-
-static PyObject *pycvGetImage(PyObject *self, PyObject *args)
-{
-  PyObject *o, *r;
-
-  if (!PyArg_ParseTuple(args, "O", &o))
-    return NULL;
-  if (is_iplimage(o)) {
-    r = o;
-    Py_INCREF(o);
-  } else {
-    IplImage *ipl = cvCreateImageHeader(cvSize(100,100), 8, 1); // these args do not matter, because overwritten
-    CvArr *cva;
-    if (!convert_to_CvArr(o, &cva, "src"))
-      return NULL;
-    ERRWRAP(cvGetImage(cva, ipl));
-
-    iplimage_t *oipl = PyObject_NEW(iplimage_t, &iplimage_Type);
-    oipl->a = ipl;
-    oipl->data = what_data(o);
-    Py_INCREF(oipl->data);
-    oipl->offset = 0;
-
-    r = (PyObject*)oipl;
-  }
-  return r;
-}
-
-static PyObject *pycvGetMat(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "arr", "allowND", NULL };
-  PyObject *o, *r;
-  int allowND = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "O|i", (char**)keywords, &o, &allowND))
-    return NULL;
-  if (is_cvmat(o)) {
-    r = o;
-    Py_INCREF(o);
-  } else {
-    CvMat *m = cvCreateMatHeader(100,100, 1); // these args do not matter, because overwritten
-    CvArr *cva;
-    if (!convert_to_CvArr(o, &cva, "src"))
-      return NULL;
-    ERRWRAP(cvGetMat(cva, m, NULL, allowND));
-
-    cvmat_t *om = PyObject_NEW(cvmat_t, &cvmat_Type);
-    om->a = m;
-    om->data = what_data(o);
-    Py_INCREF(om->data);
-    om->offset = 0;
-
-    r = (PyObject*)om;
-  }
-  return r;
-}
-
-static PyObject *pycvReshape(PyObject *self, PyObject *args)
-{
-  PyObject *o;
-  int new_cn;
-  int new_rows = 0;
-
-  if (!PyArg_ParseTuple(args, "Oi|i", &o, &new_cn, &new_rows))
-    return NULL;
-
-  CvMat *m = cvCreateMatHeader(100,100, 1); // these args do not matter, because overwritten
-  CvArr *cva;
-  if (!convert_to_CvArr(o, &cva, "src"))
-    return NULL;
-  ERRWRAP(cvReshape(cva, m, new_cn, new_rows));
-
-  cvmat_t *om = PyObject_NEW(cvmat_t, &cvmat_Type);
-  om->a = m;
-  om->data = what_data(o);
-  Py_INCREF(om->data);
-  om->offset = 0;
-
-  return (PyObject*)om;
-}
-
-static PyObject *pycvReshapeMatND(PyObject *self, PyObject *args)
-{
-  PyObject *o;
-  int new_cn = 0;
-  PyObject *new_dims = NULL;
-
-  if (!PyArg_ParseTuple(args, "OiO", &o, &new_cn, &new_dims))
-    return NULL;
-
-  CvMatND *cva;
-  if (!convert_to_CvMatND(o, &cva, "src"))
-    return NULL;
-  ints dims;
-  if (new_dims != NULL) {
-    if (!convert_to_ints(new_dims, &dims, "new_dims"))
-      return NULL;
-  }
-
-  if (new_cn == 0)
-    new_cn = CV_MAT_CN(cvGetElemType(cva));
-
-  int i;
-  int count = CV_MAT_CN(cvGetElemType(cva));
-  for (i = 0; i < cva->dims; i++)
-    count *= cva->dim[i].size;
-
-  int newcount = new_cn;
-  for (i = 0; i < dims.count; i++)
-    newcount *= dims.i[i];
-
-  if (count != newcount) {
-    PyErr_SetString(PyExc_TypeError, "Total number of elements must be unchanged");
-    return NULL;
-  }
-
-  CvMatND *pn = cvCreateMatNDHeader(dims.count, dims.i, CV_MAKETYPE(CV_MAT_TYPE(cva->type), new_cn));
-  return shareDataND(o, cva, pn);
-}
-
-static void OnMouse(int event, int x, int y, int flags, void* param)
-{
-  PyGILState_STATE gstate;
-  gstate = PyGILState_Ensure();
-
-  PyObject *o = (PyObject*)param;
-  PyObject *args = Py_BuildValue("iiiiO", event, x, y, flags, PyTuple_GetItem(o, 1));
-
-  PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
-  if (r == NULL)
-    PyErr_Print();
-  else
-    Py_DECREF(r);
-  Py_DECREF(args);
-  PyGILState_Release(gstate);
-}
-
-static PyObject *pycvSetMouseCallback(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "window_name", "on_mouse", "param", NULL };
-  char* name;
-  PyObject *on_mouse;
-  PyObject *param = NULL;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "sO|O", (char**)keywords, &name, &on_mouse, &param))
-    return NULL;
-  if (!PyCallable_Check(on_mouse)) {
-    PyErr_SetString(PyExc_TypeError, "on_mouse must be callable");
-    return NULL;
-  }
-  if (param == NULL) {
-    param = Py_None;
-  }
-  ERRWRAP(cvSetMouseCallback(name, OnMouse, Py_BuildValue("OO", on_mouse, param)));
-  Py_RETURN_NONE;
-}
-
-void OnChange(int pos, void *param)
-{
-  PyGILState_STATE gstate;
-  gstate = PyGILState_Ensure();
-
-  PyObject *o = (PyObject*)param;
-  PyObject *args = Py_BuildValue("(i)", pos);
-  PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
-  if (r == NULL)
-    PyErr_Print();
-  Py_DECREF(args);
-  PyGILState_Release(gstate);
-}
-
-static PyObject *pycvCreateTrackbar(PyObject *self, PyObject *args)
-{
-  PyObject *on_change;
-  char* trackbar_name;
-  char* window_name;
-  int *value = new int;
-  int count;
-
-  if (!PyArg_ParseTuple(args, "ssiiO", &trackbar_name, &window_name, value, &count, &on_change))
-    return NULL;
-  if (!PyCallable_Check(on_change)) {
-    PyErr_SetString(PyExc_TypeError, "on_change must be callable");
-    return NULL;
-  }
-  ERRWRAP(cvCreateTrackbar2(trackbar_name, window_name, value, count, OnChange, Py_BuildValue("OO", on_change, Py_None)));
-  Py_RETURN_NONE;
-}
-
-static PyObject *pycvFindContours(PyObject *self, PyObject *args, PyObject *kw)
-{
-  CvArr* image;
-  PyObject *pyobj_image = NULL;
-  CvMemStorage* storage;
-  PyObject *pyobj_storage = NULL;
-  CvSeq* first_contour;
-  int header_size = sizeof(CvContour);
-  int mode = CV_RETR_LIST;
-  int method = CV_CHAIN_APPROX_SIMPLE;
-  CvPoint offset = cvPoint(0,0);
-  PyObject *pyobj_offset = NULL;
-
-  const char *keywords[] = { "image", "storage", "mode", "method", "offset", NULL };
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "OO|iiO", (char**)keywords, &pyobj_image, &pyobj_storage, &mode, &method, &pyobj_offset))
-    return NULL;
-  if (!convert_to_CvArr(pyobj_image, &image, "image")) return NULL;
-  if (!convert_to_CvMemStorage(pyobj_storage, &storage, "storage")) return NULL;
-  if ((pyobj_offset != NULL) && !convert_to_CvPoint(pyobj_offset, &offset, "offset")) return NULL;
-  ERRWRAP(cvFindContours(image, storage, &first_contour, header_size, mode, method, offset));
-  cvseq_t *ps = PyObject_NEW(cvseq_t, &cvseq_Type);
-  ps->a = first_contour;
-  ps->container = PyTuple_GetItem(args, 1); // storage
-  Py_INCREF(ps->container);
-  return (PyObject*)ps;
-}
-
-static PyObject *pycvApproxPoly(PyObject *self, PyObject *args, PyObject *kw)
-{
-  cvarrseq src_seq;
-  PyObject *pyobj_src_seq = NULL;
-  int header_size = sizeof(CvContour);
-  CvMemStorage* storage;
-  PyObject *pyobj_storage = NULL;
-  int method;
-  double parameter = 0;
-  int parameter2 = 0;
-
-  const char *keywords[] = { "src_seq", "storage", "method", "parameter", "parameter2", NULL };
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "OOi|di", (char**)keywords, &pyobj_src_seq, &pyobj_storage, &method, &parameter, &parameter2))
-    return NULL;
-  if (!convert_to_cvarrseq(pyobj_src_seq, &src_seq, "src_seq")) return NULL;
-  if (!convert_to_CvMemStorage(pyobj_storage, &storage, "storage")) return NULL;
-  CvSeq* r;
-  ERRWRAP(r = cvApproxPoly(src_seq.mat, header_size, storage, method, parameter, parameter2));
-  return FROM_CvSeqPTR(r);
-}
-
-static float distance_function_glue( const float* a, const float* b, void* user_param )
-{
-  PyObject *o = (PyObject*)user_param;
-  PyObject *args = Py_BuildValue("(ff)(ff)O", a[0], a[1], b[0], b[1], PyTuple_GetItem(o, 1));
-  PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
-  Py_DECREF(args);
-  return (float)PyFloat_AsDouble(r);
-}
-
-static PyObject *pycvCalcEMD2(PyObject *self, PyObject *args, PyObject *kw)
-{
-  const char *keywords[] = { "signature1", "signature2", "distance_type", "distance_func", "cost_matrix", "flow", "lower_bound", "userdata", NULL };
-  CvArr* signature1;
-  PyObject *pyobj_signature1;
-  CvArr* signature2;
-  PyObject *pyobj_signature2;
-  int distance_type;
-  PyObject *distance_func = NULL;
-  CvArr* cost_matrix=NULL;
-  PyObject *pyobj_cost_matrix = NULL;
-  CvArr* flow=NULL;
-  PyObject *pyobj_flow = NULL;
-  float lower_bound = 0.0;
-  PyObject *userdata = NULL;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "OOi|OOOfO", (char**)keywords,
-                                   &pyobj_signature1,
-                                   &pyobj_signature2,
-                                   &distance_type,
-                                   &distance_func,
-                                   &pyobj_cost_matrix,
-                                   &pyobj_flow,
-                                   &lower_bound,
-                                   &userdata))
-    return NULL;
-  if (!convert_to_CvArr(pyobj_signature1, &signature1, "signature1")) return NULL;
-  if (!convert_to_CvArr(pyobj_signature2, &signature2, "signature2")) return NULL;
-  if (pyobj_cost_matrix && !convert_to_CvArr(pyobj_cost_matrix, &cost_matrix, "cost_matrix")) return NULL;
-  if (pyobj_flow && !convert_to_CvArr(pyobj_flow, &flow, "flow")) return NULL;
-
-  if (distance_func == NULL) {
-    distance_func = Py_None;
-  }
-  if (userdata == NULL) {
-    userdata = Py_None;
-  }
-
-  PyObject *ud = Py_BuildValue("OO", distance_func, userdata);
-  float r;
-  ERRWRAP(r = cvCalcEMD2(signature1, signature2, distance_type, distance_function_glue, cost_matrix, flow, &lower_bound, (void*)ud));
-  Py_DECREF(ud);
-
-  return PyFloat_FromDouble(r);
-}
-
-static PyObject *pycvSubdiv2DLocate(PyObject *self, PyObject *args)
-{
-  PyObject *pyobj_subdiv;
-  PyObject *pyobj_pt;
-  CvSubdiv2D *subdiv;
-  CvPoint2D32f pt;
-  CvSubdiv2DEdge edge;
-  CvSubdiv2DPoint* vertex;
-
-  if (!PyArg_ParseTuple(args, "OO", &pyobj_subdiv, &pyobj_pt))
-    return NULL;
-  if (!convert_to_CvSubdiv2DPTR(pyobj_subdiv, &subdiv, "subdiv"))
-    return NULL;
-  if (!convert_to_CvPoint2D32f(pyobj_pt, &pt, "pt"))
-    return NULL;
-
-  CvSubdiv2DPointLocation loc = cvSubdiv2DLocate(subdiv, pt, &edge, &vertex);
-  PyObject *r;
-  switch (loc) {
-  case CV_PTLOC_INSIDE:
-  case CV_PTLOC_ON_EDGE:
-    r = FROM_CvSubdiv2DEdge(edge);
-    break;
-  case CV_PTLOC_VERTEX:
-    r = FROM_CvSubdiv2DPointPTR(vertex);
-    break;
-  case CV_PTLOC_OUTSIDE_RECT:
-    r = Py_None;
-    Py_INCREF(Py_None);
-    break;
-  default:
-    return (PyObject*)failmsg("Unexpected loc from cvSubdiv2DLocate");
-  }
-  return Py_BuildValue("iO", (int)loc, r);
-}
-
-static PyObject *pycvCalcOpticalFlowPyrLK(PyObject *self, PyObject *args)
-{
-  CvArr* prev;
-  PyObject *pyobj_prev = NULL;
-  CvArr* curr;
-  PyObject *pyobj_curr = NULL;
-  CvArr* prev_pyr;
-  PyObject *pyobj_prev_pyr = NULL;
-  CvArr* curr_pyr;
-  PyObject *pyobj_curr_pyr = NULL;
-  CvPoint2D32f* prev_features;
-  PyObject *pyobj_prev_features = NULL;
-  PyObject *pyobj_curr_features = NULL;
-  CvPoint2D32f* curr_features;
-  CvSize win_size;
-  int level;
-  CvTermCriteria criteria;
-  int flags;
-
-  if (!PyArg_ParseTuple(args, "OOOOO(ii)i(iif)i|O",
-    &pyobj_prev, &pyobj_curr, &pyobj_prev_pyr, &pyobj_curr_pyr,
-    &pyobj_prev_features,
-    &win_size.width, &win_size.height, &level,
-    &criteria.type, &criteria.max_iter, &criteria.epsilon,
-    &flags,
-    &pyobj_curr_features))
-    return NULL;
-  if (!convert_to_CvArr(pyobj_prev, &prev, "prev")) return NULL;
-  if (!convert_to_CvArr(pyobj_curr, &curr, "curr")) return NULL;
-  if (!convert_to_CvArr(pyobj_prev_pyr, &prev_pyr, "prev_pyr")) return NULL;
-  if (!convert_to_CvArr(pyobj_curr_pyr, &curr_pyr, "curr_pyr")) return NULL;
-  if (!convert_to_CvPoint2D32fPTR(pyobj_prev_features, &prev_features, "prev_features")) return NULL;
-  int count = (int)PySequence_Length(pyobj_prev_features);
-  if (flags & CV_LKFLOW_INITIAL_GUESSES) {
-    failmsg("flag CV_LKFLOW_INITIAL_GUESSES is determined automatically from function arguments - it is not required");
-    return NULL;
-  }
-  if (!pyobj_curr_features) {
-    curr_features = new CvPoint2D32f[count];
-  } else {
-    if (PySequence_Length(pyobj_curr_features) != count) {
-      failmsg("curr_features must have same length as prev_features");
-      return NULL;
-    }
-    if (!convert_to_CvPoint2D32fPTR(pyobj_curr_features, &curr_features, "curr_features")) return NULL;
-    flags |= CV_LKFLOW_INITIAL_GUESSES;
-  }
-  float *track_error = new float[count];
-  char* status = new char[count];
-  ERRWRAP(cvCalcOpticalFlowPyrLK(prev, curr, prev_pyr, curr_pyr, prev_features, curr_features, count, win_size, level, status, track_error, criteria, flags));
-
-  cvpoint2d32f_count r0;
-  r0.points = curr_features;
-  r0.count = count;
-
-  chars r1;
-  r1.f = status;
-  r1.count = count;
-
-  floats r2;
-  r2.f = track_error;
-  r2.count = count;
-
-  return Py_BuildValue("NNN", FROM_cvpoint2d32f_count(r0), FROM_chars(r1), FROM_floats(r2));
-}
-
-// pt1,pt2 are input and output arguments here
-
-static PyObject *pycvClipLine(PyObject *self, PyObject *args)
-{
-  CvSize img_size;
-  PyObject *pyobj_img_size = NULL;
-  CvPoint pt1;
-  PyObject *pyobj_pt1 = NULL;
-  CvPoint pt2;
-  PyObject *pyobj_pt2 = NULL;
-
-  if (!PyArg_ParseTuple(args, "OOO", &pyobj_img_size, &pyobj_pt1, &pyobj_pt2))
-    return NULL;
-  if (!convert_to_CvSize(pyobj_img_size, &img_size, "img_size")) return NULL;
-  if (!convert_to_CvPoint(pyobj_pt1, &pt1, "pt1")) return NULL;
-  if (!convert_to_CvPoint(pyobj_pt2, &pt2, "pt2")) return NULL;
-  int r;
-  ERRWRAP(r = cvClipLine(img_size, &pt1, &pt2));
-  if (r == 0) {
-    Py_RETURN_NONE;
-  } else {
-    return Py_BuildValue("NN", FROM_CvPoint(pt1), FROM_CvPoint(pt2));
-  }
-}
-
-static PyObject *pyfinddatamatrix(PyObject *self, PyObject *args)
-{
-  PyObject *pyim;
-  if (!PyArg_ParseTuple(args, "O", &pyim))
-    return NULL;
-
-  CvMat *image;
-  if (!convert_to_CvMat(pyim, &image, "image")) return NULL;
-
-  std::deque <DataMatrixCode> codes;
-  ERRWRAP(codes = cvFindDataMatrix(image));
-
-  PyObject *pycodes = PyList_New(codes.size());
-  for (size_t i = 0; i < codes.size(); i++) {
-    DataMatrixCode *pc = &codes[i];
-    PyList_SetItem(pycodes, i, Py_BuildValue("(sOO)", pc->msg, FROM_CvMat(pc->corners), FROM_CvMat(pc->original)));
-  }
-
-  return pycodes;
-}
-
-static PyObject *temp_test(PyObject *self, PyObject *args)
-{
-#if 0
-  CvArr *im = cvLoadImage("../samples/c/lena.jpg", 0);
-  printf("im=%p\n", im);
-  CvMat *m = cvEncodeImage(".jpeg", im);
-#endif
-#if 0
-  CvArr *im = cvLoadImage("lena.jpg", 0);
-  float r0[] = { 0, 255 };
-  float *ranges[] = { r0 };
-  int hist_size[] = { 256 };
-  CvHistogram *hist = cvCreateHist(1, hist_size, CV_HIST_ARRAY, ranges, 1);
-  cvCalcHist(im, hist, 0, 0);
-#endif
-
-#if 0
-  CvMat* mat = cvCreateMat( 3, 3, CV_32F );
-  CvMat row_header, *row;
-  row = cvReshape( mat, &row_header, 0, 1 );
-  printf("%d,%d\n", row_header.rows, row_header.cols);
-  printf("ge %08x\n", cvGetElemType(mat));
-#endif
-
-#if 0
-  CvMat *m = cvCreateMat(1, 10, CV_8UC1);
-  printf("CvMat stride ===> %d\n", m->step);
-#endif
-
-#if 0
-  CvPoint2D32f src[3] = { { 0,0 }, { 1,0 }, { 0,1 } };
-  CvPoint2D32f dst[3] = { { 0,0 }, { 17,0 }, { 0,17 } };
-
-  CvMat* mapping = cvCreateMat(2, 3, CV_32FC1);
-  cvGetAffineTransform(src, dst, mapping);
-  printf("===> %f\n", cvGetReal2D(mapping, 0, 0));
-#endif
-
-#if 0
-  CvArr *im = cvLoadImage("checker77.png");
-  CvPoint2D32f corners[49];
-  int count;
-  cvFindChessboardCorners(im, cvSize(7,7), corners, &count, 0);
-  printf("count=%d\n", count);
-#endif
-
-#if 0
-  CvMat *src = cvCreateMat(512, 512, CV_8UC3);
-  CvMat *dst = cvCreateMat(512, 512, CV_8UC3);
-  cvPyrMeanShiftFiltering(src, dst, 5, 5);
-  return FROM_CvMat(src);
-#endif
-
-  return PyFloat_FromDouble(0.0);
-}
-
-static PyObject *pycvFindChessboardCorners(PyObject *self, PyObject *args, PyObject *kw)
-{
-  CvArr* image;
-  PyObject *pyobj_image = NULL;
-  CvSize pattern_size;
-  PyObject *pyobj_pattern_size = NULL;
-  cvpoint2d32f_count corners;
-  int flags = CV_CALIB_CB_ADAPTIVE_THRESH;
-
-  const char *keywords[] = { "image", "pattern_size", "flags", NULL };
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "OO|i", (char**)keywords, &pyobj_image, &pyobj_pattern_size, &flags))
-    return NULL;
-  if (!convert_to_CvArr(pyobj_image, &image, "image")) return NULL;
-  if (!convert_to_CvSize(pyobj_pattern_size, &pattern_size, "pattern_size")) return NULL;
-  int r;
-  corners.points = new CvPoint2D32f[pattern_size.width * pattern_size.height];
-  ERRWRAP(r = cvFindChessboardCorners(image, pattern_size, corners.points,&corners.count, flags));
-  return Py_BuildValue("NN", FROM_int(r), FROM_cvpoint2d32f_count(corners));
-}
-
-// For functions GetSubRect, GetRow, GetCol.
-// recipient has a view into donor's data, and needs to share it.
-// make recipient use the donor's data, compute the offset,
-// and manage reference counts.
-
-static void preShareData(CvArr *donor, CvMat **recipient)
-{
-  *recipient = cvCreateMatHeader(4, 4, cvGetElemType(donor));
-}
-
-static PyObject *shareData(PyObject *donor, CvArr *pdonor, CvMat *precipient)
-{
-  PyObject *recipient = (PyObject*)PyObject_NEW(cvmat_t, &cvmat_Type);
-  ((cvmat_t*)recipient)->a = precipient;
-  ((cvmat_t*)recipient)->offset = cvPtr1D(precipient, 0) - cvPtr1D(pdonor, 0);
-
-  PyObject *arr_data;
-  if (is_cvmat(donor)) {
-    arr_data = ((cvmat_t*)donor)->data;
-    ((cvmat_t*)recipient)->offset += ((cvmat_t*)donor)->offset;
-  } else if (is_iplimage(donor)) {
-    arr_data = ((iplimage_t*)donor)->data;
-    ((cvmat_t*)recipient)->offset += ((iplimage_t*)donor)->offset;
-  } else {
-    return (PyObject*)failmsg("Argument 'mat' must be either IplImage or CvMat");
-  }
-  ((cvmat_t*)recipient)->data = arr_data;
-  Py_INCREF(arr_data);
-  return recipient;
-}
-
-static PyObject *shareDataND(PyObject *donor, CvMatND *pdonor, CvMatND *precipient)
-{
-  PyObject *recipient = (PyObject*)PyObject_NEW(cvmatnd_t, &cvmatnd_Type);
-  ((cvmatnd_t*)recipient)->a = precipient;
-  ((cvmatnd_t*)recipient)->offset = 0;
-
-  PyObject *arr_data;
-  arr_data = ((cvmatnd_t*)donor)->data;
-  ((cvmatnd_t*)recipient)->data = arr_data;
-  Py_INCREF(arr_data);
-  return recipient;
-}
-
-static PyObject *pycvGetHuMoments(PyObject *self, PyObject *args)
-{
-  CvMoments* moments;
-  PyObject *pyobj_moments = NULL;
-
-  if (!PyArg_ParseTuple(args, "O", &pyobj_moments))
-    return NULL;
-  if (!convert_to_CvMomentsPTR(pyobj_moments, &moments, "moments")) return NULL;
-  CvHuMoments r;
-  ERRWRAP(cvGetHuMoments(moments, &r));
-  return Py_BuildValue("ddddddd", r.hu1, r.hu2, r.hu3, r.hu4, r.hu5, r.hu6, r.hu7);
-}
-
-static PyObject *pycvFitLine(PyObject *self, PyObject *args)
-{
-  cvarrseq points;
-  PyObject *pyobj_points = NULL;
-  int dist_type;
-  float param;
-  float reps;
-  float aeps;
-  float r[6];
-
-  if (!PyArg_ParseTuple(args, "Oifff", &pyobj_points, &dist_type, &param, &reps, &aeps))
-    return NULL;
-  if (!convert_to_cvarrseq(pyobj_points, &points, "points")) return NULL;
-  ERRWRAP(cvFitLine(points.mat, dist_type, param, reps, aeps, r));
-  int dimension;
-  if (strcmp("opencv-matrix", cvTypeOf(points.mat)->type_name) == 0)
-    dimension = CV_MAT_CN(cvGetElemType(points.mat));
-  else {
-    // sequence case... don't think there is a sequence of 3d points,
-    // so assume 2D
-    dimension = 2;
-  }
-  if (dimension == 2)
-    return Py_BuildValue("dddd", r[0], r[1], r[2], r[3]);
-  else
-    return Py_BuildValue("dddddd", r[0], r[1], r[2], r[3], r[4], r[5]);
-}
-
-static PyObject *pycvGetMinMaxHistValue(PyObject *self, PyObject *args)
-{
-  CvHistogram* hist;
-  PyObject *pyobj_hist = NULL;
-  float min_val;
-  float max_val;
-  int min_loc[CV_MAX_DIM];
-  int max_loc[CV_MAX_DIM];
-
-  if (!PyArg_ParseTuple(args, "O", &pyobj_hist))
-    return NULL;
-  if (!convert_to_CvHistogram(pyobj_hist, &hist, "hist")) return NULL;
-  ERRWRAP(cvGetMinMaxHistValue(hist, &min_val, &max_val, min_loc, max_loc));
-  int d = cvGetDims(hist->bins);
-  PyObject *pminloc = PyTuple_New(d), *pmaxloc = PyTuple_New(d);
-  for (int i = 0; i < d; i++) {
-    PyTuple_SetItem(pminloc, i, PyInt_FromLong(min_loc[i]));
-    PyTuple_SetItem(pmaxloc, i, PyInt_FromLong(max_loc[i]));
-  }
-  return Py_BuildValue("ffNN", min_val, max_val, pminloc, pmaxloc);
-}
-
-static CvSeq* cvHOGDetectMultiScale( const CvArr* image, CvMemStorage* storage,
-  const CvArr* svm_classifier=NULL, CvSize win_stride=cvSize(0,0),
-  double hit_threshold=0, double scale=1.05,
-  int group_threshold=2, CvSize padding=cvSize(0,0),
-  CvSize win_size=cvSize(64,128), CvSize block_size=cvSize(16,16),
-  CvSize block_stride=cvSize(8,8), CvSize cell_size=cvSize(8,8),
-  int nbins=9, int gammaCorrection=1 )
-{
-    cv::HOGDescriptor hog(win_size, block_size, block_stride, cell_size, nbins, 1, -1, cv::HOGDescriptor::L2Hys, 0.2, gammaCorrection!=0);
-    if(win_stride.width == 0 && win_stride.height == 0)
-        win_stride = block_stride;
-    cv::Mat img = cv::cvarrToMat(image);
-    std::vector<cv::Rect> found;
-    if(svm_classifier)
-    {
-        CvMat stub, *m = cvGetMat(svm_classifier, &stub);
-        int sz = m->cols*m->rows;
-        CV_Assert(CV_IS_MAT_CONT(m->type) && (m->cols == 1 || m->rows == 1) && CV_MAT_TYPE(m->type) == CV_32FC1);
-        std::vector<float> w(sz);
-        std::copy(m->data.fl, m->data.fl + sz, w.begin());
-        hog.setSVMDetector(w);
-    }
-    else
-        hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
-    hog.detectMultiScale(img, found, hit_threshold, win_stride, padding, scale, group_threshold);
-    CvSeq* seq = cvCreateSeq(cv::DataType<cv::Rect>::type, sizeof(CvSeq), sizeof(cv::Rect), storage);
-    if(found.size())
-        cvSeqPushMulti(seq, &found[0], (int)found.size());
-    return seq;
-}
-
-static void cvGrabCut(CvArr *image,
-                      CvArr *mask,
-                      CvRect rect,
-                      CvArr *bgdModel,
-                      CvArr *fgdModel,
-                      int iterCount,
-                      int mode)
-{
-  cv::Mat _image = cv::cvarrToMat(image);
-  cv::Mat _mask = cv::cvarrToMat(mask);
-  cv::Mat _bgdModel = cv::cvarrToMat(bgdModel);
-  cv::Mat _fgdModel = cv::cvarrToMat(fgdModel);
-  grabCut(_image, _mask, rect, _bgdModel, _fgdModel, iterCount, mode);
-}
-
-static int zero = 0;
-
-/************************************************************************/
-/* Custom Validators */
-
-#define CVPY_VALIDATE_DrawChessboardCorners() do { \
-  if ((patternSize.width * patternSize.height) != corners.count) \
-    return (PyObject*)failmsg("Size is %dx%d, but corner list is length %d", patternSize.width, patternSize.height, corners.count); \
-  } while (0)
-
-#define cvGetRotationMatrix2D cv2DRotationMatrix
-
-/************************************************************************/
-/* Generated functions */
-
-#define constCvMat const CvMat
-#define FROM_constCvMatPTR(x) FROM_CvMatPTR((CvMat*)x)
-
-#define cvSnakeImage(image, points, length, a, b, g, win, criteria, calc_gradient) \
-  do { \
-    int coeff_usage; \
-    if ((alpha.count == 1) && (beta.count == 1) && (gamma.count == 1)) \
-      coeff_usage = CV_VALUE; \
-    else if ((length == alpha.count) && (alpha.count == beta.count) && (beta.count == gamma.count)) \
-      coeff_usage = CV_ARRAY; \
-    else \
-      return (PyObject*)failmsg("SnakeImage weights invalid"); \
-    cvSnakeImage(image, points, length, a, b, g, coeff_usage, win, criteria, calc_gradient); \
-  } while (0)
-
-static double cppKMeans(const CvArr* _samples, int cluster_count, CvArr* _labels,
-           CvTermCriteria termcrit, int attempts, int flags, CvArr* _centers)
-{
-    cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers;
-    if( _centers )
-        centers = cv::cvarrToMat(_centers);
-    CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
-        (labels.cols == 1 || labels.rows == 1) &&
-        labels.cols + labels.rows - 1 == data.rows );
-    return cv::kmeans(data, cluster_count, labels, termcrit, attempts,
-                        flags, _centers ? cv::OutputArray(centers) : cv::OutputArray() );
-}
-
-#define cvKMeans2(samples, nclusters, labels, termcrit, attempts, flags, centers) \
-    cppKMeans(samples, nclusters, labels, termcrit, attempts, flags, centers)
-
-#include "generated0.i"
-
-#if PYTHON_USE_NUMPY
-#include "opencv2x.h"
-#include "pyopencv_generated_types.h"
-#include "pyopencv_generated_funcs.h"
-#endif
-
-static PyMethodDef methods[] = {
-
-#if PYTHON_USE_NUMPY
-    {"fromarray", (PyCFunction)pycvfromarray, METH_KEYWORDS, "fromarray(array) -> cvmatnd"},
-#endif
-
-  //{"CalcOpticalFlowFarneback", (PyCFunction)pycvCalcOpticalFlowFarneback, METH_KEYWORDS, "CalcOpticalFlowFarneback(prev, next, flow, pyr_scale=0.5, levels=3, win_size=15, iterations=3, poly_n=7, poly_sigma=1.5, flags=0) -> None"},
-  //{"_HOGComputeDescriptors", (PyCFunction)pycvHOGComputeDescriptors, METH_KEYWORDS, "_HOGComputeDescriptors(image, win_stride=block_stride, locations=None, padding=(0,0), win_size=(64,128), block_size=(16,16), block_stride=(8,8), cell_size=(8,8), nbins=9, gammaCorrection=true) -> list_of_descriptors"},
-  //{"_HOGDetect", (PyCFunction)pycvHOGDetect, METH_KEYWORDS, "_HOGDetect(image, svm_classifier, win_stride=block_stride, locations=None, padding=(0,0), win_size=(64,128), block_size=(16,16), block_stride=(8,8), cell_size=(8,8), nbins=9, gammaCorrection=true) -> list_of_points"},
-  //{"_HOGDetectMultiScale", (PyCFunction)pycvHOGDetectMultiScale, METH_KEYWORDS, "_HOGDetectMultiScale(image, svm_classifier, win_stride=block_stride, scale=1.05, group_threshold=2, padding=(0,0), win_size=(64,128), block_size=(16,16), block_stride=(8,8), cell_size=(8,8), nbins=9, gammaCorrection=true) -> list_of_points"},
-
-  {"FindDataMatrix", pyfinddatamatrix, METH_VARARGS},
-  {"temp_test", temp_test, METH_VARARGS},
-
-#include "generated1.i"
-
-#if PYTHON_USE_NUMPY
-#include "pyopencv_generated_func_tab.h"
-#endif
-
-  {NULL, NULL},
-};
-
-/************************************************************************/
-/* Module init */
-
-static int to_ok(PyTypeObject *to)
-{
-  to->tp_alloc = PyType_GenericAlloc;
-  to->tp_new = PyType_GenericNew;
-  to->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
-  return (PyType_Ready(to) == 0);
-}
-
-#define MKTYPE(NAME)  NAME##_specials(); if (!to_ok(&NAME##_Type)) return
-#define MKTYPE2(NAME) pyopencv_##NAME##_specials(); if (!to_ok(&pyopencv_##NAME##_Type)) return
-
-using namespace cv;
-
-extern "C"
-#if defined WIN32 || defined _WIN32
-__declspec(dllexport)
-#endif
-
-void initcv()
-{
-#if PYTHON_USE_NUMPY
-    import_array();
-#endif
-    
-  PyObject *m, *d;
-
-  cvSetErrMode(CV_ErrModeParent);
-
-  MKTYPE(cvcontourtree);
-  MKTYPE(cvfont);
-  MKTYPE(cvhistogram);
-  MKTYPE(cvlineiterator);
-  MKTYPE(cvmat);
-  MKTYPE(cvmatnd);
-  MKTYPE(cvmemstorage);
-  MKTYPE(cvsubdiv2dedge);
-  MKTYPE(cvrng);
-  MKTYPE(cvseq);
-  MKTYPE(cvset);
-  MKTYPE(cvsubdiv2d);
-  MKTYPE(cvsubdiv2dpoint);
-  MKTYPE(iplimage);
-  MKTYPE(memtrack);
-
-#include "generated4.i"
-
-#if PYTHON_USE_NUMPY
-#include "pyopencv_generated_type_reg.h"
-#endif
-
-  m = Py_InitModule(MODULESTR"", methods);
-  d = PyModule_GetDict(m);
-
-  PyDict_SetItemString(d, "__version__", PyString_FromString("$Rev: 4557 $"));
-
-  opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
-  PyDict_SetItemString(d, "error", opencv_error);
-
-  // Couple of warnings about strict aliasing here.  Not clear how to fix.
-  union {
-    PyObject *o;
-    PyTypeObject *to;
-  } convert;
-  convert.to = &iplimage_Type;
-  PyDict_SetItemString(d, "iplimage", convert.o);
-  convert.to = &cvmat_Type;
-  PyDict_SetItemString(d, "cvmat", convert.o);
-
-  // AFAIK the only floating-point constant
-  PyDict_SetItemString(d, "CV_PI", PyFloat_FromDouble(CV_PI));
-
-#define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
-#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
-#define PUBLISH2(I, value) PyDict_SetItemString(d, #I, PyLong_FromLong(value))
-
-  PUBLISHU(IPL_DEPTH_8U);
-  PUBLISHU(IPL_DEPTH_8S);
-  PUBLISHU(IPL_DEPTH_16U);
-  PUBLISHU(IPL_DEPTH_16S);
-  PUBLISHU(IPL_DEPTH_32S);
-  PUBLISHU(IPL_DEPTH_32F);
-  PUBLISHU(IPL_DEPTH_64F);
-
-  PUBLISH(CV_LOAD_IMAGE_COLOR);
-  PUBLISH(CV_LOAD_IMAGE_GRAYSCALE);
-  PUBLISH(CV_LOAD_IMAGE_UNCHANGED);
-  PUBLISH(CV_HIST_ARRAY);
-  PUBLISH(CV_HIST_SPARSE);
-  PUBLISH(CV_8U);
-  PUBLISH(CV_8UC1);
-  PUBLISH(CV_8UC2);
-  PUBLISH(CV_8UC3);
-  PUBLISH(CV_8UC4);
-  PUBLISH(CV_8S);
-  PUBLISH(CV_8SC1);
-  PUBLISH(CV_8SC2);
-  PUBLISH(CV_8SC3);
-  PUBLISH(CV_8SC4);
-  PUBLISH(CV_16U);
-  PUBLISH(CV_16UC1);
-  PUBLISH(CV_16UC2);
-  PUBLISH(CV_16UC3);
-  PUBLISH(CV_16UC4);
-  PUBLISH(CV_16S);
-  PUBLISH(CV_16SC1);
-  PUBLISH(CV_16SC2);
-  PUBLISH(CV_16SC3);
-  PUBLISH(CV_16SC4);
-  PUBLISH(CV_32S);
-  PUBLISH(CV_32SC1);
-  PUBLISH(CV_32SC2);
-  PUBLISH(CV_32SC3);
-  PUBLISH(CV_32SC4);
-  PUBLISH(CV_32F);
-  PUBLISH(CV_32FC1);
-  PUBLISH(CV_32FC2);
-  PUBLISH(CV_32FC3);
-  PUBLISH(CV_32FC4);
-  PUBLISH(CV_64F);
-  PUBLISH(CV_64FC1);
-  PUBLISH(CV_64FC2);
-  PUBLISH(CV_64FC3);
-  PUBLISH(CV_64FC4);
-  PUBLISH(CV_NEXT_AROUND_ORG);
-  PUBLISH(CV_NEXT_AROUND_DST);
-  PUBLISH(CV_PREV_AROUND_ORG);
-  PUBLISH(CV_PREV_AROUND_DST);
-  PUBLISH(CV_NEXT_AROUND_LEFT);
-  PUBLISH(CV_NEXT_AROUND_RIGHT);
-  PUBLISH(CV_PREV_AROUND_LEFT);
-  PUBLISH(CV_PREV_AROUND_RIGHT);
-
-  PUBLISH(CV_WINDOW_AUTOSIZE);
-
-  PUBLISH(CV_PTLOC_INSIDE);
-  PUBLISH(CV_PTLOC_ON_EDGE);
-  PUBLISH(CV_PTLOC_VERTEX);
-  PUBLISH(CV_PTLOC_OUTSIDE_RECT);
-
-  PUBLISH(GC_BGD);
-  PUBLISH(GC_FGD);
-  PUBLISH(GC_PR_BGD);
-  PUBLISH(GC_PR_FGD);
-  PUBLISH(GC_INIT_WITH_RECT);
-  PUBLISH(GC_INIT_WITH_MASK);
-  PUBLISH(GC_EVAL);
-
-#include "generated2.i"
-
-#if PYTHON_USE_NUMPY
-#include "pyopencv_generated_const_reg.h"
-#endif
-
-#if 0
-  {
-    int sizes[] = { 10 } ;
-    float ranges[] = { 0.0, 1.0 };
-    // CvHistogram*h = cvCreateHist(1, sizes, CV_HIST_ARRAY);
-    CvHistogram H;
-    float data[10];
-    CvHistogram*h = cvMakeHistHeaderForArray(1, sizes, &H, data);
-    printf("h->type = %08x\n", h->type);
-    printf("h->bins = %p\n", h->bins);
-    printf("h->mat = %p\n", &(h->mat));
-  }
-#endif
-}
-
diff --git a/modules/python/src/defs b/modules/python/src/defs
deleted file mode 100644
index 38b5753c0..000000000
--- a/modules/python/src/defs
+++ /dev/null
@@ -1,339 +0,0 @@
-#define CV_BLUR_NO_SCALE 0
-#define CV_BLUR  1
-#define CV_GAUSSIAN  2
-#define CV_MEDIAN 3
-#define CV_BILATERAL 4
-#define CV_INPAINT_NS      0
-#define CV_INPAINT_TELEA   1
-#define CV_SCHARR -1
-#define CV_MAX_SOBEL_KSIZE 7
-#define  CV_BGR2BGRA    0
-#define  CV_RGB2RGBA    CV_BGR2BGRA
-#define  CV_BGRA2BGR    1
-#define  CV_RGBA2RGB    CV_BGRA2BGR
-#define  CV_BGR2RGBA    2
-#define  CV_RGB2BGRA    CV_BGR2RGBA
-#define  CV_RGBA2BGR    3
-#define  CV_BGRA2RGB    CV_RGBA2BGR
-#define  CV_BGR2RGB     4
-#define  CV_RGB2BGR     CV_BGR2RGB
-#define  CV_BGRA2RGBA   5
-#define  CV_RGBA2BGRA   CV_BGRA2RGBA
-#define  CV_BGR2GRAY    6
-#define  CV_RGB2GRAY    7
-#define  CV_GRAY2BGR    8
-#define  CV_GRAY2RGB    CV_GRAY2BGR
-#define  CV_GRAY2BGRA   9
-#define  CV_GRAY2RGBA   CV_GRAY2BGRA
-#define  CV_BGRA2GRAY   10
-#define  CV_RGBA2GRAY   11
-#define  CV_BGR2BGR565  12
-#define  CV_RGB2BGR565  13
-#define  CV_BGR5652BGR  14
-#define  CV_BGR5652RGB  15
-#define  CV_BGRA2BGR565 16
-#define  CV_RGBA2BGR565 17
-#define  CV_BGR5652BGRA 18
-#define  CV_BGR5652RGBA 19
-#define  CV_GRAY2BGR565 20
-#define  CV_BGR5652GRAY 21
-#define  CV_BGR2BGR555  22
-#define  CV_RGB2BGR555  23
-#define  CV_BGR5552BGR  24
-#define  CV_BGR5552RGB  25
-#define  CV_BGRA2BGR555 26
-#define  CV_RGBA2BGR555 27
-#define  CV_BGR5552BGRA 28
-#define  CV_BGR5552RGBA 29
-#define  CV_GRAY2BGR555 30
-#define  CV_BGR5552GRAY 31
-#define  CV_BGR2XYZ     32
-#define  CV_RGB2XYZ     33
-#define  CV_XYZ2BGR     34
-#define  CV_XYZ2RGB     35
-#define  CV_BGR2YCrCb   36
-#define  CV_RGB2YCrCb   37
-#define  CV_YCrCb2BGR   38
-#define  CV_YCrCb2RGB   39
-#define  CV_BGR2HSV     40
-#define  CV_RGB2HSV     41
-#define  CV_BGR2Lab     44
-#define  CV_RGB2Lab     45
-#define  CV_BayerBG2BGR 46
-#define  CV_BayerGB2BGR 47
-#define  CV_BayerRG2BGR 48
-#define  CV_BayerGR2BGR 49
-#define  CV_BayerBG2RGB CV_BayerRG2BGR
-#define  CV_BayerGB2RGB CV_BayerGR2BGR
-#define  CV_BayerRG2RGB CV_BayerBG2BGR
-#define  CV_BayerGR2RGB CV_BayerGB2BGR
-#define  CV_BayerBG2BGR_VNG 62
-#define  CV_BayerGB2BGR_VNG 63
-#define  CV_BayerRG2BGR_VNG 64
-#define  CV_BayerGR2BGR_VNG 65
-#define  CV_BGR2Luv     50
-#define  CV_RGB2Luv     51
-#define  CV_BGR2HLS     52
-#define  CV_RGB2HLS     53
-#define  CV_HSV2BGR     54
-#define  CV_HSV2RGB     55
-#define  CV_Lab2BGR     56
-#define  CV_Lab2RGB     57
-#define  CV_Luv2BGR     58
-#define  CV_Luv2RGB     59
-#define  CV_HLS2BGR     60
-#define  CV_HLS2RGB     61
-#define  CV_COLORCVT_MAX  100
-#define  CV_INTER_NN        0
-#define  CV_INTER_LINEAR    1
-#define  CV_INTER_CUBIC     2
-#define  CV_INTER_AREA      3
-#define  CV_WARP_FILL_OUTLIERS 8
-#define  CV_WARP_INVERSE_MAP  16
-#define  CV_SHAPE_RECT      0
-#define  CV_SHAPE_CROSS     1
-#define  CV_SHAPE_ELLIPSE   2
-#define  CV_SHAPE_CUSTOM    100
-#define CV_MOP_OPEN         2
-#define CV_MOP_CLOSE        3
-#define CV_MOP_GRADIENT     4
-#define CV_MOP_TOPHAT       5
-#define CV_MOP_BLACKHAT     6
-#define  CV_TM_SQDIFF        0
-#define  CV_TM_SQDIFF_NORMED 1
-#define  CV_TM_CCORR         2
-#define  CV_TM_CCORR_NORMED  3
-#define  CV_TM_CCOEFF        4
-#define  CV_TM_CCOEFF_NORMED 5
-#define  CV_LKFLOW_PYR_A_READY       1
-#define  CV_LKFLOW_PYR_B_READY       2
-#define  CV_LKFLOW_INITIAL_GUESSES   4
-#define  CV_LKFLOW_GET_MIN_EIGENVALS 8
-#define CV_POLY_APPROX_DP 0
-#define CV_CONTOURS_MATCH_I1  1
-#define CV_CONTOURS_MATCH_I2  2
-#define CV_CONTOURS_MATCH_I3  3
-#define CV_CLOCKWISE         1
-#define CV_COUNTER_CLOCKWISE 2
-#define CV_COMP_CORREL        0
-#define CV_COMP_CHISQR        1
-#define CV_COMP_INTERSECT     2
-#define CV_COMP_BHATTACHARYYA 3
-#define CV_DIST_MASK_3   3
-#define CV_DIST_MASK_5   5
-#define CV_DIST_MASK_PRECISE 0
-#define CV_THRESH_BINARY      0  /* value = value > threshold ? max_value : 0       */
-#define CV_THRESH_BINARY_INV  1  /* value = value > threshold ? 0 : max_value       */
-#define CV_THRESH_TRUNC       2  /* value = value > threshold ? threshold : value   */
-#define CV_THRESH_TOZERO      3  /* value = value > threshold ? value : 0           */
-#define CV_THRESH_TOZERO_INV  4  /* value = value > threshold ? 0 : value           */
-#define CV_THRESH_MASK        7
-#define CV_THRESH_OTSU        8  /* use Otsu algorithm to choose the optimal threshold value;
-#define CV_ADAPTIVE_THRESH_MEAN_C  0
-#define CV_ADAPTIVE_THRESH_GAUSSIAN_C  1
-#define CV_FLOODFILL_FIXED_RANGE (1 << 16)
-#define CV_FLOODFILL_MASK_ONLY   (1 << 17)
-#define CV_CANNY_L2_GRADIENT  (1 << 31)
-#define CV_HOUGH_STANDARD 0
-#define CV_HOUGH_PROBABILISTIC 1
-#define CV_HOUGH_MULTI_SCALE 2
-#define CV_HOUGH_GRADIENT 3
-#define CV_HAAR_DO_CANNY_PRUNING    1
-#define CV_HAAR_SCALE_IMAGE         2
-#define CV_HAAR_FIND_BIGGEST_OBJECT 4 
-#define CV_HAAR_DO_ROUGH_SEARCH     8
-#define CV_LMEDS 4
-#define CV_RANSAC 8
-#define CV_CALIB_CB_ADAPTIVE_THRESH  1
-#define CV_CALIB_CB_NORMALIZE_IMAGE  2
-#define CV_CALIB_CB_FILTER_QUADS     4 
-#define CV_CALIB_USE_INTRINSIC_GUESS  1
-#define CV_CALIB_FIX_ASPECT_RATIO     2
-#define CV_CALIB_FIX_PRINCIPAL_POINT  4
-#define CV_CALIB_ZERO_TANGENT_DIST    8
-#define CV_CALIB_FIX_FOCAL_LENGTH 16
-#define CV_CALIB_FIX_K1  32
-#define CV_CALIB_FIX_K2  64
-#define CV_CALIB_FIX_K3  128
-#define CV_CALIB_FIX_INTRINSIC  256
-#define CV_CALIB_SAME_FOCAL_LENGTH 512
-#define CV_CALIB_ZERO_DISPARITY 1024
-#define CV_FM_7POINT 1
-#define CV_FM_8POINT 2
-#define CV_FM_LMEDS_ONLY  CV_LMEDS
-#define CV_FM_RANSAC_ONLY CV_RANSAC
-#define CV_FM_LMEDS CV_LMEDS
-#define CV_FM_RANSAC CV_RANSAC
-#define CV_STEREO_BM_NORMALIZED_RESPONSE  0
-#define CV_STEREO_BM_BASIC 0
-#define CV_STEREO_BM_FISH_EYE 1
-#define CV_STEREO_BM_NARROW 2
-#define CV_STEREO_GC_OCCLUDED  SHRT_MAX
-#define CV_AUTOSTEP  0x7fffffff
-#define CV_MAX_ARR 10
-#define CV_NO_DEPTH_CHECK     1
-#define CV_NO_CN_CHECK        2
-#define CV_NO_SIZE_CHECK      4
-#define CV_CMP_EQ   0
-#define CV_CMP_GT   1
-#define CV_CMP_GE   2
-#define CV_CMP_LT   3
-#define CV_CMP_LE   4
-#define CV_CMP_NE   5
-#define  CV_CHECK_RANGE    1
-#define  CV_CHECK_QUIET    2
-#define CV_RAND_UNI      0
-#define CV_RAND_NORMAL   1
-#define CV_SORT_EVERY_ROW 0
-#define CV_SORT_EVERY_COLUMN 1
-#define CV_SORT_ASCENDING 0
-#define CV_SORT_DESCENDING 16
-#define CV_GEMM_A_T 1
-#define CV_GEMM_B_T 2
-#define CV_GEMM_C_T 4
-#define CV_SVD_MODIFY_A   1
-#define CV_SVD_U_T        2
-#define CV_SVD_V_T        4
-#define CV_LU  0
-#define CV_SVD 1
-#define CV_SVD_SYM 2
-#define CV_CHOLESKY 3
-#define CV_QR  4
-#define CV_NORMAL 16
-#define CV_COVAR_SCRAMBLED 0
-#define CV_COVAR_NORMAL    1
-#define CV_COVAR_USE_AVG   2
-#define CV_COVAR_SCALE     4
-#define CV_COVAR_ROWS      8
-#define CV_COVAR_COLS     16
-#define CV_PCA_DATA_AS_ROW 0
-#define CV_PCA_DATA_AS_COL 1
-#define CV_PCA_USE_AVG 2
-#define CV_C            1
-#define CV_L1           2
-#define CV_L2           4
-#define CV_NORM_MASK    7
-#define CV_RELATIVE     8
-#define CV_DIFF         16
-#define CV_MINMAX       32
-#define CV_DIFF_C       (CV_DIFF | CV_C)
-#define CV_DIFF_L1      (CV_DIFF | CV_L1)
-#define CV_DIFF_L2      (CV_DIFF | CV_L2)
-#define CV_RELATIVE_C   (CV_RELATIVE | CV_C)
-#define CV_RELATIVE_L1  (CV_RELATIVE | CV_L1)
-#define CV_RELATIVE_L2  (CV_RELATIVE | CV_L2)
-#define CV_REDUCE_SUM 0
-#define CV_REDUCE_AVG 1
-#define CV_REDUCE_MAX 2
-#define CV_REDUCE_MIN 3
-#define CV_DXT_FORWARD  0
-#define CV_DXT_INVERSE  1
-#define CV_DXT_SCALE    2 /* divide result by size of array */
-#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE)
-#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE
-#define CV_DXT_ROWS     4 /* transform each row individually */
-#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */
-#define CV_FRONT 1
-#define CV_BACK 0
-#define  CV_GRAPH_VERTEX        1
-#define  CV_GRAPH_TREE_EDGE     2
-#define  CV_GRAPH_BACK_EDGE     4
-#define  CV_GRAPH_FORWARD_EDGE  8
-#define  CV_GRAPH_CROSS_EDGE    16
-#define  CV_GRAPH_ANY_EDGE      30
-#define  CV_GRAPH_NEW_TREE      32
-#define  CV_GRAPH_BACKTRACKING  64
-#define  CV_GRAPH_OVER          -1
-#define  CV_GRAPH_ALL_ITEMS    -1
-#define  CV_GRAPH_ITEM_VISITED_FLAG  (1 << 30)
-#define  CV_GRAPH_SEARCH_TREE_NODE_FLAG   (1 << 29)
-#define  CV_GRAPH_FORWARD_EDGE_FLAG       (1 << 28)
-#define CV_FILLED -1
-#define CV_AA 16
-#define CV_FONT_HERSHEY_SIMPLEX         0
-#define CV_FONT_HERSHEY_PLAIN           1
-#define CV_FONT_HERSHEY_DUPLEX          2
-#define CV_FONT_HERSHEY_COMPLEX         3
-#define CV_FONT_HERSHEY_TRIPLEX         4
-#define CV_FONT_HERSHEY_COMPLEX_SMALL   5
-#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX  6
-#define CV_FONT_HERSHEY_SCRIPT_COMPLEX  7
-#define CV_FONT_ITALIC                 16
-#define CV_FONT_VECTOR0    CV_FONT_HERSHEY_SIMPLEX
-#define CV_KMEANS_USE_INITIAL_LABELS    1
-#define CV_ErrModeLeaf     0   /* Print error and exit program */
-#define CV_ErrModeParent   1   /* Print error and continue */
-#define CV_ErrModeSilent   2   /* Don't print and continue */
-#define CV_RETR_EXTERNAL 0
-#define CV_RETR_LIST     1
-#define CV_RETR_CCOMP    2
-#define CV_RETR_TREE     3
-#define CV_CHAIN_CODE               0
-#define CV_CHAIN_APPROX_NONE        1
-#define CV_CHAIN_APPROX_SIMPLE      2
-#define CV_CHAIN_APPROX_TC89_L1     3
-#define CV_CHAIN_APPROX_TC89_KCOS   4
-#define CV_LINK_RUNS                5
-#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
-#define CV_DIST_USER    -1  /* User defined distance */
-#define CV_DIST_L1      1   /* distance = |x1-x2| + |y1-y2| */
-#define CV_DIST_L2      2   /* the simple euclidean distance */
-#define CV_DIST_C       3   /* distance = max(|x1-x2|,|y1-y2|) */
-#define CV_DIST_L12     4   /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */
-#define CV_DIST_FAIR    5   /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */
-#define CV_DIST_WELSCH  6   /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */
-#define CV_DIST_HUBER   7   /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */
-#define CV_HAAR_MAGIC_VAL    0x42500000
-#define CV_HAAR_FEATURE_MAX  3
-#define CV_TERMCRIT_ITER    1
-#define CV_TERMCRIT_NUMBER  CV_TERMCRIT_ITER
-#define CV_TERMCRIT_EPS     2
-#define CV_EVENT_MOUSEMOVE      0
-#define CV_EVENT_LBUTTONDOWN    1
-#define CV_EVENT_RBUTTONDOWN    2
-#define CV_EVENT_MBUTTONDOWN    3
-#define CV_EVENT_LBUTTONUP      4
-#define CV_EVENT_RBUTTONUP      5
-#define CV_EVENT_MBUTTONUP      6
-#define CV_EVENT_LBUTTONDBLCLK  7
-#define CV_EVENT_RBUTTONDBLCLK  8
-#define CV_EVENT_MBUTTONDBLCLK  9
-#define CV_EVENT_FLAG_LBUTTON   1
-#define CV_EVENT_FLAG_RBUTTON   2
-#define CV_EVENT_FLAG_MBUTTON   4
-#define CV_EVENT_FLAG_CTRLKEY   8
-#define CV_EVENT_FLAG_SHIFTKEY  16
-#define CV_EVENT_FLAG_ALTKEY    32
-#define CV_MAX_DIM            32
-#define CV_CAP_PROP_POS_MSEC       0
-#define CV_CAP_PROP_POS_FRAMES     1
-#define CV_CAP_PROP_POS_AVI_RATIO  2
-#define CV_CAP_PROP_FRAME_WIDTH    3
-#define CV_CAP_PROP_FRAME_HEIGHT   4
-#define CV_CAP_PROP_FPS            5
-#define CV_CAP_PROP_FOURCC         6
-#define CV_CAP_PROP_FRAME_COUNT    7
-#define CV_CAP_PROP_FORMAT         8
-#define CV_CAP_PROP_MODE           9
-#define CV_CAP_PROP_BRIGHTNESS    10
-#define CV_CAP_PROP_CONTRAST      11
-#define CV_CAP_PROP_SATURATION    12
-#define CV_CAP_PROP_HUE           13
-#define CV_CAP_PROP_GAIN          14
-#define CV_CAP_PROP_EXPOSURE      15
-#define CV_CAP_PROP_CONVERT_RGB   16
-#define CV_CAP_PROP_RECTIFICATION 18
-#define CV_CN_SHIFT   3
-#define CV_IMWRITE_JPEG_QUALITY 1
-#define CV_IMWRITE_PNG_COMPRESSION 16
-#define CV_IMWRITE_PXM_BINARY 32
-#define IPL_ORIGIN_TL 0
-#define IPL_ORIGIN_BL 1
-#define CV_GAUSSIAN_5x5
-#define CV_CN_MAX
-#define CV_WINDOW_AUTOSIZE  1
-#define CV_WINDOW_NORMAL	 	 0
-#define CV_WINDOW_FULLSCREEN	 1
-#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
-#define CV_CVTIMG_FLIP  1
-#define CV_CVTIMG_SWAP_RB 2
diff --git a/modules/python/src/gen.py b/modules/python/src/gen.py
deleted file mode 100644
index 1080a58cb..000000000
--- a/modules/python/src/gen.py
+++ /dev/null
@@ -1,631 +0,0 @@
-import sys
-from string import Template
-
-class argument:
-  def __init__(self, fields):
-    self.ty = fields[0]
-    self.nm = fields[1]
-    self.flags = ""
-    self.init = None
-
-    if len(fields) > 2:
-      if fields[2][0] == '/':
-        self.flags = fields[2][1:].split(",")
-      else:
-        self.init = fields[2]
-
-api = []
-for l in open("%s/api" % sys.argv[1]):
-  if l[0] == '#':
-    continue
-  l = l.rstrip()
-  if (not l.startswith(' ')) and ('/' in l):
-    (l, flags) = l.split('/')
-  else:
-    flags = ""
-  f = l.split()
-  if len(f) != 0:
-    if l[0] != ' ':
-      if len(f) > 1:
-        ty = f[1]
-      else:
-        ty = None
-      api.append((f[0], [], ty, flags))
-    else:
-      api[-1][1].append(argument(f))
-
-# Validation: check that any optional arguments are last
-had_error = False
-for (f, args, ty, flags) in api:
-    if f == 'PolarToCart':
-        print f, [(a.init != None) for a in args]
-    has_init = [(a.init != None) for a in args if not 'O' in a.flags]
-    if True in has_init and not all(has_init[has_init.index(True):]):
-        print 'Error in definition for "%s", optional arguments must be last' % f
-        had_error = True
-
-if had_error:
-    sys.exit(1)
-
-def cname(n):
-  if n.startswith("CV"):
-    return '_' + n
-  elif n[0].isdigit():
-    return '_' + n
-  else:
-    return n
-
-# RHS is how the aggregate gets expanded in the C call
-aggregate = {
-  'pts_npts_contours' :  '!.pts,!.npts,!.contours',
-  'cvarr_count' :        '!.cvarr,!.count',
-  'cvarr_plane_count' :  '!.cvarr,!.count',
-  'floats' :             '!.f',
-  'ints' :               '!.i',
-  'ints0' :              '!.i',
-  'CvPoints' :           '!.p,!.count',
-  'CvPoint2D32fs' :      '!.p,!.count',
-  'CvPoint3D32fs' :      '!.p,!.count',
-  'cvarrseq' :           '!.seq',
-  'CvArrs' :             '!.ims',
-  'IplImages' :          '!.ims',
-  'intpair' :            '!.pairs,!.count',
-  'cvpoint2d32f_count' : '!.points,&!.count'
-}
-conversion_types = [
-'char',
-'CvArr',
-'CvArrSeq',
-'CvBox2D', # '((ff)(ff)f)',
-'CvBox2D*',
-'CvCapture*',
-'CvStereoBMState*',
-'CvStereoGCState*',
-'CvKalman*',
-'CvVideoWriter*',
-'CvContourTree*',
-'CvFont',
-'CvFont*',
-'CvHaarClassifierCascade*',
-'CvHistogram',
-'CvMat',
-'CvMatND',
-'CvMemStorage',
-'CvMoments',
-'CvMoments*',
-'CvNextEdgeType',
-'CvPoint',
-'CvPoint*',
-'CvPoint2D32f', # '(ff)',
-'CvPoint2D32f*',
-'CvPoint3D32f*',
-'CvPoint2D64f',
-'CvPOSITObject*',
-'CvRect',
-'CvRect*',
-'CvRNG*',
-'CvScalar',
-'CvSeq',
-'CvSeqOfCvConvexityDefect',
-'CvSize',
-'CvSlice',
-'CvStarDetectorParams',
-'CvSubdiv2D*',
-'CvSubdiv2DEdge',
-'CvTermCriteria',
-'generic',
-'IplConvKernel*',
-'IplImage',
-'PyObject*',
-'PyCallableObject*'
-]
-
-def safename(s):
-  return s.replace('*', 'PTR').replace('[', '_').replace(']', '_')
-
-def has_optional(al):
-    """ return true if any argument is optional """
-    return any([a.init for a in al])
-
-def gen(name, args, ty, flags):
-  yield ""
-  if has_optional(args):
-      yield "static PyObject *pycv%s(PyObject *self, PyObject *args, PyObject *kw)" % cname(name) 
-  else:
-      yield "static PyObject *pycv%s(PyObject *self, PyObject *args)" % cname(name)
-  if 'doconly' in flags:
-    yield ";"
-  else:
-    yield "{"
-
-    destinations = []
-    for a in args:
-      remap = {
-       'CvArr' : 'CvArr*',
-       'CvMat' : 'CvMat*',
-       'CvMatND' : 'CvMatND*',
-       'IplImage' : 'IplImage*',
-       'CvMemStorage' : 'CvMemStorage*',
-       'CvHistogram':'CvHistogram*',
-       'CvSeq':'CvSeq*',
-       'CvHaarClassifierCascade' : 'CvHaarClassifierCascade*'
-      }
-      ctype = remap.get(a.ty, a.ty)
-      if a.init:
-        init = " = %s" % a.init
-      else:
-        init = ''
-      yield "  %s %s%s;" % (ctype, a.nm, init)
-      if 'O' in a.flags:
-        continue
-      if a.ty in (conversion_types + aggregate.keys()):
-        yield '  PyObject *pyobj_%s = NULL;' % (a.nm)
-        destinations.append('&pyobj_%s' % (a.nm))
-      elif a.ty in [ 'CvPoint2D32f' ]:
-        destinations.append('&%s.x, &%s.y' % (a.nm, a.nm))
-      elif a.ty in [ 'CvTermCriteria' ]:
-        destinations.append('&%s.type, &%s.max_iter, &%s.epsilon' % ((a.nm,)*3))
-      elif a.ty in [ 'CvSURFParams' ]:
-        destinations.append('&%s.extended, &%s.hessianThreshold, &%s.nOctaves, &%s.nOctaveLayers' % ((a.nm,)*4))
-      elif a.nm in [ 'CvBox2D' ]:
-        s = ", ".join([('&' + a.nm +'.' + fld) for fld in [ 'center.x', 'center.y', 'size.width', 'size.height', 'angle' ] ])
-        destinations.append(s)
-      else:
-        destinations.append('&%s' % a.nm)
-    fmap = {
-      'CvSURFParams' : '(idii)',
-      'double' : 'd',
-      'float' : 'f',
-      'int' : 'i',
-      'int64' : 'L',
-      'char*' : 's',
-    }
-    for k in (conversion_types + aggregate.keys()):
-      fmap[k] = 'O'
-    in_args = [ a for a in args if not 'O' in a.flags ]
-    fmt0 = "".join([ fmap[a.ty] for a in in_args if not a.init])
-    fmt1 = "".join([ fmap[a.ty] for a in in_args if a.init])
-        
-    yield ''
-    if len(fmt0 + fmt1) > 0:
-      if len(fmt1) > 0:
-        yield '  const char *keywords[] = { %s };' % (", ".join([ '"%s"' % arg.nm for arg in args if not 'O' in arg.flags ] + ['NULL']))
-        yield '  if (!PyArg_ParseTupleAndKeywords(args, kw, "%s|%s", %s))' % (fmt0, fmt1, ", ".join(['(char**)keywords'] + destinations))
-        if '(' in (fmt0 + fmt1):
-          print "Tuple with kwargs is not allowed, function", name
-          sys.exit(1)
-      else:
-        yield '  if (!PyArg_ParseTuple(args, "%s", %s))' % (fmt0, ", ".join(destinations))
-      yield '    return NULL;'
-
-    # Do the conversions:
-    for a in args:
-      joinwith = [f[2:] for f in a.flags if f.startswith("J:")]
-      if len(joinwith) > 0:
-        yield 'preShareData(%s, &%s);' % (joinwith[0], a.nm)
-      if 'O' in a.flags:
-        continue
-      if a.ty in (conversion_types + aggregate.keys()):
-        if a.init:
-          pred = '(pyobj_%s != NULL) && ' % a.nm
-        else:
-          pred = ''
-        yield '  if (%s!convert_to_%s(pyobj_%s, &%s, "%s")) return NULL;' % (pred, safename(a.ty), a.nm, a.nm, a.nm)
-
-    yield '#ifdef CVPY_VALIDATE_%s' % name
-    yield 'CVPY_VALIDATE_%s();' % name
-    yield '#endif'
-
-    def invokename(a):
-      if 'K' in a.flags:
-        prefix = "(const CvArr **)"
-      elif 'O' in a.flags and not 'A' in a.flags:
-        prefix = "&"
-      else:
-        prefix = ""
-      if a.ty in aggregate:
-        return prefix + aggregate[a.ty].replace('!', a.nm)
-      else:
-        return prefix + a.nm
-
-    def funcname(s):
-      # The name by which the function is called, in C
-      if s.startswith("CV"):
-        return s
-      else:
-        return "cv" + s
-    tocall = '%s(%s)' % (funcname(name), ", ".join(invokename(a) for a in args))
-    if 'stub' in flags:
-      yield '  return stub%s(%s);' % (name, ", ".join(invokename(a) for a in args))
-    elif ty == None:
-      yield '  ERRWRAP(%s);' % tocall
-      yield '  Py_RETURN_NONE;'
-    else:
-      Rtypes = [
-        'int',
-        'int64',
-        'double',
-        'CvCapture*',
-        'CvVideoWriter*',
-        'CvPOSITObject*',
-        'CvScalar',
-        'CvSize',
-        'CvRect',
-        'CvSeq*',
-        'CvBox2D',
-        'CvSeqOfCvAvgComp*',
-        'CvSeqOfCvConvexityDefect*',
-        'CvSeqOfCvStarKeypoint*',
-        'CvSeqOfCvSURFPoint*',
-        'CvSeqOfCvSURFDescriptor*',
-        'CvContourTree*',
-        'IplConvKernel*',
-        'IplImage*',
-        'CvMat*',
-        'constCvMat*',
-        'ROCvMat*',
-        'CvMatND*',
-        'CvPoint2D32f_4',
-        'CvRNG',
-        'CvSubdiv2D*',
-        'CvSubdiv2DPoint*',
-        'CvSubdiv2DEdge',
-        'ROIplImage*',
-        'CvStereoBMState*',
-        'CvStereoGCState*',
-        'CvKalman*',
-        'float',
-        'generic',
-        'unsigned' ]
-
-      if ty in Rtypes:
-        yield '  %s r;' % (ty)
-        yield '  ERRWRAP(r = %s);' % (tocall)
-        yield '  return FROM_%s(r);' % safename(ty)
-      else:
-        all_returns = ty.split(",")
-        return_value_from_call = len(set(Rtypes) & set(all_returns)) != 0
-        if return_value_from_call:
-          yield '  %s r;' % list(set(Rtypes) & set(all_returns))[0]
-          yield '  ERRWRAP(r = %s);' % (tocall)
-        else:
-          yield '  ERRWRAP(%s);' % (tocall)
-        typed = dict([ (a.nm,a.ty) for a in args])
-        for i in range(len(all_returns)):
-          if all_returns[i] in Rtypes:
-            typed['r'] = all_returns[i]
-            all_returns[i] = "r"
-        if len(all_returns) == 1:
-          af = dict([ (a.nm,a.flags) for a in args])
-          joinwith = [f[2:] for f in af.get(all_returns[0], []) if f.startswith("J:")]
-          if len(joinwith) > 0:
-              yield '  return shareData(pyobj_%s, %s, %s);' % (joinwith[0], joinwith[0], all_returns[0])
-          else:
-              yield '  return FROM_%s(%s);' % (safename(typed[all_returns[0]]), all_returns[0])
-        else:
-          yield '  return Py_BuildValue("%s", %s);' % ("N" * len(all_returns), ", ".join(["FROM_%s(%s)" % (safename(typed[n]), n) for n in all_returns]))
-
-    yield '}'
-
-gen_c = [ open("generated%d.i" % i, "w") for i in range(5) ]
-
-print "Generated %d functions" % len(api)
-for nm,args,ty,flags in sorted(api):
-
-  # Figure out docstring into ds_*
-  ds_args = []
-  mandatory = [a.nm for a in args if not ('O' in a.flags) and not a.init]
-  optional = [a.nm for a in args if not ('O' in a.flags) and a.init]
-  ds_args = ", ".join(mandatory)
-  def o2s(o):
-    if o == []:
-        return ""
-    else:
-        return ' [, %s%s]' % (o[0], o2s(o[1:]))
-  ds_args += o2s(optional)
-
-  ds = "%s(%s) -> %s" % (nm, ds_args, str(ty))
-  print ds
-
-  if has_optional(args):
-      entry = '{"%%s", (PyCFunction)pycv%s, METH_KEYWORDS, "%s"},' % (cname(nm), ds)
-  else:
-      entry = '{"%%s", pycv%s, METH_VARARGS, "%s"},' % (cname(nm), ds)
-  print >>gen_c[1], entry % (nm)
-  if nm.startswith('CV_'):
-    print >>gen_c[1], entry % (nm[3:])
-  for l in gen(nm,args,ty,flags):
-    print >>gen_c[0], l
-
-for l in open("%s/defs" % sys.argv[1]):
-  print >>gen_c[2], "PUBLISH(%s);" % l.split()[1]
-
-########################################################################
-# Generated objects.
-########################################################################
-
-# gen_c[3] is the code, gen_c[4] initializers
-
-gensimple = Template("""
-/*
-  ${cvtype} is the OpenCV C struct
-  ${ourname}_t is the Python object
-*/
-
-struct ${ourname}_t {
-  PyObject_HEAD
-  ${cvtype} v;
-};
-
-static PyObject *${ourname}_repr(PyObject *self)
-{
-  ${ourname}_t *p = (${ourname}_t*)self;
-  char str[1000];
-  sprintf(str, "<${ourname} %p>", p);
-  return PyString_FromString(str);
-}
-
-${getset_funcs}
-
-static PyGetSetDef ${ourname}_getseters[] = {
-
-  ${getset_inits}
-  {NULL}  /* Sentinel */
-};
-
-static PyTypeObject ${ourname}_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".${ourname}",              /*name*/
-  sizeof(${ourname}_t),                /*basicsize*/
-};
-
-static void ${ourname}_specials(void)
-{
-  ${ourname}_Type.tp_repr = ${ourname}_repr;
-  ${ourname}_Type.tp_getset = ${ourname}_getseters;
-}
-
-static PyObject *FROM_${cvtype}(${cvtype} r)
-{
-  ${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type);
-  m->v = r;
-  return (PyObject*)m;
-}
-
-static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name")
-{
-  ${allownull}
-  if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) {
-    *dst = &(((${ourname}_t*)o)->v);
-    return 1;
-  } else {
-    (*dst) = (${cvtype}*)NULL;
-    return failmsg("Expected ${cvtype} for argument '%s'", name);
-  }
-}
-
-""")
-
-genptr = Template("""
-/*
-  ${cvtype} is the OpenCV C struct
-  ${ourname}_t is the Python object
-*/
-
-struct ${ourname}_t {
-  PyObject_HEAD
-  ${cvtype} *v;
-};
-
-static void ${ourname}_dealloc(PyObject *self)
-{
-  ${ourname}_t *p = (${ourname}_t*)self;
-  cvRelease${ourname}(&p->v);
-  PyObject_Del(self);
-}
-
-static PyObject *${ourname}_repr(PyObject *self)
-{
-  ${ourname}_t *p = (${ourname}_t*)self;
-  char str[1000];
-  sprintf(str, "<${ourname} %p>", p);
-  return PyString_FromString(str);
-}
-
-${getset_funcs}
-
-static PyGetSetDef ${ourname}_getseters[] = {
-
-  ${getset_inits}
-  {NULL}  /* Sentinel */
-};
-
-static PyTypeObject ${ourname}_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                      /*size*/
-  MODULESTR".${ourname}",              /*name*/
-  sizeof(${ourname}_t),                /*basicsize*/
-};
-
-static void ${ourname}_specials(void)
-{
-  ${ourname}_Type.tp_dealloc = ${ourname}_dealloc;
-  ${ourname}_Type.tp_repr = ${ourname}_repr;
-  ${ourname}_Type.tp_getset = ${ourname}_getseters;
-}
-
-static PyObject *FROM_${cvtype}PTR(${cvtype} *r)
-{
-  ${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type);
-  m->v = r;
-  return (PyObject*)m;
-}
-
-static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name")
-{
-  ${allownull}
-  if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) {
-    *dst = ((${ourname}_t*)o)->v;
-    return 1;
-  } else {
-    (*dst) = (${cvtype}*)NULL;
-    return failmsg("Expected ${cvtype} for argument '%s'", name);
-  }
-}
-
-""")
-
-getset_func_template = Template("""
-static PyObject *${ourname}_get_${member}(${ourname}_t *p, void *closure)
-{
-  return ${rconverter}(p->v${accessor}${member});
-}
-
-static int ${ourname}_set_${member}(${ourname}_t *p, PyObject *value, void *closure)
-{
-  if (value == NULL) {
-    PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
-    return -1;
-  }
-
-  if (! ${checker}(value)) {
-    PyErr_SetString(PyExc_TypeError, "The ${member} attribute value must be a ${typename}");
-    return -1;
-  }
-
-  p->v${accessor}${member} = ${converter}(value);
-  return 0;
-}
-
-""")
-
-getset_init_template = Template("""
-  {(char*)"${member}", (getter)${ourname}_get_${member}, (setter)${ourname}_set_${member}, (char*)"${member}", NULL},
-""")
-
-objects = [
-    ( 'IplConvKernel', ['allownull'], {
-        "nCols" : 'i',
-        "nRows" : 'i',
-        "anchorX" : 'i',
-        "anchorY" : 'i',
-    }),
-    ( 'CvCapture', [], {}),
-    ( 'CvHaarClassifierCascade', [], {}),
-    ( 'CvPOSITObject', [], {}),
-    ( 'CvVideoWriter', [], {}),
-    ( 'CvStereoBMState', [], {
-        "preFilterType" : 'i',
-        "preFilterSize" : 'i',
-        "preFilterCap" : 'i',
-        "SADWindowSize" : 'i',
-        "minDisparity" : 'i',
-        "numberOfDisparities" : 'i',
-        "textureThreshold" : 'i',
-        "uniquenessRatio" : 'i',
-        "speckleWindowSize" : 'i',
-        "speckleRange" : 'i',
-    }),
-    ( 'CvStereoGCState', [], {
-        "Ithreshold" : 'i',
-        "interactionRadius" : 'i',
-        "K" : 'f',
-        "lambda" : 'f',
-        "lambda1" : 'f',
-        "lambda2" : 'f',
-        "occlusionCost" : 'i',
-        "minDisparity" : 'i',
-        "numberOfDisparities" : 'i',
-        "maxIters" : 'i',
-    }),
-    ( 'CvKalman', [], {
-        "MP" : 'i',
-        "DP" : 'i',
-        "CP" : 'i',
-        "state_pre" : 'mr',
-        "state_post" : 'mr',
-        "transition_matrix" : 'mr',
-        "control_matrix" : 'mr',
-        "measurement_matrix" : 'mr',
-        "control_matrix" : 'mr',
-        "process_noise_cov" : 'mr',
-        "measurement_noise_cov" : 'mr',
-        "error_cov_pre" : 'mr',
-        "gain" : 'mr',
-        "error_cov_post" : 'mr',
-    }),
-    ( 'CvMoments', ['copy'], {
-        "m00" : 'f',
-        "m10" : 'f',
-        "m01" : 'f',
-        "m20" : 'f',
-        "m11" : 'f',
-        "m02" : 'f',
-        "m30" : 'f',
-        "m21" : 'f',
-        "m12" : 'f',
-        "m03" : 'f',
-        "mu20" : 'f',
-        "mu11" : 'f',
-        "mu02" : 'f',
-        "mu30" : 'f',
-        "mu21" : 'f',
-        "mu12" : 'f',
-        "mu03" : 'f',
-        "inv_sqrt_m00" : 'f',
-    }),
-]
-
-checkers = {
-    'i' : 'PyNumber_Check',
-    'f' : 'PyNumber_Check',
-    'm' : 'is_cvmat',
-    'mr' : 'is_cvmat'
-}
-# Python -> C
-converters = {
-    'i' : 'PyInt_AsLong',
-    'f' : 'PyFloat_AsDouble',
-    'm' : 'PyCvMat_AsCvMat',
-    'mr' : 'PyCvMat_AsCvMat'
-}
-# C -> Python
-rconverters = {
-    'i' : 'PyInt_FromLong',
-    'f' : 'PyFloat_FromDouble',
-    'm' : 'FROM_CvMat',
-    'mr' : 'FROM_ROCvMatPTR'
-}
-# Human-readable type names
-typenames = {
-    'i' : 'integer',
-    'f' : 'float',
-    'm' : 'list of CvMat',
-    'mr' : 'list of CvMat',
-}
-
-for (t, flags, members) in objects:
-    map = {'cvtype' : t,
-           'ourname' : t.replace('Cv', '')}
-    # gsf is all the generated code for the member accessors
-    if 'copy' in flags:
-        a = '.'
-    else:
-        a = '->'
-    gsf = "".join([getset_func_template.substitute(map, accessor = a, member = m, checker = checkers[t], converter = converters[t], rconverter = rconverters[t], typename = typenames[t]) for (m, t) in members.items()])
-    # gsi is the generated code for the initializer for each accessor
-    gsi = "".join([getset_init_template.substitute(map, member = m) for (m, t) in members.items()])
-    # s is the template that pulls everything together
-    if 'allownull' in flags:
-        nullcode = """if (o == Py_None) { *dst = (%s*)NULL; return 1; }""" % map['cvtype']
-    else:
-        nullcode = ""
-    if 'copy' in flags:
-        print >>gen_c[3], gensimple.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode)
-    else:
-        print >>gen_c[3], genptr.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode)
-    print >>gen_c[4], "MKTYPE(%s);" % map['ourname']
-
-for f in gen_c:
-  f.close()
diff --git a/modules/python/src/opencv2x.h b/modules/python/src2/cv2.cpp
similarity index 85%
rename from modules/python/src/opencv2x.h
rename to modules/python/src2/cv2.cpp
index 271118236..b40c65710 100644
--- a/modules/python/src/opencv2x.h
+++ b/modules/python/src2/cv2.cpp
@@ -1,5 +1,12 @@
-#ifndef OPENCV2X_PYTHON_WRAPPERS
-#define OPENCV2X_PYTHON_WRAPPERS
+#include <Python.h>
+
+#if !PYTHON_USE_NUMPY
+#error "The module can only be built if NumPy is available"
+#endif
+
+#define MODULESTR "cv2"
+
+#include "numpy/ndarrayobject.h"
 
 #include "opencv2/core/core.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
@@ -12,6 +19,21 @@
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv_extra_api.hpp"
 
+static PyObject* opencv_error = 0;
+
+static int failmsg(const char *fmt, ...)
+{
+    char str[1000];
+    
+    va_list ap;
+    va_start(ap, fmt);
+    vsnprintf(str, sizeof(str), fmt, ap);
+    va_end(ap);
+    
+    PyErr_SetString(PyExc_TypeError, str);
+    return 0;
+}
+
 #define ERRWRAP2(expr) \
 try \
 { \
@@ -693,4 +715,131 @@ static inline PyObject* pyopencv_from(const CvDTreeNode* node)
     return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
 }
 
+#define MKTYPE2(NAME) pyopencv_##NAME##_specials(); if (!to_ok(&pyopencv_##NAME##_Type)) return
+
+#include "pyopencv_generated_types.h"
+#include "pyopencv_generated_funcs.h"
+
+static PyMethodDef methods[] = {
+
+#include "pyopencv_generated_func_tab.h"
+
+  {NULL, NULL},
+};
+
+/************************************************************************/
+/* Module init */
+
+static int to_ok(PyTypeObject *to)
+{
+  to->tp_alloc = PyType_GenericAlloc;
+  to->tp_new = PyType_GenericNew;
+  to->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+  return (PyType_Ready(to) == 0);
+}
+
+extern "C"
+#if defined WIN32 || defined _WIN32
+__declspec(dllexport)
 #endif
+
+void initcv2()
+{
+#if PYTHON_USE_NUMPY
+    import_array();
+#endif
+    
+#if PYTHON_USE_NUMPY
+#include "pyopencv_generated_type_reg.h"
+#endif
+
+  PyObject* m = Py_InitModule(MODULESTR"", methods);
+  PyObject* d = PyModule_GetDict(m);
+
+  PyDict_SetItemString(d, "__version__", PyString_FromString("$Rev: 4557 $"));
+
+  opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
+  PyDict_SetItemString(d, "error", opencv_error);
+
+  // AFAIK the only floating-point constant
+  PyDict_SetItemString(d, "CV_PI", PyFloat_FromDouble(CV_PI));
+
+#define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
+#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
+#define PUBLISH2(I, value) PyDict_SetItemString(d, #I, PyLong_FromLong(value))
+
+  PUBLISHU(IPL_DEPTH_8U);
+  PUBLISHU(IPL_DEPTH_8S);
+  PUBLISHU(IPL_DEPTH_16U);
+  PUBLISHU(IPL_DEPTH_16S);
+  PUBLISHU(IPL_DEPTH_32S);
+  PUBLISHU(IPL_DEPTH_32F);
+  PUBLISHU(IPL_DEPTH_64F);
+
+  PUBLISH(CV_LOAD_IMAGE_COLOR);
+  PUBLISH(CV_LOAD_IMAGE_GRAYSCALE);
+  PUBLISH(CV_LOAD_IMAGE_UNCHANGED);
+  PUBLISH(CV_HIST_ARRAY);
+  PUBLISH(CV_HIST_SPARSE);
+  PUBLISH(CV_8U);
+  PUBLISH(CV_8UC1);
+  PUBLISH(CV_8UC2);
+  PUBLISH(CV_8UC3);
+  PUBLISH(CV_8UC4);
+  PUBLISH(CV_8S);
+  PUBLISH(CV_8SC1);
+  PUBLISH(CV_8SC2);
+  PUBLISH(CV_8SC3);
+  PUBLISH(CV_8SC4);
+  PUBLISH(CV_16U);
+  PUBLISH(CV_16UC1);
+  PUBLISH(CV_16UC2);
+  PUBLISH(CV_16UC3);
+  PUBLISH(CV_16UC4);
+  PUBLISH(CV_16S);
+  PUBLISH(CV_16SC1);
+  PUBLISH(CV_16SC2);
+  PUBLISH(CV_16SC3);
+  PUBLISH(CV_16SC4);
+  PUBLISH(CV_32S);
+  PUBLISH(CV_32SC1);
+  PUBLISH(CV_32SC2);
+  PUBLISH(CV_32SC3);
+  PUBLISH(CV_32SC4);
+  PUBLISH(CV_32F);
+  PUBLISH(CV_32FC1);
+  PUBLISH(CV_32FC2);
+  PUBLISH(CV_32FC3);
+  PUBLISH(CV_32FC4);
+  PUBLISH(CV_64F);
+  PUBLISH(CV_64FC1);
+  PUBLISH(CV_64FC2);
+  PUBLISH(CV_64FC3);
+  PUBLISH(CV_64FC4);
+  PUBLISH(CV_NEXT_AROUND_ORG);
+  PUBLISH(CV_NEXT_AROUND_DST);
+  PUBLISH(CV_PREV_AROUND_ORG);
+  PUBLISH(CV_PREV_AROUND_DST);
+  PUBLISH(CV_NEXT_AROUND_LEFT);
+  PUBLISH(CV_NEXT_AROUND_RIGHT);
+  PUBLISH(CV_PREV_AROUND_LEFT);
+  PUBLISH(CV_PREV_AROUND_RIGHT);
+
+  PUBLISH(CV_WINDOW_AUTOSIZE);
+
+  PUBLISH(CV_PTLOC_INSIDE);
+  PUBLISH(CV_PTLOC_ON_EDGE);
+  PUBLISH(CV_PTLOC_VERTEX);
+  PUBLISH(CV_PTLOC_OUTSIDE_RECT);
+
+  PUBLISH(GC_BGD);
+  PUBLISH(GC_FGD);
+  PUBLISH(GC_PR_BGD);
+  PUBLISH(GC_PR_FGD);
+  PUBLISH(GC_INIT_WITH_RECT);
+  PUBLISH(GC_INIT_WITH_MASK);
+  PUBLISH(GC_EVAL);
+
+#include "pyopencv_generated_const_reg.h"
+}
+
diff --git a/modules/python/src/gen2.py b/modules/python/src2/gen2.py
similarity index 100%
rename from modules/python/src/gen2.py
rename to modules/python/src2/gen2.py
diff --git a/modules/python/src/hdr_parser.py b/modules/python/src2/hdr_parser.py
similarity index 100%
rename from modules/python/src/hdr_parser.py
rename to modules/python/src2/hdr_parser.py
diff --git a/modules/python/src/opencv_extra_api.hpp b/modules/python/src2/opencv_extra_api.hpp
similarity index 100%
rename from modules/python/src/opencv_extra_api.hpp
rename to modules/python/src2/opencv_extra_api.hpp
diff --git a/modules/python/test/camera_calibration.py b/modules/python/test/camera_calibration.py
new file mode 100644
index 000000000..8952e3cc2
--- /dev/null
+++ b/modules/python/test/camera_calibration.py
@@ -0,0 +1,358 @@
+import sys
+import math
+import time
+import random
+
+import numpy
+import transformations
+import cv
+
+def clamp(a, x, b):
+    return numpy.maximum(a, numpy.minimum(x, b))
+
+def norm(v):
+    mag = numpy.sqrt(sum([e * e for e in v]))
+    return v / mag
+
+class Vec3:
+    def __init__(self, x, y, z):
+        self.v = (x, y, z)
+    def x(self):
+        return self.v[0]
+    def y(self):
+        return self.v[1]
+    def z(self):
+        return self.v[2]
+    def __repr__(self):
+        return "<Vec3 (%s,%s,%s)>" % tuple([repr(c) for c in self.v])
+    def __add__(self, other):
+        return Vec3(*[self.v[i] + other.v[i] for i in range(3)])
+    def __sub__(self, other):
+        return Vec3(*[self.v[i] - other.v[i] for i in range(3)])
+    def __mul__(self, other):
+        if isinstance(other, Vec3):
+            return Vec3(*[self.v[i] * other.v[i] for i in range(3)])
+        else:
+            return Vec3(*[self.v[i] * other for i in range(3)])
+    def mag2(self):
+        return sum([e * e for e in self.v])
+    def __abs__(self):
+        return numpy.sqrt(sum([e * e for e in self.v]))
+    def norm(self):
+        return self * (1.0 / abs(self))
+    def dot(self, other):
+        return sum([self.v[i] * other.v[i] for i in range(3)])
+    def cross(self, other):
+        (ax, ay, az) = self.v
+        (bx, by, bz) = other.v
+        return Vec3(ay * bz - by * az, az * bx - bz * ax, ax * by - bx * ay)
+
+
+class Ray:
+
+    def __init__(self, o, d):
+        self.o = o
+        self.d = d
+
+    def project(self, d):
+        return self.o + self.d * d
+
+class Camera:
+
+    def __init__(self, F):
+        R = Vec3(1., 0., 0.)
+        U = Vec3(0, 1., 0)
+        self.center = Vec3(0, 0, 0)
+        self.pcenter = Vec3(0, 0, F)
+        self.up = U
+        self.right = R
+
+    def genray(self, x, y):
+        """ -1 <= y <= 1 """
+        r = numpy.sqrt(x * x + y * y)
+        if 0:
+            rprime = r + (0.17 * r**2)
+        else:
+            rprime = (10 * numpy.sqrt(17 * r + 25) - 50) / 17
+        print "scale", rprime / r
+        x *= rprime / r
+        y *= rprime / r
+        o = self.center
+        r = (self.pcenter + (self.right * x) + (self.up * y)) - o
+        return Ray(o, r.norm())
+
+class Sphere:
+
+    def __init__(self, center, radius):
+        self.center = center
+        self.radius = radius
+
+    def hit(self, r):
+        # a = mag2(r.d)
+        a = 1.
+        v = r.o - self.center
+        b = 2 * r.d.dot(v)
+        c = self.center.mag2() + r.o.mag2() + -2 * self.center.dot(r.o) - (self.radius ** 2)
+        det = (b * b) - (4 * c)
+        pred = 0 < det
+
+        sq = numpy.sqrt(abs(det))
+        h0 = (-b - sq) / (2)
+        h1 = (-b + sq) / (2)
+
+        h = numpy.minimum(h0, h1)
+
+        pred = pred & (h > 0)
+        normal = (r.project(h) - self.center) * (1.0 / self.radius)
+        return (pred, numpy.where(pred, h, 999999.), normal)
+
+def pt2plane(p, plane):
+    return p.dot(plane) * (1. / abs(plane))
+
+class Plane:
+
+    def __init__(self, p, n, right):
+        self.D = -pt2plane(p, n)
+        self.Pn = n
+        self.right = right
+        self.rightD = -pt2plane(p, right)
+        self.up = n.cross(right)
+        self.upD = -pt2plane(p, self.up)
+
+    def hit(self, r):
+        Vd = self.Pn.dot(r.d)
+        V0 = -(self.Pn.dot(r.o) + self.D)
+        h = V0 / Vd
+        pred = (0 <= h)
+
+        return (pred, numpy.where(pred, h, 999999.), self.Pn)
+
+    def localxy(self, loc):
+        x = (loc.dot(self.right) + self.rightD)
+        y = (loc.dot(self.up) + self.upD)
+        return (x, y)
+
+# lena = numpy.fromstring(cv.LoadImage("../samples/c/lena.jpg", 0).tostring(), numpy.uint8) / 255.0
+
+def texture(xy):
+    x,y = xy
+    xa = numpy.floor(x * 512)
+    ya = numpy.floor(y * 512)
+    a = (512 * ya) + xa
+    safe = (0 <= x) & (0 <= y) & (x < 1) & (y < 1)
+    if 0:
+        a = numpy.where(safe, a, 0).astype(numpy.int)
+        return numpy.where(safe, numpy.take(lena, a), 0.0)
+    else:
+        xi = numpy.floor(x * 11).astype(numpy.int)
+        yi = numpy.floor(y * 11).astype(numpy.int)
+        inside = (1 <= xi) & (xi < 10) & (2 <= yi) & (yi < 9)
+        checker = (xi & 1) ^ (yi & 1)
+        final = numpy.where(inside, checker, 1.0)
+        return numpy.where(safe, final, 0.5)
+
+def under(vv, m):
+    return Vec3(*(numpy.dot(m, vv.v + (1,))[:3]))
+
+class Renderer:
+
+    def __init__(self, w, h, oversample):
+        self.w = w
+        self.h = h
+
+        random.seed(1)
+        x = numpy.arange(self.w*self.h) % self.w
+        y = numpy.floor(numpy.arange(self.w*self.h) / self.w)
+        h2 = h / 2.0
+        w2 = w / 2.0
+        self.r = [ None ] * oversample
+        for o in range(oversample):
+            stoch_x = numpy.random.rand(self.w * self.h)
+            stoch_y = numpy.random.rand(self.w * self.h)
+            nx = (x + stoch_x - 0.5 - w2) / h2
+            ny = (y + stoch_y - 0.5 - h2) / h2
+            self.r[o] = cam.genray(nx, ny)
+
+        self.rnds = [random.random() for i in range(10)]
+
+    def frame(self, i):
+
+        rnds = self.rnds
+        roll = math.sin(i * .01 * rnds[0] + rnds[1])
+        pitch = math.sin(i * .01 * rnds[2] + rnds[3])
+        yaw = math.pi * math.sin(i * .01 * rnds[4] + rnds[5])
+        x = math.sin(i * 0.01 * rnds[6])
+        y = math.sin(i * 0.01 * rnds[7])
+
+        x,y,z = -0.5,0.5,1
+        roll,pitch,yaw = (0,0,0)
+
+        z = 4 + 3 * math.sin(i * 0.1 * rnds[8])
+        print z
+
+        rz = transformations.euler_matrix(roll, pitch, yaw)
+        p = Plane(Vec3(x, y, z), under(Vec3(0,0,-1), rz), under(Vec3(1, 0, 0), rz))
+
+        acc = 0
+        for r in self.r:
+            (pred, h, norm) = p.hit(r)
+            l = numpy.where(pred, texture(p.localxy(r.project(h))), 0.0)
+            acc += l
+        acc *= (1.0 / len(self.r))
+
+        # print "took", time.time() - st
+
+        img = cv.CreateMat(self.h, self.w, cv.CV_8UC1)
+        cv.SetData(img, (clamp(0, acc, 1) * 255).astype(numpy.uint8).tostring(), self.w)
+        return img
+
+#########################################################################
+
+num_x_ints = 8
+num_y_ints = 6
+num_pts = num_x_ints * num_y_ints
+
+def get_corners(mono, refine = False):
+    (ok, corners) = cv.FindChessboardCorners(mono, (num_x_ints, num_y_ints), cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE)
+    if refine and ok:
+        corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 ))
+    return (ok, corners)
+
+def mk_object_points(nimages, squaresize = 1):
+    opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
+    for i in range(nimages):
+        for j in range(num_pts):
+            opts[i * num_pts + j, 0] = (j / num_x_ints) * squaresize
+            opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize
+            opts[i * num_pts + j, 2] = 0
+    return opts
+
+def mk_image_points(goodcorners):
+    ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1)
+    for (i, co) in enumerate(goodcorners):
+        for j in range(num_pts):
+            ipts[i * num_pts + j, 0] = co[j][0]
+            ipts[i * num_pts + j, 1] = co[j][1]
+    return ipts
+
+def mk_point_counts(nimages):
+    npts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
+    for i in range(nimages):
+        npts[i, 0] = num_pts
+    return npts
+
+def cvmat_iterator(cvmat):
+    for i in range(cvmat.rows):
+        for j in range(cvmat.cols):
+            yield cvmat[i,j]
+
+cam = Camera(3.0)
+rend = Renderer(640, 480, 2)
+cv.NamedWindow("snap")
+
+#images = [rend.frame(i) for i in range(0, 2000, 400)]
+images = [rend.frame(i) for i in [1200]]
+
+if 0:
+    for i,img in enumerate(images):
+        cv.SaveImage("final/%06d.png" % i, img)
+
+size = cv.GetSize(images[0])
+corners = [get_corners(i) for i in images]
+
+goodcorners = [co for (im, (ok, co)) in zip(images, corners) if ok]
+
+def checkerboard_error(xformed):
+    def pt2line(a, b, c):
+        x0,y0 = a
+        x1,y1 = b
+        x2,y2 = c
+        return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+    errorsum = 0.
+    for im in xformed:
+        for row in range(6):
+            l0 = im[8 * row]
+            l1 = im[8 * row + 7]
+            for col in range(1, 7):
+                e = pt2line(im[8 * row + col], l0, l1)
+                #print "row", row, "e", e
+                errorsum += e
+
+    return errorsum
+
+if True:
+    from scipy.optimize import fmin
+
+    def xf(pt, poly):
+        x, y = pt
+        r = math.sqrt((x - 320) ** 2 + (y - 240) ** 2)
+        fr = poly(r) / r
+        return (320 + (x - 320) * fr, 240 + (y - 240) * fr)
+    def silly(p, goodcorners):
+    #    print "eval", p
+
+        d = 1.0 # - sum(p)
+        poly = numpy.poly1d(list(p) + [d, 0.])
+
+        xformed = [[xf(pt, poly) for pt in co] for co in goodcorners]
+
+        return checkerboard_error(xformed)
+
+    x0 = [ 0. ]
+    #print silly(x0, goodcorners)
+    print "initial error", silly(x0, goodcorners)
+    xopt = fmin(silly, x0, args=(goodcorners,))
+    print "xopt", xopt
+    print "final error", silly(xopt, goodcorners)
+
+    d = 1.0 # - sum(xopt)
+    poly = numpy.poly1d(list(xopt) + [d, 0.])
+    print "final polynomial"
+    print poly
+
+    for co in goodcorners:
+        scrib = cv.CreateMat(480, 640, cv.CV_8UC3)
+        cv.SetZero(scrib)
+        cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), [xf(pt, poly) for pt in co], True)
+        cv.ShowImage("snap", scrib)
+        cv.WaitKey()
+
+    sys.exit(0)
+
+for (i, (img, (ok, co))) in enumerate(zip(images, corners)):
+    scrib = cv.CreateMat(img.rows, img.cols, cv.CV_8UC3)
+    cv.CvtColor(img, scrib, cv.CV_GRAY2BGR)
+    if ok:
+        cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), co, True)
+    cv.ShowImage("snap", scrib)
+    cv.WaitKey()
+
+print len(goodcorners)
+ipts = mk_image_points(goodcorners)
+opts = mk_object_points(len(goodcorners), .1)
+npts = mk_point_counts(len(goodcorners))
+
+intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
+distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
+cv.SetZero(intrinsics)
+cv.SetZero(distortion)
+# focal lengths have 1/1 ratio
+intrinsics[0,0] = 1.0
+intrinsics[1,1] = 1.0
+cv.CalibrateCamera2(opts, ipts, npts,
+           cv.GetSize(images[0]),
+           intrinsics,
+           distortion,
+           cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
+           cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
+           flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
+print "D =", list(cvmat_iterator(distortion))
+print "K =", list(cvmat_iterator(intrinsics))
+mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
+mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
+cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
+for img in images:
+    r = cv.CloneMat(img)
+    cv.Remap(img, r, mapx, mapy)
+    cv.ShowImage("snap", r)
+    cv.WaitKey()
diff --git a/modules/python/test/goodfeatures.py b/modules/python/test/goodfeatures.py
new file mode 100644
index 000000000..d68bd1f9f
--- /dev/null
+++ b/modules/python/test/goodfeatures.py
@@ -0,0 +1,34 @@
+import cv
+import unittest
+
+class TestGoodFeaturesToTrack(unittest.TestCase):
+    def test(self):
+        arr = cv.LoadImage("../samples/c/lena.jpg", 0)
+        original = cv.CloneImage(arr)
+        size = cv.GetSize(arr)
+        eig_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
+        temp_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
+        threshes = [ x / 100. for x in range(1,10) ]
+
+        results = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
+
+        # Check that GoodFeaturesToTrack has not modified input image
+        self.assert_(arr.tostring() == original.tostring())
+
+        # Check for repeatability
+        for i in range(10):
+            results2 = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
+            self.assert_(results == results2)
+
+        for t0,t1 in zip(threshes, threshes[1:]):
+             r0 = results[t0]
+             r1 = results[t1]
+
+             # Increasing thresh should make result list shorter
+             self.assert_(len(r0) > len(r1))
+
+             # Increasing thresh should monly truncate result list
+             self.assert_(r0[:len(r1)] == r1)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/modules/python/test/leak1.py b/modules/python/test/leak1.py
new file mode 100644
index 000000000..c451c0e06
--- /dev/null
+++ b/modules/python/test/leak1.py
@@ -0,0 +1,7 @@
+import cv
+import numpy as np
+cv.NamedWindow('Leak')
+while 1:
+    leak = np.random.random((480, 640)) * 255
+    cv.ShowImage('Leak', leak.astype(np.uint8))
+    cv.WaitKey(10)
diff --git a/modules/python/test/leak2.py b/modules/python/test/leak2.py
new file mode 100644
index 000000000..a67352f19
--- /dev/null
+++ b/modules/python/test/leak2.py
@@ -0,0 +1,10 @@
+import cv
+import numpy as np
+import time
+
+while True:
+    for i in range(4000):
+        a = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
+        b = cv.CreateMat(1024, 1024, cv.CV_8UC1)
+        c = cv.CreateMatND([1024,1024], cv.CV_8UC1)
+    print "pause..."
diff --git a/modules/python/test/leak3.py b/modules/python/test/leak3.py
new file mode 100644
index 000000000..c12364e8e
--- /dev/null
+++ b/modules/python/test/leak3.py
@@ -0,0 +1,6 @@
+import cv
+import math
+import time
+
+while True:
+    h = cv.CreateHist([40], cv.CV_HIST_ARRAY, [[0,255]], 1)
diff --git a/modules/python/test/leak4.py b/modules/python/test/leak4.py
new file mode 100644
index 000000000..326820052
--- /dev/null
+++ b/modules/python/test/leak4.py
@@ -0,0 +1,9 @@
+import cv
+import math
+import time
+
+N=50000
+print "leak4"
+while True:
+    seq=list((i*1., i*1.) for i in range(N))
+    cv.Moments(seq)
diff --git a/modules/python/test/test.py b/modules/python/test/test.py
new file mode 100644
index 000000000..d94426960
--- /dev/null
+++ b/modules/python/test/test.py
@@ -0,0 +1,2177 @@
+import unittest
+import random
+import time
+import math
+import sys
+import array
+import urllib
+import tarfile
+import hashlib
+import os
+import getopt
+import operator
+import functools
+
+import cv
+
+class OpenCVTests(unittest.TestCase):
+
+    depths = [ cv.IPL_DEPTH_8U, cv.IPL_DEPTH_8S, cv.IPL_DEPTH_16U, cv.IPL_DEPTH_16S, cv.IPL_DEPTH_32S, cv.IPL_DEPTH_32F, cv.IPL_DEPTH_64F ]
+
+    mat_types = [
+        cv.CV_8UC1,
+        cv.CV_8UC2,
+        cv.CV_8UC3,
+        cv.CV_8UC4,
+        cv.CV_8SC1,
+        cv.CV_8SC2,
+        cv.CV_8SC3,
+        cv.CV_8SC4,
+        cv.CV_16UC1,
+        cv.CV_16UC2,
+        cv.CV_16UC3,
+        cv.CV_16UC4,
+        cv.CV_16SC1,
+        cv.CV_16SC2,
+        cv.CV_16SC3,
+        cv.CV_16SC4,
+        cv.CV_32SC1,
+        cv.CV_32SC2,
+        cv.CV_32SC3,
+        cv.CV_32SC4,
+        cv.CV_32FC1,
+        cv.CV_32FC2,
+        cv.CV_32FC3,
+        cv.CV_32FC4,
+        cv.CV_64FC1,
+        cv.CV_64FC2,
+        cv.CV_64FC3,
+        cv.CV_64FC4,
+    ]
+    mat_types_single = [
+        cv.CV_8UC1,
+        cv.CV_8SC1,
+        cv.CV_16UC1,
+        cv.CV_16SC1,
+        cv.CV_32SC1,
+        cv.CV_32FC1,
+        cv.CV_64FC1,
+    ]
+
+    def depthsize(self, d):
+        return { cv.IPL_DEPTH_8U : 1,
+                 cv.IPL_DEPTH_8S : 1,
+                 cv.IPL_DEPTH_16U : 2,
+                 cv.IPL_DEPTH_16S : 2,
+                 cv.IPL_DEPTH_32S : 4,
+                 cv.IPL_DEPTH_32F : 4,
+                 cv.IPL_DEPTH_64F : 8 }[d]
+
+    def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
+        if not filename in self.image_cache:
+            filedata = urllib.urlopen("https://code.ros.org/svn/opencv/trunk/opencv/" + filename).read()
+            imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
+            cv.SetData(imagefiledata, filedata, len(filedata))
+            self.image_cache[filename] = cv.DecodeImageM(imagefiledata, iscolor)
+        return self.image_cache[filename]
+
+    def setUp(self):
+        self.image_cache = {}
+
+    def snap(self, img):
+        self.snapL([img])
+
+    def snapL(self, L):
+        for i,img in enumerate(L):
+            cv.NamedWindow("snap-%d" % i, 1)
+            cv.ShowImage("snap-%d" % i, img)
+        cv.WaitKey()
+        cv.DestroyAllWindows()
+
+    def hashimg(self, im):
+        """ Compute a hash for an image, useful for image comparisons """
+        return hashlib.md5(im.tostring()).digest()
+
+# Tests to run first; check the handful of basic operations that the later tests rely on
+
+class PreliminaryTests(OpenCVTests):
+
+    def test_lena(self):
+        # Check that the lena jpg image has loaded correctly
+        # This test uses a 'golden' MD5 hash of the Lena image
+        # If the JPEG decompressor changes, it is possible that the MD5 hash will change,
+        # so the hash here will need to change.
+
+        im = self.get_sample("samples/c/lena.jpg")
+        # self.snap(im)     # uncomment this line to view the image, when regilding
+        self.assertEqual(hashlib.md5(im.tostring()).hexdigest(), "9dcd9247f9811c6ce86675ba7b0297b6")
+
+    def test_LoadImage(self):
+        self.assertRaises(TypeError, lambda: cv.LoadImage())
+        self.assertRaises(TypeError, lambda: cv.LoadImage(4))
+        self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', 1, 1))
+        self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', xiscolor=cv.CV_LOAD_IMAGE_COLOR))
+
+    def test_types(self):
+        self.assert_(type(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == cv.iplimage)
+        self.assert_(type(cv.CreateMat(5, 7, cv.CV_32FC1)) == cv.cvmat)
+        for i,t in enumerate(self.mat_types):
+            basefunc = [
+                cv.CV_8UC,
+                cv.CV_8SC,
+                cv.CV_16UC,
+                cv.CV_16SC,
+                cv.CV_32SC,
+                cv.CV_32FC,
+                cv.CV_64FC,
+            ][i / 4]
+            self.assertEqual(basefunc(1 + (i % 4)), t)
+
+    def test_tostring(self):
+
+        for w in [ 1, 4, 64, 512, 640]:
+            for h in [ 1, 4, 64, 480, 512]:
+                for c in [1, 2, 3, 4]:
+                    for d in self.depths:
+                        a = cv.CreateImage((w,h), d, c);
+                        self.assert_(len(a.tostring()) == w * h * c * self.depthsize(d))
+
+        for w in [ 32, 96, 480 ]:
+            for h in [ 32, 96, 480 ]:
+                depth_size = {
+                    cv.IPL_DEPTH_8U : 1,
+                    cv.IPL_DEPTH_8S : 1,
+                    cv.IPL_DEPTH_16U : 2,
+                    cv.IPL_DEPTH_16S : 2,
+                    cv.IPL_DEPTH_32S : 4,
+                    cv.IPL_DEPTH_32F : 4,
+                    cv.IPL_DEPTH_64F : 8
+                }
+                for f in  self.depths:
+                    for channels in (1,2,3,4):
+                        img = cv.CreateImage((w, h), f, channels)
+                        esize = (w * h * channels * depth_size[f])
+                        self.assert_(len(img.tostring()) == esize)
+                        cv.SetData(img, " " * esize, w * channels * depth_size[f])
+                        self.assert_(len(img.tostring()) == esize)
+
+                mattype_size = {
+                    cv.CV_8UC1 : 1,
+                    cv.CV_8UC2 : 1,
+                    cv.CV_8UC3 : 1,
+                    cv.CV_8UC4 : 1,
+                    cv.CV_8SC1 : 1,
+                    cv.CV_8SC2 : 1,
+                    cv.CV_8SC3 : 1,
+                    cv.CV_8SC4 : 1,
+                    cv.CV_16UC1 : 2,
+                    cv.CV_16UC2 : 2,
+                    cv.CV_16UC3 : 2,
+                    cv.CV_16UC4 : 2,
+                    cv.CV_16SC1 : 2,
+                    cv.CV_16SC2 : 2,
+                    cv.CV_16SC3 : 2,
+                    cv.CV_16SC4 : 2,
+                    cv.CV_32SC1 : 4,
+                    cv.CV_32SC2 : 4,
+                    cv.CV_32SC3 : 4,
+                    cv.CV_32SC4 : 4,
+                    cv.CV_32FC1 : 4,
+                    cv.CV_32FC2 : 4,
+                    cv.CV_32FC3 : 4,
+                    cv.CV_32FC4 : 4,
+                    cv.CV_64FC1 : 8,
+                    cv.CV_64FC2 : 8,
+                    cv.CV_64FC3 : 8,
+                    cv.CV_64FC4 : 8
+                }
+
+                for t in self.mat_types:
+                    for im in [cv.CreateMat(h, w, t), cv.CreateMatND([h, w], t)]:
+                        elemsize = cv.CV_MAT_CN(cv.GetElemType(im)) * mattype_size[cv.GetElemType(im)]
+                        cv.SetData(im, " " * (w * h * elemsize), (w * elemsize))
+                        esize = (w * h * elemsize)
+                        self.assert_(len(im.tostring()) == esize)
+                        cv.SetData(im, " " * esize, w * elemsize)
+                        self.assert_(len(im.tostring()) == esize)
+
+# Tests for specific OpenCV functions
+
+class FunctionTests(OpenCVTests):
+
+    def test_AvgSdv(self):
+        m = cv.CreateMat(1, 8, cv.CV_32FC1)
+        for i,v in enumerate([2, 4, 4, 4, 5, 5, 7, 9]):
+            m[0,i] = (v,)
+        self.assertAlmostEqual(cv.Avg(m)[0], 5.0, 3)
+        avg,sdv = cv.AvgSdv(m)
+        self.assertAlmostEqual(avg[0], 5.0, 3)
+        self.assertAlmostEqual(sdv[0], 2.0, 3)
+
+    def test_CalcEMD2(self):
+        cc = {}
+        for r in [ 5, 10, 37, 38 ]:
+            scratch = cv.CreateImage((100,100), 8, 1)
+            cv.SetZero(scratch)
+            cv.Circle(scratch, (50,50), r, 255, -1)
+            storage = cv.CreateMemStorage()
+            seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
+            arr = cv.CreateMat(len(seq), 3, cv.CV_32FC1)
+            for i,e in enumerate(seq):
+                arr[i,0] = 1
+                arr[i,1] = e[0]
+                arr[i,2] = e[1]
+            cc[r] = arr
+        def myL1(A, B, D):
+            return abs(A[0]-B[0]) + abs(A[1]-B[1])
+        def myL2(A, B, D):
+            return math.sqrt((A[0]-B[0])**2 + (A[1]-B[1])**2)
+        def myC(A, B, D):
+            return max(abs(A[0]-B[0]), abs(A[1]-B[1]))
+        contours = set(cc.values())
+        for c0 in contours:
+            for c1 in contours:
+                self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L1) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL1)) < 1e-3)
+                self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L2) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL2)) < 1e-3)
+                self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_C) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myC)) < 1e-3)
+
+    def test_CalcOpticalFlowBM(self):
+        a = self.get_sample("samples/c/lena.jpg", 0)
+        b = self.get_sample("samples/c/lena.jpg", 0)
+        (w,h) = cv.GetSize(a)
+        vel_size = (w - 8, h - 8)
+        velx = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
+        vely = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
+        cv.CalcOpticalFlowBM(a, b, (8,8), (1,1), (8,8), 0, velx, vely)
+
+    def test_CalcOpticalFlowPyrLK(self):
+        a = self.get_sample("samples/c/lena.jpg", 0)
+        map = cv.CreateMat(2, 3, cv.CV_32FC1)
+        cv.GetRotationMatrix2D((256, 256), 10, 1.0, map)
+        b = cv.CloneMat(a)
+        cv.WarpAffine(a, b, map)
+
+        eig_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1)
+        temp_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1)
+
+        prevPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1)
+        currPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1)
+        prevFeatures = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 400, 0.01, 0.01)
+        (currFeatures, status, track_error) = cv.CalcOpticalFlowPyrLK(a,
+                                                                      b,
+                                                                      prevPyr,
+                                                                      currPyr,
+                                                                      prevFeatures,
+                                                                      (10, 10),
+                                                                      3,
+                                                                      (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,20, 0.03),
+                                                                      0)
+        if 0:  # enable visualization
+            print
+            print sum(status), "Points found in curr image"
+            for prev,this in zip(prevFeatures, currFeatures):
+                iprev = tuple([int(c) for c in prev])
+                ithis = tuple([int(c) for c in this])
+                cv.Circle(a, iprev, 3, 255)
+                cv.Circle(a, ithis, 3, 0)
+                cv.Line(a, iprev, ithis, 128)
+
+            self.snapL([a, b])
+
+    def test_CartToPolar(self):
+        x = cv.CreateMat(5, 5, cv.CV_32F)
+        y = cv.CreateMat(5, 5, cv.CV_32F)
+        mag = cv.CreateMat(5, 5, cv.CV_32F)
+        angle = cv.CreateMat(5, 5, cv.CV_32F)
+        x2 = cv.CreateMat(5, 5, cv.CV_32F)
+        y2 = cv.CreateMat(5, 5, cv.CV_32F)
+
+        for i in range(5):
+            for j in range(5):
+                x[i, j] = i
+                y[i, j] = j
+
+        for in_degrees in [False, True]:
+            cv.CartToPolar(x, y, mag, angle, in_degrees)
+            cv.PolarToCart(mag, angle, x2, y2, in_degrees)
+            for i in range(5):
+                for j in range(5):
+                    self.assertAlmostEqual(x[i, j], x2[i, j], 1)
+                    self.assertAlmostEqual(y[i, j], y2[i, j], 1)
+
+    def test_Circle(self):
+        for w,h in [(2,77), (77,2), (256, 256), (640,480)]:
+            img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
+            cv.SetZero(img)
+            tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000]
+            for x0 in tricky:
+                for y0 in tricky:
+                    for r in [ 0, 1, 2, 3, 4, 5, w/2, w-1, w, w+1, h/2, h-1, h, h+1, 8000 ]:
+                        for thick in [1, 2, 10]:
+                            for t in [0, 8, 4, cv.CV_AA]:
+                                cv.Circle(img, (x0,y0), r, 255, thick, t)
+        # just check that something was drawn
+        self.assert_(cv.Sum(img)[0] > 0)
+
+    def test_ConvertImage(self):
+        i1 = cv.GetImage(self.get_sample("samples/c/lena.jpg", 1))
+        i2 = cv.CloneImage(i1)
+        i3 = cv.CloneImage(i1)
+        cv.ConvertImage(i1, i2, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB)
+        self.assertNotEqual(self.hashimg(i1), self.hashimg(i2))
+        cv.ConvertImage(i2, i3, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB)
+        self.assertEqual(self.hashimg(i1), self.hashimg(i3))
+
+    def test_ConvexHull2(self):
+        # Draw a series of N-pointed stars, find contours, assert the contour is not convex,
+        # assert the hull has N segments, assert that there are N convexity defects.
+        # The same point set is fed to ConvexHull2 in three representations
+        # (CvSeq, CvMat, plain list) to exercise all accepted input forms.
+
+        def polar2xy(th, r):
+            # Map polar coordinates (angle, radius) to pixel coords centered at (400,400).
+            return (int(400 + r * math.cos(th)), int(400 + r * math.sin(th)))
+        storage = cv.CreateMemStorage(0)
+        for way in ['CvSeq', 'CvMat', 'list']:
+            for points in range(3,20):
+                scratch = cv.CreateImage((800,800), 8, 1)
+                cv.SetZero(scratch)
+                # Alternate radii 100/350 to draw a star with `points` spikes.
+                sides = 2 * points
+                cv.FillPoly(scratch, [ [ polar2xy(i * 2 * math.pi / sides, [100,350][i&1]) for i in range(sides) ] ], 255)
+
+                seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
+
+                if way == 'CvSeq':
+                    # pts is a CvSeq
+                    pts = seq
+                elif way == 'CvMat':
+                    # pts is a CvMat
+                    arr = cv.CreateMat(len(seq), 1, cv.CV_32SC2)
+                    for i,e in enumerate(seq):
+                        arr[i,0] = e
+                    pts = arr
+                elif way == 'list':
+                    # pts is a list of 2-tuples
+                    pts = list(seq)
+                else:
+                    assert False
+
+                self.assert_(cv.CheckContourConvexity(pts) == 0)
+                hull = cv.ConvexHull2(pts, storage, return_points = 1)
+                self.assert_(cv.CheckContourConvexity(hull) == 1)
+                self.assert_(len(hull) == points)
+
+                # ConvexityDefects accepts only CvSeq/CvMat input, not lists.
+                if way in [ 'CvSeq', 'CvMat' ]:
+                    defects = cv.ConvexityDefects(pts, cv.ConvexHull2(pts, storage), storage)
+                    self.assert_(len([depth for (_,_,_,depth) in defects if (depth > 5)]) == points)
+
+    def test_CreateImage(self):
+        # CreateImage must honor the requested width/height/channels/depth,
+        # and reject an invalid depth (9) with cv.error.
+        for w in [ 1, 4, 64, 512, 640]:
+            for h in [ 1, 4, 64, 480, 512]:
+                for c in [1, 2, 3, 4]:
+                    for d in self.depths:
+                        a = cv.CreateImage((w,h), d, c);
+                        self.assert_(a.width == w)
+                        self.assert_(a.height == h)
+                        self.assert_(a.nChannels == c)
+                        self.assert_(a.depth == d)
+                        self.assert_(cv.GetSize(a) == (w, h))
+                        # self.assert_(cv.GetElemType(a) == d)
+        self.assertRaises(cv.error, lambda: cv.CreateImage((100, 100), 9, 1))
+
+    def test_CreateMat(self):
+        # CreateMat must produce a matrix of the requested element type;
+        # negative dimensions raise cv.error, and cvmat cannot be constructed
+        # directly from Python.
+        for rows in [1, 2, 4, 16, 64, 512, 640]:
+            for cols in [1, 2, 4, 16, 64, 512, 640]:
+                for t in self.mat_types:
+                    m = cv.CreateMat(rows, cols, t)
+                    self.assertEqual(cv.GetElemType(m), t)
+                    self.assertEqual(m.type, t)
+        self.assertRaises(cv.error, lambda: cv.CreateMat(-1, 100, cv.CV_8SC4))
+        self.assertRaises(cv.error, lambda: cv.CreateMat(100, -1, cv.CV_8SC4))
+        self.assertRaises(cv.error, lambda: cv.cvmat())
+
+    def test_DrawChessboardCorners(self):
+        # Drawing 5x5 corners paints something; passing a pattern size that
+        # disagrees with the number of corners (4x5 vs 25 points) is a TypeError.
+        im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 3)
+        cv.SetZero(im)
+        cv.DrawChessboardCorners(im, (5, 5), [ (100,100) for i in range(5 * 5) ], 1)
+        self.assert_(cv.Sum(im)[0] > 0)
+
+        self.assertRaises(TypeError, lambda: cv.DrawChessboardCorners(im, (4, 5), [ (100,100) for i in range(5 * 5) ], 1))
+
+    def test_ExtractSURF(self):
+        # ExtractSURF sanity checks across Hessian thresholds, descriptor
+        # sizes and octave-layer counts: descriptor length matches the
+        # extended flag (64 vs 128), and each keypoint's fields are in range.
+        img = self.get_sample("samples/c/lena.jpg", 0)
+        w,h = cv.GetSize(img)
+        for hessthresh in [ 300,400,500]:
+            for dsize in [0,1]:
+                for layers in [1,3,10]:
+                    kp,desc = cv.ExtractSURF(img, None, cv.CreateMemStorage(), (dsize, hessthresh, 3, layers))
+                    self.assert_(len(kp) == len(desc))
+                    for d in desc:
+                        self.assert_(len(d) == {0:64, 1:128}[dsize])
+                    for pt,laplacian,size,dir,hessian in kp:
+                        self.assert_((0 <= pt[0]) and (pt[0] <= w))
+                        self.assert_((0 <= pt[1]) and (pt[1] <= h))
+                        self.assert_(laplacian in [-1, 0, 1])
+                        self.assert_((0 <= dir) and (dir <= 360))
+                        self.assert_(hessian >= hessthresh)
+
+    def test_FillPoly(self):
+        # Filling a random 100-gon on a zeroed image must always produce
+        # nonzero pixels.  random is seeded for reproducibility.
+        scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
+        random.seed(0)
+        for i in range(50):
+            cv.SetZero(scribble)
+            self.assert_(cv.CountNonZero(scribble) == 0)
+            cv.FillPoly(scribble, [ [ (random.randrange(640), random.randrange(480)) for i in range(100) ] ], (255,))
+            self.assert_(cv.CountNonZero(scribble) != 0)
+
+    def test_FindChessboardCorners(self):
+        # FindChessboardCorners: no crash on a featureless image, full 7x7
+        # detection on a perfect checkerboard, and no crash when the image
+        # contains many more corners than requested.
+        im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 1)
+        cv.Set(im, 128)
+
+        # Empty image run
+        status,corners = cv.FindChessboardCorners( im, (7,7) )
+
+        # Perfect checkerboard
+        def xf(i,j, o):
+            # Top-left corner of square (i,j) offset by o pixels; 40px squares.
+            return ((96 + o) + 40 * i, (96 + o) + 40 * j)
+        for i in range(8):
+            for j in range(8):
+                color = ((i ^ j) & 1) * 255
+                cv.Rectangle(im, xf(i,j, 0), xf(i,j, 39), color, cv.CV_FILLED)
+        status,corners = cv.FindChessboardCorners( im, (7,7) )
+        self.assert_(status)
+        self.assert_(len(corners) == (7 * 7))
+
+        # Exercise corner display
+        im3 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 3)
+        cv.Merge(im, im, im, None, im3)
+        cv.DrawChessboardCorners(im3, (7,7), corners, status)
+
+        if 0:
+            self.snap(im3)
+
+        # Run it with too many corners
+        cv.Set(im, 128)
+        for i in range(40):
+            for j in range(40):
+                color = ((i ^ j) & 1) * 255
+                x = 30 + 6 * i
+                y = 30 + 4 * j
+                cv.Rectangle(im, (x, y), (x+4, y+4), color, cv.CV_FILLED)
+        status,corners = cv.FindChessboardCorners( im, (7,7) )
+
+        # XXX - this is very slow
+        if 0:
+            rng = cv.RNG(0)
+            cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 255.0)
+            self.snap(im)
+            status,corners = cv.FindChessboardCorners( im, (7,7) )
+
+    def test_FindContours(self):
+        # FindContours in three stages: (1) all mode/method combinations run
+        # cleanly on a black image; (2) a recursively drawn tree of nested
+        # circles is recovered exactly as a tree of contours; (3) a two-blob
+        # image yields exactly two top-level CCOMP contours, then DrawContours
+        # is smoke-tested.
+        random.seed(0)
+
+        storage = cv.CreateMemStorage()
+
+        # First run FindContours on a black image.
+        for mode in [cv.CV_RETR_EXTERNAL, cv.CV_RETR_LIST, cv.CV_RETR_CCOMP, cv.CV_RETR_TREE]:
+            for method in [cv.CV_CHAIN_CODE, cv.CV_CHAIN_APPROX_NONE, cv.CV_CHAIN_APPROX_SIMPLE, cv.CV_CHAIN_APPROX_TC89_L1, cv.CV_CHAIN_APPROX_TC89_KCOS, cv.CV_LINK_RUNS]:
+                scratch = cv.CreateImage((800,800), 8, 1)
+                cv.SetZero(scratch)
+                seq = cv.FindContours(scratch, storage, mode, method)
+                x = len(seq)
+                if seq:
+                    pass
+                for s in seq:
+                    pass
+
+        for trial in range(10):
+            scratch = cv.CreateImage((800,800), 8, 1)
+            cv.SetZero(scratch)
+            def plot(center, radius, mode):
+                # Draw a filled circle, then recursively draw 1-3 smaller
+                # circles of the opposite color inside it.  Returns a nested
+                # sorted-list structure mirroring the expected contour tree.
+                cv.Circle(scratch, center, radius, mode, -1)
+                if radius < 20:
+                    return 0
+                else:
+                    newmode = 255 - mode
+                    subs = random.choice([1,2,3])
+                    if subs == 1:
+                        return [ plot(center, radius - 5, newmode) ]
+                    else:
+                        newradius = int({ 2: radius / 2, 3: radius / 2.3 }[subs] - 5)
+                        r = radius / 2
+                        ret = []
+                        for i in range(subs):
+                            th = i * (2 * math.pi) / subs
+                            ret.append(plot((int(center[0] + r * math.cos(th)), int(center[1] + r * math.sin(th))), newradius, newmode))
+                        return sorted(ret)
+
+            actual = plot((400,400), 390, 255 )
+
+            seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
+
+            def traverse(s):
+                # Walk the contour tree (h_next siblings, v_next children)
+                # producing the same nested sorted-list structure as plot().
+                # Along the way each contour must have positive area and a
+                # near-square minimum-area bounding rect (circles).
+                if s == None:
+                    return 0
+                else:
+                    self.assert_(abs(cv.ContourArea(s)) > 0.0)
+                    ((x,y),(w,h),th) = cv.MinAreaRect2(s, cv.CreateMemStorage())
+                    self.assert_(((w / h) - 1.0) < 0.01)
+                    self.assert_(abs(cv.ContourArea(s)) > 0.0)
+                    r = []
+                    while s:
+                        r.append(traverse(s.v_next()))
+                        s = s.h_next()
+                    return sorted(r)
+            self.assert_(traverse(seq.v_next()) == actual)
+
+        if 1:
+            original = cv.CreateImage((800,800), 8, 1)
+            cv.SetZero(original)
+            cv.Circle(original, (400, 400), 200, 255, -1)
+            cv.Circle(original, (100, 100), 20, 255, -1)
+        else:
+            original = self.get_sample("samples/c/lena.jpg", 0)
+            cv.Threshold(original, original, 128, 255, cv.CV_THRESH_BINARY);
+
+        contours = cv.FindContours(original, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
+
+
+        def contour_iterator(contour):
+            # Yield each top-level contour by following h_next links.
+            while contour:
+                yield contour
+                contour = contour.h_next()
+
+        # Should be 2 contours from the two circles above
+        self.assertEqual(len(list(contour_iterator(contours))), 2)
+
+        # Smoke DrawContours
+        sketch = cv.CreateImage(cv.GetSize(original), 8, 3)
+        cv.SetZero(sketch)
+        red = cv.RGB(255, 0, 0)
+        green = cv.RGB(0, 255, 0)
+        for c in contour_iterator(contours):
+            cv.DrawContours(sketch, c, red, green, 0)
+        # self.snap(sketch)
+
+    def test_GetAffineTransform(self):
+        # Mapping the unit triangle to one scaled by 17 must yield an affine
+        # matrix with 17 on the diagonal.
+        mapping = cv.CreateMat(2, 3, cv.CV_32FC1)
+        cv.GetAffineTransform([ (0,0), (1,0), (0,1) ], [ (0,0), (17,0), (0,17) ], mapping)
+        self.assertAlmostEqual(mapping[0,0], 17, 2)
+        self.assertAlmostEqual(mapping[1,1], 17, 2)
+
+    def test_GetRotationMatrix2D(self):
+        # For 0/360 degree rotations about the origin the 2x3 matrix must be
+        # `scale` on the diagonal and 0 elsewhere (including the translation
+        # column).
+        mapping = cv.CreateMat(2, 3, cv.CV_32FC1)
+        for scale in [0.0, 1.0, 2.0]:
+            for angle in [0.0, 360.0]:
+                cv.GetRotationMatrix2D((0,0), angle, scale, mapping)
+                for r in [0, 1]:
+                    for c in [0, 1, 2]:
+                        if r == c:
+                            e = scale
+                        else:
+                            e = 0.0
+                        self.assertAlmostEqual(mapping[r, c], e, 2)
+
+    def test_GetSize(self):
+        self.assert_(cv.GetSize(cv.CreateMat(5, 7, cv.CV_32FC1)) == (7,5))
+        self.assert_(cv.GetSize(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == (7,5))
+
+    def test_GetStarKeypoints(self):
+        # GetStarKeypoints must find at least one keypoint, and every
+        # keypoint's (x,y) must lie within the image bounds.
+        src = self.get_sample("samples/c/lena.jpg", 0)
+        storage = cv.CreateMemStorage()
+        kp = cv.GetStarKeypoints(src, storage)
+        self.assert_(len(kp) > 0)
+        for (x,y),scale,r in kp:
+            self.assert_(0 <= x)
+            self.assert_(x <= cv.GetSize(src)[0])
+            self.assert_(0 <= y)
+            self.assert_(y <= cv.GetSize(src)[1])
+        # NOTE(review): everything below this return is deliberately dead
+        # code -- an interactive visualization kept for manual debugging.
+        return
+        scribble = cv.CreateImage(cv.GetSize(src), 8, 3)
+        cv.CvtColor(src, scribble, cv.CV_GRAY2BGR)
+        for (x,y),scale,r in kp:
+            print x,y,scale,r
+            cv.Circle(scribble, (x,y), scale, cv.RGB(255,0,0))
+        self.snap(scribble)
+
+    def test_GetSubRect(self):
+        # GetSubRect checks: (1) each sub-rect view keeps a reference to the
+        # underlying data buffer; (2) copying tiles out of an image via
+        # sub-rects and ROIs reassembles the original exactly; (3) nested
+        # sub-rects address the right elements for both CvMat and IplImage.
+        src = cv.CreateImage((100,100), 8, 1)
+        data = "z" * (100 * 100)
+
+        cv.SetData(src, data, 100)
+        start_count = sys.getrefcount(data)
+
+        # Each view must add exactly one reference to `data`.
+        iter = 77
+        subs = []
+        for i in range(iter):
+            sub = cv.GetSubRect(src, (0, 0, 10, 10))
+            subs.append(sub)
+        self.assert_(sys.getrefcount(data) == (start_count + iter))
+
+        src = self.get_sample("samples/c/lena.jpg", 0)
+        made = cv.CreateImage(cv.GetSize(src), 8, 1)
+        # NOTE(review): this CreateMat result is immediately overwritten in
+        # the loop below and appears to be dead code.
+        sub = cv.CreateMat(32, 32, cv.CV_8UC1)
+        for x in range(0, 512, 32):
+            for y in range(0, 512, 32):
+                sub = cv.GetSubRect(src, (x, y, 32, 32))
+                cv.SetImageROI(made, (x, y, 32, 32))
+                cv.Copy(sub, made)
+        cv.ResetImageROI(made)
+        cv.AbsDiff(made, src, made)
+        self.assert_(cv.CountNonZero(made) == 0)
+
+        for m1 in [cv.CreateMat(1, 10, cv.CV_8UC1), cv.CreateImage((10, 1), 8, 1)]:
+            for i in range(10):
+                m1[0, i] = i
+            def aslist(cvmat): return list(array.array('B', cvmat.tostring()))
+            m2 = cv.GetSubRect(m1, (5, 0, 4, 1))
+            m3 = cv.GetSubRect(m2, (1, 0, 2, 1))
+            self.assertEqual(aslist(m1), range(10))
+            self.assertEqual(aslist(m2), range(5, 9))
+            self.assertEqual(aslist(m3), range(6, 8))
+
+    def xtest_grabCut(self):
+        # Disabled (x-prefixed) smoke test for cv.GrabCut with
+        # GC_INIT_WITH_RECT; tmp1/tmp2 are the 13x5 fg/bg model buffers.
+        image = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR)
+        tmp1 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1)
+        tmp2 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1)
+        mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
+        cv.GrabCut(image, mask, (10,10,200,200), tmp1, tmp2, 10, cv.GC_INIT_WITH_RECT)
+
+    def test_HoughLines2_PROBABILISTIC(self):
+        # Probabilistic Hough transform on an edge image must return at
+        # least one line segment.
+        li = cv.HoughLines2(self.yield_line_image(),
+                                                cv.CreateMemStorage(),
+                                                cv.CV_HOUGH_PROBABILISTIC,
+                                                1,
+                                                math.pi/180,
+                                                50,
+                                                50,
+                                                10)
+        self.assert_(len(li) > 0)
+        self.assert_(li[0] != None)
+
+    def test_HoughLines2_STANDARD(self):
+        # Standard Hough transform on an edge image must return at least
+        # one (rho, theta) line.
+        li = cv.HoughLines2(self.yield_line_image(),
+                                                cv.CreateMemStorage(),
+                                                cv.CV_HOUGH_STANDARD,
+                                                1,
+                                                math.pi/180,
+                                                100,
+                                                0,
+                                                0)
+        self.assert_(len(li) > 0)
+        self.assert_(li[0] != None)
+
+    def test_InPaint(self):
+        src = self.get_sample("doc/pics/building.jpg")
+        msk = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
+        damaged = cv.CloneMat(src)
+        repaired = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 3)
+        difference = cv.CloneImage(repaired)
+        cv.SetZero(msk)
+        for method in [ cv.CV_INPAINT_NS, cv.CV_INPAINT_TELEA ]:
+            for (p0,p1) in [ ((10,10), (400,400)) ]:
+                cv.Line(damaged, p0, p1, cv.RGB(255, 0, 255), 2)
+                cv.Line(msk, p0, p1, 255, 2)
+            cv.Inpaint(damaged, msk, repaired, 10., cv.CV_INPAINT_NS)
+        cv.AbsDiff(src, repaired, difference)
+        #self.snapL([src, damaged, repaired, difference])
+
+    def test_InitLineIterator(self):
+        # A horizontal line from x=20 to x=30 inclusive visits 11 pixels.
+        scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
+        self.assert_(len(list(cv.InitLineIterator(scribble, (20,10), (30,10)))) == 11)
+
+    def test_InRange(self):
+        # Smoke test: cv.InRange on 32F inputs must produce 8U masks that
+        # can be combined with cv.Or without raising.
+
+        sz = (256,256)
+        Igray1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
+        Ilow1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
+        Ihi1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
+        Igray2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
+        Ilow2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
+        Ihi2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
+
+        Imask = cv.CreateImage(sz, cv.IPL_DEPTH_8U,1)
+        Imaskt = cv.CreateImage(sz,cv.IPL_DEPTH_8U,1)
+
+        cv.InRange(Igray1, Ilow1, Ihi1, Imask);
+        cv.InRange(Igray2, Ilow2, Ihi2, Imaskt);
+
+        cv.Or(Imask, Imaskt, Imask);
+
+    def test_Line(self):
+        # Robustness sweep for cv.Line: endpoints far outside the image,
+        # every thickness and line type combination must not crash, and at
+        # least something must be drawn.
+        w,h = 640,480
+        img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
+        cv.SetZero(img)
+        tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000]
+        for x0 in tricky:
+            for y0 in tricky:
+                for x1 in tricky:
+                    for y1 in tricky:
+                        for thickness in [ 0, 1, 8 ]:
+                            for line_type in [0, 4, 8, cv.CV_AA ]:
+                                cv.Line(img, (x0,y0), (x1,y1), 255, thickness, line_type)
+        # just check that something was drawn
+        self.assert_(cv.Sum(img)[0] > 0)
+
+    def test_MinMaxLoc(self):
+        # Plant a single 0 and a single 255 in a 128-valued image; MinMaxLoc
+        # must report them as min/max with the planted locations.  Indexing
+        # uses (row, col), so the locations are reversed to (x, y) below.
+        # NOTE(review): random is not seeded here, so lo/hi may coincide on
+        # some runs (then the 255 overwrites the 0) -- confirm acceptable.
+        scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
+        los = [ (random.randrange(480), random.randrange(640)) for i in range(100) ]
+        his = [ (random.randrange(480), random.randrange(640)) for i in range(100) ]
+        for (lo,hi) in zip(los,his):
+            cv.Set(scribble, 128)
+            scribble[lo] = 0
+            scribble[hi] = 255
+            r = cv.MinMaxLoc(scribble)
+            self.assert_(r == (0, 255, tuple(reversed(lo)), tuple(reversed(hi))))
+
+    def xxx_test_PyrMeanShiftFiltering(self):   # XXX - ticket #306
+        # Disabled (xxx-prefixed) test kept for ticket #306; both branches
+        # are interactive debugging aids, not assertions.
+        if 0:
+            src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR)
+            dst = cv.CloneMat(src)
+            cv.PyrMeanShiftFiltering(src, dst, 5, 5)
+            print src, dst
+            self.snap(src)
+        else:
+            r = cv.temp_test()
+            print r
+            print len(r.tostring())
+            self.snap(r)
+
+    def test_Reshape(self):
+        # cv.Reshape must preserve the total element count while changing
+        # channel count and/or row count; cv.ReshapeMatND is checked for
+        # valid reshapes and must reject one that changes the element count.
+        # 97 rows
+        # 12 cols
+        rows = 97
+        cols = 12
+        im = cv.CreateMat( rows, cols, cv.CV_32FC1 )
+        elems = rows * cols * 1
+        def crd(im):
+            # Return (width, height, channels) for an array.
+            return cv.GetSize(im) + (cv.CV_MAT_CN(cv.GetElemType(im)),)
+
+        for c in (1, 2, 3, 4):
+            nc,nr,nd = crd(cv.Reshape(im, c))
+            self.assert_(nd == c)
+            self.assert_((nc * nr * nd) == elems)
+
+        nc,nr,nd = crd(cv.Reshape(im, 0, 97*2))
+        self.assert_(nr == 97*2)
+        self.assert_((nc * nr * nd) == elems)
+
+        nc,nr,nd = crd(cv.Reshape(im, 3, 97*2))
+        self.assert_(nr == 97*2)
+        self.assert_(nd == 3)
+        self.assert_((nc * nr * nd) == elems)
+
+        # Now test ReshapeMatND
+        mat = cv.CreateMatND([24], cv.CV_32FC1)
+        cv.Set(mat, 1.0)
+        self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [24, 1])), (24, 1))
+        self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [6, 4])), (6, 4))
+        self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 24, [1])), (1,))
+        # 12 channels x 1 element != 24 elements, so this must fail.
+        self.assertRaises(TypeError, lambda: cv.ReshapeMatND(mat, 12, [1]))
+
+    def test_Save(self):
+        # Save/Load round-trip must preserve the object's Python type for
+        # IplImage, CvMat and CvMatND.
+        # NOTE(review): writes "test.save" into the working directory and
+        # never deletes it -- consider cleanup.
+        for o in [ cv.CreateImage((128,128), cv.IPL_DEPTH_8U, 1), cv.CreateMat(16, 16, cv.CV_32FC1), cv.CreateMatND([7,9,4], cv.CV_32FC1) ]:
+            cv.Save("test.save", o)
+            loaded = cv.Load("test.save", cv.CreateMemStorage())
+            self.assert_(type(o) == type(loaded))
+
+    def test_SetIdentity(self):
+        for r in range(1,16):
+            for c in range(1, 16):
+                for t in self.mat_types_single:
+                    M = cv.CreateMat(r, c, t)
+                    cv.SetIdentity(M)
+                    for rj in range(r):
+                        for cj in range(c):
+                            if rj == cj:
+                                expected = 1.0
+                            else:
+                                expected = 0.0
+                            self.assertEqual(M[rj,cj], expected)
+
+    def test_SnakeImage(self):
+        # SnakeImage: weight lists must match the point count (TypeError
+        # otherwise), the call must move the points, and passing per-point
+        # weight lists must give the same result as scalar weights.
+        src = self.get_sample("samples/c/lena.jpg", 0)
+        pts = [ (512-i,i) for i in range(0, 512, 8) ]
+
+        # Make sure that weight arguments get validated
+        self.assertRaises(TypeError, lambda: cv.SnakeImage(cv.GetImage(src), pts, [1,2], .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1)))
+
+        # Smoke by making sure that points are changed by call
+        r = cv.SnakeImage(cv.GetImage(src), pts, .01, .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))
+        if 0:
+            cv.PolyLine(src, [ r ], 0, 255)
+            self.snap(src)
+        self.assertEqual(len(r), len(pts))
+        self.assertNotEqual(r, pts)
+
+        # Ensure that list of weights is same as scalar weight
+        w = [.01] * len(pts)
+        r2 = cv.SnakeImage(cv.GetImage(src), pts, w, w, w, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))
+        self.assertEqual(r, r2)
+
+    def test_KMeans2(self):
+        # Cluster 500 3-channel samples, half (0,0,0) and half (255,255,255),
+        # into 2 clusters: perfect separation means zero compactness, each
+        # sample gets the expected label, and the centers are pure 0/255.
+        size = 500
+        samples = cv.CreateMat(size, 1, cv.CV_32FC3)
+        labels = cv.CreateMat(size, 1, cv.CV_32SC1)
+        centers = cv.CreateMat(2, 3, cv.CV_32FC1)
+
+        cv.Zero(samples)
+        cv.Zero(labels)
+        cv.Zero(centers)
+
+        # Set the first half of the samples to white.
+        cv.Set(cv.GetSubRect(samples, (0, 0, 1, size/2)), (255, 255, 255))
+
+        compact = cv.KMeans2(samples, 2, labels, (cv.CV_TERMCRIT_ITER, 100, 0.1), 1, 0, centers)
+
+        self.assertEqual(int(compact), 0)
+
+        # Spot-check 50 random samples against their assigned labels.
+        random.seed(0)
+        for i in range(50):
+            index = random.randrange(size)
+            if index < size/2:
+                self.assertEqual(samples[index, 0], (255, 255, 255))
+                self.assertEqual(labels[index, 0], 1)
+            else:
+                self.assertEqual(samples[index, 0], (0, 0, 0))
+                self.assertEqual(labels[index, 0], 0)
+
+        for cluster in (0, 1):
+            for channel in (0, 1, 2):
+                self.assertEqual(int(centers[cluster, channel]), cluster*255)
+
+    def test_Sum(self):
+        for r in range(1,11):
+            for c in range(1, 11):
+                for t in self.mat_types_single:
+                    M = cv.CreateMat(r, c, t)
+                    cv.Set(M, 1)
+                    self.assertEqual(cv.Sum(M)[0], r * c)
+
+    def test_Threshold(self):
+        # Directed test for bug 2790622: repeated cv.Threshold runs on the
+        # same input must be deterministic (identical output every time).
+        src = self.get_sample("samples/c/lena.jpg", 0)
+        results = set()
+        for i in range(10):
+            dst = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
+            cv.Threshold(src, dst, 128, 128, cv.CV_THRESH_BINARY)
+            results.add(dst.tostring())
+        # Should have produced the same answer every time, so results set should have size 1
+        self.assert_(len(results) == 1)
+
+        # ticket #71 repro attempt
+        image = self.get_sample("samples/c/lena.jpg", 0)
+        red = cv.CreateImage(cv.GetSize(image), 8, 1)
+        binary = cv.CreateImage(cv.GetSize(image), 8, 1)
+        cv.Split(image, red, None, None, None)
+        cv.Threshold(red, binary, 42, 255, cv.CV_THRESH_BINARY)
+
+    ##############################################################################
+
+    def yield_line_image(self):
+        """ Needed by HoughLines tests: return a Canny edge map of the
+        building sample, a source rich in straight lines. """
+        src = self.get_sample("doc/pics/building.jpg", 0)
+        dst = cv.CreateImage(cv.GetSize(src), 8, 1)
+        cv.Canny(src, dst, 50, 200, 3)
+        return dst
+
+# Tests for functional areas
+
+class AreaTests(OpenCVTests):
+
+    def test_numpy(self):
+        # NumPy interop: cv.fromarray / numpy.asarray round-trips, shared
+        # data buffers, ND arrays, using ndarrays directly as CvArr
+        # arguments, and a leak-hunting churn loop.  Skipped when the
+        # bindings were built without numpy support.
+        if 'fromarray' in dir(cv):
+            import numpy
+
+            def convert(numpydims):
+                """ Create a numpy array with specified dims, return the OpenCV CvMat """
+                a1 = numpy.array([1] * reduce(operator.__mul__, numpydims)).reshape(*numpydims).astype(numpy.float32)
+                return cv.fromarray(a1)
+            def row_col_chan(m):
+                # Return (rows, cols, channels) of a CvMat.
+                col = m.cols
+                row = m.rows
+                chan = cv.CV_MAT_CN(cv.GetElemType(m))
+                return (row, col, chan)
+
+            self.assertEqual(row_col_chan(convert((2, 13))), (2, 13, 1))
+            self.assertEqual(row_col_chan(convert((2, 13, 4))), (2, 13, 4))
+            self.assertEqual(row_col_chan(convert((2, 13, cv.CV_CN_MAX))), (2, 13, cv.CV_CN_MAX))
+            self.assertRaises(TypeError, lambda: convert((2,)))
+            self.assertRaises(TypeError, lambda: convert((11, 17, cv.CV_CN_MAX + 1)))
+
+            for t in [cv.CV_16UC1, cv.CV_32SC1, cv.CV_32FC1]:
+                for d in [ (8,), (1,7), (2,3,4), (7,9,2,1,8), (1,2,3,4,5,6,7,8) ]:
+                    total = reduce(operator.__mul__, d)
+                    m = cv.CreateMatND(d, t)
+                    for i in range(total):
+                        cv.Set1D(m, i, i)
+                    na = numpy.asarray(m).reshape((total,))
+                    self.assertEqual(list(na), range(total))
+
+                    # now do numpy -> cvmat, and verify
+                    m2 = cv.fromarray(na, True)
+
+                    # Check that new cvmat m2 contains same counting sequence
+                    # NOTE(review): the loop below reads `m`, not `m2`, so m2
+                    # is never actually inspected -- confirm intent.
+                    for i in range(total):
+                        self.assertEqual(cv.Get1D(m, i)[0], i)
+
+            # Verify round-trip for 2D arrays
+            for rows in [2, 3, 7, 13]:
+                for cols in [2, 3, 7, 13]:
+                    for allowND in [False, True]:
+                        im = cv.CreateMatND([rows, cols], cv.CV_16UC1)
+                        cv.SetZero(im)
+                        a = numpy.asarray(im)
+                        self.assertEqual(a.shape, (rows, cols))
+                        cvmatnd = cv.fromarray(a, allowND)
+                        self.assertEqual(cv.GetDims(cvmatnd), (rows, cols))
+
+                        # im, a and cvmatnd all point to the same data, so...
+                        for i,coord in enumerate([(0,0), (0,1), (1,0), (1,1)]):
+                            v = 5 + i + 7
+                            a[coord] = v
+                            self.assertEqual(im[coord], v)
+                            self.assertEqual(cvmatnd[coord], v)
+
+            # Cv -> Numpy 3 channel check
+            im = cv.CreateMatND([2, 13], cv.CV_16UC3)
+            self.assertEqual(numpy.asarray(im).shape, (2, 13, 3))
+
+            # multi-dimensional NumPy array
+            na = numpy.ones([7,9,2,1,8])
+            cm = cv.fromarray(na, True)
+            self.assertEqual(cv.GetDims(cm), (7,9,2,1,8))
+
+            # Using an array object for a CvArr parameter
+            ones = numpy.ones((640, 480))
+            r = numpy.ones((640, 480))
+            cv.AddS(ones, 7, r)
+            self.assert_(numpy.alltrue(r == (8 * ones)))
+
+            # create arrays, use them in OpenCV and replace the the array
+            # looking for leaks
+            def randdim():
+                # Random shape: 1-5 dims, each of size 1-5.
+                return [random.randrange(1,6) for i in range(random.randrange(1, 6))]
+            arrays = [numpy.ones(randdim()).astype(numpy.uint8) for i in range(10)]
+            cs = [cv.fromarray(a, True) for a in arrays]
+            for i in range(1000):
+                arrays[random.randrange(10)] = numpy.ones(randdim()).astype(numpy.uint8)
+                cs[random.randrange(10)] = cv.fromarray(arrays[random.randrange(10)], True)
+                for j in range(10):
+                    self.assert_(all([c == chr(1) for c in cs[j].tostring()]))
+
+            # Smoke-test Rodrigues2 writing into a NumPy array slice.
+            m = numpy.identity(4, dtype = numpy.float32)
+            rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
+            rvec[0,0] = 1
+            rvec[1,0] = 1
+            rvec[2,0] = 1
+            cv.Rodrigues2(rvec, m[:3,:3])
+            #print m
+
+        else:
+            print "SKIPPING test_numpy - numpy support not built"
+
+    def test_boundscatch(self):
+        # Out-of-bounds indexing on CvMat and CvMatND must raise cv.error,
+        # while in-bounds accesses succeed.
+        l2 = cv.CreateMat(256, 1, cv.CV_8U)
+        l2[0,0]     # should be OK
+        self.assertRaises(cv.error, lambda: l2[1,1])
+        l2[0]       # should be OK
+        self.assertRaises(cv.error, lambda: l2[299])
+        for n in range(1, 8):
+            # n-dimensional array with every dimension of size 2.
+            l = cv.CreateMatND([2] * n, cv.CV_8U)
+            l[0] # should be OK
+            self.assertRaises(cv.error, lambda: l[999])
+
+            tup0 = (0,) * n
+            l[tup0] # should be OK
+            tup2 = (2,) * n
+            self.assertRaises(cv.error, lambda: l[tup2])
+
+    def test_stereo(self):
+        # NOTE(review): this method is shadowed by the identically-named
+        # test_stereo defined below (Python keeps only the last definition),
+        # so it never runs; it is also missing the final
+        # FindStereoCorrespondenceGC call present in the later copy.
+        # Consider deleting this duplicate.
+        bm = cv.CreateStereoBMState()
+        def illegal_delete():
+            bm = cv.CreateStereoBMState()
+            del bm.preFilterType
+        def illegal_assign():
+            bm = cv.CreateStereoBMState()
+            bm.preFilterType = "foo"
+
+        self.assertRaises(TypeError, illegal_delete)
+        self.assertRaises(TypeError, illegal_assign)
+
+        left = self.get_sample("samples/c/lena.jpg", 0)
+        right = self.get_sample("samples/c/lena.jpg", 0)
+        disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
+        cv.FindStereoCorrespondenceBM(left, right, disparity, bm)
+
+        gc = cv.CreateStereoGCState(16, 2)
+        left_disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
+        right_disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
+
+    def test_stereo(self):
+        # StereoBMState attributes cannot be deleted or assigned bad types;
+        # then smoke-test block-matching and graph-cut correspondence on a
+        # degenerate (identical left/right) pair.
+        bm = cv.CreateStereoBMState()
+        def illegal_delete():
+            bm = cv.CreateStereoBMState()
+            del bm.preFilterType
+        def illegal_assign():
+            bm = cv.CreateStereoBMState()
+            bm.preFilterType = "foo"
+
+        self.assertRaises(TypeError, illegal_delete)
+        self.assertRaises(TypeError, illegal_assign)
+
+        left = self.get_sample("samples/c/lena.jpg", 0)
+        right = self.get_sample("samples/c/lena.jpg", 0)
+        disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
+        cv.FindStereoCorrespondenceBM(left, right, disparity, bm)
+
+        gc = cv.CreateStereoGCState(16, 2)
+        left_disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
+        right_disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
+        cv.FindStereoCorrespondenceGC(left, right, left_disparity, right_disparity, gc)
+
+    def test_kalman(self):
+        k = cv.CreateKalman(2, 1, 0)
+
+    def failing_test_exception(self):
+        # Disabled (non-test_ prefix): cv.Laplace with an invalid 8U
+        # destination should raise cv.error, but currently does not --
+        # kept as a known-failing repro.
+        a = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
+        b = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
+        self.assertRaises(cv.error, lambda: cv.Laplace(a, b))
+
+    def test_cvmat_accessors(self):
+        # NOTE(review): stub -- only constructs a matrix and asserts
+        # nothing; accessor checks were presumably intended here.
+        cvm = cv.CreateMat(20, 10, cv.CV_32FC1)
+
+    def test_depths(self):
+	#""" Make sure that the depth enums are unique """
+        self.assert_(len(self.depths) == len(set(self.depths)))
+
+    def test_leak(self):
+        # If CreateImage/CreateMat do not release storage, each loop below
+        # would allocate ~64 GB; surviving them implies storage is freed.
+        for i in range(64000):
+            a = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
+        for i in range(64000):
+            a = cv.CreateMat(1024, 1024, cv.CV_8UC1)
+
+    def test_histograms(self):
+        # Histogram pipeline: build a per-channel histogram of a sample
+        # image, check min/max lookups via QueryHistValue_nD, smoke-test
+        # back-projection (whole-image and patch), and verify CompareHist's
+        # identity values for each comparison method.
+        def split(im):
+            # Split an image into its channels (list of single-channel images).
+            nchans = cv.CV_MAT_CN(cv.GetElemType(im))
+            c = [ cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1) for i in range(nchans) ] + [None] * (4 - nchans)
+            cv.Split(im, c[0], c[1], c[2], c[3])
+            return c[:nchans]
+        def imh(im):
+            # Compute a 256-bin-per-channel array histogram of the image.
+            s = split(im)
+            hist = cv.CreateHist([256] * len(s), cv.CV_HIST_ARRAY, [ (0,255) ] * len(s), 1)
+            cv.CalcHist(s, hist, 0)
+            return hist
+
+        dims = [180]
+        ranges = [(0,180)]
+        a = cv.CreateHist(dims, cv.CV_HIST_ARRAY , ranges, 1)
+        src = self.get_sample("samples/c/lena.jpg", 0)
+        h = imh(src)
+        (minv, maxv, minl, maxl) = cv.GetMinMaxHistValue(h)
+        self.assert_(cv.QueryHistValue_nD(h, minl) == minv)
+        self.assert_(cv.QueryHistValue_nD(h, maxl) == maxv)
+        bp = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
+        cv.CalcBackProject(split(src), bp, h)
+        bp = cv.CreateImage((cv.GetSize(src)[0]-2, cv.GetSize(src)[1]-2), cv.IPL_DEPTH_32F, 1)
+        cv.CalcBackProjectPatch(split(src), bp, (3,3), h, cv.CV_COMP_INTERSECT, 1)
+
+        # Comparing a histogram with itself must give the method's
+        # perfect-match value.
+        for meth,expected in [(cv.CV_COMP_CORREL, 1.0), (cv.CV_COMP_CHISQR, 0.0), (cv.CV_COMP_INTERSECT, 1.0), (cv.CV_COMP_BHATTACHARYYA, 0.0)]:
+            self.assertEqual(cv.CompareHist(h, h, meth), expected)
+
+    def test_arithmetic(self):
+        a = cv.CreateMat(4, 4, cv.CV_8UC1)
+        a[0,0] = 50.0
+        b = cv.CreateMat(4, 4, cv.CV_8UC1)
+        b[0,0] = 4.0
+        d = cv.CreateMat(4, 4, cv.CV_8UC1)
+        cv.Add(a, b, d)
+        self.assertEqual(d[0,0], 54.0)
+        cv.Mul(a, b, d)
+        self.assertEqual(d[0,0], 200.0)
+
+
+    def failing_test_cvtcolor(self):
+        # Disabled (non-test_ prefix) known-failing CvtColor sweep:
+        # BGR/RGB <-> Luv conversions and Bayer demosaicing into
+        # pre-allocated destinations of various depths.
+        src3 = self.get_sample("samples/c/lena.jpg")
+        src1 = self.get_sample("samples/c/lena.jpg", 0)
+        dst8u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_8U, c)) for c in (1,2,3,4)])
+        dst16u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_16U, c)) for c in (1,2,3,4)])
+        dst32f = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_32F, c)) for c in (1,2,3,4)])
+
+        for srcf in ["BGR", "RGB"]:
+            for dstf in ["Luv"]:
+                cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
+                cv.CvtColor(src3, dst32f[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
+                cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (dstf, srcf)))
+
+        for srcf in ["BayerBG", "BayerGB", "BayerGR"]:
+            for dstf in ["RGB", "BGR"]:
+                cv.CvtColor(src1, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
+
+    def test_voronoi(self):  # Voronoi facets of random points: expect one facet per distinct input point
+        w,h = 500,500
+
+        storage = cv.CreateMemStorage(0)
+
+        def facet_edges(e0):  # generator: walk edges around the left face, starting (and ending) at e0
+            e = e0
+            while True:
+                e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT)
+                yield e
+                if e == e0:
+                    break
+
+        def areas(edges):  # collect unique closed facets (compared by their sorted vertex lists)
+            seen = []
+            seensorted = []
+            for edge in edges:
+                pts = [ cv.Subdiv2DEdgeOrg(e) for e in facet_edges(edge) ]
+                if not (None in pts):  # skip facets touching the virtual/outer points
+                    l = [p.pt for p in pts]
+                    ls = sorted(l)
+                    if not(ls in seensorted):  # dedupe facets reached via different edges
+                        seen.append(l)
+                        seensorted.append(ls)
+            return seen
+
+        for npoints in range(1, 200):
+            points = [ (random.randrange(w), random.randrange(h)) for i in range(npoints) ]
+            subdiv = cv.CreateSubdivDelaunay2D( (0,0,w,h), storage )
+            for p in points:
+                cv.SubdivDelaunay2DInsert( subdiv, p)
+            cv.CalcSubdivVoronoi2D(subdiv)
+            ars = areas([ cv.Subdiv2DRotateEdge(e, 1) for e in subdiv.edges ] + [ cv.Subdiv2DRotateEdge(e, 3) for e in subdiv.edges ])  # rotated edges lie on Voronoi facets
+            self.assert_(len(ars) == len(set(points)))  # one facet per distinct site (duplicates collapse)
+
+            if False:  # manual visualization, kept disabled
+                img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 3)
+                cv.SetZero(img)
+                def T(x): return int(x) # int(300+x/16)
+                for pts in ars:
+                    cv.FillConvexPoly( img, [(T(x),T(y)) for (x,y) in pts], cv.RGB(100+random.randrange(156),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 );
+                for x,y in points:
+                    cv.Circle(img, (T(x), T(y)), 3, cv.RGB(0,0,0), -1)
+
+                cv.ShowImage("snap", img)
+                if cv.WaitKey(10) > 0:
+                    break
+
+    def perf_test_pow(self):  # manual benchmark of cv.Pow (not auto-run: name lacks "test" prefix)
+        mt = cv.CreateMat(1000, 1000, cv.CV_32FC1)
+        dst = cv.CreateMat(1000, 1000, cv.CV_32FC1)
+        rng = cv.RNG(0)
+        cv.RandArr(rng, mt, cv.CV_RAND_UNI, 0, 1000.0)
+        mt[0,0] = 10  # known value so the final print can be sanity-checked against 10 ** 2.4
+        print
+        for a in [0.5, 2.0, 2.3, 2.4, 3.0, 37.1786] + [2.4]*10:  # assorted exponents; repeated 2.4 shows warm-cache timing
+            started = time.time()
+            for i in range(10):
+                cv.Pow(mt, dst, a)
+            took = (time.time() - started) / 1e7  # 10 iterations * 1e6 elements -> per-element time
+            print "%4.1f took %f ns" % (a, took * 1e9)
+        print dst[0,0], 10 ** 2.4
+
+    def test_access_row_col(self):  # slicing / GetRow(s) / GetCol(s) / GetDiag views and their shared-storage semantics
+        src = cv.CreateImage((8,3), 8, 1)
+        # Put these words
+        #     Achilles
+        #     Benedict
+        #     Congreve
+        # in an array (3 rows, 8 columns).
+        # Then extract the array in various ways.
+
+        for r,w in enumerate(("Achilles", "Benedict", "Congreve")):
+            for c,v in enumerate(w):
+                src[r,c] = ord(v)
+        self.assertEqual(src.tostring(), "AchillesBenedictCongreve")
+        self.assertEqual(src[:,:].tostring(), "AchillesBenedictCongreve")
+        self.assertEqual(src[:,:4].tostring(), "AchiBeneCong")  # first 4 columns of each row
+        self.assertEqual(src[:,0].tostring(), "ABC")  # first column
+        self.assertEqual(src[:,4:].tostring(), "llesdictreve")
+        self.assertEqual(src[::2,:].tostring(), "AchillesCongreve")  # every other row
+        self.assertEqual(src[1:,:].tostring(), "BenedictCongreve")
+        self.assertEqual(src[1:2,:].tostring(), "Benedict")
+        self.assertEqual(src[::2,:4].tostring(), "AchiCong")
+        # The mats share the same storage, so updating one should update them all
+        lastword = src[2]
+        self.assertEqual(lastword.tostring(), "Congreve")
+        src[2,0] = ord('K')
+        self.assertEqual(lastword.tostring(), "Kongreve")  # view sees the write to src
+        src[2,0] = ord('C')
+
+        # ABCD
+        # EFGH
+        # IJKL
+        #
+        # MNOP
+        # QRST
+        # UVWX
+
+        mt = cv.CreateMatND([2,3,4], cv.CV_8UC1)  # 3-dimensional mat, filled per the diagram above
+        for i in range(2):
+            for j in range(3):
+                for k in range(4):
+                    mt[i,j,k] = ord('A') + k + 4 * (j + 3 * i)
+        self.assertEqual(mt[:,:,:1].tostring(), "AEIMQU")
+        self.assertEqual(mt[:,:1,:].tostring(), "ABCDMNOP")
+        self.assertEqual(mt[:1,:,:].tostring(), "ABCDEFGHIJKL")
+        self.assertEqual(mt[1,1].tostring(), "QRST")
+        self.assertEqual(mt[:,::2,:].tostring(), "ABCDIJKLMNOPUVWX")
+
+        # Exercise explicit GetRows
+        self.assertEqual(cv.GetRows(src, 0, 3).tostring(), "AchillesBenedictCongreve")
+        self.assertEqual(cv.GetRows(src, 0, 3, 1).tostring(), "AchillesBenedictCongreve")  # explicit step of 1
+        self.assertEqual(cv.GetRows(src, 0, 3, 2).tostring(), "AchillesCongreve")  # step of 2
+
+        self.assertEqual(cv.GetRow(src, 0).tostring(), "Achilles")
+
+        self.assertEqual(cv.GetCols(src, 0, 4).tostring(), "AchiBeneCong")
+
+        self.assertEqual(cv.GetCol(src, 0).tostring(), "ABC")
+        self.assertEqual(cv.GetCol(src, 1).tostring(), "ceo")
+
+        self.assertEqual(cv.GetDiag(src, 0).tostring(), "Aen")  # main diagonal
+
+        # Check that matrix type is preserved by the various operators
+
+        for mt in self.mat_types:
+            m = cv.CreateMat(5, 3, mt)
+            self.assertEqual(mt, cv.GetElemType(cv.GetRows(m, 0, 2)))
+            self.assertEqual(mt, cv.GetElemType(cv.GetRow(m, 0)))
+            self.assertEqual(mt, cv.GetElemType(cv.GetCols(m, 0, 2)))
+            self.assertEqual(mt, cv.GetElemType(cv.GetCol(m, 0)))
+            self.assertEqual(mt, cv.GetElemType(cv.GetDiag(m, 0)))
+            self.assertEqual(mt, cv.GetElemType(m[0]))
+            self.assertEqual(mt, cv.GetElemType(m[::2]))
+            self.assertEqual(mt, cv.GetElemType(m[:,0]))
+            self.assertEqual(mt, cv.GetElemType(m[:,:]))
+            self.assertEqual(mt, cv.GetElemType(m[::2,:]))
+
+    def test_addS_3D(self):  # cv.AddS (scalar add) on 3-dimensional float/double MatNDs, checked element by element
+        for dim in [ [1,1,4], [2,2,3], [7,4,3] ]:
+            for ty,ac in [ (cv.CV_32FC1, 'f'), (cv.CV_64FC1, 'd')]:  # mat type paired with matching array.array typecode
+                mat = cv.CreateMatND(dim, ty)
+                mat2 = cv.CreateMatND(dim, ty)
+                for increment in [ 0, 3, -1 ]:
+                    cv.SetData(mat, array.array(ac, range(dim[0] * dim[1] * dim[2])), 0)  # fill with 0..N-1
+                    cv.AddS(mat, increment, mat2)
+                    for i in range(dim[0]):
+                        for j in range(dim[1]):
+                            for k in range(dim[2]):
+                                self.assert_(mat2[i,j,k] == mat[i,j,k] + increment)
+
+    def test_buffers(self):  # cv.SetData from an array.array buffer into CvMat, CvMatND and IplImage
+        ar = array.array('f', [7] * (360*640))  # 360x640 float buffer, all sevens
+
+        m = cv.CreateMat(360, 640, cv.CV_32FC1)
+        cv.SetData(m, ar, 4 * 640)  # step = 4 bytes per float * 640 columns
+        self.assert_(m[0,0] == 7.0)
+
+        m = cv.CreateMatND((360, 640), cv.CV_32FC1)
+        cv.SetData(m, ar, 4 * 640)
+        self.assert_(m[0,0] == 7.0)
+
+        m = cv.CreateImage((640, 360), cv.IPL_DEPTH_32F, 1)  # note: image size is (width, height)
+        cv.SetData(m, ar, 4 * 640)
+        self.assert_(m[0,0] == 7.0)
+
+    def xxtest_Filters(self):  # disabled ("xx" prefix): manual timing of cv.Smooth over kernel sizes
+        print
+        m = cv.CreateMat(360, 640, cv.CV_32FC1)
+        d = cv.CreateMat(360, 640, cv.CV_32FC1)
+        for k in range(3, 21, 2):  # odd kernel sizes 3..19
+            started = time.time()
+            for i in range(1000):
+                cv.Smooth(m, m, param1=k)
+            print k, "took", time.time() - started
+
+    def assertSame(self, a, b):  # helper assertion: a and b are same-sized 8-bit arrays with identical pixels
+        w,h = cv.GetSize(a)
+        d = cv.CreateMat(h, w, cv.CV_8UC1)
+        cv.AbsDiff(a, b, d)
+        self.assert_(cv.CountNonZero(d) == 0)  # zero differing pixels
+
+    def test_text(self):  # PutText renders within the bounding box reported by GetTextSize (with small tolerances)
+        img = cv.CreateImage((640,40), cv.IPL_DEPTH_8U, 1)
+        cv.SetZero(img)
+        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1)
+        message = "XgfooX"
+        cv.PutText(img, message, (320,30), font, 255)
+        ((w,h),bl) = cv.GetTextSize(message, font)  # (width, height) plus baseline offset
+
+        # Find nonzero in X and Y
+        Xs = []
+        for x in range(640):
+            cv.SetImageROI(img, (x, 0, 1, 40))  # one-column ROI
+            Xs.append(cv.Sum(img)[0] > 0)
+        def firstlast(l):  # indices of the first True and one past the last True
+            return (l.index(True), len(l) - list(reversed(l)).index(True))
+
+        Ys = []
+        for y in range(40):
+            cv.SetImageROI(img, (0, y, 640, 1))  # one-row ROI
+            Ys.append(cv.Sum(img)[0] > 0)
+
+        x0,x1 = firstlast(Xs)
+        y0,y1 = firstlast(Ys)
+        actual_width = x1 - x0
+        actual_height = y1 - y0
+
+        # actual_width can be up to 8 pixels smaller than GetTextSize says
+        self.assert_(actual_width <= w)
+        self.assert_((w - actual_width) <= 8)
+
+        # actual_height can be up to 4 pixels smaller than GetTextSize says
+        self.assert_(actual_height <= (h + bl))
+        self.assert_(((h + bl) - actual_height) <= 4)
+
+        cv.ResetImageROI(img)
+        self.assert_(w != 0)
+        self.assert_(h != 0)
+
+    def test_sizes(self):  # allocation and Sum over many sizes, including boundary sizes around 256
+        sizes = [ 1, 2, 3, 97, 255, 256, 257, 947 ]
+        for w in sizes:
+            for h in sizes:
+                # Create an IplImage
+                im = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
+                cv.Set(im, 1)
+                self.assert_(cv.Sum(im)[0] == (w * h))  # all-ones image sums to pixel count
+                del im
+                # Create a CvMat
+                mt = cv.CreateMat(h, w, cv.CV_8UC1)
+                cv.Set(mt, 1)
+                self.assert_(cv.Sum(mt)[0] == (w * h))
+
+        random.seed(7)  # deterministic shapes
+        for dim in range(1, cv.CV_MAX_DIM + 1):
+            for attempt in range(10):
+                dims = [ random.choice([1,1,1,1,2,3]) for i in range(dim) ]  # biased toward size-1 axes
+                mt = cv.CreateMatND(dims, cv.CV_8UC1)
+                cv.SetZero(mt)
+                self.assert_(cv.Sum(mt)[0] == 0)
+                # Set to all-ones, verify the sum
+                cv.Set(mt, 1)
+                expected = 1
+                for d in dims:
+                    expected *= d
+                self.assert_(cv.Sum(mt)[0] == expected)
+
+    def test_random(self):  # RNG seeding, RandArr over modes/types, and RandReal range
+        seeds = [ 0, 1, 2**48, 2**48 + 1 ]
+        sequences = set()
+        for s in seeds:
+            rng = cv.RNG(s)
+            sequences.add(str([cv.RandInt(rng) for i in range(10)]))
+        self.assert_(len(seeds) == len(sequences))  # distinct seeds give distinct sequences
+
+        rng = cv.RNG(0)
+        im = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
+        cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 256)
+        cv.RandArr(rng, im, cv.CV_RAND_NORMAL, 128, 30)
+        if 1:
+            hist = cv.CreateHist([ 256 ], cv.CV_HIST_ARRAY, [ (0,255) ], 1)
+            cv.CalcHist([im], hist)  # smoke: histogram of the random image
+
+        rng = cv.RNG()
+        for i in range(1000):
+            v = cv.RandReal(rng)
+            self.assert_(0 <= v)  # RandReal stays in [0, 1)
+            self.assert_(v < 1)
+
+        for mode in [ cv.CV_RAND_UNI, cv.CV_RAND_NORMAL ]:
+            for fmt in self.mat_types:
+                mat = cv.CreateMat(64, 64, fmt)
+                cv.RandArr(cv.RNG(), mat, mode, (0,0,0,0), (1,1,1,1))
+
+    def test_MixChannels(self):  # cv.MixChannels: documented example, then randomized channel-routing checks
+
+        # First part - test the single case described in the documentation
+        rgba = cv.CreateMat(100, 100, cv.CV_8UC4)
+        bgr = cv.CreateMat(100, 100, cv.CV_8UC3)
+        alpha = cv.CreateMat(100, 100, cv.CV_8UC1)
+        cv.Set(rgba, (1,2,3,4))
+        cv.MixChannels([rgba], [bgr, alpha], [
+           (0, 2),    # rgba[0] -> bgr[2]
+           (1, 1),    # rgba[1] -> bgr[1]
+           (2, 0),    # rgba[2] -> bgr[0]
+           (3, 3)     # rgba[3] -> alpha[0]
+        ])
+        self.assert_(bgr[0,0] == (3,2,1))
+        self.assert_(alpha[0,0] == 4)
+
+        # Second part.  Choose random sets of sources and destinations,
+        # fill them with known values, choose random channel assignments,
+        # run cvMixChannels and check that the result is as expected.
+
+        random.seed(1)  # deterministic test
+
+        for rows in [1,2,4,13,64,1000]:
+            for cols in [1,2,4,13,64,1000]:
+                for loop in range(5):
+                    sources = [random.choice([1, 2, 3, 4]) for i in range(8)]  # channel counts of 8 source mats
+                    dests = [random.choice([1, 2, 3, 4]) for i in range(8)]    # channel counts of 8 dest mats
+                    # make sure that fromTo does not have duplicates in dests, otherwise the result is not determined
+                    while 1:
+                        fromTo = [(random.randrange(-1, sum(sources)), random.randrange(sum(dests))) for i in range(random.randrange(1, 30))]  # -1 source index means "fill with zero"
+                        dests_set = list(set([j for (i, j) in fromTo]))
+                        if len(dests_set) == len(dests):
+                            break
+
+                    # print sources
+                    # print dests
+                    # print fromTo
+
+                    def CV_8UC(n):  # map channel count 1..4 to the matching CV_8UCn type
+                        return [cv.CV_8UC1, cv.CV_8UC2, cv.CV_8UC3, cv.CV_8UC4][n-1]
+                    source_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in sources]
+                    dest_m =   [cv.CreateMat(rows, cols, CV_8UC(c)) for c in dests]
+
+                    def m00(m):
+                        # return the contents of the N channel mat m[0,0] as a N-length list
+                        chans = cv.CV_MAT_CN(cv.GetElemType(m))
+                        if chans == 1:
+                            return [m[0,0]]
+                        else:
+                            return list(m[0,0])[:chans]
+
+                    # Sources numbered from 50, destinations numbered from 100
+
+                    for i in range(len(sources)):
+                        s = sum(sources[:i]) + 50  # each flat source channel gets a unique value
+                        cv.Set(source_m[i], (s, s+1, s+2, s+3))
+                        self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]])
+
+                    for i in range(len(dests)):
+                        s = sum(dests[:i]) + 100  # each flat dest channel gets a unique value
+                        cv.Set(dest_m[i], (s, s+1, s+2, s+3))
+                        self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]])
+
+                    # now run the sanity check
+
+                    for i in range(len(sources)):
+                        s = sum(sources[:i]) + 50
+                        self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]])
+
+                    for i in range(len(dests)):
+                        s = sum(dests[:i]) + 100
+                        self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]])
+
+                    cv.MixChannels(source_m, dest_m, fromTo)
+
+                    expected = range(100, 100 + sum(dests))  # untouched dest channels keep their fill value
+                    for (i, j) in fromTo:
+                        if i == -1:
+                            expected[j] = 0.0  # -1 source zeroes the dest channel
+                        else:
+                            expected[j] = 50 + i
+
+                    actual = sum([m00(m) for m in dest_m], [])  # flatten all dest channels
+                    self.assertEqual(sum([m00(m) for m in dest_m], []), expected)
+
+    def test_allocs(self):  # allocation churn: repeatedly create mats while older ones drop out of scope
+        mats = [ 0 for i in range(20) ]  # pool of 20 slots; overwriting a slot releases its mat
+        for i in range(1000):
+            m = cv.CreateMat(random.randrange(10, 512), random.randrange(10, 512), cv.CV_8UC1)
+            j = random.randrange(len(mats))
+            mats[j] = m
+            cv.SetZero(m)  # touch the memory to catch bad allocations
+
+    def test_access(self):  # element read/write round-trip in random order, then row-slice dimensions
+        cnames = { 1:cv.CV_32FC1, 2:cv.CV_32FC2, 3:cv.CV_32FC3, 4:cv.CV_32FC4 }
+
+        for w in range(1,11):
+            for h in range(2,11):
+                for c in [1,2]:
+                    for o in [ cv.CreateMat(h, w, cnames[c]), cv.CreateImage((w,h), cv.IPL_DEPTH_32F, c) ][1:]:  # [1:] exercises only the IplImage here
+                        pattern = [ (i,j) for i in range(w) for j in range(h) ]
+                        random.shuffle(pattern)  # write in random order
+                        for k,(i,j) in enumerate(pattern):
+                            if c == 1:
+                                o[j,i] = k
+                            else:
+                                o[j,i] = (k,) * c  # multichannel write takes a tuple
+                        for k,(i,j) in enumerate(pattern):
+                            if c == 1:
+                                self.assert_(o[j,i] == k)
+                            else:
+                                self.assert_(o[j,i] == (k,)*c)
+
+        test_mat = cv.CreateMat(2, 3, cv.CV_32FC1)
+        cv.SetData(test_mat, array.array('f', range(6)), 12)  # step = 3 floats * 4 bytes
+        self.assertEqual(cv.GetDims(test_mat[0]), (1, 3))  # single-row slices are (1, cols)
+        self.assertEqual(cv.GetDims(test_mat[1]), (1, 3))
+        self.assertEqual(cv.GetDims(test_mat[0:1]), (1, 3))
+        self.assertEqual(cv.GetDims(test_mat[1:2]), (1, 3))
+        self.assertEqual(cv.GetDims(test_mat[-1:]), (1, 3))
+        self.assertEqual(cv.GetDims(test_mat[-1]), (1, 3))  # negative indexing too
+
+    def xxxtest_corners(self):  # disabled ("xxx" prefix): manual corner/Hough experiment needing local image files
+        a = cv.LoadImage("foo-mono.png", 0)  # local file, not in the repository
+        cv.AdaptiveThreshold(a, a, 255, param1=5)
+        scribble = cv.CreateImage(cv.GetSize(a), 8, 3)
+        cv.CvtColor(a, scribble, cv.CV_GRAY2BGR)
+        if 0:
+            eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
+            temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
+            pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, use_harris=1)
+            for p in pts:
+                cv.Circle( scribble, p, 1, cv.RGB(255,0,0), -1 )
+            self.snap(scribble)
+        canny = cv.CreateImage(cv.GetSize(a), 8, 1)
+        cv.SubRS(a, 255, canny)  # invert the thresholded image
+        self.snap(canny)
+        li = cv.HoughLines2(canny,
+                                                cv.CreateMemStorage(),
+                                                cv.CV_HOUGH_STANDARD,
+                                                1,
+                                                math.pi/180,
+                                                60,
+                                                0,
+                                                0)
+        for (rho,theta) in li:  # draw each detected line in rho/theta form
+            print rho,theta
+            c = math.cos(theta)
+            s = math.sin(theta)
+            x0 = c*rho
+            y0 = s*rho
+            cv.Line(scribble,
+                            (x0 + 1000*(-s), y0 + 1000*c),
+                            (x0 + -1000*(-s), y0 - 1000*c),
+                            (0,255,0))
+        self.snap(scribble)
+
+    def test_calibration(self):  # end-to-end mono + stereo chessboard calibration on a downloaded dataset
+
+        def get_corners(mono, refine = False):  # chessboard corner detection, optionally sub-pixel refined
+            (ok, corners) = cv.FindChessboardCorners(mono, (num_x_ints, num_y_ints), cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE)
+            if refine and ok:
+                corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 ))
+            return (ok, corners)
+
+        def mk_object_points(nimages, squaresize = 1):  # planar (z=0) board coordinates, stacked per image
+            opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
+            for i in range(nimages):
+                for j in range(num_pts):
+                    opts[i * num_pts + j, 0] = (j / num_x_ints) * squaresize
+                    opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize
+                    opts[i * num_pts + j, 2] = 0
+            return opts
+
+        def mk_image_points(goodcorners):  # detected 2D corners, stacked per image
+            ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1)
+            for (i, co) in enumerate(goodcorners):
+                for j in range(num_pts):
+                    ipts[i * num_pts + j, 0] = co[j][0]
+                    ipts[i * num_pts + j, 1] = co[j][1]
+            return ipts
+
+        def mk_point_counts(nimages):  # per-image corner counts (all equal to num_pts)
+            npts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
+            for i in range(nimages):
+                npts[i, 0] = num_pts
+            return npts
+
+        def cvmat_iterator(cvmat):  # flat iterator over a 2D CvMat, row-major
+            for i in range(cvmat.rows):
+                for j in range(cvmat.cols):
+                    yield cvmat[i,j]
+
+        def image_from_archive(tar, name):  # decode one image straight from the tarball, no temp files
+            member = tar.getmember(name)
+            filedata = tar.extractfile(member).read()
+            imagefiledata = cv.CreateMat(1, len(filedata), cv.CV_8UC1)
+            cv.SetData(imagefiledata, filedata, len(filedata))
+            return cv.DecodeImageM(imagefiledata)
+
+        urllib.urlretrieve("http://pr.willowgarage.com/data/camera_calibration/camera_calibration.tar.gz", "camera_calibration.tar.gz")  # NOTE(review): network dependency; test fails offline
+        tf = tarfile.open("camera_calibration.tar.gz")
+
+        num_x_ints = 8
+        num_y_ints = 6
+        num_pts = num_x_ints * num_y_ints
+
+        leftimages = [image_from_archive(tf, "wide/left%04d.pgm" % i) for i in range(3, 15)]
+        size = cv.GetSize(leftimages[0])
+
+        # Monocular test
+
+        if True:
+            corners = [get_corners(i) for i in leftimages]
+            goodcorners = [co for (im, (ok, co)) in zip(leftimages, corners) if ok]  # keep images where the full board was found
+
+            ipts = mk_image_points(goodcorners)
+            opts = mk_object_points(len(goodcorners), .1)
+            npts = mk_point_counts(len(goodcorners))
+
+            intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
+            distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
+            cv.SetZero(intrinsics)
+            cv.SetZero(distortion)
+            # focal lengths have 1/1 ratio
+            intrinsics[0,0] = 1.0
+            intrinsics[1,1] = 1.0
+            cv.CalibrateCamera2(opts, ipts, npts,
+                       cv.GetSize(leftimages[0]),
+                       intrinsics,
+                       distortion,
+                       cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
+                       cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
+                       flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
+            # print "D =", list(cvmat_iterator(distortion))
+            # print "K =", list(cvmat_iterator(intrinsics))
+
+            newK = cv.CreateMat(3, 3, cv.CV_64FC1)
+            cv.GetOptimalNewCameraMatrix(intrinsics, distortion, size, 1.0, newK)
+            # print "newK =", list(cvmat_iterator(newK))
+
+            mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
+            mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
+            for K in [ intrinsics, newK ]:  # undistort with both the raw and the optimal camera matrix
+                cv.InitUndistortMap(K, distortion, mapx, mapy)
+                for img in leftimages[:1]:
+                    r = cv.CloneMat(img)
+                    cv.Remap(img, r, mapx, mapy)
+                    # cv.ShowImage("snap", r)
+                    # cv.WaitKey()
+
+        rightimages = [image_from_archive(tf, "wide/right%04d.pgm" % i) for i in range(3, 15)]
+
+        # Stereo test
+
+        if True:
+            lcorners = [get_corners(i) for i in leftimages]
+            rcorners = [get_corners(i) for i in rightimages]
+            good = [(lco, rco) for ((lok, lco), (rok, rco)) in zip(lcorners, rcorners) if (lok and rok)]  # pairs where both views found the board
+
+            lipts = mk_image_points([l for (l, r) in good])
+            ripts = mk_image_points([r for (l, r) in good])
+            opts = mk_object_points(len(good), .108)
+            npts = mk_point_counts(len(good))
+
+            flags = cv.CV_CALIB_FIX_ASPECT_RATIO | cv.CV_CALIB_FIX_INTRINSIC
+            flags = cv.CV_CALIB_SAME_FOCAL_LENGTH + cv.CV_CALIB_FIX_PRINCIPAL_POINT + cv.CV_CALIB_ZERO_TANGENT_DIST
+            flags = 0  # earlier flag combinations above are overwritten; only 0 is used
+
+            T = cv.CreateMat(3, 1, cv.CV_64FC1)
+            R = cv.CreateMat(3, 3, cv.CV_64FC1)
+            lintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
+            ldistortion = cv.CreateMat(4, 1, cv.CV_64FC1)
+            rintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
+            rdistortion = cv.CreateMat(4, 1, cv.CV_64FC1)
+            lR = cv.CreateMat(3, 3, cv.CV_64FC1)
+            rR = cv.CreateMat(3, 3, cv.CV_64FC1)
+            lP = cv.CreateMat(3, 4, cv.CV_64FC1)
+            rP = cv.CreateMat(3, 4, cv.CV_64FC1)
+            lmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
+            lmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
+            rmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
+            rmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
+
+            cv.SetIdentity(lintrinsics)
+            cv.SetIdentity(rintrinsics)
+            lintrinsics[0,2] = size[0] * 0.5  # principal point initialized to the image center
+            lintrinsics[1,2] = size[1] * 0.5
+            rintrinsics[0,2] = size[0] * 0.5
+            rintrinsics[1,2] = size[1] * 0.5
+            cv.SetZero(ldistortion)
+            cv.SetZero(rdistortion)
+
+            cv.StereoCalibrate(opts, lipts, ripts, npts,
+                               lintrinsics, ldistortion,
+                               rintrinsics, rdistortion,
+                               size,
+                               R,                                  # R
+                               T,                                  # T
+                               cv.CreateMat(3, 3, cv.CV_32FC1),    # E
+                               cv.CreateMat(3, 3, cv.CV_32FC1),    # F
+                               (cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS, 30, 1e-5),
+                               flags)
+
+            for a in [-1, 0, 1]:  # sweep the rectification alpha (crop) parameter
+                cv.StereoRectify(lintrinsics,
+                                 rintrinsics,
+                                 ldistortion,
+                                 rdistortion,
+                                 size,
+                                 R,
+                                 T,
+                                 lR, rR, lP, rP,
+                                 alpha = a)
+
+                cv.InitUndistortRectifyMap(lintrinsics, ldistortion, lR, lP, lmapx, lmapy)
+                cv.InitUndistortRectifyMap(rintrinsics, rdistortion, rR, rP, rmapx, rmapy)
+
+                for l,r in zip(leftimages, rightimages)[:1]:  # rectify just the first pair as a smoke check
+                    l_ = cv.CloneMat(l)
+                    r_ = cv.CloneMat(r)
+                    cv.Remap(l, l_, lmapx, lmapy)
+                    cv.Remap(r, r_, rmapx, rmapy)
+                    # cv.ShowImage("snap", l_)
+                    # cv.WaitKey()
+
+
+    def xxx_test_Disparity(self):  # disabled ("xxx_" prefix): prints type constants, then unreachable disparity timing code
+        print
+        for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F" ]:
+          for c in [1,2,3,4]:
+            nm = "%sC%d" % (t, c)
+            print "int32 CV_%s=%d" % (nm, eval("cv.CV_%s" % nm))
+        return  # everything below is dead code kept for manual experiments
+        integral = cv.CreateImage((641,481), cv.IPL_DEPTH_32S, 1)
+        L = cv.LoadImage("f0-left.png", 0)
+        R = cv.LoadImage("f0-right.png", 0)
+        d = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1)
+        Rn = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1)
+        started = time.time()
+        for i in range(100):
+            cv.AbsDiff(L, R, d)
+            cv.Integral(d, integral)
+            cv.SetImageROI(R, (1, 1, 639, 479))
+            cv.SetImageROI(Rn, (0, 0, 639, 479))
+            cv.Copy(R, Rn)
+            R = Rn
+            cv.ResetImageROI(R)
+        print 1e3 * (time.time() - started) / 100, "ms"
+        # self.snap(d)
+
+    def local_test_lk(self):  # manual-only ("local_" prefix): Lucas-Kanade tracking over a local image sequence
+        seq = [cv.LoadImage("track/%06d.png" % i, 0) for i in range(40)]  # local files, not in the repository
+        crit = (cv.CV_TERMCRIT_ITER, 100, 0.1)
+        crit = (cv.CV_TERMCRIT_EPS, 0, 0.001)  # overrides the line above; only the EPS criterion is used
+
+        for i in range(1,40):
+            r = cv.CalcOpticalFlowPyrLK(seq[0], seq[i], None, None, [(32,32)], (7,7), 0, crit, 0)
+            pos = r[0][0]  # tracked position of the single input point
+            #print pos, r[2]
+
+            a = cv.CreateImage((1024,1024), 8, 1)
+            b = cv.CreateImage((1024,1024), 8, 1)
+            cv.Resize(seq[0], a, cv.CV_INTER_NN)
+            cv.Resize(seq[i], b, cv.CV_INTER_NN)
+            cv.Line(a, (0, 512), (1024, 512), 255)
+            cv.Line(a, (512,0), (512,1024), 255)
+            x,y = [int(c) for c in pos]
+            cv.Line(b, (0, y*16), (1024, y*16), 255)  # 16x because the images were upscaled
+            cv.Line(b, (x*16,0), (x*16,1024), 255)
+            #self.snapL([a,b])
+
+
+
+    def local_test_Haar(self):  # manual-only ("local_" prefix): Haar face detection using an installed cascade
+        import os
+        hcfile = os.environ['OPENCV_ROOT'] + '/share/opencv/haarcascades/haarcascade_frontalface_default.xml'  # requires OPENCV_ROOT to be set
+        hc = cv.Load(hcfile)
+        img = cv.LoadImage('Stu.jpg', 0)  # local file, not in the repository
+        faces = cv.HaarDetectObjects(img, hc, cv.CreateMemStorage())
+        self.assert_(len(faces) > 0)
+        for (x,y,w,h),n in faces:
+            cv.Rectangle(img, (x,y), (x+w,y+h), 255)
+        #self.snap(img)
+
+    def test_create(self):  # CreateImage/CreateMat vs. their header-only forms give identical pixels over shared data
+        # """ CvCreateImage, CvCreateMat and the header-only form """
+        for (w,h) in [ (320,400), (640,480), (1024, 768) ]:
+            data = "z" * (w * h)  # one shared byte buffer backs every container below
+
+            im = cv.CreateImage((w,h), 8, 1)
+            cv.SetData(im, data, w)
+            im2 = cv.CreateImageHeader((w,h), 8, 1)  # header only: no pixel allocation
+            cv.SetData(im2, data, w)
+            self.assertSame(im, im2)
+
+            m = cv.CreateMat(h, w, cv.CV_8UC1)
+            cv.SetData(m, data, w)
+            m2 = cv.CreateMatHeader(h, w, cv.CV_8UC1)  # header only: no pixel allocation
+            cv.SetData(m2, data, w)
+            self.assertSame(m, m2)
+
+            self.assertSame(im, m)  # image and mat views of the same data agree
+            self.assertSame(im2, m2)
+
+
+    def test_casts(self):  # GetImage/GetMat conversions share data; refcount of the buffer tracks live views
+        im = cv.GetImage(self.get_sample("samples/c/lena.jpg", 0))
+        data = im.tostring()
+        cv.SetData(im, data, cv.GetSize(im)[0])  # rebind im onto a Python string we can refcount
+
+        start_count = sys.getrefcount(data)
+
+        # Conversions should produce same data
+        self.assertSame(im, cv.GetImage(im))
+        m = cv.GetMat(im)
+        self.assertSame(im, m)
+        self.assertSame(m, cv.GetImage(m))
+        im2 = cv.GetImage(m)
+        self.assertSame(im, im2)
+
+        self.assertEqual(sys.getrefcount(data), start_count + 2)  # m and im2 each hold a reference
+        del im2
+        self.assertEqual(sys.getrefcount(data), start_count + 1)
+        del m
+        self.assertEqual(sys.getrefcount(data), start_count)
+        del im
+        self.assertEqual(sys.getrefcount(data), start_count - 1)  # original holder released too
+
+    def test_morphological(self):  # Dilate default-argument equivalence, then Erode/MorphologyEx over many structuring elements
+        im = cv.CreateImage((128, 128), cv.IPL_DEPTH_8U, 1)
+        cv.Resize(cv.GetImage(self.get_sample("samples/c/lena.jpg", 0)), im)
+        dst = cv.CloneImage(im)
+
+        # Check defaults by asserting that all these operations produce the same image
+        funs = [
+            lambda: cv.Dilate(im, dst),
+            lambda: cv.Dilate(im, dst, None),
+            lambda: cv.Dilate(im, dst, iterations = 1),
+            lambda: cv.Dilate(im, dst, element = None),
+            lambda: cv.Dilate(im, dst, iterations = 1, element = None),
+            lambda: cv.Dilate(im, dst, element = None, iterations = 1),
+        ]
+        src_h = self.hashimg(im)
+        hashes = set()
+        for f in funs:
+            f()
+            hashes.add(self.hashimg(dst))
+            self.assertNotEqual(src_h, self.hashimg(dst))  # dilation must actually change the image
+        # Source image should be untouched
+        self.assertEqual(self.hashimg(im), src_h)
+        # All results should be same
+        self.assertEqual(len(hashes), 1)
+
+        # self.snap(dst)
+        shapes = [eval("cv.CV_SHAPE_%s" % s) for s in ['RECT', 'CROSS', 'ELLIPSE']]
+        elements = [cv.CreateStructuringElementEx(sz, sz, sz / 2 + 1, sz / 2 + 1, shape) for sz in [3, 4, 7, 20] for shape in shapes]
+        elements += [cv.CreateStructuringElementEx(7, 7, 3, 3, cv.CV_SHAPE_CUSTOM, [1] * 49)]  # custom all-ones 7x7 kernel
+        for e in elements:
+            for iter in [1, 2]:
+                cv.Dilate(im, dst, e, iter)
+                cv.Erode(im, dst, e, iter)
+                temp = cv.CloneImage(im)
+                for op in ["OPEN", "CLOSE", "GRADIENT", "TOPHAT", "BLACKHAT"]:
+                        cv.MorphologyEx(im, dst, temp, e, eval("cv.CV_MOP_%s" % op), iter)
+
+    def test_getmat_nd(self):  # GetMat(allowND=True) dimension mapping for 1D/2D (3D case disabled, see ticket)
+        # 1D CvMatND should yield (N,1) CvMat
+        matnd = cv.CreateMatND([13], cv.CV_8UC1)
+        self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (13, 1))
+
+        # 2D CvMatND should yield 2D CvMat
+        matnd = cv.CreateMatND([11, 12], cv.CV_8UC1)
+        self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (11, 12))
+
+        if 0: # XXX - ticket #149
+            # 3D CvMatND should yield (N,1) CvMat
+            matnd = cv.CreateMatND([7, 8, 9], cv.CV_8UC1)
+            self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (7 * 8 * 9, 1))
+
+    def test_clipline(self):
+        # ClipLine clips a segment to the image rectangle; it returns the
+        # clipped endpoints, or None when the segment lies entirely outside.
+        self.assert_(cv.ClipLine((100,100), (-100,0), (500,0)) == ((0,0), (99,0)))
+        self.assert_(cv.ClipLine((100,100), (-100,0), (-200,0)) == None)
+
+    def test_smoke_image_processing(self):
+        # Smoke-test derivative and corner operators over all valid
+        # aperture sizes; no output values are checked, only "does not crash".
+        src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
+        #dst = cv.CloneImage(src)
+        for aperture_size in [1, 3, 5, 7]:
+          dst_16s = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_16S, 1)
+          dst_32f = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_32F, 1)
+
+          cv.Sobel(src, dst_16s, 1, 1, aperture_size)
+          cv.Laplace(src, dst_16s, aperture_size)
+          cv.PreCornerDetect(src, dst_32f)
+          # destination is 6x wider: CornerEigenValsAndVecs stores 6 floats
+          # per source pixel
+          eigendst = cv.CreateImage((6*cv.GetSize(src)[0], cv.GetSize(src)[1]), cv.IPL_DEPTH_32F, 1)
+          cv.CornerEigenValsAndVecs(src, eigendst, 8, aperture_size)
+          cv.CornerMinEigenVal(src, dst_32f, 8, aperture_size)
+          cv.CornerHarris(src, dst_32f, 8, aperture_size)
+          cv.CornerHarris(src, dst_32f, 8, aperture_size, 0.1)
+
+        #self.snap(dst)
+
+    def test_fitline(self):
+        # FitLine must accept 2D and 3D point lists, and the CvSeq returned
+        # by ConvexHull2 on real feature points.
+        cv.FitLine([ (1,1), (10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01)
+        cv.FitLine([ (1,1,1), (10,10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01)
+        a = self.get_sample("samples/c/lena.jpg", 0)
+        eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
+        temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
+        pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, useHarris=1)
+        hull = cv.ConvexHull2(pts, cv.CreateMemStorage(), return_points = 1)
+        cv.FitLine(hull, cv.CV_DIST_L2, 0, 0.01, 0.01)
+
+    def test_moments(self):
+        # Check that every cv.Moments field is a float, smoke-test the three
+        # moment accessors, and verify the Hu moments' behaviour under
+        # horizontal reflection.
+        im = self.get_sample("samples/c/lena.jpg", 0)
+        mo = cv.Moments(im)
+        for fld in ["m00", "m10", "m01", "m20", "m11", "m02", "m30", "m21", "m12", "m03", "mu20", "mu11", "mu02", "mu30", "mu21", "mu12", "mu03", "inv_sqrt_m00"]:
+            self.assert_(isinstance(getattr(mo, fld), float))
+            x = getattr(mo, fld)
+            self.assert_(isinstance(x, float))
+
+        # All (x_order, y_order) pairs with x_order + y_order <= 3
+        orders = []
+        for x_order in range(4):
+          for y_order in range(4 - x_order):
+            orders.append((x_order, y_order))
+
+        # Just a smoke test for these three functions
+        [ cv.GetSpatialMoment(mo, xo, yo) for (xo,yo) in orders ]
+        [ cv.GetCentralMoment(mo, xo, yo) for (xo,yo) in orders ]
+        [ cv.GetNormalizedCentralMoment(mo, xo, yo) for (xo,yo) in orders ]
+
+        # Hu Moments we can do slightly better.  Check that the first
+        # six are invariant wrt image reflection, and that the 7th
+        # is negated.
+
+        hu0 = cv.GetHuMoments(cv.Moments(im))
+        cv.Flip(im, im, 1)
+        hu1 = cv.GetHuMoments(cv.Moments(im))
+        self.assert_(len(hu0) == 7)
+        self.assert_(len(hu1) == 7)
+        # BUGFIX: was range(5) followed by a sign check that reused the stale
+        # loop index (i == 4); check all six invariants and the 7th's sign.
+        for i in range(6):
+          self.assert_(abs(hu0[i] - hu1[i]) < 1e-6)
+        self.assert_(abs(hu0[6] + hu1[6]) < 1e-6)
+
+    def test_encode(self):
+        # EncodeImage/DecodeImage: JPEG quality parameter, its default value,
+        # and a decode round-trip preserving the image size.
+        im = self.get_sample("samples/c/lena.jpg", 1)
+        jpeg = cv.EncodeImage(".jpeg", im)
+
+        # Smoke jpeg compression at various qualities
+        sizes = dict([(qual, cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, qual]).cols) for qual in range(5, 100, 5)])
+
+        # Check that the default QUALITY is 95
+        self.assertEqual(cv.EncodeImage(".jpeg", im).cols, sizes[95])
+
+        # Check that the 'round-trip' gives an image of the same size
+        round_trip = cv.DecodeImage(cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, 10]))
+        self.assert_(cv.GetSize(round_trip) == cv.GetSize(im))
+
+    def test_reduce(self):
+        # cv.Reduce over a known 2x3 matrix: exercise the `dim` argument
+        # (row vs column reduction) and every CV_REDUCE_* op.
+        srcmat = cv.CreateMat(2, 3, cv.CV_32FC1)
+        # 0 1 2
+        # 3 4 5
+        srcmat[0,0] = 0
+        srcmat[0,1] = 1
+        srcmat[0,2] = 2
+        srcmat[1,0] = 3
+        srcmat[1,1] = 4
+        srcmat[1,2] = 5
+        def doreduce(siz, rfunc):
+            # Run rfunc into a fresh (siz[0], siz[1]) destination and return
+            # the single row or column as a flat Python list.
+            dst = cv.CreateMat(siz[0], siz[1], cv.CV_32FC1)
+            rfunc(dst)
+            if siz[0] != 1:
+                return [dst[i,0] for i in range(siz[0])]
+            else:
+                return [dst[0,i] for i in range(siz[1])]
+
+        # exercise dim
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst)), [3, 5, 7])
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, -1)), [3, 5, 7])
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0)), [3, 5, 7])
+        self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1)), [3, 12])
+
+        # exercise op
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_SUM)), [3, 5, 7])
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_AVG)), [1.5, 2.5, 3.5])
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MAX)), [3, 4, 5])
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MIN)), [0, 1, 2])
+
+        # exercise both dim and op
+        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0, cv.CV_REDUCE_MAX)), [3, 4, 5])
+        self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1, cv.CV_REDUCE_MAX)), [2, 5])
+
+    def test_operations(self):
+        # Verify cv arithmetic (Add/Sub/Mul/Div/Pow/Abs and their scalar
+        # variants) elementwise against plain Python float arithmetic.
+        class Im:
+            # Wraps a 1x32 CV_32FC1 row vector and maps Python operators
+            # onto the corresponding cv calls.
+
+            def __init__(self, data = None):
+                self.m = cv.CreateMat(1, 32, cv.CV_32FC1)
+                if data:
+                    # step of 128 bytes = 32 elements * 4-byte float
+                    cv.SetData(self.m, array.array('f', data), 128)
+
+            def __add__(self, other):
+                r = Im()
+                if isinstance(other, Im):
+                    cv.Add(self.m, other.m, r.m)
+                else:
+                    cv.AddS(self.m, (other,), r.m)
+                return r
+
+            def __sub__(self, other):
+                r = Im()
+                if isinstance(other, Im):
+                    cv.Sub(self.m, other.m, r.m)
+                else:
+                    cv.SubS(self.m, (other,), r.m)
+                return r
+
+            def __rsub__(self, other):
+                # scalar - Im, via the reversed-subtract primitive
+                r = Im()
+                cv.SubRS(self.m, (other,), r.m)
+                return r
+
+            def __mul__(self, other):
+                r = Im()
+                if isinstance(other, Im):
+                    cv.Mul(self.m, other.m, r.m)
+                else:
+                    # scalar multiply via ConvertScale's scale factor
+                    cv.ConvertScale(self.m, r.m, other)
+                return r
+
+            def __rmul__(self, other):
+                r = Im()
+                cv.ConvertScale(self.m, r.m, other)
+                return r
+
+            def __div__(self, other):
+                r = Im()
+                if isinstance(other, Im):
+                    cv.Div(self.m, other.m, r.m)
+                else:
+                    # scalar divide = multiply by the reciprocal
+                    cv.ConvertScale(self.m, r.m, 1.0 / other)
+                return r
+
+            def __pow__(self, other):
+                r = Im()
+                cv.Pow(self.m, r.m, other)
+                return r
+
+            def __abs__(self):
+                r = Im()
+                cv.Abs(self.m, r.m)
+                return r
+
+            def __getitem__(self, i):
+                return self.m[0,i]
+
+        def verify(op):
+            # Apply op to the Im wrappers and, per element, to the raw
+            # floats; the two must agree to ~4 decimal places.
+            r = op(a, b)
+            for i in range(32):
+                expected = op(a[i], b[i])
+                self.assertAlmostEqual(expected, r[i], 4)
+
+        a = Im([random.randrange(1, 256) for i in range(32)])
+        b = Im([random.randrange(1, 256) for i in range(32)])
+
+        # simple operations first
+        verify(lambda x, y: x + y)
+        verify(lambda x, y: x + 3)
+        verify(lambda x, y: x + 0)
+        verify(lambda x, y: x + -8)
+
+        verify(lambda x, y: x - y)
+        verify(lambda x, y: x - 1)
+        verify(lambda x, y: 1 - x)
+
+        verify(lambda x, y: abs(x))
+
+        verify(lambda x, y: x * y)
+        verify(lambda x, y: x * 3)
+
+        verify(lambda x, y: x / y)
+        verify(lambda x, y: x / 2)
+
+        for p in [-2, -1, -0.5, -0.1, 0, 0.1, 0.5, 1, 2 ]:
+            verify(lambda x, y: (x ** p) + (y ** p))
+
+        # Combinations...
+        verify(lambda x, y: x - 4 * abs(y))
+        verify(lambda x, y: abs(y) / x)
+
+        # a polynomial
+        verify(lambda x, y: 2 * x + 3 * (y ** 0.5))
+
+    def temp_test(self):
+        # Scratch hook: not auto-discovered by unittest (no "test" prefix);
+        # forwards to the C-side cv.temp_test helper when run explicitly.
+        cv.temp_test()
+
+    def failing_test_rand_GetStarKeypoints(self):
+        # Known-failing repro kept out of the default run by the "failing_"
+        # prefix: GetStarKeypoints on a random 2-channel matrix.
+        # GetStarKeypoints [<cvmat(type=4242400d rows=64 cols=64 step=512 )>, <cv.cvmemstorage object at 0xb7cc40d0>, (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3)]
+        print cv.CV_MAT_CN(0x4242400d)
+        mat = cv.CreateMat( 64, 64, cv.CV_32FC2)
+        cv.GetStarKeypoints(mat, cv.CreateMemStorage(), (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3))
+        print mat
+
+    def test_rand_PutText(self):
+        # Regression test for bug 2829336: PutText with chr(127) must not
+        # crash.  (Original comment line used a stray tab indent.)
+        mat = cv.CreateMat( 64, 64, cv.CV_8UC1)
+        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1)
+        cv.PutText(mat, chr(127), (20, 20), font, 255)
+
+    def failing_test_rand_FindNearestPoint2D(self):
+        # Known-failing repro (excluded from the default run): build a tiny
+        # Delaunay subdivision, dump its edges, then query FindNearestPoint2D.
+        subdiv = cv.CreateSubdivDelaunay2D((0,0,100,100), cv.CreateMemStorage())
+        cv.SubdivDelaunay2DInsert( subdiv, (50, 50))
+        cv.CalcSubdivVoronoi2D(subdiv)
+        print
+        for e in subdiv.edges:
+            print e,
+            print "  ", cv.Subdiv2DEdgeOrg(e)
+            print "  ", cv.Subdiv2DEdgeOrg(cv.Subdiv2DRotateEdge(e, 1)), cv.Subdiv2DEdgeDst(cv.Subdiv2DRotateEdge(e, 1))
+        print "nearest", cv.FindNearestPoint2D(subdiv, (1.0, 1.0))
+
+class DocumentFragmentTests(OpenCVTests):
+    """ Test the fragments of code that are included in the documentation """
+    def setUp(self):
+        OpenCVTests.setUp(self)
+        # the doc fragments live alongside the docs, not on the default path
+        sys.path.append("../doc/python_fragments")
+
+    def test_precornerdetect(self):
+        # The precornerdetect fragment must return two maps the same size
+        # as its input.
+        from precornerdetect import precornerdetect
+        im = self.get_sample("samples/c/right01.jpg", 0)
+        imf = cv.CreateMat(im.rows, im.cols, cv.CV_32FC1)
+        cv.ConvertScale(im, imf)
+        (r0,r1) = precornerdetect(imf)
+        for r in (r0, r1):
+            self.assertEqual(im.cols, r.cols)
+            self.assertEqual(im.rows, r.rows)
+
+    def test_findstereocorrespondence(self):
+        # Smoke-test the stereo-correspondence fragment on the tsukuba pair.
+        from findstereocorrespondence import findstereocorrespondence
+        (l,r) = [self.get_sample("doc/pics/tsukuba_%s.png" % c, cv.CV_LOAD_IMAGE_GRAYSCALE) for c in "lr"]
+
+        (disparity_left, disparity_right) = findstereocorrespondence(l, r)
+
+        disparity_left_visual = cv.CreateMat(l.rows, l.cols, cv.CV_8U)
+        cv.ConvertScale(disparity_left, disparity_left_visual, -16)
+        # self.snap(disparity_left_visual)
+
+    def test_calchist(self):
+        # The H-S histogram fragment must be invariant under mirroring but
+        # must distinguish two different images.
+        from calchist import hs_histogram
+        i1 = self.get_sample("samples/c/lena.jpg")
+        i2 = self.get_sample("doc/pics/building.jpg")
+        i3 = cv.CloneMat(i1)
+        cv.Flip(i3, i3, 1)
+        h1 = hs_histogram(i1)
+        h2 = hs_histogram(i2)
+        h3 = hs_histogram(i3)
+        self.assertEqual(self.hashimg(h1), self.hashimg(h3))
+        self.assertNotEqual(self.hashimg(h1), self.hashimg(h2))
+
+class NewTests(OpenCVTests):
+
+    # Empty placeholder suite for tests that are still being written.
+    pass
+
+if __name__ == '__main__':
+    # Seed the RNG so tests that build random data are reproducible, then
+    # hand off to unittest's default discovery/runner.
+    print "testing", cv.__version__
+    random.seed(0)
+    unittest.main()
+# NOTE(review): superseded manual runner kept below for reference
+# (-l N loop count, -r shuffle, -d include DocumentFragmentTests).
+#    optlist, args = getopt.getopt(sys.argv[1:], 'l:rd')
+#    loops = 1
+#    shuffle = 0
+#    doc_frags = False
+#    for o,a in optlist:
+#        if o == '-l':
+#            loops = int(a)
+#        if o == '-r':
+#            shuffle = 1
+#        if o == '-d':
+#            doc_frags = True
+#
+#    cases = [PreliminaryTests, FunctionTests, AreaTests]
+#    if doc_frags:
+#        cases += [DocumentFragmentTests]
+#    everything = [(tc, t) for tc in cases for t in unittest.TestLoader().getTestCaseNames(tc) ]
+#    if len(args) == 0:
+#        # cases = [NewTests]
+#        args = everything
+#    else:
+#        args = [(tc, t) for (tc, t) in everything if t in args]
+#
+#    suite = unittest.TestSuite()
+#    for l in range(loops):
+#        if shuffle:
+#            random.shuffle(args)
+#        for tc,t in args:
+#            suite.addTest(tc(t))
+#    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/modules/python/test/ticket_6.py b/modules/python/test/ticket_6.py
new file mode 100644
index 000000000..5f3d015d8
--- /dev/null
+++ b/modules/python/test/ticket_6.py
@@ -0,0 +1,16 @@
+import urllib
+import cv
+import Image
+import unittest
+
+class TestLoadImage(unittest.TestCase):
+    # Ticket 6 regression: cv.LoadImage on a very large JPEG.
+    def setUp(self):
+        # Downloads the test image on every run; requires network access.
+        open("large.jpg", "w").write(urllib.urlopen("http://www.cs.ubc.ca/labs/lci/curious_george/img/ROS_bug_imgs/IMG_3560.jpg").read())
+
+    def test_load(self):
+        # PIL and OpenCV must decode the same number of pixel bytes.
+        pilim = Image.open("large.jpg")
+        cvim = cv.LoadImage("large.jpg")
+        self.assert_(len(pilim.tostring()) == len(cvim.tostring()))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/modules/python/test/tickets.py b/modules/python/test/tickets.py
new file mode 100644
index 000000000..a6f33396c
--- /dev/null
+++ b/modules/python/test/tickets.py
@@ -0,0 +1,89 @@
+import unittest
+import random
+import time
+import math
+import sys
+import array
+import os
+
+import cv
+
+def find_sample(s):
+    # Resolve a test-data filename against the known sample directories;
+    # falls back to returning the name unchanged if not found there.
+    for d in ["../samples/c/", "../doc/pics/"]:
+        path = os.path.join(d, s)
+        if os.access(path, os.R_OK):
+            return path
+    return s
+
+class TestTickets(unittest.TestCase):
+    """Interactive reproductions for externally reported bug tickets."""
+
+    def test_2542670(self):
+        # Ticket 2542670: ConvexityDefects on a hand-traced contour; draws
+        # the first two defects over the hull and contour for inspection.
+        xys = [(94, 121), (94, 122), (93, 123), (92, 123), (91, 124), (91, 125), (91, 126), (92, 127), (92, 128), (92, 129), (92, 130), (92, 131), (91, 132), (90, 131), (90, 130), (90, 131), (91, 132), (92, 133), (92, 134), (93, 135), (94, 136), (94, 137), (94, 138), (95, 139), (96, 140), (96, 141), (96, 142), (96, 143), (97, 144), (97, 145), (98, 146), (99, 146), (100, 146), (101, 146), (102, 146), (103, 146), (104, 146), (105, 146), (106, 146), (107, 146), (108, 146), (109, 146), (110, 146), (111, 146), (112, 146), (113, 146), (114, 146), (115, 146), (116, 146), (117, 146), (118, 146), (119, 146), (120, 146), (121, 146), (122, 146), (123, 146), (124, 146), (125, 146), (126, 146), (126, 145), (126, 144), (126, 143), (126, 142), (126, 141), (126, 140), (127, 139), (127, 138), (127, 137), (127, 136), (127, 135), (127, 134), (127, 133), (128, 132), (129, 132), (130, 131), (131, 130), (131, 129), (131, 128), (132, 127), (133, 126), (134, 125), (134, 124), (135, 123), (136, 122), (136, 121), (135, 121), (134, 121), (133, 121), (132, 121), (131, 121), (130, 121), (129, 121), (128, 121), (127, 121), (126, 121), (125, 121), (124, 121), (123, 121), (122, 121), (121, 121), (120, 121), (119, 121), (118, 121), (117, 121), (116, 121), (115, 121), (114, 121), (113, 121), (112, 121), (111, 121), (110, 121), (109, 121), (108, 121), (107, 121), (106, 121), (105, 121), (104, 121), (103, 121), (102, 121), (101, 121), (100, 121), (99, 121), (98, 121), (97, 121), (96, 121), (95, 121)]
+
+        #xys = xys[:12] + xys[16:]
+        pts = cv.CreateMat(len(xys), 1, cv.CV_32SC2)
+        for i,(x,y) in enumerate(xys):
+            pts[i,0] = (x, y)
+        storage = cv.CreateMemStorage()
+        hull = cv.ConvexHull2(pts, storage)
+        hullp = cv.ConvexHull2(pts, storage, return_points = 1)
+        defects = cv.ConvexityDefects(pts, hull, storage)
+
+        vis = cv.CreateImage((1000,1000), 8, 3)
+        # Bounding box of the contour, padded by 10px on every side.
+        # BUGFIX: was "(y,y)" in the y0/y1 comprehensions — the x coordinate
+        # was bound and immediately shadowed; unpack as (x,y) for clarity.
+        x0 = min([x for (x,y) in xys]) - 10
+        x1 = max([x for (x,y) in xys]) + 10
+        y0 = min([y for (x,y) in xys]) - 10
+        y1 = max([y for (x,y) in xys]) + 10
+        def xform(pt):
+            # Map a contour point into the 1000x1000 visualisation image.
+            x,y = pt
+            return (1000 * (x - x0) / (x1 - x0),
+                    1000 * (y - y0) / (y1 - y0))
+
+        for d in defects[:2]:
+            cv.Zero(vis)
+
+            # First draw the defect as a red triangle
+            cv.FillConvexPoly(vis, [xform(p) for p in d[:3]], cv.RGB(255,0,0))
+
+            # Draw the convex hull as a thick green line
+            for a,b in zip(hullp, hullp[1:]):
+                cv.Line(vis, xform(a), xform(b), cv.RGB(0,128,0), 3)
+
+            # Draw the original contour as a white line
+            for a,b in zip(xys, xys[1:]):
+                cv.Line(vis, xform(a), xform(b), (255,255,255))
+
+            self.snap(vis)
+
+    def test_2686307(self):
+        # Ticket 2686307: masked cv.Copy into a pre-filled destination.
+        lena = cv.LoadImage(find_sample("lena.jpg"), 1)
+        dst = cv.CreateImage((512,512), 8, 3)
+        cv.Set(dst, (128,192,255))
+        mask = cv.CreateImage((512,512), 8, 1)
+        cv.Zero(mask)
+        cv.Rectangle(mask, (10,10), (300,100), 255, -1)
+        cv.Copy(lena, dst, mask)
+        self.snapL([lena, dst, mask])
+        m = cv.CreateMat(480, 640, cv.CV_8UC1)
+        print "ji", m
+        print m.rows, m.cols, m.type, m.step
+
+    def snap(self, img):
+        # Show a single image and wait for a keypress.
+        self.snapL([img])
+
+    def snapL(self, L):
+        # Show each image in L in its own window, wait for a keypress,
+        # then close all windows.
+        for i,img in enumerate(L):
+            cv.NamedWindow("snap-%d" % i, 1)
+            cv.ShowImage("snap-%d" % i, img)
+        cv.WaitKey()
+        cv.DestroyAllWindows()
+
+if __name__ == '__main__':
+    random.seed(0)
+    # No CLI argument: run the whole TestTickets suite.
+    if len(sys.argv) == 1:
+        suite = unittest.TestLoader().loadTestsFromTestCase(TestTickets)
+        unittest.TextTestRunner(verbosity=2).run(suite)
+    else:
+        # One argument: run just that named test method.
+        suite = unittest.TestSuite()
+        suite.addTest(TestTickets(sys.argv[1]))
+        unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/modules/python/test/transformations.py b/modules/python/test/transformations.py
new file mode 100644
index 000000000..6d6f19e5b
--- /dev/null
+++ b/modules/python/test/transformations.py
@@ -0,0 +1,1705 @@
+# -*- coding: utf-8 -*-
+# transformations.py
+
+# Copyright (c) 2006, Christoph Gohlke
+# Copyright (c) 2006-2009, The Regents of the University of California
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# * Neither the name of the copyright holders nor the names of any
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""Homogeneous Transformation Matrices and Quaternions.
+
+A library for calculating 4x4 matrices for translating, rotating, reflecting,
+scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
+3D homogeneous coordinates as well as for converting between rotation matrices,
+Euler angles, and quaternions. Also includes an Arcball control object and
+functions to decompose transformation matrices.
+
+:Authors:
+  `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
+  Laboratory for Fluorescence Dynamics, University of California, Irvine
+
+:Version: 20090418
+
+Requirements
+------------
+
+* `Python 2.6 <http://www.python.org>`__
+* `Numpy 1.3 <http://numpy.scipy.org>`__
+* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__
+  (optional implementation of some functions in C)
+
+Notes
+-----
+
+Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using
+numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using
+numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively
+numpy.dot(v, M.T) for shape (\*, 4) "array of points".
+
+Calculations are carried out with numpy.float64 precision.
+
+This Python implementation is not optimized for speed.
+
+Vector, point, quaternion, and matrix function arguments are expected to be
+"array like", i.e. tuple, list, or numpy arrays.
+
+Return types are numpy arrays unless specified otherwise.
+
+Angles are in radians unless specified otherwise.
+
+Quaternions ix+jy+kz+w are represented as [x, y, z, w].
+
+Use the transpose of transformation matrices for OpenGL glMultMatrixd().
+
+A triple of Euler angles can be applied/interpreted in 24 ways, which can
+be specified using a 4 character string or encoded 4-tuple:
+
+  *Axes 4-string*: e.g. 'sxyz' or 'ryxy'
+
+  - first character : rotations are applied to 's'tatic or 'r'otating frame
+  - remaining characters : successive rotation axis 'x', 'y', or 'z'
+
+  *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
+
+  - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
+  - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
+    by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
+  - repetition : first and last axis are same (1) or different (0).
+  - frame : rotations are applied to static (0) or rotating (1) frame.
+
+References
+----------
+
+(1)  Matrices and transformations. Ronald Goldman.
+     In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
+(2)  More matrices and transformations: shear and pseudo-perspective.
+     Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
+(3)  Decomposing a matrix into simple transformations. Spencer Thomas.
+     In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
+(4)  Recovering the data from the transformation matrix. Ronald Goldman.
+     In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
+(5)  Euler angle conversion. Ken Shoemake.
+     In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
+(6)  Arcball rotation control. Ken Shoemake.
+     In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
+(7)  Representing attitude: Euler angles, unit quaternions, and rotation
+     vectors. James Diebel. 2006.
+(8)  A discussion of the solution for the best rotation to relate two sets
+     of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
+(9)  Closed-form solution of absolute orientation using unit quaternions.
+     BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642.
+(10) Quaternions. Ken Shoemake.
+     http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
+(11) From quaternion to matrix and back. JMP van Waveren. 2005.
+     http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
+(12) Uniform random rotations. Ken Shoemake.
+     In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
+
+
+Examples
+--------
+
+>>> alpha, beta, gamma = 0.123, -1.234, 2.345
+>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
+>>> I = identity_matrix()
+>>> Rx = rotation_matrix(alpha, xaxis)
+>>> Ry = rotation_matrix(beta, yaxis)
+>>> Rz = rotation_matrix(gamma, zaxis)
+>>> R = concatenate_matrices(Rx, Ry, Rz)
+>>> euler = euler_from_matrix(R, 'rxyz')
+>>> numpy.allclose([alpha, beta, gamma], euler)
+True
+>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
+>>> is_same_transform(R, Re)
+True
+>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
+>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
+True
+>>> qx = quaternion_about_axis(alpha, xaxis)
+>>> qy = quaternion_about_axis(beta, yaxis)
+>>> qz = quaternion_about_axis(gamma, zaxis)
+>>> q = quaternion_multiply(qx, qy)
+>>> q = quaternion_multiply(q, qz)
+>>> Rq = quaternion_matrix(q)
+>>> is_same_transform(R, Rq)
+True
+>>> S = scale_matrix(1.23, origin)
+>>> T = translation_matrix((1, 2, 3))
+>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
+>>> R = random_rotation_matrix(numpy.random.rand(3))
+>>> M = concatenate_matrices(T, R, Z, S)
+>>> scale, shear, angles, trans, persp = decompose_matrix(M)
+>>> numpy.allclose(scale, 1.23)
+True
+>>> numpy.allclose(trans, (1, 2, 3))
+True
+>>> numpy.allclose(shear, (0, math.tan(beta), 0))
+True
+>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
+True
+>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
+>>> is_same_transform(M, M1)
+True
+
+"""
+
+from __future__ import division
+
+import warnings
+import math
+
+import numpy
+
+# Documentation in HTML format can be generated with Epydoc
+__docformat__ = "restructuredtext en"
+
+
+def identity_matrix():
+    """Return 4x4 identity/unit matrix.
+
+    >>> I = identity_matrix()
+    >>> numpy.allclose(I, numpy.dot(I, I))
+    True
+    >>> numpy.sum(I), numpy.trace(I)
+    (4.0, 4.0)
+    >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
+    True
+
+    """
+    # float64 explicitly: the module documents float64 precision throughout
+    return numpy.identity(4, dtype=numpy.float64)
+
+
+def translation_matrix(direction):
+    """Return matrix to translate by direction vector.
+
+    >>> v = numpy.random.random(3) - 0.5
+    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
+    True
+
+    """
+    M = numpy.identity(4)
+    # the translation of a homogeneous transform lives in the last column
+    M[:3, 3] = direction[:3]
+    return M
+
+
+def translation_from_matrix(matrix):
+    """Return translation vector from translation matrix.
+
+    >>> v0 = numpy.random.random(3) - 0.5
+    >>> v1 = translation_from_matrix(translation_matrix(v0))
+    >>> numpy.allclose(v0, v1)
+    True
+
+    """
+    # .copy() so mutating the result cannot alter the caller's matrix
+    return numpy.array(matrix, copy=False)[:3, 3].copy()
+
+
+def reflection_matrix(point, normal):
+    """Return matrix to mirror at plane defined by point and normal vector.
+
+    >>> v0 = numpy.random.random(4) - 0.5
+    >>> v0[3] = 1.0
+    >>> v1 = numpy.random.random(3) - 0.5
+    >>> R = reflection_matrix(v0, v1)
+    >>> numpy.allclose(2., numpy.trace(R))
+    True
+    >>> numpy.allclose(v0, numpy.dot(R, v0))
+    True
+    >>> v2 = v0.copy()
+    >>> v2[:3] += v1
+    >>> v3 = v0.copy()
+    >>> v3[:3] -= v1
+    >>> numpy.allclose(v2, numpy.dot(R, v3))
+    True
+
+    """
+    normal = unit_vector(normal[:3])
+    M = numpy.identity(4)
+    # Householder reflection across the plane: R = I - 2*n*n^T
+    M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
+    # translation term shifts the mirror plane so it passes through `point`
+    M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
+    return M
+
+
+def reflection_from_matrix(matrix):
+    """Return mirror plane point and normal vector from reflection matrix.
+
+    >>> v0 = numpy.random.random(3) - 0.5
+    >>> v1 = numpy.random.random(3) - 0.5
+    >>> M0 = reflection_matrix(v0, v1)
+    >>> point, normal = reflection_from_matrix(M0)
+    >>> M1 = reflection_matrix(point, normal)
+    >>> is_same_transform(M0, M1)
+    True
+
+    """
+    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
+    # normal: unit eigenvector corresponding to eigenvalue -1
+    l, V = numpy.linalg.eig(M[:3, :3])
+    i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0]
+    if not len(i):
+        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
+    normal = numpy.real(V[:, i[0]]).squeeze()
+    # point: any unit eigenvector corresponding to eigenvalue 1
+    l, V = numpy.linalg.eig(M)
+    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
+    if not len(i):
+        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
+    point = numpy.real(V[:, i[-1]]).squeeze()
+    # dehomogenize the homogeneous eigenvector
+    point /= point[3]
+    return point, normal
+
+
+def rotation_matrix(angle, direction, point=None):
+    """Return matrix to rotate about axis defined by point and direction.
+
+    >>> angle = (random.random() - 0.5) * (2*math.pi)
+    >>> direc = numpy.random.random(3) - 0.5
+    >>> point = numpy.random.random(3) - 0.5
+    >>> R0 = rotation_matrix(angle, direc, point)
+    >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
+    >>> is_same_transform(R0, R1)
+    True
+    >>> R0 = rotation_matrix(angle, direc, point)
+    >>> R1 = rotation_matrix(-angle, -direc, point)
+    >>> is_same_transform(R0, R1)
+    True
+    >>> I = numpy.identity(4, numpy.float64)
+    >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
+    True
+    >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2,
+    ...                                                direc, point)))
+    True
+
+    """
+    sina = math.sin(angle)
+    cosa = math.cos(angle)
+    direction = unit_vector(direction[:3])
+    # rotation matrix around unit vector
+    # Rodrigues' formula: R = cosa*I + (1-cosa)*n*n^T + sina*[n]_x
+    R = numpy.array(((cosa, 0.0,  0.0),
+                     (0.0,  cosa, 0.0),
+                     (0.0,  0.0,  cosa)), dtype=numpy.float64)
+    R += numpy.outer(direction, direction) * (1.0 - cosa)
+    direction *= sina
+    # [n]_x: skew-symmetric cross-product matrix, pre-scaled by sina above
+    R += numpy.array((( 0.0,         -direction[2],  direction[1]),
+                      ( direction[2], 0.0,          -direction[0]),
+                      (-direction[1], direction[0],  0.0)),
+                     dtype=numpy.float64)
+    M = numpy.identity(4)
+    M[:3, :3] = R
+    if point is not None:
+        # rotation not around origin: translate so `point` stays fixed
+        point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
+        M[:3, 3] = point - numpy.dot(R, point)
+    return M
+
+
+def rotation_from_matrix(matrix):
+    """Return rotation angle and axis from rotation matrix.
+
+    >>> angle = (random.random() - 0.5) * (2*math.pi)
+    >>> direc = numpy.random.random(3) - 0.5
+    >>> point = numpy.random.random(3) - 0.5
+    >>> R0 = rotation_matrix(angle, direc, point)
+    >>> angle, direc, point = rotation_from_matrix(R0)
+    >>> R1 = rotation_matrix(angle, direc, point)
+    >>> is_same_transform(R0, R1)
+    True
+
+    """
+    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
+    R33 = R[:3, :3]
+    # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
+    l, W = numpy.linalg.eig(R33.T)
+    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
+    if not len(i):
+        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
+    direction = numpy.real(W[:, i[-1]]).squeeze()
+    # point: unit eigenvector of R33 corresponding to eigenvalue of 1
+    l, Q = numpy.linalg.eig(R)
+    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
+    if not len(i):
+        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
+    point = numpy.real(Q[:, i[-1]]).squeeze()
+    # dehomogenize the homogeneous eigenvector
+    point /= point[3]
+    # rotation angle depending on direction
+    # trace(R33) = 1 + 2*cos(angle)
+    cosa = (numpy.trace(R33) - 1.0) / 2.0
+    # recover sin(angle) from an off-diagonal entry, dividing by a direction
+    # component that is safely non-zero
+    if abs(direction[2]) > 1e-8:
+        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
+    elif abs(direction[1]) > 1e-8:
+        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
+    else:
+        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
+    angle = math.atan2(sina, cosa)
+    return angle, direction, point
+
+
+def scale_matrix(factor, origin=None, direction=None):
+    """Return matrix to scale by factor around origin in direction.
+
+    Use factor -1 for point symmetry.
+
+    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
+    >>> v[3] = 1.0
+    >>> S = scale_matrix(-1.234)
+    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
+    True
+    >>> factor = random.random() * 10 - 5
+    >>> origin = numpy.random.random(3) - 0.5
+    >>> direct = numpy.random.random(3) - 0.5
+    >>> S = scale_matrix(factor, origin)
+    >>> S = scale_matrix(factor, origin, direct)
+
+    """
+    if direction is None:
+        # uniform scaling
+        M = numpy.array(((factor, 0.0,    0.0,    0.0),
+                         (0.0,    factor, 0.0,    0.0),
+                         (0.0,    0.0,    factor, 0.0),
+                         (0.0,    0.0,    0.0,    1.0)), dtype=numpy.float64)
+        if origin is not None:
+            # translation (1-factor)*origin keeps `origin` fixed
+            M[:3, 3] = origin[:3]
+            M[:3, 3] *= 1.0 - factor
+    else:
+        # nonuniform scaling: scale only along `direction`
+        direction = unit_vector(direction[:3])
+        factor = 1.0 - factor
+        M = numpy.identity(4)
+        M[:3, :3] -= factor * numpy.outer(direction, direction)
+        if origin is not None:
+            M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
+    return M
+
+
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from scaling matrix.

    Inverse of the scale_matrix function. For nonuniform scaling the
    direction is the eigenvector of the upper-left 3x3 submatrix whose
    eigenvalue equals the factor; for uniform scaling direction is None.

    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S0 = scale_matrix(factor, origin)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    >>> S0 = scale_matrix(factor, origin, direct)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # For nonuniform scaling trace(M33) = 2 + factor (two eigenvalues are
    # 1, one equals the factor).
    factor = numpy.trace(M33) - 2.0
    try:
        # direction: unit eigenvector corresponding to eigenvalue factor
        l, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0]
        direction = numpy.real(V[:, i]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # No such eigenvalue: the matrix is a uniform scaling whose
        # factor is trace(M33)/3.
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any eigenvector of the full homogeneous matrix corresponding
    # to eigenvalue 1, i.e. a fixed point of the transform
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(V[:, i[-1]]).squeeze()
    origin /= origin[3]  # dehomogenize
    return factor, origin, direction
+
+
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.

    Using either perspective point, projection direction, or none of both.

    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).

    >>> P = projection_matrix((0, 0, 0), (1, 0, 0))
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3.0-v1[1])
    True

    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection through the given eye point
        perspective = numpy.array(perspective[:3], dtype=numpy.float64,
                                  copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        # bottom row: w becomes proportional to the signed distance of the
        # input point from the eye along the plane normal
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along the given direction onto the plane
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        # component of direction along the normal; a zero value (direction
        # parallel to the plane) would make the projection undefined
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection onto the plane
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
+
+
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.

    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.

    Pass pseudo=True when the matrix was built with pseudo=True; a
    pseudo-perspective matrix has no eigenvalue-1 fixed point, so the
    perspective branch is forced.

    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not pseudo and len(i):
        # orthogonal or parallel projection
        # point: any eigenvector corresponding to eigenvalue 1 (a fixed
        # point, which lies on the projection plane)
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        # (the direction that is collapsed onto the plane)
        l, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        l, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(l)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # the bottom row of a perspective projection matrix is -normal
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            # undo the extra normal term added by pseudo projections
            perspective -= normal
        return point, normal, None, perspective, pseudo
+
+
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix to obtain normalized device coordinates from frustrum.

    The frustrum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far).

    Normalized device coordinates are in range [-1, 1] if coordinates are
    inside the frustrum.

    If perspective is True the frustrum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box).

    Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (devided by w coordinate).

    >>> frustrum = numpy.random.rand(6)
    >>> frustrum[1] += frustrum[0]
    >>> frustrum[3] += frustrum[2]
    >>> frustrum[5] += frustrum[4]
    >>> M = clip_matrix(*frustrum, perspective=False)
    >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0])
    array([ 1.,  1.,  1.,  1.])
    >>> M = clip_matrix(*frustrum, perspective=True)
    >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    >>> v / v[3]
    array([-1., -1., -1.,  1.])
    >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
    >>> v / v[3]
    array([ 1.,  1., -1.,  1.])

    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustrum")
    width = right - left
    height = top - bottom
    depth = far - near
    mat = numpy.zeros((4, 4), dtype=numpy.float64)
    if perspective:
        # Truncated pyramid with apex at the origin looking along -z.
        if near <= _EPS:
            raise ValueError("invalid frustrum: near <= 0")
        t = 2.0 * near
        mat[0, 0] = -t / width
        mat[0, 2] = (right+left) / width
        mat[1, 1] = -t / height
        mat[1, 2] = (top+bottom) / height
        mat[2, 2] = -(far+near) / depth
        mat[2, 3] = t * far / depth
        # w takes the (negated) z coordinate for the perspective divide.
        mat[3, 2] = -1.0
    else:
        # Orthographic box mapped linearly onto [-1, 1] per axis.
        mat[0, 0] = 2.0 / width
        mat[0, 3] = (right+left) / (left-right)
        mat[1, 1] = 2.0 / height
        mat[1, 3] = (top+bottom) / (bottom-top)
        mat[2, 2] = 2.0 / depth
        mat[2, 3] = (far+near) / (near-far)
        mat[3, 3] = 1.0
    return mat
+
+
def shear_matrix(angle, direction, point, normal):
    """Return matrix to shear by angle along direction vector on shear plane.

    The shear plane is defined by a point and normal vector. The direction
    vector must be orthogonal to the plane's normal vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S = shear_matrix(angle, direct, point, normal)
    >>> numpy.allclose(1.0, numpy.linalg.det(S))
    True

    """
    normal = unit_vector(normal[:3])
    direction = unit_vector(direction[:3])
    # A shear is only well defined when sliding parallel to the plane.
    if abs(numpy.dot(normal, direction)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    tangent = math.tan(angle)
    mat = numpy.identity(4)
    # Displace points along `direction`, proportionally to their signed
    # distance from the plane through `point` with normal `normal`.
    mat[:3, :3] += tangent * numpy.outer(direction, normal)
    mat[:3, 3] = -tangent * numpy.dot(point[:3], normal) * direction
    return mat
+
+
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    Inverse of the shear_matrix function.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    # (the shear plane is spanned by them; the loose 1e-4 tolerance accounts
    # for the numerically noisy repeated eigenvalue)
    l, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("No two linear independent eigenvectors found %s" % l)
    V = numpy.real(V[:, i]).squeeze().T
    lenorm = -1.0
    # Choose the eigenvector pair with the largest cross product, i.e. the
    # most linearly independent pair, for a stable plane normal.
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        l = vector_norm(n)
        if l > lenorm:
            lenorm = l
            normal = n
    normal /= lenorm
    # direction and angle: (M33 - I) maps everything onto the shear
    # direction, scaled by tan(angle) times distance from the plane
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector of the full homogeneous matrix corresponding to
    # eigenvalue 1 (a fixed point lying on the shear plane)
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]  # dehomogenize
    return angle, direction, point, normal
+
+
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.

    matrix : array_like
        Non-degenerative homogeneous transformation matrix

    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    Raise ValueError if matrix is of wrong type or degenerative.

    >>> T0 = translation_matrix((1, 2, 3))
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True

    """
    # Work on a transposed copy, so rows of M correspond to columns of the
    # input matrix; normalize the homogeneous scale first.
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]
    # P is M with the perspective partition cleared; it must be invertible.
    P = M.copy()
    P[:, 3] = 0, 0, 0, 1
    if not numpy.linalg.det(P):
        raise ValueError("Matrix is singular")

    scale = numpy.zeros((3, ), dtype=numpy.float64)
    shear = [0, 0, 0]
    angles = [0, 0, 0]

    # perspective partition (bottom row of the input matrix)
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0, 0, 0, 1
    else:
        perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)

    # translation partition
    translate = M[3, :3].copy()
    M[3, :3] = 0

    # Gram-Schmidt-style factorization of the 3x3 part into scale, shear
    # and a pure rotation; rows are orthonormalized in place.
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # scale[2] is a numpy scalar, so dividing the list slice broadcasts
    # element-wise (a plain float divisor would raise TypeError here).
    shear[1:] /= scale[2]

    # Enforce a right-handed rotation by flipping signs when needed.
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        scale *= -1
        row *= -1

    # Euler angles (static x, y, z axes) from the rotation rows.
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        # gimbal lock: roll and yaw are coupled; yaw is fixed to 0
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0

    return scale, shear, angles, translate, perspective
+
+
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.

    Sequence of transformations:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    >>> scale = numpy.random.random(3) - 0.5
    >>> shear = numpy.random.random(3) - 0.5
    >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
    >>> trans = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(4) - 0.5
    >>> M0 = compose_matrix(scale, shear, angles, trans, persp)
    >>> result = decompose_matrix(M0)
    >>> M1 = compose_matrix(*result)
    >>> is_same_transform(M0, M1)
    True

    """
    # Build one factor per given partition, then multiply them in the
    # order perspective * translate * rotate * shear * scale.
    factors = []
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        factors.append(P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        factors.append(T)
    if angles is not None:
        factors.append(euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[1, 2] = shear[2]
        Z[0, 2] = shear[1]
        Z[0, 1] = shear[0]
        factors.append(Z)
    if scale is not None:
        S = numpy.identity(4)
        S[0, 0] = scale[0]
        S[1, 1] = scale[1]
        S[2, 2] = scale[2]
        factors.append(S)
    mat = numpy.identity(4)
    for factor in factors:
        mat = numpy.dot(mat, factor)
    # Normalize the homogeneous scale.
    mat /= mat[3, 3]
    return mat
+
+
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    Angles are expected in degrees.

    The de-orthogonalization matrix is the inverse.

    >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
    >>> numpy.allclose(numpy.sum(O), 43.063229)
    True

    """
    a, b, c = lengths
    angles = numpy.radians(angles)
    sina, sinb, _ = numpy.sin(angles)
    cosa, cosb, cosg = numpy.cos(angles)
    # co encodes the angle between the a* and b reciprocal directions.
    co = (cosa * cosb - cosg) / (sina * sinb)
    mat = numpy.zeros((4, 4), dtype=numpy.float64)
    mat[0, 0] = a * sinb * math.sqrt(1.0 - co*co)
    mat[1, 0] = -a * sinb * co
    mat[1, 1] = b * sina
    mat[2, 0] = a * cosb
    mat[2, 1] = b * cosa
    mat[2, 2] = c
    mat[3, 3] = 1.0
    return mat
+
+
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
    """Return matrix to transform given vector set into second vector set.

    v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors.

    If usesvd is True, the weighted sum of squared deviations (RMSD) is
    minimized according to the algorithm by W. Kabsch [8]. Otherwise the
    quaternion based algorithm by B. Horn [9] is used (slower when using
    this Python implementation).

    The returned matrix performs rotation, translation and uniform scaling
    (if specified).

    >>> v0 = numpy.random.rand(3, 10)
    >>> M = superimposition_matrix(v0, v0)
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1))
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> S = scale_matrix(random.random())
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> M = concatenate_matrices(T, R, S)
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1)
    >>> M = superimposition_matrix(v0, v1, scaling=True)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64)
    >>> v[:, :, 0] = v0
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
    True

    """
    # Only the x, y, z rows are used; a homogeneous 4th row is ignored.
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]

    if v0.shape != v1.shape or v0.shape[1] < 3:
        raise ValueError("Vector sets are of wrong shape or type.")

    # move centroids to origin (the subtraction creates local copies, so
    # the caller's arrays are never modified)
    t0 = numpy.mean(v0, axis=1)
    t1 = numpy.mean(v1, axis=1)
    v0 = v0 - t0.reshape(3, 1)
    v1 = v1 - t1.reshape(3, 1)

    if usesvd:
        # Kabsch: Singular Value Decomposition of covariance matrix
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system; flip the axis
            # belonging to the smallest singular value
            R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(4)
        M[:3, :3] = R
    else:
        # Horn: compute symmetric 4x4 matrix N from products of coordinates
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = ((xx+yy+zz, yz-zy,    zx-xz,    xy-yx),
             (yz-zy,    xx-yy-zz, xy+yx,    zx+xz),
             (zx-xz,    xy+yx,   -xx+yy-zz, yz+zy),
             (xy-yx,    zx+xz,    yz+zy,   -xx-yy+zz))
        # quaternion: eigenvector corresponding to most positive eigenvalue
        l, V = numpy.linalg.eig(N)
        q = V[:, numpy.argmax(l)]
        q /= vector_norm(q) # unit quaternion
        q = numpy.roll(q, -1) # move w component to end
        # homogeneous transformation matrix
        M = quaternion_matrix(q)

    # scale: ratio of rms deviations from centroid
    if scaling:
        # v0/v1 are the local centered copies, so squaring in place is safe
        v0 *= v0
        v1 *= v1
        M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))

    # translation: move centroid of v0 to origin, rotate/scale, then move
    # to centroid of v1
    M[:3, 3] = t1
    T = numpy.identity(4)
    T[:3, 3] = -t0
    M = numpy.dot(M, T)
    return M
+
+
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    The axes string is lowercased before lookup, consistent with
    euler_from_matrix and quaternion_from_euler.

    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)

    """
    try:
        # Accept the axis-sequence string case-insensitively, matching
        # euler_from_matrix and quaternion_from_euler.
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes is an encoded (firstaxis, parity, repetition, frame) tuple;
        # validate it against the known sequences before unpacking.
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]

    if frame:
        # rotating frame: swap first and last angles
        ai, ak = ak, ai
    if parity:
        # odd-parity axis sequence: negate all angles
        ai, aj, ak = -ai, -aj, -ak

    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk

    M = numpy.identity(4)
    if repetition:
        # first and third rotations about the same axis
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
+
+
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple

    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print axes, "failed"

    """
    try:
        # Decode the axis sequence string (case-insensitive).
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes is already an encoded tuple; validate before unpacking.
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]

    # only the 3x3 rotation part is used
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        # first and third rotations about the same axis
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j],  M[i, k])
            ay = math.atan2( sy,       M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            # singular case (gimbal lock): az is fixed to 0
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2( sy,       M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j],  M[k, k])
            ay = math.atan2(-M[k, i],  cy)
            az = math.atan2( M[j, i],  M[i, i])
        else:
            # singular case (gimbal lock): az is fixed to 0
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2(-M[k, i],  cy)
            az = 0.0

    # undo the parity/frame adjustments applied by euler_matrix
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
+
+
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.

    >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True

    """
    # Go through the rotation-matrix representation.
    rotation = quaternion_matrix(quaternion)
    return euler_from_matrix(rotation, axes)
+
+
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    The returned quaternion is ordered (x, y, z, w).

    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
    True

    """
    try:
        # Decode the axis sequence string (case-insensitive).
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes is already an encoded tuple; validate before unpacking.
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]

    if frame:
        # rotating frame: swap first and last angles
        ai, ak = ak, ai
    if parity:
        aj = -aj

    # half angles: quaternion components use sin/cos of angle/2
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    cc = ci*ck
    cs = ci*sk
    sc = si*ck
    ss = si*sk

    quaternion = numpy.empty((4, ), dtype=numpy.float64)
    if repetition:
        # first and third rotations about the same axis
        quaternion[i] = cj*(cs + sc)
        quaternion[j] = sj*(cc + ss)
        quaternion[k] = sj*(cs - sc)
        quaternion[3] = cj*(cc - ss)
    else:
        quaternion[i] = cj*sc - sj*cs
        quaternion[j] = cj*ss + sj*cc
        quaternion[k] = cj*cs - sj*sc
        quaternion[3] = cj*cc + sj*ss
    if parity:
        quaternion[j] *= -1

    return quaternion
+
+
def quaternion_about_axis(angle, axis):
    """Return quaternion for rotation about axis.

    >>> q = quaternion_about_axis(0.123, (1, 0, 0))
    >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
    True

    """
    # Components are ordered (x, y, z, w): vector part first, scalar last.
    q = numpy.zeros((4, ), dtype=numpy.float64)
    q[:3] = axis[:3]
    length = vector_norm(q)
    if length > _EPS:
        # Normalize the axis and scale it to sin(angle/2).
        q *= math.sin(angle/2.0) / length
    q[3] = math.cos(angle/2.0)
    return q
+
+
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
    True

    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    norm_sq = numpy.dot(q, q)
    if norm_sq < _EPS:
        # Near-zero quaternion: treat as no rotation.
        return numpy.identity(4)
    # Scale so that the outer products below directly give 2*qi*qj/norm.
    q *= math.sqrt(2.0 / norm_sq)
    qq = numpy.outer(q, q)
    mat = numpy.identity(4)
    mat[0, 0] = 1.0 - qq[1, 1] - qq[2, 2]
    mat[0, 1] = qq[0, 1] - qq[2, 3]
    mat[0, 2] = qq[0, 2] + qq[1, 3]
    mat[1, 0] = qq[0, 1] + qq[2, 3]
    mat[1, 1] = 1.0 - qq[0, 0] - qq[2, 2]
    mat[1, 2] = qq[1, 2] - qq[0, 3]
    mat[2, 0] = qq[0, 2] - qq[1, 3]
    mat[2, 1] = qq[1, 2] + qq[0, 3]
    mat[2, 2] = 1.0 - qq[0, 0] - qq[1, 1]
    return mat
+
+
def quaternion_from_matrix(matrix):
    """Return quaternion from rotation matrix.

    The quaternion is ordered (x, y, z, w).

    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095])
    True

    """
    q = numpy.empty((4, ), dtype=numpy.float64)
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    t = numpy.trace(M)
    if t > M[3, 3]:
        # Trace is dominant: w is largest; build the vector part from the
        # skew-symmetric differences of off-diagonal elements.
        q[3] = t
        q[2] = M[1, 0] - M[0, 1]
        q[1] = M[0, 2] - M[2, 0]
        q[0] = M[2, 1] - M[1, 2]
    else:
        # Otherwise pick the largest diagonal element so the square root
        # below stays well conditioned.
        i, j, k = 0, 1, 2
        if M[1, 1] > M[0, 0]:
            i, j, k = 1, 2, 0
        if M[2, 2] > M[i, i]:
            i, j, k = 2, 0, 1
        t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
        q[i] = t
        q[j] = M[i, j] + M[j, i]
        q[k] = M[k, i] + M[i, k]
        q[3] = M[k, j] - M[j, k]
    # Normalize; M[3, 3] accounts for a possible homogeneous scale.
    q *= 0.5 / math.sqrt(t * M[3, 3])
    return q
+
+
def quaternion_multiply(quaternion1, quaternion0):
    """Return multiplication of two quaternions.

    Components are ordered (x, y, z, w); the product applies quaternion0
    first and quaternion1 second.

    >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
    >>> numpy.allclose(q, [-44, -14, 48, 28])
    True

    """
    x0, y0, z0, w0 = quaternion0
    x1, y1, z1, w1 = quaternion1
    # Hamilton product, written out per component.
    x = x1*w0 + y1*z0 - z1*y0 + w1*x0
    y = -x1*z0 + y1*w0 + z1*x0 + w1*y0
    z = x1*y0 - y1*x0 + z1*w0 + w1*z0
    w = -x1*x0 - y1*y0 - z1*z0 + w1*w0
    return numpy.array((x, y, z, w), dtype=numpy.float64)
+
+
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion.

    The vector part (x, y, z) is negated; the scalar part w is preserved.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
    True

    """
    x, y, z, w = quaternion[0], quaternion[1], quaternion[2], quaternion[3]
    return numpy.array((-x, -y, -z, w), dtype=numpy.float64)
+
+
def quaternion_inverse(quaternion):
    """Return inverse of quaternion.

    The conjugate divided by the squared norm, so the result is exact for
    non-unit quaternions as well.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1])
    True

    """
    # Conjugate (negated vector part) over squared norm.
    conjugate = numpy.array((-quaternion[0], -quaternion[1],
                             -quaternion[2], quaternion[3]),
                            dtype=numpy.float64)
    return conjugate / numpy.dot(quaternion, quaternion)
+
+
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.

    fraction is in [0, 1]; spin adds extra full turns to the interpolation
    arc; with shortestpath the shorter great-circle arc is taken.

    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0.0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1.0, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
    True

    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    # Return endpoints directly to avoid dividing by sin(0) below.
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # Quaternions are (anti)parallel: nothing to interpolate.
        return q0
    if shortestpath and d < 0.0:
        # invert rotation
        d = -d
        q1 *= -1.0
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    # Standard slerp weights.
    # NOTE(review): the in-place scaling assumes unit_vector returns fresh
    # arrays rather than views of the inputs — confirm against its
    # definition.
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
+
+
+def random_quaternion(rand=None):
+    """Return uniform random unit quaternion.
+
+    rand: array like or None
+        Three independent random variables that are uniformly distributed
+        between 0 and 1.
+
+    >>> q = random_quaternion()
+    >>> numpy.allclose(1.0, vector_norm(q))
+    True
+    >>> q = random_quaternion(numpy.random.random(3))
+    >>> q.shape
+    (4,)
+
+    """
+    if rand is None:
+        rand = numpy.random.rand(3)
+    else:
+        assert len(rand) == 3
+    r1 = numpy.sqrt(1.0 - rand[0])
+    r2 = numpy.sqrt(rand[0])
+    pi2 = math.pi * 2.0
+    t1 = pi2 * rand[1]
+    t2 = pi2 * rand[2]
+    return numpy.array((numpy.sin(t1)*r1,
+                        numpy.cos(t1)*r1,
+                        numpy.sin(t2)*r2,
+                        numpy.cos(t2)*r2), dtype=numpy.float64)
+
+
+def random_rotation_matrix(rand=None):
+    """Return uniform random rotation matrix.
+
+    rand: array like
+        Three independent random variables that are uniformly distributed
+        between 0 and 1 for each returned quaternion.
+
+    >>> R = random_rotation_matrix()
+    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
+    True
+
+    """
+    # Draw a uniform unit quaternion and convert it to a 4x4 homogeneous
+    # rotation matrix (quaternion_matrix is defined elsewhere in this file).
+    return quaternion_matrix(random_quaternion(rand))
+
+
+class Arcball(object):
+    """Virtual Trackball Control.
+
+    >>> ball = Arcball()
+    >>> ball = Arcball(initial=numpy.identity(4))
+    >>> ball.place([320, 320], 320)
+    >>> ball.down([500, 250])
+    >>> ball.drag([475, 275])
+    >>> R = ball.matrix()
+    >>> numpy.allclose(numpy.sum(R), 3.90583455)
+    True
+    >>> ball = Arcball(initial=[0, 0, 0, 1])
+    >>> ball.place([320, 320], 320)
+    >>> ball.setaxes([1,1,0], [-1, 1, 0])
+    >>> ball.setconstrain(True)
+    >>> ball.down([400, 200])
+    >>> ball.drag([200, 400])
+    >>> R = ball.matrix()
+    >>> numpy.allclose(numpy.sum(R), 0.2055924)
+    True
+    >>> ball.next()
+
+    """
+
+    def __init__(self, initial=None):
+        """Initialize virtual trackball control.
+
+        initial : quaternion or rotation matrix
+
+        """
+        self._axis = None
+        self._axes = None
+        self._radius = 1.0
+        self._center = [0.0, 0.0]
+        self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
+        self._constrain = False
+
+        if initial is None:
+            self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
+        else:
+            initial = numpy.array(initial, dtype=numpy.float64)
+            if initial.shape == (4, 4):
+                self._qdown = quaternion_from_matrix(initial)
+            elif initial.shape == (4, ):
+                initial /= vector_norm(initial)
+                self._qdown = initial
+            else:
+                raise ValueError("initial not a quaternion or matrix.")
+
+        self._qnow = self._qpre = self._qdown
+
+    def place(self, center, radius):
+        """Place Arcball, e.g. when window size changes.
+
+        center : sequence[2]
+            Window coordinates of trackball center.
+        radius : float
+            Radius of trackball in window coordinates.
+
+        """
+        self._radius = float(radius)
+        self._center[0] = center[0]
+        self._center[1] = center[1]
+
+    def setaxes(self, *axes):
+        """Set axes to constrain rotations."""
+        if axes is None:
+            self._axes = None
+        else:
+            self._axes = [unit_vector(axis) for axis in axes]
+
+    def setconstrain(self, constrain):
+        """Set state of constrain to axis mode."""
+        self._constrain = constrain == True
+
+    def getconstrain(self):
+        """Return state of constrain to axis mode."""
+        return self._constrain
+
+    def down(self, point):
+        """Set initial cursor window coordinates and pick constrain-axis."""
+        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
+        self._qdown = self._qpre = self._qnow
+
+        if self._constrain and self._axes is not None:
+            self._axis = arcball_nearest_axis(self._vdown, self._axes)
+            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
+        else:
+            self._axis = None
+
+    def drag(self, point):
+        """Update current cursor window coordinates."""
+        vnow = arcball_map_to_sphere(point, self._center, self._radius)
+
+        if self._axis is not None:
+            vnow = arcball_constrain_to_axis(vnow, self._axis)
+
+        self._qpre = self._qnow
+
+        t = numpy.cross(self._vdown, vnow)
+        if numpy.dot(t, t) < _EPS:
+            self._qnow = self._qdown
+        else:
+            q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)]
+            self._qnow = quaternion_multiply(q, self._qdown)
+
+    def next(self, acceleration=0.0):
+        """Continue rotation in direction of last drag."""
+        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
+        self._qpre, self._qnow = self._qnow, q
+
+    def matrix(self):
+        """Return homogeneous rotation matrix."""
+        return quaternion_matrix(self._qnow)
+
+
+def arcball_map_to_sphere(point, center, radius):
+    """Return unit sphere coordinates from window coordinates."""
+    v = numpy.array(((point[0] - center[0]) / radius,
+                     (center[1] - point[1]) / radius,
+                     0.0), dtype=numpy.float64)
+    n = v[0]*v[0] + v[1]*v[1]
+    if n > 1.0:
+        v /= math.sqrt(n) # position outside of sphere
+    else:
+        v[2] = math.sqrt(1.0 - n)
+    return v
+
+
+def arcball_constrain_to_axis(point, axis):
+    """Return sphere point perpendicular to axis."""
+    v = numpy.array(point, dtype=numpy.float64, copy=True)
+    a = numpy.array(axis, dtype=numpy.float64, copy=True)
+    v -= a * numpy.dot(a, v) # on plane
+    n = vector_norm(v)
+    if n > _EPS:
+        if v[2] < 0.0:
+            v *= -1.0
+        v /= n
+        return v
+    if a[2] == 1.0:
+        return numpy.array([1, 0, 0], dtype=numpy.float64)
+    return unit_vector([-a[1], a[0], 0])
+
+
+def arcball_nearest_axis(point, axes):
+    """Return axis, which arc is nearest to point."""
+    point = numpy.array(point, dtype=numpy.float64, copy=False)
+    nearest = None
+    mx = -1.0
+    for axis in axes:
+        t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
+        if t > mx:
+            nearest = axis
+            mx = t
+    return nearest
+
+
+# epsilon for testing whether a number is close to zero
+_EPS = numpy.finfo(float).eps * 4.0
+
+# axis sequences for Euler angles
+_NEXT_AXIS = [1, 2, 0, 1]
+
+# map axes strings to/from tuples of inner axis, parity, repetition, frame
+# (e.g. 'sxyz': static frame, x-y-z rotation order)
+_AXES2TUPLE = {
+    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
+    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
+    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
+    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
+    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
+    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
+    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
+    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
+
+# reverse lookup: (inner axis, parity, repetition, frame) -> axes string
+_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
+
+# helper functions
+
+def vector_norm(data, axis=None, out=None):
+    """Return length, i.e. Euclidean norm, of ndarray along axis.
+
+    When `out` is given, the norms are written into it and None is
+    returned (see doctest below).
+
+    >>> v = numpy.random.random(3)
+    >>> n = vector_norm(v)
+    >>> numpy.allclose(n, numpy.linalg.norm(v))
+    True
+    >>> v = numpy.random.rand(6, 5, 3)
+    >>> n = vector_norm(v, axis=-1)
+    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
+    True
+    >>> n = vector_norm(v, axis=1)
+    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
+    True
+    >>> v = numpy.random.rand(5, 4, 3)
+    >>> n = numpy.empty((5, 3), dtype=numpy.float64)
+    >>> vector_norm(v, axis=1, out=n)
+    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
+    True
+    >>> vector_norm([])
+    0.0
+    >>> vector_norm([1.0])
+    1.0
+
+    """
+    # Work on a float64 copy so the caller's array is never modified.
+    data = numpy.array(data, dtype=numpy.float64, copy=True)
+    if out is None:
+        if data.ndim == 1:
+            # Fast scalar path for plain vectors.
+            return math.sqrt(numpy.dot(data, data))
+        # Square in place (safe: `data` is our private copy), reduce, sqrt.
+        data *= data
+        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
+        numpy.sqrt(out, out)
+        return out
+    else:
+        # Caller-supplied output: fill `out` in place, return None.
+        data *= data
+        numpy.sum(data, axis=axis, out=out)
+        numpy.sqrt(out, out)
+
+
+def unit_vector(data, axis=None, out=None):
+    """Return ndarray normalized by length, i.e. Euclidean norm, along axis.
+
+    When `out` is given, the normalized values are written into it (it may
+    alias `data`) and None is returned (see doctest below).
+
+    >>> v0 = numpy.random.random(3)
+    >>> v1 = unit_vector(v0)
+    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
+    True
+    >>> v0 = numpy.random.rand(5, 4, 3)
+    >>> v1 = unit_vector(v0, axis=-1)
+    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
+    >>> numpy.allclose(v1, v2)
+    True
+    >>> v1 = unit_vector(v0, axis=1)
+    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
+    >>> numpy.allclose(v1, v2)
+    True
+    >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
+    >>> unit_vector(v0, axis=1, out=v1)
+    >>> numpy.allclose(v1, v2)
+    True
+    >>> list(unit_vector([]))
+    []
+    >>> list(unit_vector([1.0]))
+    [1.0]
+
+    """
+    if out is None:
+        # Operate on a private float64 copy; caller data is untouched.
+        data = numpy.array(data, dtype=numpy.float64, copy=True)
+        if data.ndim == 1:
+            # Fast path for plain vectors.
+            data /= math.sqrt(numpy.dot(data, data))
+            return data
+    else:
+        # Normalize into the caller-supplied array.
+        if out is not data:
+            out[:] = numpy.array(data, copy=False)
+        data = out
+    # General case: divide by the norm along `axis`, broadcasting the
+    # lengths back over the reduced dimension.
+    length = numpy.atleast_1d(numpy.sum(data*data, axis))
+    numpy.sqrt(length, length)
+    if axis is not None:
+        length = numpy.expand_dims(length, axis)
+    data /= length
+    if out is None:
+        return data
+
+
+def random_vector(size):
+    """Return array of random doubles in the half-open interval [0.0, 1.0).
+
+    >>> v = random_vector(10000)
+    >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0)
+    True
+    >>> v0 = random_vector(10)
+    >>> v1 = random_vector(10)
+    >>> numpy.any(v0 == v1)
+    False
+
+    """
+    # Thin wrapper over numpy's uniform [0, 1) sampler.
+    return numpy.random.random(size)
+
+
+def inverse_matrix(matrix):
+    """Return inverse of square transformation matrix.
+
+    >>> M0 = random_rotation_matrix()
+    >>> M1 = inverse_matrix(M0.T)
+    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
+    True
+    >>> for size in range(1, 7):
+    ...     M0 = numpy.random.rand(size, size)
+    ...     M1 = inverse_matrix(M0)
+    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print size
+
+    """
+    # Thin wrapper; numpy.linalg.inv raises LinAlgError for singular input.
+    return numpy.linalg.inv(matrix)
+
+
+def concatenate_matrices(*matrices):
+    """Return concatenation of series of transformation matrices.
+
+    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
+    >>> numpy.allclose(M, concatenate_matrices(M))
+    True
+    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
+    True
+
+    """
+    M = numpy.identity(4)
+    for i in matrices:
+        M = numpy.dot(M, i)
+    return M
+
+
+def is_same_transform(matrix0, matrix1):
+    """Return True if two matrices perform same transformation.
+
+    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
+    True
+    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
+    False
+
+    """
+    matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
+    matrix0 /= matrix0[3, 3]
+    matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
+    matrix1 /= matrix1[3, 3]
+    return numpy.allclose(matrix0, matrix1)
+
+
+def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
+    """Try import all public attributes from module into global namespace.
+
+    Existing attributes with name clashes are renamed with prefix.
+    Attributes starting with underscore are ignored by default.
+
+    Return True on successful import.
+
+    """
+    # NOTE(review): returns None (falls through) when the import fails;
+    # callers should treat any non-True result as failure.
+    try:
+        module = __import__(module_name)
+    except ImportError:
+        if warn:
+            warnings.warn("Failed to import module " + module_name)
+    else:
+        for attr in dir(module):
+            # Skip names starting with the `ignore` prefix ('_' by default).
+            if ignore and attr.startswith(ignore):
+                continue
+            if prefix:
+                # Preserve the existing (pure-Python) definition under a
+                # prefixed alias before it is shadowed below.
+                if attr in globals():
+                    globals()[prefix + attr] = globals()[attr]
+                elif warn:
+                    warnings.warn("No Python implementation of " + attr)
+            # Install the imported attribute at this module's top level.
+            globals()[attr] = getattr(module, attr)
+        return True