Merged the trunk r8467:8507 (inclusive) (big bunch of documentation fixes)

Andrey Kamaev 2012-05-30 11:13:07 +00:00
parent 052d2dc23a
commit 81a5988015
120 changed files with 5407 additions and 4695 deletions

8 binary files changed (contents not shown).

doc/check_docs2.py (new file, 497 lines added)

@ -0,0 +1,497 @@
import os, sys, glob, re

sys.path.append("../modules/python/src2/")
sys.path.append("../modules/java/")

import hdr_parser as hp
import rst_parser as rp

rp.show_warnings = False
rp.show_errors = False

allmodules = rp.allmodules

DOCUMENTED_MARKER = "verified"

ERROR_001_NOTACLASS        = 1
ERROR_002_NOTASTRUCT       = 2
ERROR_003_INCORRECTBASE    = 3
ERROR_004_MISSEDNAMESPACE  = 4
ERROR_005_MISSINGPYFUNC    = 5
ERROR_006_INVALIDPYOLDDOC  = 6
ERROR_007_INVALIDPYDOC     = 7
ERROR_008_CFUNCISNOTGLOBAL = 8
ERROR_009_OVERLOADNOTFOUND = 9
ERROR_010_UNKNOWNCLASS     = 10
ERROR_011_UNKNOWNFUNC      = 11

do_python_crosscheck = True
errors_disabled = [ERROR_004_MISSEDNAMESPACE]

doc_signatures_whitelist = [
    # templates
    "Matx", "Vec", "SparseMat_", "Scalar_", "Mat_", "Ptr", "Size_", "Point_", "Rect_", "Point3_",
    "DataType", "detail::RotationWarperBase", "flann::Index_", "CalonderDescriptorExtractor",
    "gpu::DevMem2D_", "gpu::PtrStep_", "gpu::PtrElemStep_",
    # black boxes
    "CvArr", "CvFileStorage",
    # other
    "InputArray", "OutputArray",
]

defines = ["cvGraphEdgeIdx", "cvFree", "CV_Assert", "cvSqrt", "cvGetGraphVtx", "cvGraphVtxIdx",
    "cvCaptureFromFile", "cvCaptureFromCAM", "cvCalcBackProjectPatch", "cvCalcBackProject",
    "cvGetHistValue_1D", "cvGetHistValue_2D", "cvGetHistValue_3D", "cvGetHistValue_nD",
    "cvQueryHistValue_1D", "cvQueryHistValue_2D", "cvQueryHistValue_3D", "cvQueryHistValue_nD",
    # not a real function but behaves as function
    "Mat::size",
    # ugly "virtual" functions from ml module
    "CvStatModel::train", "CvStatModel::predict",
    # TODO:
    "cvExtractSURF"
]

synonims = {
    "StarDetector" : ["StarFeatureDetector"],
    "MSER" : ["MserFeatureDetector"],
    "GFTTDetector" : ["GoodFeaturesToTrackDetector"],
    "cvCaptureFromFile" : ["cvCreateFileCapture"],
    "cvCaptureFromCAM" : ["cvCreateCameraCapture"],
    "cvCalcArrBackProjectPatch" : ["cvCalcBackProjectPatch"],
    "cvCalcArrBackProject" : ["cvCalcBackProject"],
    "InputArray" : ["_InputArray"],
    "OutputArray" : ["_OutputArray"],
}

if do_python_crosscheck:
    try:
        import cv2
    except ImportError:
        print "Could not load cv2"
        do_python_crosscheck = False

def get_cv2_object(name):
    if name.startswith("cv2."):
        name = name[4:]
    if name.startswith("cv."):
        name = name[3:]
    if name == "Algorithm":
        return cv2.Algorithm__create("Feature2D.ORB"), name
    elif name == "FeatureDetector":
        return cv2.FeatureDetector_create("ORB"), name
    elif name == "DescriptorExtractor":
        return cv2.DescriptorExtractor_create("ORB"), name
    elif name == "BackgroundSubtractor":
        return cv2.BackgroundSubtractorMOG(), name
    elif name == "StatModel":
        return cv2.KNearest(), name
    else:
        return getattr(cv2, name)(), name

def compareSignatures(f, s):
    # function names
    if f[0] != s[0]:
        return False, "name mismatch"
    # return type
    stype = (s[1] or "void")
    ftype = f[1]
    stype = re.sub(r"\b(cv|std)::", "", stype)
    if ftype:
        ftype = re.sub(r"\b(cv|std)::", "", ftype)
    if ftype and ftype != stype:
        return False, "return type mismatch"
    if ("\C" in f[2]) ^ ("\C" in s[2]):
        return False, "const qualifier mismatch"
    if ("\S" in f[2]) ^ ("\S" in s[2]):
        return False, "static qualifier mismatch"
    if ("\V" in f[2]) ^ ("\V" in s[2]):
        return False, "virtual qualifier mismatch"
    if ("\A" in f[2]) ^ ("\A" in s[2]):
        return False, "abstract qualifier mismatch"
    if len(f[3]) != len(s[3]):
        return False, "different number of arguments"
    for idx, arg in enumerate(zip(f[3], s[3])):
        farg = arg[0]
        sarg = arg[1]
        ftype = re.sub(r"\b(cv|std)::", "", (farg[0] or ""))
        stype = re.sub(r"\b(cv|std)::", "", (sarg[0] or ""))
        if ftype != stype:
            return False, "type of argument #" + str(idx+1) + " mismatch"
        fname = farg[1] or "arg" + str(idx)
        sname = sarg[1] or "arg" + str(idx)
        if fname != sname:
            return False, "name of argument #" + str(idx+1) + " mismatch"
        fdef = re.sub(r"\b(cv|std)::", "", (farg[2] or ""))
        sdef = re.sub(r"\b(cv|std)::", "", (sarg[2] or ""))
        if fdef != sdef:
            return False, "default value of argument #" + str(idx+1) + " mismatch"
    return True, "match"

def formatSignature(s):
    _str = ""
    if "/V" in s[2]:
        _str += "virtual "
    if "/S" in s[2]:
        _str += "static "
    if s[1]:
        _str += s[1] + " "
    else:
        if not bool(re.match(r"(\w+\.)*(?P<cls>\w+)\.(?P=cls)", s[0])):
            _str += "void "
    if s[0].startswith("cv."):
        _str += s[0][3:].replace(".", "::")
    else:
        _str += s[0].replace(".", "::")
    if len(s[3]) == 0:
        _str += "()"
    else:
        _str += "( "
        for idx, arg in enumerate(s[3]):
            if idx > 0:
                _str += ", "
            argtype = re.sub(r"\bcv::", "", arg[0])
            bidx = argtype.find('[')
            if bidx < 0:
                _str += argtype + " "
            else:
                _str += argtype[:bidx]
            if arg[1]:
                _str += arg[1]
            else:
                _str += "arg" + str(idx)
            if bidx >= 0:
                _str += argtype[bidx:]
            if arg[2]:
                _str += "=" + re.sub(r"\bcv::", "", arg[2])
        _str += " )"
    if "/C" in s[2]:
        _str += " const"
    if "/A" in s[2]:
        _str += " = 0"
    return _str

def logerror(code, message, doc = None):
    if code in errors_disabled:
        return
    if doc:
        print doc["file"] + ":" + str(doc["line"]),
    print "error %03d: %s" % (code, message)
    #print

def process_module(module, path):
    hppparser = hp.CppHeaderParser()
    rstparser = rp.RstParser(hppparser)

    rstparser.parse(module, path)
    rst = rstparser.definitions

    hdrlist = glob.glob(os.path.join(path, "include", "opencv2", module, "*.h*"))
    hdrlist.extend(glob.glob(os.path.join(path, "include", "opencv2", module, "detail", "*.h*")))

    if module == "gpu":
        hdrlist.append(os.path.join(path, "..", "core", "include", "opencv2", "core", "devmem2d.hpp"))
        hdrlist.append(os.path.join(path, "..", "core", "include", "opencv2", "core", "gpumat.hpp"))

    decls = []
    for hname in hdrlist:
        if not "ts_gtest.h" in hname:
            decls += hppparser.parse(hname, wmode=False)

    funcs = []
    # not really needed to hardcode all the namespaces. Normally all they are collected automatically
    namespaces = ['cv', 'cv.gpu', 'cvflann', 'cvflann.anyimpl', 'cvflann.lsh', 'cv.flann', 'cv.linemod', 'cv.detail', 'cvtest', 'perf', 'cv.videostab']
    classes = []
    structs = []

    # collect namespaces and classes/structs
    for decl in decls:
        if decl[0].startswith("const"):
            pass
        elif decl[0].startswith("class") or decl[0].startswith("struct"):
            if decl[0][0] == 'c':
                classes.append(decl)
            else:
                structs.append(decl)
            dotIdx = decl[0].rfind('.')
            if dotIdx > 0:
                namespace = decl[0][decl[0].find(' ')+1:dotIdx]
                if not [c for c in classes if c[0].endswith(namespace)] and not [s for s in structs if s[0].endswith(namespace)]:
                    if namespace not in namespaces:
                        namespaces.append(namespace)
        else:
            funcs.append(decl)

    clsnamespaces = []
    # process classes
    for cl in classes:
        name = cl[0][cl[0].find(' ')+1:]
        if name.find('.') < 0 and not name.startswith("Cv"):
            logerror(ERROR_004_MISSEDNAMESPACE, "class " + name + " from opencv_" + module + " is placed in global namespace but violates C-style naming convention")
        clsnamespaces.append(name)
        if do_python_crosscheck and not name.startswith("cv.") and name.startswith("Cv"):
            clsnamespaces.append("cv." + name[2:])
        if name.startswith("cv."):
            name = name[3:]
        name = name.replace(".", "::")
        sns = synonims.get(name, [])
        sns.append(name)
        for name in sns:
            doc = rst.get(name)
            if not doc:
                #TODO: class is not documented
                continue
            doc[DOCUMENTED_MARKER] = True
            # verify class marker
            if not doc.get("isclass"):
                logerror(ERROR_001_NOTACLASS, "class " + name + " is not marked as \"class\" in documentation", doc)
            else:
                # verify base
                signature = doc.get("class", "")
                signature = signature.replace(" public ", " ")
                namespaceIdx = signature.rfind("::")

                signature = ("class " + signature).strip()
                hdrsignature = ("class " + name + " " + cl[1]).replace(".", "::").replace("cv::","").strip()
                if signature != hdrsignature:
                    logerror(ERROR_003_INCORRECTBASE, "invalid base class documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc)

    # process structs
    for st in structs:
        name = st[0][st[0].find(' ')+1:]
        if name.find('.') < 0 and not name.startswith("Cv"):
            logerror(ERROR_004_MISSEDNAMESPACE, "struct " + name + " from opencv_" + module + " is placed in global namespace but violates C-style naming convention")
        clsnamespaces.append(name)
        if name.startswith("cv."):
            name = name[3:]
        name = name.replace(".", "::")
        doc = rst.get(name)
        if not doc:
            #TODO: struct is not documented
            continue
        doc[DOCUMENTED_MARKER] = True
        # verify struct marker
        if not doc.get("isstruct"):
            logerror(ERROR_002_NOTASTRUCT, "struct " + name + " is not marked as \"struct\" in documentation", doc)
        else:
            # verify base
            signature = doc.get("class", "")
            signature = signature.replace(", public ", " ").replace(" public ", " ")
            signature = signature.replace(", protected ", " ").replace(" protected ", " ")
            signature = signature.replace(", private ", " ").replace(" private ", " ")
            signature = ("struct " + signature).strip()
            hdrsignature = (st[0] + " " + st[1]).replace("struct cv.", "struct ").replace(".", "::").strip()
            if signature != hdrsignature:
                logerror(ERROR_003_INCORRECTBASE, "invalid base struct documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc)
                print st, doc

    # process functions and methods
    flookup = {}
    for fn in funcs:
        name = fn[0]
        parent = None
        namespace = None
        for cl in clsnamespaces:
            if name.startswith(cl + "."):
                if cl.startswith(parent or ""):
                    parent = cl
        if parent:
            name = name[len(parent) + 1:]
            for nm in namespaces:
                if parent.startswith(nm + "."):
                    if nm.startswith(namespace or ""):
                        namespace = nm
            if namespace:
                parent = parent[len(namespace) + 1:]
        else:
            for nm in namespaces:
                if name.startswith(nm + "."):
                    if nm.startswith(namespace or ""):
                        namespace = nm
            if namespace:
                name = name[len(namespace) + 1:]
        #print namespace, parent, name, fn[0]
        if not namespace and not parent and not name.startswith("cv") and not name.startswith("CV_"):
            logerror(ERROR_004_MISSEDNAMESPACE, "function " + name + " from opencv_" + module + " is placed in global namespace but violates C-style naming convention")
        else:
            fdescr = (namespace, parent, name, fn)
            flookup_entry = flookup.get(fn[0], [])
            flookup_entry.append(fdescr)
            flookup[fn[0]] = flookup_entry

    if do_python_crosscheck:
        for name, doc in rst.iteritems():
            decls = doc.get("decls")
            if not decls:
                continue
            for signature in decls:
                if signature[0] == "Python1":
                    pname = signature[1][:signature[1].find('(')]
                    try:
                        fn = getattr(cv2.cv, pname[3:])
                        docstr = "cv." + fn.__doc__
                    except AttributeError:
                        logerror(ERROR_005_MISSINGPYFUNC, "could not load documented function: cv2." + pname, doc)
                        continue
                    docstring = docstr
                    sign = signature[1]
                    signature.append(DOCUMENTED_MARKER)
                    # convert old signature to pydoc style
                    if docstring.endswith("*"):
                        docstring = docstring[:-1]
                    s = None
                    while s != sign:
                        s = sign
                        sign = re.sub(r"^(.*\(.*)\(.*?\)(.*\) *->)", "\\1_\\2", sign)
                    s = None
                    while s != sign:
                        s = sign
                        sign = re.sub(r"\s*,\s*([^,]+)\s*=\s*[^,]+\s*(( \[.*\])?)\)", " [, \\1\\2])", sign)
                    sign = re.sub(r"\(\s*([^,]+)\s*=\s*[^,]+\s*(( \[.*\])?)\)", "([\\1\\2])", sign)

                    sign = re.sub(r"\)\s*->\s*", ") -> ", sign)
                    sign = sign.replace("-> convexHull", "-> CvSeq")
                    sign = sign.replace("-> lines", "-> CvSeq")
                    sign = sign.replace("-> boundingRects", "-> CvSeq")
                    sign = sign.replace("-> contours", "-> CvSeq")
                    sign = sign.replace("-> retval", "-> int")
                    sign = sign.replace("-> detectedObjects", "-> CvSeqOfCvAvgComp")

                    def retvalRplace(match):
                        m = match.group(1)
                        m = m.replace("CvScalar", "scalar")
                        m = m.replace("CvMemStorage", "memstorage")
                        m = m.replace("ROIplImage", "image")
                        m = m.replace("IplImage", "image")
                        m = m.replace("ROCvMat", "mat")
                        m = m.replace("CvMat", "mat")
                        m = m.replace("double", "float")
                        m = m.replace("CvSubdiv2DPoint", "point")
                        m = m.replace("CvBox2D", "Box2D")
                        m = m.replace("IplConvKernel", "kernel")
                        m = m.replace("CvHistogram", "hist")
                        m = m.replace("CvSize", "width,height")
                        m = m.replace("cvmatnd", "matND")
                        m = m.replace("CvSeqOfCvConvexityDefect", "convexityDefects")
                        mm = m.split(',')
                        if len(mm) > 1:
                            return "(" + ", ".join(mm) + ")"
                        else:
                            return m

                    docstring = re.sub(r"(?<=-> )(.*)$", retvalRplace, docstring)
                    docstring = docstring.replace("( [, ", "([")

                    if sign != docstring:
                        logerror(ERROR_006_INVALIDPYOLDDOC, "old-style documentation differs from pydoc\npydoc: " + docstring + "\nfixup: " + sign + "\ncvdoc: " + signature[1], doc)
                elif signature[0] == "Python2":
                    pname = signature[1][4:signature[1].find('(')]
                    cvname = "cv." + pname
                    parent = None
                    for cl in clsnamespaces:
                        if cvname.startswith(cl + "."):
                            if cl.startswith(parent or ""):
                                parent = cl
                    try:
                        if parent:
                            instance, clsname = get_cv2_object(parent)
                            fn = getattr(instance, cvname[len(parent)+1:])
                            docstr = fn.__doc__
                            docprefix = "cv2." + clsname + "."
                        else:
                            fn = getattr(cv2, pname)
                            docstr = fn.__doc__
                            docprefix = "cv2."
                    except AttributeError:
                        if parent:
                            logerror(ERROR_005_MISSINGPYFUNC, "could not load documented member of " + parent + " class: cv2." + pname, doc)
                        else:
                            logerror(ERROR_005_MISSINGPYFUNC, "could not load documented function cv2." + pname, doc)
                        signature.append(DOCUMENTED_MARKER) # stop subsequent errors
                        continue
                    docstrings = [docprefix + s.replace("([, ", "([") for s in docstr.split(" or ")]
                    if not signature[1] in docstrings:
                        pydocs = "\npydoc: ".join(docstrings)
                        logerror(ERROR_007_INVALIDPYDOC, "documentation differs from pydoc\npydoc: " + pydocs + "\ncvdoc: " + signature[1], doc)
                    signature.append(DOCUMENTED_MARKER)

    # verify C/C++ signatures
    for name, doc in rst.iteritems():
        decls = doc.get("decls")
        if not decls:
            continue
        for signature in decls:
            if signature[0] == "C" or signature[0] == "C++":
                if "template" in (signature[2][1] or ""):
                    # TODO find a way to validate templates
                    signature.append(DOCUMENTED_MARKER)
                    continue
                fd = flookup.get(signature[2][0])
                if not fd:
                    if signature[2][0].startswith("cv."):
                        fd = flookup.get(signature[2][0][3:])
                    if not fd:
                        continue
                    else:
                        signature[2][0] = signature[2][0][3:]
                if signature[0] == "C":
                    ffd = [f for f in fd if not f[0] and not f[1]] # filter out C++ stuff
                    if not ffd:
                        if fd[0][1]:
                            logerror(ERROR_008_CFUNCISNOTGLOBAL, "function " + fd[0][2] + " is documented as C function but is actually member of " + fd[0][1] + " class", doc)
                        elif fd[0][0]:
                            logerror(ERROR_008_CFUNCISNOTGLOBAL, "function " + fd[0][2] + " is documented as C function but is actually placed in " + fd[0][0] + " namespace", doc)
                    fd = ffd
                error = None
                for f in fd:
                    match, error = compareSignatures(signature[2], f[3])
                    if match:
                        signature.append(DOCUMENTED_MARKER)
                        break
                if signature[-1] != DOCUMENTED_MARKER:
                    candidates = "\n\t".join([formatSignature(f[3]) for f in fd])
                    logerror(ERROR_009_OVERLOADNOTFOUND, signature[0] + " function " + signature[2][0].replace(".","::") + " is documented but misses in headers (" + error + ").\nDocumented as:\n\t" + signature[1] + "\nCandidates are:\n\t" + candidates, doc)
                    signature.append(DOCUMENTED_MARKER) # to stop subsequent error on this function

    # verify that all signatures was found in the library headers
    for name, doc in rst.iteritems():
        # if doc.get(DOCUMENTED_MARKER, False):
        #     continue # this class/struct was found
        if not doc.get(DOCUMENTED_MARKER, False) and (doc.get("isclass", False) or doc.get("isstruct", False)):
            if name in doc_signatures_whitelist:
                continue
            logerror(ERROR_010_UNKNOWNCLASS, "class/struct " + name + " is mentioned in documentation but is not found in OpenCV headers", doc)
        for d in doc.get("decls", []):
            if d[-1] != DOCUMENTED_MARKER:
                if d[0] == "C" or d[0] == "C++" or (do_python_crosscheck and d[0].startswith("Python")):
                    if d[0][0] == 'C':
                        sname = d[2][0][3:].replace(".", "::")
                        if sname in defines:
                            #TODO: need to find a way to verify #define's
                            continue
                    else:
                        sname = d[1][:d[1].find("(")]
                    prefixes = [x for x in doc_signatures_whitelist if sname.startswith(x)]
                    if prefixes:
                        # TODO: member of template class
                        continue
                    logerror(ERROR_011_UNKNOWNFUNC, d[0] + " function " + sname + " is documented but is not found in OpenCV headers. It is documented as:\n\t" + d[1], doc)
# end of process_module

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print "Usage:\n", os.path.basename(sys.argv[0]), " <module path>"
        exit(0)

    modules = sys.argv[1:]
    if modules[0] == "all":
        modules = allmodules

    for module in modules:
        selfpath = os.path.dirname(os.path.abspath(sys.argv[0]))
        module_path = os.path.join(selfpath, "..", "modules", module)

        if not os.path.isdir(module_path):
            print "Module \"" + module + "\" could not be found."
            exit(1)

        process_module(module, module_path)
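In practice the checker is driven by module names; the module path is resolved relative to the script itself (../modules/<name>), so it is meant to be invoked from the doc directory, for example:

    python check_docs2.py core imgproc
    python check_docs2.py all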


@ -294,7 +294,8 @@ class OCVPyXRefRole(XRefRole):
########################### C/C++/Java Part ###########################
_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*)\b')
_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*\b)')
_argument_name_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*\b(?:\[\d*\])?|\.\.\.)')
_whitespace_re = re.compile(r'\s+(?u)')
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
@ -303,10 +304,10 @@ _operator_re = re.compile(r'''(?x)
\[\s*\]
| \(\s*\)
| (<<|>>)=?
| \+\+ | -- | ->\*?
| [!<>=/*%+|&^-]=?
| \+\+ | --
| ~ | && | \| | \|\|
| ->\*? | \,
| \,
''')
_id_shortwords = {
@ -667,13 +668,14 @@ class MemberObjDefExpr(NamedDefExpr):
class FuncDefExpr(NamedDefExpr):
def __init__(self, name, visibility, static, explicit, rv,
signature, const, pure_virtual):
signature, const, pure_virtual, virtual):
NamedDefExpr.__init__(self, name, visibility, static)
self.rv = rv
self.signature = signature
self.explicit = explicit
self.const = const
self.pure_virtual = pure_virtual
self.virtual = virtual
def get_id(self):
return u'%s%s%s' % (
@ -687,6 +689,8 @@ class FuncDefExpr(NamedDefExpr):
buf = self.get_modifiers()
if self.explicit:
buf.append(u'explicit')
if self.virtual:
buf.append(u'virtual')
if self.rv is not None:
buf.append(unicode(self.rv))
buf.append(u'%s(%s)' % (self.name, u', '.join(
@ -700,8 +704,9 @@ class FuncDefExpr(NamedDefExpr):
class ClassDefExpr(NamedDefExpr):
def __init__(self, name, visibility, static):
def __init__(self, name, visibility, static, parents = None):
NamedDefExpr.__init__(self, name, visibility, static)
self.parents = parents
def get_id(self):
return self.name.get_id()
@ -788,7 +793,6 @@ class DefinitionParser(object):
if self.match(_operator_re):
return NameDefExpr('operator' +
_whitespace_re.sub('', self.matched_text))
# new/delete operator?
for allocop in 'new', 'delete':
if not self.skip_word(allocop):
@ -807,7 +811,7 @@ class DefinitionParser(object):
return CastOpDefExpr(type)
def _parse_name(self):
if not self.match(_identifier_re):
if not self.match(_argument_name_re):
self.fail('expected name')
identifier = self.matched_text
@ -1004,8 +1008,13 @@ class DefinitionParser(object):
self.skip_ws()
argtype = self._parse_type()
argname = default = None
self.skip_ws()
if unicode(argtype) == u"...":
if not self.skip_string(')'):
self.fail("var arg must be the last argument")
args.append(ArgumentDefExpr(None, argtype, None))
break
argname = default = None
if self.skip_string('='):
self.pos += 1
default = self._parse_default_expr()
@ -1072,6 +1081,11 @@ class DefinitionParser(object):
self.skip_ws()
else:
explicit = False
if self.skip_word('virtual'):
virtual = True
self.skip_ws()
else:
virtual = False
rv = self._parse_type()
self.skip_ws()
# some things just don't have return values
@ -1081,11 +1095,26 @@ class DefinitionParser(object):
else:
name = self._parse_type()
return FuncDefExpr(name, visibility, static, explicit, rv,
*self._parse_signature())
*self._parse_signature(), virtual = virtual)
def parse_class(self):
visibility, static = self._parse_visibility_static()
return ClassDefExpr(self._parse_type(), visibility, static)
typename = self._parse_type()
parent = None
self.skip_ws()
parents = []
if self.skip_string(':'):
while not self.eof:
self.skip_ws()
classname_pos = self.pos
pvisibility, pstatic = self._parse_visibility_static()
if pstatic:
self.fail('unexpected static keyword, got %r' %
self.definition[classname_pos:])
parents.append(ClassDefExpr(self._parse_type(), pvisibility, pstatic))
if not self.skip_string(','):
break
return ClassDefExpr(typename, visibility, static, parents)
def read_rest(self):
rv = self.definition[self.pos:]
@ -1212,8 +1241,8 @@ class OCVClassObject(OCVObject):
object_annotation = "class "
object_long_name = "class"
def attach_modifiers(self, node, obj):
if obj.visibility != 'public':
def attach_modifiers(self, node, obj, skip_visibility = 'public'):
if obj.visibility != skip_visibility:
node += addnodes.desc_annotation(obj.visibility,
obj.visibility)
node += nodes.Text(' ')
@ -1231,6 +1260,15 @@ class OCVClassObject(OCVObject):
self.attach_modifiers(signode, cls)
signode += addnodes.desc_annotation(self.__class__.object_annotation, self.__class__.object_annotation)
self.attach_name(signode, cls.name)
first_parent = True
for p in cls.parents:
if first_parent:
signode += nodes.Text(' : ')
first_parent = False
else:
signode += nodes.Text(', ')
self.attach_modifiers(signode, p, None)
self.attach_name(signode, p.name)
class OCVStructObject(OCVClassObject):
object_annotation = "struct "
@ -1263,6 +1301,9 @@ class OCVMemberObject(OCVObject):
return ''
def parse_definition(self, parser):
parent_class = self.env.temp_data.get('ocv:parent')
if parent_class is None:
parser.fail("missing parent structure/class")
return parser.parse_member_object()
def describe_signature(self, signode, obj):
@ -1298,7 +1339,12 @@ class OCVFunctionObject(OCVObject):
self.attach_type(param, arg.type)
param += nodes.Text(u' ')
#param += nodes.emphasis(unicode(arg.name), unicode(arg.name))
sbrIdx = unicode(arg.name).find("[")
if sbrIdx < 0:
param += nodes.strong(unicode(arg.name), unicode(arg.name))
else:
param += nodes.strong(unicode(arg.name)[:sbrIdx], unicode(arg.name)[:sbrIdx])
param += nodes.Text(unicode(arg.name)[sbrIdx:])
if arg.default is not None:
def_ = u'=' + unicode(arg.default)
#param += nodes.emphasis(def_, def_)
@ -1325,6 +1371,9 @@ class OCVFunctionObject(OCVObject):
if func.explicit:
signode += addnodes.desc_annotation('explicit', 'explicit')
signode += nodes.Text(' ')
if func.virtual:
signode += addnodes.desc_annotation('virtual', 'virtual')
signode += nodes.Text(' ')
# return value is None for things with a reverse return value
# such as casting operator definitions or constructors
# and destructors.


@ -199,7 +199,7 @@ protected:
int is_supported(const char* supp_modes_key, const char* mode)
{
const char* supported_modes = params.get(supp_modes_key);
return strstr(supported_modes, mode) > 0;
return (supported_modes && mode && (strstr(supported_modes, mode) > 0));
}
float getFocusDistance(int focus_distance_type)


@ -111,11 +111,11 @@ calibrateCamera
---------------
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON))
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) )
.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize[, cameraMatrix[, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
.. ocv:cfunction:: double cvCalibrateCamera2(const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* pointCounts, CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs, CvMat* rvecs=NULL, CvMat* tvecs=NULL, int flags=0, CvTermCriteria term_crit = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )
.. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* object_points, const CvMat* image_points, const CvMat* point_counts, CvSize image_size, CvMat* camera_matrix, CvMat* distortion_coeffs, CvMat* rotation_vectors=NULL, CvMat* translation_vectors=NULL, int flags=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )
.. ocv:pyoldfunction:: cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags=0)-> None
@ -127,7 +127,7 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
In the old interface all the vectors of object points from different views are concatenated together.
:param pointCounts: In the old interface this is a vector of integers, containing as many elements, as the number of views of the calibration pattern. Each element is the number of points in each view. Usually, all the elements are the same and equal to the number of feature points on the calibration pattern.
:param point_counts: In the old interface this is a vector of integers, containing as many elements, as the number of views of the calibration pattern. Each element is the number of points in each view. Usually, all the elements are the same and equal to the number of feature points on the calibration pattern.
:param imageSize: Size of the image used only to initialize the intrinsic camera matrix.
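A minimal calibration sketch against the Python interface documented above; the chessboard file pattern, the 9x6 inner-corner count, and the unit square size are illustrative assumptions rather than values required by the API ::

    import glob
    import numpy as np
    import cv2

    pattern_size = (9, 6)                                    # inner corners per row/column (assumed)
    objp = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32)
    objp[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)  # planar grid of pattern points, Z = 0

    object_points = []   # 3D points in the calibration pattern coordinate space
    image_points = []    # corresponding 2D projections
    image_size = None
    for fname in glob.glob("calib_*.jpg"):                   # hypothetical input images
        gray = cv2.imread(fname, 0)                          # load each view as 8-bit grayscale
        image_size = gray.shape[::-1]
        found, corners = cv2.findChessboardCorners(gray, pattern_size)
        if found:
            object_points.append(objp)
            image_points.append(corners)

    rms, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
        object_points, image_points, image_size, None, None)
    print "RMS reprojection error:", rms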
@ -268,7 +268,7 @@ For points in an image of a stereo pair, computes the corresponding epilines in
.. ocv:function:: void computeCorrespondEpilines( InputArray points, int whichImage, InputArray F, OutputArray lines )
.. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int whichImage, const CvMat* F, CvMat* lines)
.. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int which_image, const CvMat* fundamental_matrix, CvMat* correspondent_lines )
.. ocv:pyoldfunction:: cv.ComputeCorrespondEpilines(points, whichImage, F, lines) -> None
@ -344,10 +344,8 @@ Converts points to/from homogeneous coordinates.
.. ocv:function:: void convertPointsHomogeneous( InputArray src, OutputArray dst )
.. ocv:pyfunction:: cv2.convertPointsHomogeneous(src[, dst]) -> dst
.. ocv:cfunction:: void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous( src, dst ) -> None
.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous(src, dst) -> None
:param src: Input array or vector of 2D, 3D, or 4D points.
@ -391,7 +389,7 @@ Decomposes a projection matrix into a rotation matrix and a camera matrix.
.. ocv:pyfunction:: cv2.decomposeProjectionMatrix(projMatrix[, cameraMatrix[, rotMatrix[, transVect[, rotMatrixX[, rotMatrixY[, rotMatrixZ[, eulerAngles]]]]]]]) -> cameraMatrix, rotMatrix, transVect, rotMatrixX, rotMatrixY, rotMatrixZ, eulerAngles
.. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat *projMatrix, CvMat *cameraMatrix, CvMat *rotMatrix, CvMat *transVect, CvMat *rotMatrX=NULL, CvMat *rotMatrY=NULL, CvMat *rotMatrZ=NULL, CvPoint3D64f *eulerAngles=NULL)
.. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat * projMatr, CvMat * calibMatr, CvMat * rotMatr, CvMat * posVect, CvMat * rotMatrX=NULL, CvMat * rotMatrY=NULL, CvMat * rotMatrZ=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.DecomposeProjectionMatrix(projMatrix, cameraMatrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None) -> eulerAngles
@ -428,7 +426,7 @@ Renders the detected chessboard corners.
.. ocv:pyfunction:: cv2.drawChessboardCorners(image, patternSize, corners, patternWasFound) -> None
.. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize patternSize, CvPoint2D32f* corners, int count, int patternWasFound )
.. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, CvPoint2D32f* corners, int count, int pattern_was_found )
.. ocv:pyoldfunction:: cv.DrawChessboardCorners(image, patternSize, corners, patternWasFound)-> None
:param image: Destination image. It must be an 8-bit color image.
@ -447,11 +445,11 @@ findChessboardCorners
-------------------------
Finds the positions of internal corners of the chessboard.
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners
.. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize patternSize, CvPoint2D32f* corners, int* cornerCount=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH )
.. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyoldfunction:: cv.FindChessboardCorners(image, patternSize, flags=CV_CALIB_CB_ADAPTIVE_THRESH) -> corners
:param image: Source chessboard view. It must be an 8-bit grayscale or color image.
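A short detection sketch for the Python wrapper shown above; the input file name and the 9x6 pattern are assumptions, and the cornerSubPix refinement is a common follow-up step rather than part of this function ::

    import cv2

    img = cv2.imread("chessboard.jpg")                   # hypothetical input image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    pattern_size = (9, 6)                                # assumed inner-corner layout

    found, corners = cv2.findChessboardCorners(gray, pattern_size)
    if found:
        # refine the detected corners to sub-pixel accuracy, then visualize them
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(img, pattern_size, corners, found)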
@ -508,7 +506,7 @@ Finds the centers in the grid of circles.
.. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector() )
.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> centers
.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> retval, centers
:param image: Grid view of source circles. It must be an 8-bit grayscale or color image.
@ -551,13 +549,13 @@ solvePnP
------------
Finds an object pose from 3D-2D point correspondences.
.. ocv:function:: void solvePnP( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int flags = CV_ITERATIVE )
.. ocv:function:: bool solvePnP( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int flags=ITERATIVE )
.. ocv:pyfunction:: cv2.solvePnP( objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, flags]]]] ) -> rvec, tvec
.. ocv:pyfunction:: cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, flags]]]]) -> retval, rvec, tvec
.. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* cameraMatrix, const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec, int useExtrinsicGuess=0 )
.. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* object_points, const CvMat* image_points, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* rotation_vector, CvMat* translation_vector, int use_extrinsic_guess=0 )
.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2( objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 )-> None
.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 ) -> None
:param objectPoints: Array of object points in the object coordinate space, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. ``vector<Point3f>`` can be also passed here.
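A compact pose-estimation sketch; the correspondences and intrinsic parameters below are made-up values used only to show the calling convention, and the final check reprojects the points with projectPoints (described later in this section) ::

    import numpy as np
    import cv2

    # hypothetical correspondences: a unit square at Z=0 and its observed pixel positions
    object_points = np.float32([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
    image_points = np.float32([[320, 240], [420, 240], [420, 340], [320, 340]])
    camera_matrix = np.float64([[800, 0, 320], [0, 800, 240], [0, 0, 1]])
    dist_coeffs = np.zeros(5)                            # assume no lens distortion

    ok, rvec, tvec = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)

    # reproject the 3D points with the recovered pose and measure the mean pixel error
    projected, _ = cv2.projectPoints(object_points, rvec, tvec, camera_matrix, dist_coeffs)
    errors = np.sqrt(((projected.reshape(-1, 2) - image_points) ** 2).sum(axis=1))
    print "mean reprojection error:", errors.mean()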
@ -587,7 +585,7 @@ solvePnPRansac
------------------
Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
.. ocv:function:: void solvePnPRansac( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = CV_ITERATIVE )
.. ocv:function:: void solvePnPRansac( InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess=false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = ITERATIVE )
.. ocv:pyfunction:: cv2.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, iterationsCount[, reprojectionError[, minInliersCount[, inliers[, flags]]]]]]]]) -> rvec, tvec, inliers
@ -628,8 +626,8 @@ Calculates a fundamental matrix from the corresponding points in two images.
.. ocv:pyfunction:: cv2.findFundamentalMat(points1, points2[, method[, param1[, param2[, mask]]]]) -> retval, mask
.. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamentalMatrix, int method=CV_FM_RANSAC, double param1=1., double param2=0.99, CvMat* status=NULL)
.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> None
.. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamental_matrix, int method=CV_FM_RANSAC, double param1=3., double param2=0.99, CvMat* status=NULL )
.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> retval
:param points1: Array of ``N`` points from the first image. The point coordinates should be floating-point (single or double precision).
@ -693,9 +691,9 @@ Finds a perspective transformation between two planes.
.. ocv:pyfunction:: cv2.findHomography(srcPoints, dstPoints[, method[, ransacReprojThreshold[, mask]]]) -> retval, mask
.. ocv:cfunction:: void cvFindHomography( const CvMat* srcPoints, const CvMat* dstPoints, CvMat* H, int method=0, double ransacReprojThreshold=3, CvMat* status=NULL)
.. ocv:cfunction:: int cvFindHomography( const CvMat* src_points, const CvMat* dst_points, CvMat* homography, int method=0, double ransacReprojThreshold=3, CvMat* mask=0 )
.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method, ransacReprojThreshold=3.0, status=None)-> None
.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method=0, ransacReprojThreshold=3.0, status=None) -> None
:param srcPoints: Coordinates of the points in the original plane, a matrix of the type ``CV_32FC2`` or ``vector<Point2f>`` .
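A brief robust-estimation sketch for the findHomography interface above; the source points and the ground-truth transform are fabricated for illustration, and perspectiveTransform (from the core module) is used only to synthesize consistent destination points ::

    import numpy as np
    import cv2

    # hypothetical points on the source plane and a known transform used to generate matches
    src_points = np.float32([[10, 10], [200, 12], [205, 150], [12, 148], [100, 80]])
    H_true = np.float32([[1, 0.02, 5], [0.01, 1, 10], [0, 0, 1]])
    dst_points = cv2.perspectiveTransform(src_points.reshape(-1, 1, 2), H_true).reshape(-1, 2)

    # robust estimation: the mask marks which input pairs were kept as inliers
    H, mask = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 3.0)
    print "estimated homography:\n", H
    print "inliers:", int(mask.sum()), "of", len(mask)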
@ -717,7 +715,7 @@ Finds a perspective transformation between two planes.
then the point :math:`i` is considered an outlier. If ``srcPoints`` and ``dstPoints`` are measured in pixels, it usually makes sense to set this parameter somewhere in the range of 1 to 10.
:param status: Optional output mask set by a robust method ( ``CV_RANSAC`` or ``CV_LMEDS`` ). Note that the input mask values are ignored.
:param mask: Optional output mask set by a robust method ( ``CV_RANSAC`` or ``CV_LMEDS`` ). Note that the input mask values are ignored.
The functions find and return the perspective transformation :math:`H` between the source and the destination planes:
@ -773,13 +771,13 @@ estimateAffine3D
--------------------
Computes an optimal affine transformation between two 3D point sets.
.. ocv:function:: int estimateAffine3D(InputArray srcpt, InputArray dstpt, OutputArray out, OutputArray inliers, double ransacThreshold = 3.0, double confidence = 0.99)
.. ocv:function:: int estimateAffine3D(InputArray src, InputArray dst, OutputArray out, OutputArray inliers, double ransacThreshold = 3, double confidence = 0.99)
.. ocv:pyfunction:: cv2.estimateAffine3D(srcpt, dstpt[, out[, inliers[, ransacThreshold[, confidence]]]]) -> retval, out, inliers
.. ocv:pyfunction:: cv2.estimateAffine3D(src, dst[, out[, inliers[, ransacThreshold[, confidence]]]]) -> retval, out, inliers
:param srcpt: First input 3D point set.
:param src: First input 3D point set.
:param dstpt: Second input 3D point set.
:param dst: Second input 3D point set.
:param out: Output 3D affine transformation matrix :math:`3 \times 4` .
@ -815,13 +813,13 @@ getOptimalNewCameraMatrix
-----------------------------
Returns the new camera matrix based on the free scaling parameter.
.. ocv:function:: Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, Size imageSize, double alpha, Size newImageSize=Size(), Rect* validPixROI=0, bool centerPrincipalPoint=false)
.. ocv:function:: Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, Size imageSize, double alpha, Size newImgSize=Size(), Rect* validPixROI=0, bool centerPrincipalPoint=false )
.. ocv:pyfunction:: cv2.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha[, newImgSize[, centerPrincipalPoint]]) -> retval, validPixROI
.. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCoeffs, CvSize imageSize, double alpha, CvMat* newCameraMatrix, CvSize newImageSize=cvSize(0, 0), CvRect* validPixROI=0, int centerPrincipalPoint=0)
.. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, const CvMat* dist_coeffs, CvSize image_size, double alpha, CvMat* new_camera_matrix, CvSize new_imag_size=cvSize(0,0), CvRect* valid_pixel_ROI=0, int center_principal_point=0 )
.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0) -> None
.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0, centerPrincipalPoint=0) -> None
:param cameraMatrix: Input camera matrix.
@ -831,9 +829,9 @@ Returns the new camera matrix based on the free scaling parameter.
:param alpha: Free scaling parameter between 0 (when all the pixels in the undistorted image are valid) and 1 (when all the source image pixels are retained in the undistorted image). See :ocv:func:`stereoRectify` for details.
:param newCameraMatrix: Output new camera matrix.
:param new_camera_matrix: Output new camera matrix.
:param newImageSize: Image size after rectification. By default,it is set to ``imageSize`` .
:param new_imag_size: Image size after rectification. By default,it is set to ``imageSize`` .
:param validPixROI: Optional output rectangle that outlines all-good-pixels region in the undistorted image. See ``roi1, roi2`` description in :ocv:func:`stereoRectify` .
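A small sketch showing how the free scaling parameter is typically combined with cv2.undistort; the intrinsic values and the input file name are placeholders, not results of a real calibration ::

    import numpy as np
    import cv2

    # placeholder intrinsics; in practice these come from cv2.calibrateCamera
    camera_matrix = np.float64([[800, 0, 320], [0, 800, 240], [0, 0, 1]])
    dist_coeffs = np.float64([-0.2, 0.05, 0, 0, 0])

    img = cv2.imread("distorted.jpg")                    # hypothetical input image
    h, w = img.shape[:2]

    # alpha=1 keeps all source pixels; the returned ROI outlines the all-good-pixels region
    new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, dist_coeffs, (w, h), 1, (w, h))
    undistorted = cv2.undistort(img, camera_matrix, dist_coeffs, None, new_camera_matrix)
    x, y, rw, rh = valid_roi
    undistorted = undistorted[y:y + rh, x:x + rw]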
@ -854,15 +852,15 @@ Finds an initial camera matrix from 3D-2D point correspondences.
.. ocv:pyfunction:: cv2.initCameraMatrix2D(objectPoints, imagePoints, imageSize[, aspectRatio]) -> retval
.. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* pointCounts, CvSize imageSize, CvMat* cameraMatrix, double aspectRatio=1.)
.. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* object_points, const CvMat* image_points, const CvMat* npoints, CvSize image_size, CvMat* camera_matrix, double aspect_ratio=1. )
.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, aspectRatio=1.) -> None
.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, npoints, imageSize, cameraMatrix, aspectRatio=1.) -> None
:param objectPoints: Vector of vectors of the calibration pattern points in the calibration pattern coordinate space. In the old interface all the per-view vectors are concatenated. See :ocv:func:`calibrateCamera` for details.
:param imagePoints: Vector of vectors of the projections of the calibration pattern points. In the old interface all the per-view vectors are concatenated.
:param pointCounts: The integer vector of point counters for each view.
:param npoints: The integer vector of point counters for each view.
:param imageSize: Image size in pixels used to initialize the principal point.
@ -903,7 +901,8 @@ Projects 3D points to an image plane.
.. ocv:pyfunction:: cv2.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs[, imagePoints[, jacobian[, aspectRatio]]]) -> imagePoints, jacobian
.. ocv:cfunction:: void cvProjectPoints2( const CvMat* objectPoints, const CvMat* rvec, const CvMat* tvec, const CvMat* cameraMatrix, const CvMat* distCoeffs, CvMat* imagePoints, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL )
.. ocv:cfunction:: void cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, const CvMat* translation_vector, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* image_points, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL, double aspect_ratio=0 )
.. ocv:pyoldfunction:: cv.ProjectPoints2(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, dpdrot=None, dpdt=None, dpdf=None, dpdc=None, dpddist=None)-> None
:param objectPoints: Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or ``vector<Point3f>`` ), where N is the number of points in the view.
@ -943,11 +942,11 @@ reprojectImageTo3D
----------------------
Reprojects a disparity image to 3D space.
.. ocv:function:: void reprojectImageTo3D( InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int depth=-1 )
.. ocv:function:: void reprojectImageTo3D( InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int ddepth=-1 )
.. ocv:pyfunction:: cv2.reprojectImageTo3D(disparity, Q[, _3dImage[, handleMissingValues[, ddepth]]]) -> _3dImage
.. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparity, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0)
.. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparityImage, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0 )
.. ocv:pyoldfunction:: cv.ReprojectImageTo3D(disparity, _3dImage, Q, handleMissingValues=0) -> None
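A tiny sketch of the Python call; the disparity map and the reprojection matrix Q are synthetic here, whereas in a real pipeline the disparity comes from a stereo matcher and Q from stereoRectify ::

    import numpy as np
    import cv2

    # synthetic CV_32F disparity map and a plausible 4x4 reprojection matrix Q
    disparity = np.random.uniform(1.0, 64.0, (480, 640)).astype(np.float32)
    Q = np.float32([[1, 0, 0, -320],          # -cx
                    [0, 1, 0, -240],          # -cy
                    [0, 0, 0,  800],          # focal length
                    [0, 0, 10,    0]])        # 1/baseline (made-up)
    points_3d = cv2.reprojectImageTo3D(disparity, Q)     # per-pixel (X, Y, Z) coordinates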
@ -978,18 +977,18 @@ RQDecomp3x3
---------------
Computes an RQ decomposition of 3x3 matrices.
.. ocv:function:: Vec3d RQDecomp3x3( InputArray M, OutputArray R, OutputArray Q, OutputArray Qx=noArray(), OutputArray Qy=noArray(), OutputArray Qz=noArray() )
.. ocv:function:: Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, OutputArray Qx=noArray(), OutputArray Qy=noArray(), OutputArray Qz=noArray() )
.. ocv:pyfunction:: cv2.RQDecomp3x3(src[, mtxR[, mtxQ[, Qx[, Qy[, Qz]]]]]) -> retval, mtxR, mtxQ, Qx, Qy, Qz
.. ocv:cfunction:: void cvRQDecomp3x3( const CvMat *M, CvMat *R, CvMat *Q, CvMat *Qx=NULL, CvMat *Qy=NULL, CvMat *Qz=NULL, CvPoint3D64f *eulerAngles=NULL)
.. ocv:cfunction:: void cvRQDecomp3x3( const CvMat * matrixM, CvMat * matrixR, CvMat * matrixQ, CvMat * matrixQx=NULL, CvMat * matrixQy=NULL, CvMat * matrixQz=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.RQDecomp3x3(M, R, Q, Qx=None, Qy=None, Qz=None) -> eulerAngles
:param M: 3x3 input matrix.
:param src: 3x3 input matrix.
:param R: Output 3x3 upper-triangular matrix.
:param mtxR: Output 3x3 upper-triangular matrix.
:param Q: Output 3x3 orthogonal matrix.
:param mtxQ: Output 3x3 orthogonal matrix.
:param Qx: Optional output 3x3 rotation matrix around x-axis.
@ -1083,11 +1082,11 @@ The constructors.
.. ocv:function:: StereoBM::StereoBM()
.. ocv:function:: StereoBM::StereoBM(int preset, int ndisparities=0, int SADWindowSize=21)
.. ocv:pyfunction:: cv2.StereoBM.StereoBM(preset[, ndisparities[, SADWindowSize]]) -> <StereoBM object>
.. ocv:pyfunction:: cv2.StereoBM([preset[, ndisparities[, SADWindowSize]]]) -> <StereoBM object>
.. ocv:cfunction:: CvStereoBMState* cvCreateStereoBMState( int preset=CV_STEREO_BM_BASIC, int ndisparities=0 )
.. ocv:cfunction:: CvStereoBMState* cvCreateStereoBMState( int preset=CV_STEREO_BM_BASIC, int numberOfDisparities=0 )
.. ocv:pyoldfunction:: cv.CreateStereoBMState(preset=CV_STEREO_BM_BASIC, ndisparities=0)-> StereoBMState
.. ocv:pyoldfunction:: cv.CreateStereoBMState(preset=CV_STEREO_BM_BASIC, numberOfDisparities=0)-> CvStereoBMState
:param preset: specifies the whole set of algorithm parameters, one of:
@ -1109,7 +1108,7 @@ StereoBM::operator()
-----------------------
Computes disparity using the BM algorithm for a rectified stereo pair.
.. ocv:function:: void StereoBM::operator()(InputArray left, InputArray right, OutputArray disp, int disptype=CV_16S )
.. ocv:function:: void StereoBM::operator()( InputArray left, InputArray right, OutputArray disparity, int disptype=CV_16S )
.. ocv:pyfunction:: cv2.StereoBM.compute(left, right[, disparity[, disptype]]) -> disparity
@ -1121,7 +1120,7 @@ Computes disparity using the BM algorithm for a rectified stereo pair.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It has the same size as the input images. When ``disptype==CV_16S``, the map is a 16-bit signed single-channel image, containing disparity values scaled by 16. To get the true disparity values from such fixed-point representation, you will need to divide each ``disp`` element by 16. If ``disptype==CV_32F``, the disparity map will already contain the real disparity values on output.
:param disparity: Output disparity map. It has the same size as the input images. When ``disptype==CV_16S``, the map is a 16-bit signed single-channel image, containing disparity values scaled by 16. To get the true disparity values from such fixed-point representation, you will need to divide each ``disp`` element by 16. If ``disptype==CV_32F``, the disparity map will already contain the real disparity values on output.
:param disptype: Type of the output disparity map, ``CV_16S`` (default) or ``CV_32F``.
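A minimal disparity sketch against the 2.4-era Python constructor documented above (later OpenCV releases expose the matcher through a factory function instead); the image names are assumptions and the pair must already be rectified ::

    import cv2

    left = cv2.imread("left.png", 0)                     # rectified 8-bit grayscale pair (assumed)
    right = cv2.imread("right.png", 0)

    # preset 0 is the basic preset; 80 disparities, 21x21 SAD window
    bm = cv2.StereoBM(0, 80, 21)
    disparity = bm.compute(left, right)                  # CV_16S map, values scaled by 16
    disparity_float = disparity.astype("float32") / 16.0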
@ -1182,7 +1181,7 @@ StereoSGBM::StereoSGBM
.. ocv:function:: StereoSGBM::StereoSGBM( int minDisparity, int numDisparities, int SADWindowSize, int P1=0, int P2=0, int disp12MaxDiff=0, int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, bool fullDP=false)
.. ocv:pyfunction:: cv2.StereoSGBM.StereoSGBM(minDisparity, numDisparities, SADWindowSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, fullDP]]]]]]]]) -> <StereoSGBM object>
.. ocv:pyfunction:: cv2.StereoSGBM([minDisparity, numDisparities, SADWindowSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, fullDP]]]]]]]]]) -> <StereoSGBM object>
Initializes ``StereoSGBM`` and sets parameters to custom values.
@ -1232,129 +1231,17 @@ The method executes the SGBM algorithm on a rectified stereo pair. See ``stereo_
.. note:: The method is not constant, so you should not use the same ``StereoSGBM`` instance from different threads simultaneously.
StereoVar
----------
.. ocv:class:: StereoVar
Class for computing stereo correspondence using the variational matching algorithm ::
    class StereoVar
    {
        StereoVar();
        StereoVar( int levels, double pyrScale,
                   int nIt, int minDisp, int maxDisp,
                   int poly_n, double poly_sigma, float fi,
                   float lambda, int penalization, int cycle,
                   int flags);
        virtual ~StereoVar();

        virtual void operator()(InputArray left, InputArray right, OutputArray disp);

        int     levels;
        double  pyrScale;
        int     nIt;
        int     minDisp;
        int     maxDisp;
        int     poly_n;
        double  poly_sigma;
        float   fi;
        float   lambda;
        int     penalization;
        int     cycle;
        int     flags;

        ...
    };
The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows:
* The automatic initialization of method's parameters is added.
* The method of Smart Iteration Distribution (SID) is implemented.
* The support of Multi-Level Adaptation Technique (MLAT) is not included.
* The method of dynamic adaptation of method's parameters is not included.
StereoVar::StereoVar
--------------------------
.. ocv:function:: StereoVar::StereoVar()
.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags )
The constructor
:param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if flag USE_AUTO_PARAMS is set.
:param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the number of iterations is redistributed in such a way that more iterations are done on the coarser levels.)
:param minDisp: Minimum possible disparity value. Could be negative in case the left and right input images change places.
:param maxDisp: Maximum possible disparity value.
:param poly_n: Size of the pixel neighbourhood used to find polynomial expansion in each pixel. Larger values mean that the image will be approximated with smoother surfaces, yielding a more robust algorithm and a more blurred motion field. Typically, poly_n = 3, 5 or 7.
:param poly_sigma: Standard deviation of the Gaussian used to smooth the derivatives that serve as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1; for poly_n=7 a good value would be poly_sigma=1.5.
:param fi: The smoothness parameter, or the weight coefficient for the smoothness term.
:param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.)
:param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param flags: The operation flags; can be a combination of the following:
* USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation.
* USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase.
* USE_SMART_ID: Use the smart iteration distribution (SID).
* USE_AUTO_PARAMS: Allow the method to initialize the main parameters.
* USE_MEDIAN_FILTERING: Use the median filter of the solution in the post-processing phase.
The first constructor initializes ``StereoVar`` with all the default parameters. So, you only have to set ``StereoVar::maxDisp`` and / or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value.
StereoVar::operator ()
-----------------------
.. ocv:function:: void StereoVar::operator()(InputArray left, InputArray right, OutputArray disp)
Computes disparity using the variational algorithm for a rectified stereo pair.
:param left: Left 8-bit single-channel or 3-channel image.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It is an 8-bit signed single-channel image of the same size as the input image.
The method executes the variational algorithm on a rectified stereo pair. See ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method.
**Note**:
The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
stereoCalibrate
-------------------
Calibrates the stereo camera.
.. ocv:function:: double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria term_crit = TermCriteria(TermCriteria::COUNT+ TermCriteria::EPS, 30, 1e-6), int flags=CALIB_FIX_INTRINSIC )
.. ocv:function:: double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags=CALIB_FIX_INTRINSIC )
.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize[, cameraMatrix1[, distCoeffs1[, cameraMatrix2[, distCoeffs2[, R[, T[, E[, F[, criteria[, flags]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize[, cameraMatrix1[, distCoeffs1[, cameraMatrix2[, distCoeffs2[, R[, T[, E[, F[, criteria[, flags]]]]]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
.. ocv:cfunction:: double cvStereoCalibrate( const CvMat* objectPoints, const CvMat* imagePoints1, const CvMat* imagePoints2, const CvMat* pointCounts, CvMat* cameraMatrix1, CvMat* distCoeffs1, CvMat* cameraMatrix2, CvMat* distCoeffs2, CvSize imageSize, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria termCrit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
.. ocv:pyoldfunction:: cv.StereoCalibrate( objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, termCrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
.. ocv:cfunction:: double cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, const CvMat* image_points2, const CvMat* npoints, CvMat* camera_matrix1, CvMat* dist_coeffs1, CvMat* camera_matrix2, CvMat* dist_coeffs2, CvSize image_size, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
.. ocv:pyoldfunction:: cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
:param objectPoints: Vector of vectors of the calibration pattern points.
@ -1439,10 +1326,11 @@ stereoRectify
-----------------
Computes rectification transforms for each head of a calibrated stereo camera.
.. ocv:function:: void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, InputArray cameraMatrix2, InputArray distCoeffs2, Size imageSize, InputArray R, InputArray T, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags=CALIB_ZERO_DISPARITY, double alpha, Size newImageSize=Size(), Rect* roi1=0, Rect* roi2=0 )
.. ocv:function:: void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, InputArray cameraMatrix2, InputArray distCoeffs2, Size imageSize, InputArray R, InputArray T, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags=CALIB_ZERO_DISPARITY, double alpha=-1, Size newImageSize=Size(), Rect* validPixROI1=0, Rect* validPixROI2=0 )
.. ocv:cfunction:: void cvStereoRectify( const CvMat* cameraMatrix1, const CvMat* cameraMatrix2, const CvMat* distCoeffs1, const CvMat* distCoeffs2, CvSize imageSize, const CvMat* R, const CvMat* T, CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, CvMat* Q=0, int flags=CV_CALIB_ZERO_DISPARITY, double alpha=-1, CvSize newImageSize=cvSize(0, 0), CvRect* roi1=0, CvRect* roi2=0)
.. ocv:pyoldfunction:: cv.StereoRectify( cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0))-> (roi1, roi2)
.. ocv:cfunction:: void cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, CvSize image_size, const CvMat* R, const CvMat* T, CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, CvMat* Q=0, int flags=CV_CALIB_ZERO_DISPARITY, double alpha=-1, CvSize new_image_size=cvSize(0,0), CvRect* valid_pix_ROI1=0, CvRect* valid_pix_ROI2=0 )
.. ocv:pyoldfunction:: cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0)) -> (roi1, roi2)
:param cameraMatrix1: First camera matrix.
@ -1474,9 +1362,9 @@ Computes rectification transforms for each head of a calibrated stereo camera.
:param newImageSize: New image resolution after rectification. The same size should be passed to :ocv:func:`initUndistortRectifyMap` (see the ``stereo_calib.cpp`` sample in OpenCV samples directory). When (0,0) is passed (default), it is set to the original ``imageSize`` . Setting it to larger value can help you preserve details in the original image, especially when there is a big radial distortion.
:param roi1:
:param validPixROI1: Optional output rectangles inside the rectified images where all the pixels are valid. If ``alpha=0`` , the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
:param roi2: Optional output rectangles inside the rectified images where all the pixels are valid. If ``alpha=0`` , the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
:param validPixROI2: Optional output rectangles inside the rectified images where all the pixels are valid. If ``alpha=0`` , the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
The function computes the rotation matrices for each camera that (virtually) make both camera image planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies the dense stereo correspondence problem. The function takes the matrices computed by
:ocv:func:`stereoCalibrate` as input. As output, it provides two rotation matrices and also two projection matrices in the new coordinates. The function distinguishes the following two cases:
@ -1529,7 +1417,7 @@ Computes a rectification transform for an uncalibrated stereo camera.
.. ocv:pyfunction:: cv2.stereoRectifyUncalibrated(points1, points2, F, imgSize[, H1[, H2[, threshold]]]) -> retval, H1, H2
.. ocv:cfunction:: void cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, const CvMat* F, CvSize imageSize, CvMat* H1, CvMat* H2, double threshold=5 )
.. ocv:cfunction:: int cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, const CvMat* F, CvSize img_size, CvMat* H1, CvMat* H2, double threshold=5 )
.. ocv:pyoldfunction:: cv.StereoRectifyUncalibrated(points1, points2, F, imageSize, H1, H2, threshold=5)-> None
@ -1539,7 +1427,7 @@ Computes a rectification transform for an uncalibrated stereo camera.
:param F: Input fundamental matrix. It can be computed from the same set of point pairs using :ocv:func:`findFundamentalMat` .
:param imageSize: Size of the image.
:param imgSize: Size of the image.
:param H1: Output rectification homography matrix for the first image.

View File

@ -215,8 +215,7 @@ CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+
CV_CALIB_CB_NORMALIZE_IMAGE) );
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
@ -474,7 +473,7 @@ enum
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess=false, int flags=0);
bool useExtrinsicGuess=false, int flags=ITERATIVE);
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
@ -488,7 +487,7 @@ CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
float reprojectionError = 8.0,
int minInliersCount = 100,
OutputArray inliers = noArray(),
int flags = 0);
int flags = ITERATIVE);
//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
@ -501,8 +500,7 @@ enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+
CALIB_CB_NORMALIZE_IMAGE );
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
@ -574,8 +572,7 @@ CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
CV_OUT InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,
OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+
TermCriteria::EPS, 30, 1e-6),
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags=CALIB_FIX_INTRINSIC );
@ -642,7 +639,7 @@ CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
double param1=3., double param2=0.99);
//! finds coordinates of epipolar lines corresponding the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points1,
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );
@ -743,9 +740,9 @@ CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
bool handleMissingValues=false,
int ddepth=-1 );
CV_EXPORTS_W int estimateAffine3D(InputArray _from, InputArray _to,
OutputArray _out, OutputArray _inliers,
double param1=3, double param2=0.99);
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold=3, double confidence=0.99);
}

View File

@ -3,3 +3,8 @@ contrib. Contributed/Experimental Stuff
***************************************
The module contains some recently added functionality that has not been stabilized, or functionality that is considered optional.
.. toctree::
:maxdepth: 2
stereo

View File

@ -0,0 +1,117 @@
Stereo Correspondence
========================================
.. highlight:: cpp
StereoVar
----------
.. ocv:class:: StereoVar
Class for computing stereo correspondence using the variational matching algorithm ::
class StereoVar
{
StereoVar();
StereoVar( int levels, double pyrScale,
int nIt, int minDisp, int maxDisp,
int poly_n, double poly_sigma, float fi,
float lambda, int penalization, int cycle,
int flags);
virtual ~StereoVar();
virtual void operator()(InputArray left, InputArray right, OutputArray disp);
int levels;
double pyrScale;
int nIt;
int minDisp;
int maxDisp;
int poly_n;
double poly_sigma;
float fi;
float lambda;
int penalization;
int cycle;
int flags;
...
};
The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows:
* The automatic initialization of method's parameters is added.
* The method of Smart Iteration Distribution (SID) is implemented.
* The support of Multi-Level Adaptation Technique (MLAT) is not included.
* The method of dynamic adaptation of method's parameters is not included.
StereoVar::StereoVar
--------------------------
.. ocv:function:: StereoVar::StereoVar()
.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags )
The constructor
:param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if flag USE_AUTO_PARAMS is set.
:param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is half the size of the previous one. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the iterations are redistributed so that more of them are done on the coarser levels.)
:param minDisp: Minimum possible disparity value. It can be negative if the left and right input images are swapped.
:param maxDisp: Maximum possible disparity value.
:param poly_n: Size of the pixel neighbourhood used to find the polynomial expansion in each pixel. Larger values mean that the image is approximated with smoother surfaces, yielding a more robust algorithm and a more blurred motion field. Typically, poly_n = 3, 5, or 7.
:param poly_sigma: Standard deviation of the Gaussian used to smooth the derivatives that serve as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1; for poly_n=7 a good value would be poly_sigma=1.5.
:param fi: The smoothness parameter, or the weight coefficient for the smoothness term.
:param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.)
:param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
:param flags: The operation flags; can be a combination of the following:
* USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation.
* USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase.
* USE_SMART_ID: Use the smart iteration distribution (SID).
* USE_AUTO_PARAMS: Allow the method to initialize the main parameters.
* USE_MEDIAN_FILTERING: Use the median filter of the solution in the post-processing phase.
The first constructor initializes ``StereoVar`` with all the default parameters, so you only have to set ``StereoVar::maxDisp`` and/or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value.
StereoVar::operator ()
-----------------------
.. ocv:function:: void StereoVar::operator()( const Mat& left, const Mat& right, Mat& disp )
Computes disparity using the variational algorithm for a rectified stereo pair.
:param left: Left 8-bit single-channel or 3-channel image.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It is an 8-bit signed single-channel image of the same size as the input images.
The method executes the variational algorithm on a rectified stereo pair. See the ``stereo_match.cpp`` OpenCV sample for an example of how to prepare images and call the method.
**Note**:
The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
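For illustration only, a minimal sketch of a typical call could look as follows. It assumes the class is reachable through the contrib module header, that ``left.png`` and ``right.png`` are placeholder names for an already rectified pair, and that the disparity range values are arbitrary: ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/contrib/contrib.hpp"

    using namespace cv;

    int main()
    {
        // an already rectified stereo pair (file names are placeholders)
        Mat left  = imread("left.png", 0);
        Mat right = imread("right.png", 0);
        if( left.empty() || right.empty() )
            return -1;

        StereoVar var;          // default parameters
        var.minDisp = -16;      // only the disparity range really has to be set
        var.maxDisp = 16;

        Mat disp;
        var(left, right, disp); // run the variational matcher

        Mat disp8u;
        disp.convertTo(disp8u, CV_8U, 1, 128);  // shift signed disparities for display
        imshow("disparity", disp8u);
        waitKey(0);
        return 0;
    }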

View File

@ -182,7 +182,7 @@ The class represents rotated (i.e. not up-right) rectangles on a plane. Each rec
:param angle: The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
:param box: The rotated rectangle parameters as the obsolete CvBox2D structure.
.. ocv:function:: void RotatedRect::points(Point2f* pts) const
.. ocv:function:: void RotatedRect::points( Point2f pts[] ) const
.. ocv:function:: Rect RotatedRect::boundingRect() const
.. ocv:function:: RotatedRect::operator CvBox2D() const
@ -797,7 +797,7 @@ Various Mat constructors
.. ocv:function:: Mat::Mat(Size size, int type, void* data, size_t step=AUTO_STEP)
.. ocv:function:: Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange)
.. ocv:function:: Mat::Mat( const Mat& m, const Range& rowRange, const Range& colRange=Range::all() )
.. ocv:function:: Mat::Mat(const Mat& m, const Rect& roi)
@ -811,8 +811,6 @@ Various Mat constructors
.. ocv:function:: template<typename T> explicit Mat::Mat(const vector<T>& vec, bool copyData=false)
.. ocv:function:: Mat::Mat(const MatExpr& expr)
.. ocv:function:: Mat::Mat(int ndims, const int* sizes, int type)
.. ocv:function:: Mat::Mat(int ndims, const int* sizes, int type, const Scalar& s)
@ -857,8 +855,6 @@ Various Mat constructors
:param ranges: Array of selected ranges of ``m`` along each dimensionality.
:param expr: Matrix expression. See :ref:`MatrixExpressions`.
These are various constructors that form a matrix. As noted in the :ref:`AutomaticAllocation`,
often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with
:ocv:func:`Mat::create` . In the former case, the old content is de-referenced.
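For example, a few of the constructors above in action (just a sketch): ::

    // 3x3 floating-point matrix filled with ones
    Mat A(3, 3, CV_32F, Scalar(1));

    // 640x480 3-channel color image, uninitialized
    Mat img(Size(640, 480), CV_8UC3);

    // header for the top-left 100x100 region of img; no data is copied
    Mat roi(img, Rect(0, 0, 100, 100));

    // 2x3x4 three-dimensional array of doubles, zero-initialized
    int sz[] = {2, 3, 4};
    Mat nd(3, sz, CV_64F, Scalar::all(0));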
@ -879,7 +875,7 @@ Provides matrix assignment operators.
.. ocv:function:: Mat& Mat::operator = (const Mat& m)
.. ocv:function:: Mat& Mat::operator = (const MatExpr_Base& expr)
.. ocv:function:: Mat& Mat::operator =( const MatExpr& expr )
.. ocv:function:: Mat& Mat::operator = (const Scalar& s)
@ -891,23 +887,13 @@ Provides matrix assignment operators.
These are available assignment operators. Since they all are very different, make sure to read the operator parameters description.
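A short sketch of the three forms: ::

    Mat A = Mat::eye(3, 3, CV_32F);    // construction from a matrix expression
    Mat B;
    B = A*2.0 + Scalar::all(1);        // the expression is evaluated and stored in B
    B = Scalar::all(0);                // every element of B is set to 0
    Mat C;
    C = A;                             // no data is copied; C shares the data with A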
Mat::operator MatExpr
-------------------------
Provides a ``Mat`` -to- ``MatExpr`` cast operator.
.. ocv:function:: Mat::operator MatExpr_<Mat, Mat>() const
The cast operator should not be called explicitly. It is used internally by the
:ref:`MatrixExpressions` engine.
Mat::row
------------
Creates a matrix header for the specified matrix row.
.. ocv:function:: Mat Mat::row(int i) const
.. ocv:function:: Mat Mat::row(int y) const
:param i: A 0-based row index.
:param y: A 0-based row index.
The method makes a new header for the specified matrix row and returns it. This is an O(1) operation, regardless of the matrix size. The underlying data of the new matrix is shared with the original matrix. Here is an example of one of the classical basic matrix processing operations, ``axpy``, used by LU and many other algorithms: ::
@ -940,9 +926,9 @@ Mat::col
------------
Creates a matrix header for the specified matrix column.
.. ocv:function:: Mat Mat::col(int j) const
.. ocv:function:: Mat Mat::col(int x) const
:param j: A 0-based column index.
:param x: A 0-based column index.
The method makes a new header for the specified matrix column and returns it. This is an O(1) operation, regardless of the matrix size. The underlying data of the new matrix is shared with the original matrix. See also the
:ocv:func:`Mat::row` description.
@ -988,11 +974,11 @@ Mat::diag
-------------
Extracts a diagonal from a matrix, or creates a diagonal matrix.
.. ocv:function:: Mat Mat::diag(int d) const
.. ocv:function:: Mat Mat::diag( int d=0 ) const
.. ocv:function:: static Mat Mat::diag(const Mat& matD)
.. ocv:function:: static Mat Mat::diag( const Mat& d )
:param d: Index of the diagonal, with the following values:
:param d: Single-column matrix that forms a diagonal matrix or index of the diagonal, with the following values:
* **d=0** is the main diagonal.
@ -1000,8 +986,6 @@ Extracts a diagonal from a matrix, or creates a diagonal matrix.
* **d<0** is a diagonal from the upper half. For example, ``d=-1`` means the diagonal is set immediately above the main one.
:param matD: Single-column matrix that forms a diagonal matrix.
The method makes a new header for the specified matrix diagonal. The new matrix is represented as a single-column matrix. Similarly to
:ocv:func:`Mat::row` and
:ocv:func:`Mat::col` , this is an O(1) operation.
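For instance (a minimal sketch): ::

    Mat A = (Mat_<float>(3, 3) << 1, 2, 3,
                                  4, 5, 6,
                                  7, 8, 9);
    Mat d = A.diag();        // 3x1 header over the elements 1, 5, 9; shares data with A
    Mat D = Mat::diag(d);    // new 3x3 matrix with 1, 5, 9 on its diagonal, zeros elsewhere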
@ -1075,9 +1059,9 @@ Mat::setTo
--------------
Sets all or some of the array elements to the specified value.
.. ocv:function:: Mat& Mat::setTo(const Scalar& s, InputArray mask=noArray())
.. ocv:function:: Mat& Mat::setTo( InputArray value, InputArray mask=noArray() )
:param s: Assigned scalar converted to the actual array type.
:param value: Assigned scalar converted to the actual array type.
:param mask: Operation mask of the same size as ``*this``. This is an advanced variant of the ``Mat::operator=(const Scalar& s)`` operator.
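For example (a sketch; the drawing call is only used to build a non-trivial mask): ::

    Mat img(100, 100, CV_8UC3);
    Mat mask = Mat::zeros(img.size(), CV_8U);
    circle(mask, Point(50, 50), 20, Scalar(255), -1);   // filled circle in the mask

    img.setTo(Scalar(0, 255, 0));          // paint the whole image green
    img.setTo(Scalar(0, 0, 255), mask);    // paint only the masked circle red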
@ -1189,7 +1173,7 @@ Returns a zero array of the specified size and type.
.. ocv:function:: static MatExpr Mat::zeros(int rows, int cols, int type)
.. ocv:function:: static MatExpr Mat::zeros(Size size, int type)
.. ocv:function:: static MatExpr Mat::zeros(int ndims, const int* sizes, int type)
.. ocv:function:: static MatExpr Mat::zeros( int ndims, const int* sz, int type )
:param ndims: Array dimensionality.
@ -1199,7 +1183,7 @@ Returns a zero array of the specified size and type.
:param size: Alternative to the matrix size specification ``Size(cols, rows)`` .
:param sizes: Array of integers specifying the array shape.
:param sz: Array of integers specifying the array shape.
:param type: Created matrix type.
@ -1218,7 +1202,7 @@ Returns an array of all 1's of the specified size and type.
.. ocv:function:: static MatExpr Mat::ones(int rows, int cols, int type)
.. ocv:function:: static MatExpr Mat::ones(Size size, int type)
.. ocv:function:: static MatExpr Mat::ones(int ndims, const int* sizes, int type)
.. ocv:function:: static MatExpr Mat::ones( int ndims, const int* sz, int type )
:param ndims: Array dimensionality.
@ -1228,7 +1212,7 @@ Returns an array of all 1's of the specified size and type.
:param size: Alternative to the matrix size specification ``Size(cols, rows)`` .
:param sizes: Array of integers specifying the array shape.
:param sz: Array of integers specifying the array shape.
:param type: Created matrix type.
@ -1367,7 +1351,8 @@ Mat::push_back
Adds elements to the bottom of the matrix.
.. ocv:function:: template<typename T> void Mat::push_back(const T& elem)
.. ocv:function:: void Mat::push_back(const Mat& elem)
.. ocv:function:: void Mat::push_back( const Mat& m )
:param elem: Added element(s).
@ -1439,7 +1424,7 @@ Extracts a rectangular submatrix.
.. ocv:function:: Mat Mat::operator()( const Rect& roi ) const
.. ocv:function:: Mat Mat::operator()( const Ranges* ranges ) const
.. ocv:function:: Mat Mat::operator()( const Range* ranges ) const
:param rowRange: Start and end row of the extracted submatrix. The upper boundary is not included. To select all the rows, use ``Range::all()``.
@ -1626,7 +1611,7 @@ Mat::step1
--------------
Returns a normalized step.
.. ocv:function:: size_t Mat::step1() const
.. ocv:function:: size_t Mat::step1( int i=0 ) const
The method returns a matrix step divided by
:ocv:func:`Mat::elemSize1()` . It can be useful to quickly access an arbitrary matrix element.
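For example, ``Mat::step1()`` can be combined with ``Mat::channels()`` to index the raw ``data`` buffer directly (a sketch for an 8-bit image, where one element equals one byte): ::

    Mat rgb(480, 640, CV_8UC3, Scalar::all(0));
    // green component of the pixel at row 10, column 20
    uchar g = rgb.data[10*rgb.step1() + 20*rgb.channels() + 1];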
@ -1654,15 +1639,15 @@ Mat::ptr
------------
Returns a pointer to the specified matrix row.
.. ocv:function:: uchar* Mat::ptr(int i=0)
.. ocv:function:: uchar* Mat::ptr(int i0=0)
.. ocv:function:: const uchar* Mat::ptr(int i=0) const
.. ocv:function:: const uchar* Mat::ptr(int i0=0) const
.. ocv:function:: template<typename _Tp> _Tp* Mat::ptr(int i=0)
.. ocv:function:: template<typename _Tp> _Tp* Mat::ptr(int i0=0)
.. ocv:function:: template<typename _Tp> const _Tp* Mat::ptr(int i=0) const
.. ocv:function:: template<typename _Tp> const _Tp* Mat::ptr(int i0=0) const
:param i: A 0-based row index.
:param i0: A 0-based row index.
The methods return ``uchar*`` or a typed pointer to the specified matrix row. See the sample in
:ocv:func:`Mat::isContinuous` for an example of how to use these methods.
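A typical row-by-row scan looks like this (a sketch): ::

    Mat img(240, 320, CV_8UC1);
    for( int i = 0; i < img.rows; i++ )
    {
        uchar* row = img.ptr<uchar>(i);          // pointer to the beginning of row i
        for( int j = 0; j < img.cols; j++ )
            row[j] = saturate_cast<uchar>(i + j);
    }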
@ -1816,6 +1801,7 @@ To use ``Mat_`` for multi-channel images/matrices, pass ``Vec`` as a ``Mat_`` pa
InputArray
----------
.. ocv:class:: InputArray
This is the proxy class for passing read-only input arrays into OpenCV functions. It is defined as ::
@ -1880,6 +1866,7 @@ It denotes function arguments that are either vectors of vectors or vectors of m
OutputArray
-----------
.. ocv:class:: OutputArray : public InputArray
This type is very similar to ``InputArray`` except that it is used for input/output and output function parameters. Just like with ``InputArray``, OpenCV users should not care about ``OutputArray``; they just pass ``Mat``, ``vector<T>``, etc. to the functions. The same limitation as for ``InputArray`` applies here too: **Do not explicitly create OutputArray instances**.
@ -2310,6 +2297,7 @@ It simplifies notation of some operations. ::
Algorithm
---------
.. ocv:class:: Algorithm
This is a base class for all more or less complex algorithms in OpenCV, especially for classes of algorithms for which there can be multiple implementations. Examples are stereo correspondence (for which there are algorithms like block matching, semi-global block matching, graph-cut, etc.), background subtraction (which can be done using mixture-of-Gaussians models, codebook-based algorithms, etc.), and optical flow (block matching, Lucas-Kanade, Horn-Schunck, etc.).

View File

@ -7,17 +7,17 @@ kmeans
------
Finds centers of clusters and groups input samples around the clusters.
.. ocv:function:: double kmeans( InputArray samples, int clusterCount, InputOutputArray labels, TermCriteria criteria, int attempts, int flags, OutputArray centers=noArray() )
.. ocv:function:: double kmeans( InputArray data, int K, InputOutputArray bestLabels, TermCriteria criteria, int attempts, int flags, OutputArray centers=noArray() )
.. ocv:pyfunction:: cv2.kmeans(data, K, criteria, attempts, flags[, bestLabels[, centers]]) -> retval, bestLabels, centers
.. ocv:cfunction:: int cvKMeans2(const CvArr* samples, int clusterCount, CvArr* labels, CvTermCriteria criteria, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* centers=0, double* compactness=0)
.. ocv:cfunction:: int cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, CvTermCriteria termcrit, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* _centers=0, double* compactness=0 )
.. ocv:pyoldfunction:: cv.KMeans2(samples, clusterCount, labels, criteria)-> None
.. ocv:pyoldfunction:: cv.KMeans2(samples, nclusters, labels, termcrit, attempts=1, flags=0, centers=None) -> float
:param samples: Floating-point matrix of input samples, one row per sample.
:param clusterCount: Number of clusters to split the set by.
:param cluster_count: Number of clusters to split the set by.
:param labels: Input/output integer array that stores the cluster indices for every sample.
@ -40,7 +40,7 @@ Finds centers of clusters and groups input samples around the clusters.
:param compactness: The returned value that is described below.
The function ``kmeans`` implements a k-means algorithm that finds the
centers of ``clusterCount`` clusters and groups the input samples
centers of ``cluster_count`` clusters and groups the input samples
around the clusters. As an output,
:math:`\texttt{labels}_i` contains a 0-based cluster index for
the sample stored in the

View File

@ -34,7 +34,8 @@ Draws a circle.
.. ocv:pyfunction:: cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvCircle( CvArr* img, CvPoint center, int radius, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvCircle( CvArr* img, CvPoint center, int radius, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image where the circle is drawn.
@ -63,8 +64,9 @@ Clips the line against the image rectangle.
.. ocv:pyfunction:: cv2.clipLine(imgRect, pt1, pt2) -> retval, pt1, pt2
.. ocv:cfunction:: int cvClipLine( CvSize imgSize, CvPoint* pt1, CvPoint* pt2 )
.. ocv:pyoldfunction:: cv.ClipLine(imgSize, pt1, pt2) -> (clippedPt1, clippedPt2)
.. ocv:cfunction:: int cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 )
.. ocv:pyoldfunction:: cv.ClipLine(imgSize, pt1, pt2) -> (point1, point2)
:param imgSize: Image size. The image rectangle is ``Rect(0, 0, imgSize.width, imgSize.height)`` .
@ -88,10 +90,12 @@ Draws a simple or thick elliptic arc or fills an ellipse sector.
.. ocv:pyfunction:: cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:pyfunction:: cv2.ellipse(img, box, color[, thickness[, lineType]]) -> None
.. ocv:cfunction:: void cvEllipse( CvArr* img, CvPoint center, CvSize axes, double angle, double startAngle, double endAngle, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:cfunction:: void cvEllipse( CvArr* img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Ellipse(img, center, axes, angle, start_angle, end_angle, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:cfunction:: void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:cfunction:: void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:pyoldfunction:: cv.EllipseBox(img, box, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
@ -130,7 +134,7 @@ ellipse2Poly
----------------
Approximates an elliptic arc with a polyline.
.. ocv:function:: void ellipse2Poly( Point center, Size axes, int angle, int startAngle, int endAngle, int delta, vector<Point>& pts )
.. ocv:function:: void ellipse2Poly( Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector<Point>& pts )
.. ocv:pyfunction:: cv2.ellipse2Poly(center, axes, angle, arcStart, arcEnd, delta) -> pts
@ -140,9 +144,9 @@ Approximates an elliptic arc with a polyline.
:param angle: Rotation angle of the ellipse in degrees. See the :ocv:func:`ellipse` for details.
:param startAngle: Starting angle of the elliptic arc in degrees.
:param arcStart: Starting angle of the elliptic arc in degrees.
:param endAngle: Ending angle of the elliptic arc in degrees.
:param arcEnd: Ending angle of the elliptic arc in degrees.
:param delta: Angle between the subsequent polyline vertices. It defines the approximation accuracy.
@ -161,7 +165,8 @@ Fills a convex polygon.
.. ocv:pyfunction:: cv2.fillConvexPoly(img, points, color[, lineType[, shift]]) -> None
.. ocv:cfunction:: void cvFillConvexPoly( CvArr* img, CvPoint* pts, int npts, CvScalar color, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.FillConvexPoly(img, pn, color, lineType=8, shift=0)-> None
:param img: Image.
@ -190,7 +195,8 @@ Fills the area bounded by one or more polygons.
.. ocv:pyfunction:: cv2.fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> None
.. ocv:cfunction:: void cvFillPoly( CvArr* img, CvPoint** pts, int* npts, int ncontours, CvScalar color, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, int contours, CvScalar color, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.FillPoly(img, polys, color, lineType=8, shift=0)-> None
:param img: Image.
@ -222,7 +228,8 @@ Calculates the width and height of a text string.
.. ocv:pyfunction:: cv2.getTextSize(text, fontFace, fontScale, thickness) -> retval, baseLine
.. ocv:cfunction:: void cvGetTextSize( const char* textString, const CvFont* font, CvSize* textSize, int* baseline )
.. ocv:cfunction:: void cvGetTextSize( const char* text_string, const CvFont* font, CvSize* text_size, int* baseline )
.. ocv:pyoldfunction:: cv.GetTextSize(textString, font)-> (textSize, baseline)
:param text: Input text string.
@ -273,11 +280,11 @@ InitFont
--------
Initializes font structure (OpenCV 1.x API).
.. ocv:cfunction:: void cvInitFont( CvFont* font, int fontFace, double hscale, double vscale, double shear=0, int thickness=1, int lineType=8 )
.. ocv:cfunction:: void cvInitFont( CvFont* font, int font_face, double hscale, double vscale, double shear=0, int thickness=1, int line_type=8 )
:param font: Pointer to the font structure initialized by the function
:param fontFace: Font name identifier. Only a subset of Hershey fonts http://sources.isc.org/utils/misc/hershey-font.txt are supported now:
:param font_face: Font name identifier. Only a subset of Hershey fonts http://sources.isc.org/utils/misc/hershey-font.txt are supported now:
* **CV_FONT_HERSHEY_SIMPLEX** normal size sans-serif font
@ -310,7 +317,7 @@ Initializes font structure (OpenCV 1.x API).
:param thickness: Thickness of the text strokes
:param lineType: Type of the strokes, see :ocv:func:`line` description
:param line_type: Type of the strokes, see :ocv:func:`line` description
The function initializes the font structure that can be passed to text rendering functions.
@ -327,7 +334,8 @@ Draws a line segment connecting two points.
.. ocv:pyfunction:: cv2.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Line(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
@ -404,11 +412,12 @@ Draws a simple, thick, or filled up-right rectangle.
.. ocv:function:: void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
.. ocv:function:: void rectangle(Mat& img, Rect r, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
.. ocv:function:: void rectangle( Mat& img, Rect rec, const Scalar& color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:pyfunction:: cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Rectangle(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
@ -417,7 +426,7 @@ Draws a simple, thick, or filled up-right rectangle.
:param pt2: Vertex of the rectangle opposite to ``pt1`` .
:param r: Alternative specification of the drawn rectangle.
:param rec: Alternative specification of the drawn rectangle.
:param color: Rectangle color or brightness (grayscale image).
@ -439,9 +448,9 @@ Draws several polygonal curves.
.. ocv:pyfunction:: cv2.polylines(img, pts, isClosed, color[, thickness[, lineType[, shift]]]) -> None
.. ocv:cfunction:: void cvPolyLine( CvArr* img, CvPoint** pts, int* npts, int contours, int isClosed, CvScalar color, int thickness=1, int lineType=8, int shift=0 )
.. ocv:cfunction:: void cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, int is_closed, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.PolyLine(img, polys, isClosed, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:pyoldfunction:: cv.PolyLine(img, polys, is_closed, color, thickness=1, lineType=8, shift=0) -> None
:param img: Image.
@ -464,6 +473,88 @@ Draws several polygonal curves.
The function ``polylines`` draws one or more polygonal curves.
drawContours
----------------
Draws contours outlines or filled contours.
.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() )
.. ocv:pyfunction:: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> None
.. ocv:cfunction:: void cvDrawContours( CvArr * img, CvSeq* contour, CvScalar external_color, CvScalar hole_color, int max_level, int thickness=1, int line_type=8, CvPoint offset=cvPoint(0,0) )
.. ocv:pyoldfunction:: cv.DrawContours(img, contour, external_color, hole_color, max_level, thickness=1, lineType=8, offset=(0, 0))-> None
:param image: Destination image.
:param contours: All the input contours. Each contour is stored as a point vector.
:param contourIdx: Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
:param color: Color of the contours.
:param thickness: Thickness of lines the contours are drawn with. If it is negative (for example, ``thickness=CV_FILLED`` ), the contour interiors are
drawn.
:param lineType: Line connectivity. See :ocv:func:`line` for details.
:param hierarchy: Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see ``maxLevel`` ).
:param maxLevel: Maximal level for drawn contours. If it is 0, only
the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is ``hierarchy`` available.
:param offset: Optional contour shift parameter. Shift all the drawn contours by the specified :math:`\texttt{offset}=(dx,dy)` .
:param contour: Pointer to the first contour.
:param external_color: Color of external contours.
:param hole_color: Color of internal contours (holes).
The function draws contour outlines in the image if
:math:`\texttt{thickness} \ge 0` or fills the area bounded by the contours if
:math:`\texttt{thickness}<0` . The example below shows how to retrieve connected components from the binary image and label them: ::
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main( int argc, char** argv )
{
Mat src;
// the first command-line parameter must be a filename of the binary
// (black-n-white) image
if( argc != 2 || !(src=imread(argv[1], 0)).data)
return -1;
Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
src = src > 1;
namedWindow( "Source", 1 );
imshow( "Source", src );
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours( src, contours, hierarchy,
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
// iterate through all the top-level contours,
// draw each connected component with its own random color
int idx = 0;
for( ; idx >= 0; idx = hierarchy[idx][0] )
{
Scalar color( rand()&255, rand()&255, rand()&255 );
drawContours( dst, contours, idx, color, CV_FILLED, 8, hierarchy );
}
namedWindow( "Components", 1 );
imshow( "Components", dst );
waitKey(0);
}
putText
-----------
@ -471,7 +562,7 @@ Draws a text string.
.. ocv:function:: void putText( Mat& img, const string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=8, bool bottomLeftOrigin=false )
.. ocv:pyfunction:: cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, linetype[, bottomLeftOrigin]]]) -> None
.. ocv:pyfunction:: cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> None
.. ocv:cfunction:: void cvPutText( CvArr* img, const char* text, CvPoint org, const CvFont* font, CvScalar color )
.. ocv:pyoldfunction:: cv.PutText(img, text, org, font, color)-> None

View File

@ -10,7 +10,7 @@ CvMemStorage
.. ocv:struct:: CvMemStorage
A storage for various OpenCV dynamic data structures, such as ``CvSeq``, ``CvSet`` etc.
A storage for various OpenCV dynamic data structures, such as ``CvSeq``, ``CvSet`` etc.
.. ocv:member:: CvMemBlock* bottom
@ -64,7 +64,7 @@ CvSeq
.. ocv:struct:: CvSeq
Dynamically growing sequence.
Dynamically growing sequence.
.. ocv:member:: int flags
@ -228,9 +228,9 @@ ClearSet
--------
Clears a set.
.. ocv:cfunction:: void cvClearSet( CvSet* setHeader )
.. ocv:cfunction:: void cvClearSet( CvSet* set_header )
:param setHeader: Cleared set
:param set_header: Cleared set
The function removes all elements from the set. It has O(1) time complexity.
@ -362,11 +362,12 @@ CreateMemStorage
----------------
Creates memory storage.
.. ocv:cfunction:: CvMemStorage* cvCreateMemStorage( int blockSize=0 )
.. ocv:cfunction:: CvMemStorage* cvCreateMemStorage( int block_size=0 )
.. ocv:pyoldfunction:: cv.CreateMemStorage(blockSize=0) -> memstorage
:param blockSize: Size of the storage blocks in bytes. If it is 0, the block size is set to a default value - currently it is about 64K.
:param block_size: Size of the storage blocks in bytes. If it is 0, the block size is set to a default value - currently it is about 64K.
The function creates an empty memory storage. See
:ocv:struct:`CvMemStorage`
@ -376,14 +377,14 @@ CreateSeq
---------
Creates a sequence.
.. ocv:cfunction:: CvSeq* cvCreateSeq( int seqFlags, int headerSize, int elemSize, CvMemStorage* storage)
.. ocv:cfunction:: CvSeq* cvCreateSeq( int seq_flags, size_t header_size, size_t elem_size, CvMemStorage* storage )
:param seqFlags: Flags of the created sequence. If the sequence is not passed to any function working with a specific type of sequences, the sequence value may be set to 0, otherwise the appropriate type must be selected from the list of predefined sequence types.
:param seq_flags: Flags of the created sequence. If the sequence is not passed to any function working with a specific type of sequences, the sequence value may be set to 0, otherwise the appropriate type must be selected from the list of predefined sequence types.
:param headerSize: Size of the sequence header; must be greater than or equal to ``sizeof(CvSeq)`` . If a specific type or its extension is indicated, this type must fit the base type header.
:param header_size: Size of the sequence header; must be greater than or equal to ``sizeof(CvSeq)`` . If a specific type or its extension is indicated, this type must fit the base type header.
:param elemSize: Size of the sequence elements in bytes. The size must be consistent with the sequence type. For example, for a sequence of points to be created, the element type ``CV_SEQ_ELTYPE_POINT`` should be specified and the parameter ``elemSize`` must be equal to ``sizeof(CvPoint)`` .
:param elem_size: Size of the sequence elements in bytes. The size must be consistent with the sequence type. For example, for a sequence of points to be created, the element type ``CV_SEQ_ELTYPE_POINT`` should be specified and the parameter ``elem_size`` must be equal to ``sizeof(CvPoint)`` .
:param storage: Sequence location
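A minimal sketch of creating a storage together with a sequence of points, and releasing both, could look like this: ::

    CvMemStorage* storage = cvCreateMemStorage(0);      /* default ~64K block size */
    CvSeq* seq = cvCreateSeq( CV_SEQ_ELTYPE_POINT, sizeof(CvSeq),
                              sizeof(CvPoint), storage );

    CvPoint pt = cvPoint(10, 20);
    cvSeqPush( seq, &pt );                               /* append one element */

    /* ... use the sequence ... */

    cvReleaseMemStorage( &storage );                     /* frees the sequence too */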
@ -480,13 +481,13 @@ FindGraphEdgeByPtr
------------------
Finds an edge in a graph by using its pointer.
.. ocv:cfunction:: CvGraphEdge* cvFindGraphEdgeByPtr( const CvGraph* graph, const CvGraphVtx* startVtx, const CvGraphVtx* endVtx )
.. ocv:cfunction:: CvGraphEdge* cvFindGraphEdgeByPtr( const CvGraph* graph, const CvGraphVtx* start_vtx, const CvGraphVtx* end_vtx )
:param graph: Graph
:param startVtx: Pointer to the starting vertex of the edge
:param start_vtx: Pointer to the starting vertex of the edge
:param endVtx: Pointer to the ending vertex of the edge. For an unoriented graph, the order of the vertex parameters does not matter.
:param end_vtx: Pointer to the ending vertex of the edge. For an unoriented graph, the order of the vertex parameters does not matter.
::
@ -529,7 +530,7 @@ GetSeqElem
----------
Returns a pointer to a sequence element according to its index.
.. ocv:cfunction:: char* cvGetSeqElem( const CvSeq* seq, int index )
.. ocv:cfunction:: schar* cvGetSeqElem( const CvSeq* seq, int index )
:param seq: Sequence
@ -587,9 +588,9 @@ GetSetElem
----------
Finds a set element by its index.
.. ocv:cfunction:: CvSetElem* cvGetSetElem( const CvSet* setHeader, int index )
.. ocv:cfunction:: CvSetElem* cvGetSetElem( const CvSet* set_header, int index )
:param setHeader: Set
:param set_header: Set
:param index: Index of the set element within a sequence
@ -723,11 +724,11 @@ GraphVtxDegree
--------------
Counts the number of edges incident to the vertex.
.. ocv:cfunction:: int cvGraphVtxDegree( const CvGraph* graph, int vtxIdx )
.. ocv:cfunction:: int cvGraphVtxDegree( const CvGraph* graph, int vtx_idx )
:param graph: Graph
:param vtxIdx: Index of the graph vertex
:param vtx_idx: Index of the graph vertex
The function returns the number of edges incident to the specified vertex, both incoming and outgoing. To count the edges, the following code is used:
@ -1021,11 +1022,11 @@ SeqInsert
---------
Inserts an element in the middle of a sequence.
.. ocv:cfunction:: char* cvSeqInsert( CvSeq* seq, int beforeIndex, void* element=NULL )
.. ocv:cfunction:: schar* cvSeqInsert( CvSeq* seq, int before_index, const void* element=NULL )
:param seq: Sequence
:param beforeIndex: Index before which the element is inserted. Inserting before 0 (the minimal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPushFront` and inserting before ``seq->total`` (the maximal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPush` .
:param before_index: Index before which the element is inserted. Inserting before 0 (the minimal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPushFront` and inserting before ``seq->total`` (the maximal allowed value of the parameter) is equal to :ocv:cfunc:`SeqPush` .
:param element: Inserted element
@ -1037,13 +1038,13 @@ SeqInsertSlice
--------------
Inserts an array in the middle of a sequence.
.. ocv:cfunction:: void cvSeqInsertSlice( CvSeq* seq, int beforeIndex, const CvArr* fromArr )
.. ocv:cfunction:: void cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr )
:param seq: Sequence
:param beforeIndex: Index before which the array is inserted
:param before_index: Index before which the array is inserted
:param fromArr: The array to take elements from
:param from_arr: The array to take elements from
The function inserts all
``fromArr``
@ -1109,7 +1110,7 @@ SeqPush
-------
Adds an element to the end of a sequence.
.. ocv:cfunction:: char* cvSeqPush( CvSeq* seq, void* element=NULL )
.. ocv:cfunction:: schar* cvSeqPush( CvSeq* seq, const void* element=NULL )
:param seq: Sequence
@ -1149,7 +1150,7 @@ SeqPushFront
------------
Adds an element to the beginning of a sequence.
.. ocv:cfunction:: char* cvSeqPushFront( CvSeq* seq, void* element=NULL )
.. ocv:cfunction:: schar* cvSeqPushFront( CvSeq* seq, const void* element=NULL )
:param seq: Sequence
@ -1163,7 +1164,7 @@ SeqPushMulti
------------
Pushes several elements to either end of a sequence.
.. ocv:cfunction:: void cvSeqPushMulti( CvSeq* seq, void* elements, int count, int in_front=0 )
.. ocv:cfunction:: void cvSeqPushMulti( CvSeq* seq, const void* elements, int count, int in_front=0 )
:param seq: Sequence
@ -1216,7 +1217,7 @@ SeqSearch
---------
Searches for an element in a sequence.
.. ocv:cfunction:: char* cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, int is_sorted, int* elem_idx, void* userdata=NULL )
.. ocv:cfunction:: schar* cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, int is_sorted, int* elem_idx, void* userdata=NULL )
:param seq: The sequence
@ -1325,9 +1326,9 @@ SetAdd
------
Occupies a node in the set.
.. ocv:cfunction:: int cvSetAdd( CvSet* setHeader, CvSetElem* elem=NULL, CvSetElem** inserted_elem=NULL )
.. ocv:cfunction:: int cvSetAdd( CvSet* set_header, CvSetElem* elem=NULL, CvSetElem** inserted_elem=NULL )
:param setHeader: Set
:param set_header: Set
:param elem: Optional input argument, an inserted element. If not NULL, the function copies the data to the allocated node (the MSB of the first integer field is cleared after copying).
@ -1346,9 +1347,9 @@ SetNew
------
Adds an element to a set (fast variant).
.. ocv:cfunction:: CvSetElem* cvSetNew( CvSet* setHeader )
.. ocv:cfunction:: CvSetElem* cvSetNew( CvSet* set_header )
:param setHeader: Set
:param set_header: Set
The function is an inline lightweight variant of
:ocv:cfunc:`SetAdd`
@ -1358,9 +1359,9 @@ SetRemove
---------
Removes an element from a set.
.. ocv:cfunction:: void cvSetRemove( CvSet* setHeader, int index )
.. ocv:cfunction:: void cvSetRemove( CvSet* set_header, int index )
:param setHeader: Set
:param set_header: Set
:param index: Index of the removed element
@ -1375,9 +1376,9 @@ SetRemoveByPtr
--------------
Removes a set element based on its pointer.
.. ocv:cfunction:: void cvSetRemoveByPtr( CvSet* setHeader, void* elem )
.. ocv:cfunction:: void cvSetRemoveByPtr( CvSet* set_header, void* elem )
:param setHeader: Set
:param set_header: Set
:param elem: Removed element
@ -1389,23 +1390,23 @@ SetSeqBlockSize
---------------
Sets up sequence block size.
.. ocv:cfunction:: void cvSetSeqBlockSize( CvSeq* seq, int deltaElems )
.. ocv:cfunction:: void cvSetSeqBlockSize( CvSeq* seq, int delta_elems )
:param seq: Sequence
:param deltaElems: Desirable sequence block size for elements
:param delta_elems: Desirable sequence block size for elements
The function affects memory allocation
granularity. When the free space in the sequence buffers has run out,
the function allocates the space for
``deltaElems``
``delta_elems``
sequence
elements. If this block immediately follows the one previously allocated,
the two blocks are concatenated; otherwise, a new sequence block is
created. Therefore, the bigger the parameter is, the lower the possible
sequence fragmentation, but the more space in the storage block is wasted. When
the sequence is created, the parameter
``deltaElems``
``delta_elems``
is set to
the default value of about 1K. The function can be called any time after
the sequence is created and affects future allocations. The function

View File

@ -10,7 +10,7 @@ CvPoint
.. ocv:struct:: CvPoint
2D point with integer coordinates (usually zero-based).
2D point with integer coordinates (usually zero-based).
.. ocv:member:: int x
@ -24,7 +24,7 @@ CvPoint
constructs ``CvPoint`` structure.
.. ocv:cfunction:: CvPoint cvPointFrom32f( CvPoint32f pt )
.. ocv:cfunction:: CvPoint cvPointFrom32f( CvPoint2D32f point )
converts ``CvPoint2D32f`` to ``CvPoint``.
@ -35,7 +35,7 @@ CvPoint2D32f
.. ocv:struct:: CvPoint2D32f
2D point with floating-point coordinates.
2D point with floating-point coordinates.
.. ocv:member:: float x
@ -45,11 +45,11 @@ CvPoint2D32f
y-coordinate
.. ocv:cfunction:: CvPoint2D32f cvPoint2D32f( float x, float y )
.. ocv:cfunction:: CvPoint2D32f cvPoint2D32f( double x, double y )
constructs ``CvPoint2D32f`` structure.
.. ocv:cfunction:: CvPoint2D32f cvPointTo32f( CvPoint pt )
.. ocv:cfunction:: CvPoint2D32f cvPointTo32f( CvPoint point )
converts ``CvPoint`` to ``CvPoint2D32f``.
@ -60,7 +60,7 @@ CvPoint3D32f
.. ocv:struct:: CvPoint3D32f
3D point with floating-point coordinates
3D point with floating-point coordinates
.. ocv:member:: float x
@ -74,7 +74,7 @@ CvPoint3D32f
z-coordinate
.. ocv:cfunction:: CvPoint3D32f cvPoint3D32f( float x, float y, float z )
.. ocv:cfunction:: CvPoint3D32f cvPoint3D32f( double x, double y, double z )
constructs ``CvPoint3D32f`` structure.
@ -85,7 +85,7 @@ CvPoint2D64f
.. ocv:struct:: CvPoint2D64f
2D point with double-precision floating-point coordinates.
2D point with double-precision floating-point coordinates.
.. ocv:member:: double x
@ -106,7 +106,7 @@ CvPoint3D64f
.. ocv:struct:: CvPoint3D64f
3D point with double-precision floating-point coordinates.
3D point with double-precision floating-point coordinates.
.. ocv:member:: double x
@ -129,7 +129,7 @@ CvSize
.. ocv:struct:: CvSize
Size of a rectangle or an image.
Size of a rectangle or an image.
.. ocv:member:: int width
@ -150,7 +150,7 @@ CvSize2D32f
.. ocv:struct:: CvSize2D32f
Sub-pixel accurate size of a rectangle.
Sub-pixel accurate size of a rectangle.
.. ocv:member:: float width
@ -160,7 +160,7 @@ Sub-pixel accurate size of a rectangle.
Height of the rectangle
.. ocv:cfunction:: CvSize2D32f cvSize2D23f( float width, float height )
.. ocv:cfunction:: CvSize2D32f cvSize2D32f( double width, double height )
constructs ``CvSize2D32f`` structure.
@ -171,7 +171,7 @@ CvRect
.. ocv:struct:: CvRect
Stores coordinates of a rectangle.
Stores coordinates of a rectangle.
.. ocv:member:: int x
@ -197,11 +197,11 @@ Stores coordinates of a rectangle.
CvBox2D
------
-------
.. ocv:struct:: CvBox2D
Stores coordinates of a rotated rectangle.
Stores coordinates of a rotated rectangle.
.. ocv:member:: CvPoint2D32f center
@ -223,7 +223,7 @@ CvScalar
.. ocv:struct:: CvScalar
A container for 1-,2-,3- or 4-tuples of doubles.
A container for 1-,2-,3- or 4-tuples of doubles.
.. ocv:member:: double[4] val
@ -246,7 +246,7 @@ CvTermCriteria
.. ocv:struct:: CvTermCriteria
Termination criteria for iterative algorithms.
Termination criteria for iterative algorithms.
.. ocv:member:: int type
@ -273,7 +273,7 @@ CvMat
.. ocv:struct:: CvMat
A multi-channel dense matrix.
A multi-channel dense matrix.
.. ocv:member:: int type
@ -323,7 +323,7 @@ CvMatND
.. ocv:struct:: CvMatND
Multi-dimensional dense multi-channel array.
Multi-dimensional dense multi-channel array.
.. ocv:member:: int type
@ -363,7 +363,7 @@ CvSparseMat
.. ocv:struct:: CvSparseMat
Multi-dimensional sparse multi-channel array.
Multi-dimensional sparse multi-channel array.
.. ocv:member:: int type
@ -398,7 +398,7 @@ IplImage
.. ocv:struct:: IplImage
IPL image header
IPL image header
.. ocv:member:: int nSize
@ -511,7 +511,8 @@ ClearND
-------
Clears a specific array element.
.. ocv:cfunction:: void cvClearND(CvArr* arr, int* idx)
.. ocv:cfunction:: void cvClearND( CvArr* arr, const int* idx )
.. ocv:pyoldfunction:: cv.ClearND(arr, idx)-> None
:param arr: Input array
@ -524,7 +525,7 @@ CloneImage
Makes a full copy of an image, including the header, data, and ROI.
.. ocv:cfunction:: IplImage* cvCloneImage(const IplImage* image)
.. ocv:pyoldfunction:: cv.CloneImage(image)-> copy
.. ocv:pyoldfunction:: cv.CloneImage(image) -> image
:param image: The original image
@ -533,7 +534,7 @@ CloneMat
Creates a full matrix copy.
.. ocv:cfunction:: CvMat* cvCloneMat(const CvMat* mat)
.. ocv:pyoldfunction:: cv.CloneMat(mat)-> copy
.. ocv:pyoldfunction:: cv.CloneMat(mat) -> mat
:param mat: Matrix to be copied
@ -544,7 +545,7 @@ CloneMatND
Creates full copy of a multi-dimensional array and returns a pointer to the copy.
.. ocv:cfunction:: CvMatND* cvCloneMatND(const CvMatND* mat)
.. ocv:pyoldfunction:: cv.CloneMatND(mat)-> copy
.. ocv:pyoldfunction:: cv.CloneMatND(mat) -> matND
:param mat: Input array
@ -701,7 +702,7 @@ CreateMatND
Creates the header and allocates the data for a multi-dimensional dense array.
.. ocv:cfunction:: CvMatND* cvCreateMatND( int dims, const int* sizes, int type)
.. ocv:pyoldfunction:: cv.CreateMatND(dims, type) -> None
.. ocv:pyoldfunction:: cv.CreateMatND(dims, type) -> matND
:param dims: Number of array dimensions. This must not exceed CV_MAX_DIM (32 by default, but can be changed at build time).
@ -719,7 +720,7 @@ CreateMatNDHeader
Creates a new matrix header but does not allocate the matrix data.
.. ocv:cfunction:: CvMatND* cvCreateMatNDHeader( int dims, const int* sizes, int type)
.. ocv:pyoldfunction:: cv.CreateMatNDHeader(dims, type) -> None
.. ocv:pyoldfunction:: cv.CreateMatNDHeader(dims, type) -> matND
:param dims: Number of array dimensions
@ -776,7 +777,7 @@ DotProduct
Calculates the dot product of two arrays in Euclidean metrics.
.. ocv:cfunction:: double cvDotProduct(const CvArr* src1, const CvArr* src2)
.. ocv:pyoldfunction:: cv.DotProduct(src1, src2)-> double
.. ocv:pyoldfunction:: cv.DotProduct(src1, src2) -> float
:param src1: The first source array
@ -799,7 +800,7 @@ Get?D
.. ocv:cfunction:: CvScalar cvGet1D(const CvArr* arr, int idx0)
.. ocv:cfunction:: CvScalar cvGet2D(const CvArr* arr, int idx0, int idx1)
.. ocv:cfunction:: CvScalar cvGet3D(const CvArr* arr, int idx0, int idx1, int idx2)
.. ocv:cfunction:: CvScalar cvGetND(const CvArr* arr, int* idx)
.. ocv:cfunction:: CvScalar cvGetND( const CvArr* arr, const int* idx )
.. ocv:pyoldfunction:: cv.Get1D(arr, idx) -> scalar
.. ocv:pyoldfunction:: cv.Get2D(arr, idx0, idx1) -> scalar
@ -825,9 +826,11 @@ GetCol(s)
Returns one of more array columns.
.. ocv:cfunction:: CvMat* cvGetCol(const CvArr* arr, CvMat* submat, int col)
.. ocv:cfunction:: CvMat* cvGetCols(const CvArr* arr, CvMat* submat, int startCol, int endCol)
.. ocv:cfunction:: CvMat* cvGetCols( const CvArr* arr, CvMat* submat, int start_col, int end_col )
.. ocv:pyoldfunction:: cv.GetCol(arr, col)-> submat
.. ocv:pyoldfunction:: cv.GetCols(arr, startCol, endCol)-> submat
:param arr: Input array
@ -836,9 +839,9 @@ Returns one of more array columns.
:param col: Zero-based index of the selected column
:param startCol: Zero-based index of the starting column (inclusive) of the span
:param start_col: Zero-based index of the starting column (inclusive) of the span
:param endCol: Zero-based index of the ending column (exclusive) of the span
:param end_col: Zero-based index of the ending column (exclusive) of the span
The functions return the header, corresponding to a specified column span of the input array. That is, no data is copied. Therefore, any modifications of the submatrix will affect the original array. If you need to copy the columns, use :ocv:cfunc:`CloneMat`. ``cvGetCol(arr, submat, col)`` is a shortcut for ``cvGetCols(arr, submat, col, col+1)``.
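For instance (a sketch): ::

    CvMat* big = cvCreateMat( 4, 8, CV_32FC1 );
    cvSet( big, cvScalarAll(1) );

    CvMat sub;                         /* header only, allocated on the stack */
    cvGetCols( big, &sub, 2, 5 );      /* columns 2, 3 and 4 of big */
    cvSet( &sub, cvScalarAll(0) );     /* modifies the corresponding part of big */

    cvReleaseMat( &big );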
@ -862,7 +865,7 @@ GetDims
Return number of array dimensions
.. ocv:cfunction:: int cvGetDims(const CvArr* arr, int* sizes=NULL)
.. ocv:pyoldfunction:: cv.GetDims(arr)-> list
.. ocv:pyoldfunction:: cv.GetDims(arr) -> (dim1, dim2, ...)
:param arr: Input array
@ -907,21 +910,22 @@ GetImage
--------
Returns image header for arbitrary array.
.. ocv:cfunction:: IplImage* cvGetImage(const CvArr* arr, IplImage* imageHeader)
.. ocv:cfunction:: IplImage* cvGetImage( const CvArr* arr, IplImage* image_header )
.. ocv:pyoldfunction:: cv.GetImage(arr) -> iplimage
:param arr: Input array
:param imageHeader: Pointer to ``IplImage`` structure used as a temporary buffer
:param image_header: Pointer to ``IplImage`` structure used as a temporary buffer
The function returns the image header for the input array that can be a matrix (:ocv:struct:`CvMat`) or image (:ocv:struct:`IplImage`). In the case of an image the function simply returns the input pointer. In the case of ``CvMat`` it initializes an ``imageHeader`` structure with the parameters of the input matrix. Note that if we transform ``IplImage`` to ``CvMat`` using :ocv:cfunc:`GetMat` and then transform ``CvMat`` back to IplImage using this function, we will get different headers if the ROI is set in the original image.
The function returns the image header for the input array that can be a matrix (:ocv:struct:`CvMat`) or image (:ocv:struct:`IplImage`). In the case of an image the function simply returns the input pointer. In the case of ``CvMat`` it initializes an ``image_header`` structure with the parameters of the input matrix. Note that if we transform ``IplImage`` to ``CvMat`` using :ocv:cfunc:`GetMat` and then transform ``CvMat`` back to IplImage using this function, we will get different headers if the ROI is set in the original image.
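For illustration (a sketch): ::

    CvMat* mat = cvCreateMat( 240, 320, CV_8UC3 );

    IplImage hdr;                               /* temporary header on the stack */
    IplImage* img = cvGetImage( mat, &hdr );    /* no data is copied */

    /* img can now be passed to functions expecting IplImage* */

    cvReleaseMat( &mat );                       /* the pixel data is owned by mat */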
GetImageCOI
-----------
Returns the index of the channel of interest.
.. ocv:cfunction:: int cvGetImageCOI(const IplImage* image)
.. ocv:pyoldfunction:: cv.GetImageCOI(image)-> channel
.. ocv:pyoldfunction:: cv.GetImageCOI(image) -> int
:param image: A pointer to the image header
@ -944,7 +948,7 @@ GetMat
Returns matrix header for arbitrary array.
.. ocv:cfunction:: CvMat* cvGetMat(const CvArr* arr, CvMat* header, int* coi=NULL, int allowND=0)
.. ocv:pyoldfunction:: cv.GetMat(arr, allowND=0) -> cvmat
.. ocv:pyoldfunction:: cv.GetMat(arr, allowND=0) -> mat
:param arr: Input array
@ -966,9 +970,9 @@ GetNextSparseNode
-----------------
Returns the next sparse matrix element
.. ocv:cfunction:: CvSparseNode* cvGetNextSparseNode(CvSparseMatIterator* matIterator)
.. ocv:cfunction:: CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator )
:param matIterator: Sparse array iterator
:param mat_iterator: Sparse array iterator
The function moves the iterator to the next sparse matrix element and returns a pointer to it. In the current version, there is no particular order of the elements because they are stored in a hash table. The sample below demonstrates how to iterate through the sparse matrix: ::
@ -999,7 +1003,7 @@ GetRawData
----------
Retrieves low-level information about the array.
.. ocv:cfunction:: void cvGetRawData(const CvArr* arr, uchar** data, int* step=NULL, CvSize* roiSize=NULL)
.. ocv:cfunction:: void cvGetRawData( const CvArr* arr, uchar** data, int* step=NULL, CvSize* roi_size=NULL )
:param arr: Array header
@ -1007,7 +1011,7 @@ Retrieves low-level information about the array.
:param step: Output full row length in bytes
:param roiSize: Output ROI size
:param roi_size: Output ROI size
The function fills output variables with low-level information about the array data. All output parameters are optional, so some of the pointers may be set to ``NULL``. If the array is ``IplImage`` with ROI set, the parameters of ROI are returned.
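For example (a sketch): ::

    IplImage* img = cvCreateImage( cvSize(320, 240), IPL_DEPTH_8U, 1 );

    uchar* data = 0;
    int step = 0;
    CvSize size;
    cvGetRawData( img, &data, &step, &size );

    /* data points to the top-left pixel, consecutive rows are step bytes apart,
       and size is the ROI size (here the whole 320x240 image) */

    cvReleaseImage( &img );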
@ -1031,7 +1035,7 @@ Return a specific element of single-channel 1D, 2D, 3D or nD array.
.. ocv:cfunction:: double cvGetReal1D(const CvArr* arr, int idx0)
.. ocv:cfunction:: double cvGetReal2D(const CvArr* arr, int idx0, int idx1)
.. ocv:cfunction:: double cvGetReal3D(const CvArr* arr, int idx0, int idx1, int idx2)
.. ocv:cfunction:: double cvGetRealND(const CvArr* arr, int* idx)
.. ocv:cfunction:: double cvGetRealND( const CvArr* arr, const int* idx )
.. ocv:pyoldfunction:: cv.GetReal1D(arr, idx0)->float
.. ocv:pyoldfunction:: cv.GetReal2D(arr, idx0, idx1)->float
@ -1059,7 +1063,7 @@ Returns array row or row span.
.. ocv:cfunction:: CvMat* cvGetRow(const CvArr* arr, CvMat* submat, int row)
.. ocv:cfunction:: CvMat* cvGetRows(const CvArr* arr, CvMat* submat, int startRow, int endRow, int deltaRow=1)
.. ocv:cfunction:: CvMat* cvGetRows( const CvArr* arr, CvMat* submat, int start_row, int end_row, int delta_row=1 )
.. ocv:pyoldfunction:: cv.GetRow(arr, row)-> submat
.. ocv:pyoldfunction:: cv.GetRows(arr, startRow, endRow, deltaRow=1)-> submat
@ -1070,11 +1074,11 @@ Returns array row or row span.
:param row: Zero-based index of the selected row
:param startRow: Zero-based index of the starting row (inclusive) of the span
:param start_row: Zero-based index of the starting row (inclusive) of the span
:param endRow: Zero-based index of the ending row (exclusive) of the span
:param end_row: Zero-based index of the ending row (exclusive) of the span
:param deltaRow: Index step in the row span. That is, the function extracts every ``deltaRow`` -th row from ``startRow`` and up to (but not including) ``endRow`` .
:param delta_row: Index step in the row span. That is, the function extracts every ``delta_row`` -th row from ``start_row`` and up to (but not including) ``end_row`` .
The functions return the header corresponding to a specified row or row span of the input array. ``cvGetRow(arr, submat, row)`` is a shortcut for ``cvGetRows(arr, submat, row, row+1)``.
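A short sketch (assuming a ``CV_32FC1`` matrix) of selecting every second row without copying data: ::

    #include <opencv2/core/core_c.h>

    int main()
    {
        CvMat* big = cvCreateMat(100, 10, CV_32FC1);
        CvMat sub;                                 // header only; no data is copied
        cvGetRows(big, &sub, 0, big->rows, 2);     // every 2nd row of big
        cvSet(&sub, cvScalarAll(1));               // fill the selected rows with ones
        cvReleaseMat(&big);
        return 0;
    }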
@ -1209,11 +1213,11 @@ InitSparseMatIterator
---------------------
Initializes sparse array elements iterator.
.. ocv:cfunction:: CvSparseNode* cvInitSparseMatIterator(const CvSparseMat* mat, CvSparseMatIterator* matIterator)
.. ocv:cfunction:: CvSparseNode* cvInitSparseMatIterator( const CvSparseMat* mat, CvSparseMatIterator* mat_iterator )
:param mat: Input array
:param matIterator: Initialized iterator
:param mat_iterator: Initialized iterator
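The function initializes the iterator of sparse array elements and returns a pointer to the first element, or NULL if the array is empty.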
@ -1250,7 +1254,7 @@ Return pointer to a particular array element.
.. ocv:cfunction:: uchar* cvPtr3D(const CvArr* arr, int idx0, int idx1, int idx2, int* type=NULL)
.. ocv:cfunction:: uchar* cvPtrND(const CvArr* arr, int* idx, int* type=NULL, int createNode=1, unsigned* precalcHashval=NULL)
.. ocv:cfunction:: uchar* cvPtrND( const CvArr* arr, const int* idx, int* type=NULL, int create_node=1, unsigned* precalc_hashval=NULL )
:param arr: Input array
@ -1264,9 +1268,9 @@ Return pointer to a particular array element.
:param type: Optional output parameter: type of matrix elements
:param createNode: Optional input parameter for sparse matrices. Non-zero value of the parameter means that the requested element is created if it does not exist already.
:param create_node: Optional input parameter for sparse matrices. Non-zero value of the parameter means that the requested element is created if it does not exist already.
:param precalcHashval: Optional input parameter for sparse matrices. If the pointer is not NULL, the function does not recalculate the node hash value, but takes it from the specified location. It is useful for speeding up pair-wise operations (TODO: provide an example)
:param precalc_hashval: Optional input parameter for sparse matrices. If the pointer is not NULL, the function does not recalculate the node hash value, but takes it from the specified location. It is useful for speeding up pair-wise operations (TODO: provide an example)
The functions return a pointer to a specific array element. The number of array dimensions should match the number of indices passed to the function, except for ``cvPtr1D``, which can be used for sequential access to 1D, 2D or nD dense arrays.
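For example, a sketch of direct element access via ``cvPtr2D`` on a dense ``CV_8UC1`` matrix: ::

    #include <opencv2/core/core_c.h>

    int main()
    {
        CvMat* m = cvCreateMat(4, 4, CV_8UC1);
        cvZero(m);
        uchar* p = cvPtr2D(m, 2, 3);   // pointer to the element at row 2, column 3
        *p = 255;
        cvReleaseMat(&m);
        return 0;
    }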
@ -1403,16 +1407,17 @@ Reshape
-------
Changes shape of matrix/image without copying data.
.. ocv:cfunction:: CvMat* cvReshape(const CvArr* arr, CvMat* header, int newCn, int newRows=0)
.. ocv:pyoldfunction:: cv.Reshape(arr, newCn, newRows=0) -> cvmat
.. ocv:cfunction:: CvMat* cvReshape( const CvArr* arr, CvMat* header, int new_cn, int new_rows=0 )
.. ocv:pyoldfunction:: cv.Reshape(arr, newCn, newRows=0) -> mat
:param arr: Input array
:param header: Output header to be filled
:param newCn: New number of channels. 'newCn = 0' means that the number of channels remains unchanged.
:param new_cn: New number of channels. 'new_cn = 0' means that the number of channels remains unchanged.
:param newRows: New number of rows. 'newRows = 0' means that the number of rows remains unchanged unless it needs to be changed according to ``newCn`` value.
:param new_rows: New number of rows. 'new_rows = 0' means that the number of rows remains unchanged unless it needs to be changed according to ``new_cn`` value.
The function initializes the CvMat header so that it points to the same data as the original array but has a different shape - different number of channels, different number of rows, or both.
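For instance, a single-channel matrix can be viewed as a 3-channel one without copying data (a sketch, assuming the number of columns is divisible by 3): ::

    #include <opencv2/core/core_c.h>

    int main()
    {
        CvMat* m = cvCreateMat(4, 12, CV_8UC1);    // 4x12, 1 channel
        CvMat hdr;
        CvMat* m3 = cvReshape(m, &hdr, 3, 0);      // the same data seen as 4x4, 3 channels
        // m3->cols == 4 and CV_MAT_CN(m3->type) == 3 here
        cvReleaseMat(&m);
        return 0;
    }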
@ -1440,20 +1445,21 @@ ReshapeMatND
------------
Changes the shape of a multi-dimensional array without copying the data.
.. ocv:cfunction:: CvArr* cvReshapeMatND(const CvArr* arr, int sizeofHeader, CvArr* header, int newCn, int newDims, int* newSizes)
.. ocv:pyoldfunction:: cv.ReshapeMatND(arr, newCn, newDims) -> cvmat
.. ocv:cfunction:: CvArr* cvReshapeMatND( const CvArr* arr, int sizeof_header, CvArr* header, int new_cn, int new_dims, int* new_sizes )
.. ocv:pyoldfunction:: cv.ReshapeMatND(arr, newCn, newDims) -> mat
:param arr: Input array
:param sizeofHeader: Size of output header to distinguish between IplImage, CvMat and CvMatND output headers
:param sizeof_header: Size of output header to distinguish between IplImage, CvMat and CvMatND output headers
:param header: Output header to be filled
:param newCn: New number of channels. ``newCn = 0`` means that the number of channels remains unchanged.
:param new_cn: New number of channels. ``new_cn = 0`` means that the number of channels remains unchanged.
:param newDims: New number of dimensions. ``newDims = 0`` means that the number of dimensions remains the same.
:param new_dims: New number of dimensions. ``new_dims = 0`` means that the number of dimensions remains the same.
:param newSizes: Array of new dimension sizes. Only ``newDims-1`` values are used, because the total number of elements must remain the same. Thus, if ``newDims = 1``, ``newSizes`` array is not used.
:param new_sizes: Array of new dimension sizes. Only ``new_dims-1`` values are used, because the total number of elements must remain the same. Thus, if ``new_dims = 1``, ``new_sizes`` array is not used.
The function is an advanced version of :ocv:cfunc:`Reshape` that can work with multi-dimensional arrays as well (though it can work with ordinary images and matrices) and change the number of dimensions.
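A hedged sketch of viewing a 3-channel ``IplImage`` as a single-channel image three times wider (sizes chosen for illustration): ::

    #include <opencv2/core/core_c.h>

    int main()
    {
        IplImage* color = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
        IplImage gray_hdr;
        IplImage* gray = (IplImage*)cvReshapeMatND(color, sizeof(gray_hdr), &gray_hdr,
                                                   1, 0, 0);   // 1 channel, same dimensionality
        // gray->width == 960 and gray->height == 240, sharing data with color
        cvReleaseImage(&color);
        return 0;
    }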
@ -1508,7 +1514,7 @@ Change the particular array element.
.. ocv:cfunction:: void cvSet3D(CvArr* arr, int idx0, int idx1, int idx2, CvScalar value)
.. ocv:cfunction:: void cvSetND(CvArr* arr, int* idx, CvScalar value)
.. ocv:cfunction:: void cvSetND( CvArr* arr, const int* idx, CvScalar value )
.. ocv:pyoldfunction:: cv.Set1D(arr, idx, value) -> None
.. ocv:pyoldfunction:: cv.Set2D(arr, idx0, idx1, value) -> None
@ -1589,7 +1595,7 @@ Change a specific array element.
.. ocv:cfunction:: void cvSetReal3D(CvArr* arr, int idx0, int idx1, int idx2, double value)
.. ocv:cfunction:: void cvSetRealND(CvArr* arr, int* idx, double value)
.. ocv:cfunction:: void cvSetRealND( CvArr* arr, const int* idx, double value )
.. ocv:pyoldfunction:: cv.SetReal1D(arr, idx, value) -> None
.. ocv:pyoldfunction:: cv.SetReal2D(arr, idx0, idx1, value) -> None
@ -1617,7 +1623,7 @@ SetZero
Clears the array.
.. ocv:cfunction:: void cvSetZero(CvArr* arr)
.. ocv:pyoldfunction:: cv.SetZero(arr)-> None
.. ocv:pyoldfunction:: cv.SetZero(arr) -> None
:param arr: Array to be cleared
@ -1628,7 +1634,7 @@ mGet
Returns the particular element of single-channel floating-point matrix.
.. ocv:cfunction:: double cvmGet(const CvMat* mat, int row, int col)
.. ocv:pyoldfunction:: cv.mGet(mat, row, col)-> double
.. ocv:pyoldfunction:: cv.mGet(mat, row, col) -> float
:param mat: Input matrix
@ -1687,14 +1693,15 @@ RandArr
-------
Fills an array with random numbers and updates the RNG state.
.. ocv:cfunction:: void cvRandArr( CvRNG* rng, CvArr* arr, int distType, CvScalar param1, CvScalar param2)
.. ocv:cfunction:: void cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, CvScalar param1, CvScalar param2 )
.. ocv:pyoldfunction:: cv.RandArr(rng, arr, distType, param1, param2)-> None
:param rng: CvRNG state initialized by :ocv:cfunc:`RNG`
:param arr: The destination array
:param distType: Distribution type
:param dist_type: Distribution type
* **CV_RAND_UNI** uniform distribution
@ -1725,7 +1732,7 @@ RandReal
Returns a floating-point random number and updates RNG.
.. ocv:cfunction:: double cvRandReal(CvRNG* rng)
.. ocv:pyoldfunction:: cv.RandReal(rng)-> double
.. ocv:pyoldfunction:: cv.RandReal(rng) -> float
:param rng: RNG state initialized by :ocv:cfunc:`RNG`
@ -1736,7 +1743,7 @@ fromarray
---------
Create a CvMat from an object that supports the array interface.
.. ocv:pyoldfunction:: cv.fromarray(object, allowND=False) -> CvMat
.. ocv:pyoldfunction:: cv.fromarray(array, allowND=False) -> mat
:param object: Any object that supports the array interface
@ -48,7 +48,7 @@ CvFileNode
.. ocv:struct:: CvFileNode
File storage node. When an XML/YAML file is read, it is first parsed and stored in memory as a hierarchical collection of nodes. Each node can be a "leaf", that is, contain a single number or a string, or be a collection of other nodes. Collections are also referred to as "structures" in the data writing functions. There can be named collections (mappings), where each element has a name and is accessed by name, and ordered collections (sequences), where elements do not have names but are accessed by index.
.. ocv:member:: int tag
@ -151,11 +151,11 @@ Clone
-----
Makes a clone of an object.
.. ocv:cfunction:: void* cvClone( const void* structPtr )
.. ocv:cfunction:: void* cvClone( const void* struct_ptr )
:param structPtr: The object to clone
:param struct_ptr: The object to clone
The function finds the type of a given object and calls ``clone`` with the passed object. Of course, if you know the object type, for example, ``structPtr`` is ``CvMat*``, it is faster to call the specific function, like :ocv:cfunc:`CloneMat`.
The function finds the type of a given object and calls ``clone`` with the passed object. Of course, if you know the object type, for example, ``struct_ptr`` is ``CvMat*``, it is faster to call the specific function, like :ocv:cfunc:`CloneMat`.
EndWriteStruct
--------------
@ -171,9 +171,9 @@ FindType
--------
Finds a type by its name.
.. ocv:cfunction:: CvTypeInfo* cvFindType(const char* typeName)
.. ocv:cfunction:: CvTypeInfo* cvFindType( const char* type_name )
:param typeName: Type name
:param type_name: Type name
The function finds a registered type by its name. It returns NULL if there is no type with the specified name.
@ -189,7 +189,7 @@ GetFileNode
-----------
Finds a node in a map or file storage.
.. ocv:cfunction:: CvFileNode* cvGetFileNode( CvFileStorage* fs, CvFileNode* map, const CvStringHashNode* key, int createMissing=0 )
.. ocv:cfunction:: CvFileNode* cvGetFileNode( CvFileStorage* fs, CvFileNode* map, const CvStringHashNode* key, int create_missing=0 )
:param fs: File storage
@ -197,7 +197,7 @@ Finds a node in a map or file storage.
:param key: Unique pointer to the node name, retrieved with :ocv:cfunc:`GetHashedKey`
:param createMissing: Flag that specifies whether an absent node should be added to the map
:param create_missing: Flag that specifies whether an absent node should be added to the map
The function finds a file node. It is a faster version of :ocv:cfunc:`GetFileNodeByName`
(see :ocv:cfunc:`GetHashedKey` discussion). Also, the function can insert a new node, if it is not in the map yet.
@ -231,7 +231,7 @@ GetHashedKey
------------
Returns a unique pointer for a given name.
.. ocv:cfunction:: CvStringHashNode* cvGetHashedKey( CvFileStorage* fs, const char* name, int len=-1, int createMissing=0 )
.. ocv:cfunction:: CvStringHashNode* cvGetHashedKey( CvFileStorage* fs, const char* name, int len=-1, int create_missing=0 )
:param fs: File storage
@ -239,7 +239,7 @@ Returns a unique pointer for a given name.
:param len: Length of the name (if it is known apriori), or -1 if it needs to be calculated
:param createMissing: Flag that specifies, whether an absent key should be added into the hash table
:param create_missing: Flag that specifies, whether an absent key should be added into the hash table
The function returns a unique pointer for each particular file node name. This pointer can be then passed to the :ocv:cfunc:`GetFileNode` function that is faster than :ocv:cfunc:`GetFileNodeByName`
because it compares text strings by comparing pointers rather than the strings' content.
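To illustrate the intended speed-up, a sketch that resolves the keys once and reuses them while scanning a sequence of point maps (assuming a hypothetical file ``points.yml`` containing a sequence ``points`` whose elements are maps with ``x`` and ``y`` entries): ::

    #include <opencv2/core/core_c.h>
    #include <cstdio>

    int main()
    {
        CvFileStorage* fs = cvOpenFileStorage("points.yml", 0, CV_STORAGE_READ);
        CvStringHashNode* x_key = cvGetHashedKey(fs, "x", -1, 1);
        CvStringHashNode* y_key = cvGetHashedKey(fs, "y", -1, 1);
        CvFileNode* points = cvGetFileNodeByName(fs, 0, "points");

        CvSeq* seq = points->data.seq;                       // sequence of point maps
        for (int i = 0; i < seq->total; i++)
        {
            CvFileNode* pt = (CvFileNode*)cvGetSeqElem(seq, i);
            int x = cvReadInt(cvGetFileNode(fs, pt, x_key, 0), 0);
            int y = cvReadInt(cvGetFileNode(fs, pt, y_key, 0), 0);
            printf("%d: (%d, %d)\n", i, x, y);
        }
        cvReleaseFileStorage(&fs);
        return 0;
    }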
@ -325,16 +325,17 @@ Load
----
Loads an object from a file.
.. ocv:cfunction:: void* cvLoad( const char* filename, CvMemStorage* storage=NULL, const char* name=NULL, const char** realName=NULL )
.. ocv:cfunction:: void* cvLoad( const char* filename, CvMemStorage* memstorage=NULL, const char* name=NULL, const char** real_name=NULL )
.. ocv:pyoldfunction:: cv.Load(filename, storage=None, name=None)-> generic
:param filename: File name
:param storage: Memory storage for dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` . It is not used for matrices or images.
:param memstorage: Memory storage for dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` . It is not used for matrices or images.
:param name: Optional object name. If it is NULL, the first top-level object in the storage will be loaded.
:param realName: Optional output parameter that will contain the name of the loaded object (useful if ``name=NULL`` )
:param real_name: Optional output parameter that will contain the name of the loaded object (useful if ``name=NULL`` )
The function loads an object from a file. It basically reads the specified file, finds the first top-level node, and calls :ocv:cfunc:`Read` for that node. If the file node does not have type information or the type information cannot be found by the type name, the function returns NULL. After the object is loaded, the file storage is closed and all the temporary buffers are deleted. Thus, to load a dynamic structure, such as a sequence, contour, or graph, one should pass a valid memory storage destination to the function.
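A short save/load round trip (a sketch using a hypothetical file name ``my_matrix.xml``; matrices need no memory storage, so ``memstorage`` may stay NULL): ::

    #include <opencv2/core/core_c.h>

    int main()
    {
        CvMat* A = cvCreateMat(3, 3, CV_32FC1);
        cvSetIdentity(A);
        cvSave("my_matrix.xml", A);

        CvMat* B = (CvMat*)cvLoad("my_matrix.xml");
        cvReleaseMat(&A);
        cvReleaseMat(&B);
        return 0;
    }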
@ -342,7 +343,7 @@ OpenFileStorage
---------------
Opens file storage for reading or writing data.
.. ocv:cfunction:: CvFileStorage* cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, int flags)
.. ocv:cfunction:: CvFileStorage* cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, int flags, const char* encoding=NULL )
:param filename: Name of the file associated with the storage
@ -393,21 +394,21 @@ ReadInt
-------
Retrieves an integer value from a file node.
.. ocv:cfunction:: int cvReadInt( const CvFileNode* node, int defaultValue=0 )
.. ocv:cfunction:: int cvReadInt( const CvFileNode* node, int default_value=0 )
:param node: File node
:param defaultValue: The value that is returned if ``node`` is NULL
:param default_value: The value that is returned if ``node`` is NULL
The function returns an integer that is represented by the file node. If the file node is NULL, the
``defaultValue`` is returned (thus, it is convenient to call the function right after :ocv:cfunc:`GetFileNode` without checking for a NULL pointer). If the file node has type ``CV_NODE_INT``, then ``node->data.i`` is returned. If the file node has type ``CV_NODE_REAL``, then ``node->data.f``
``default_value`` is returned (thus, it is convenient to call the function right after :ocv:cfunc:`GetFileNode` without checking for a NULL pointer). If the file node has type ``CV_NODE_INT``, then ``node->data.i`` is returned. If the file node has type ``CV_NODE_REAL``, then ``node->data.f``
is converted to an integer and returned. Otherwise, an error is reported.
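For example (a sketch, assuming a hypothetical YAML file with a top-level ``frame_count`` entry): ::

    #include <opencv2/core/core_c.h>
    #include <cstdio>

    int main()
    {
        CvFileStorage* fs = cvOpenFileStorage("cfg.yml", 0, CV_STORAGE_READ);
        CvFileNode* node = cvGetFileNodeByName(fs, 0, "frame_count");
        int frame_count = cvReadInt(node, 1);    // 1 is returned if the entry is missing
        printf("frame_count = %d\n", frame_count);
        cvReleaseFileStorage(&fs);
        return 0;
    }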
ReadIntByName
-------------
Finds a file node and returns its value.
.. ocv:cfunction:: int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, int defaultValue=0 )
.. ocv:cfunction:: int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, int default_value=0 )
:param fs: File storage
@ -415,7 +416,7 @@ Finds a file node and returns its value.
:param name: The node name
:param defaultValue: The value that is returned if the file node is not found
:param default_value: The value that is returned if the file node is not found
The function is a simple superposition of :ocv:cfunc:`GetFileNodeByName` and :ocv:cfunc:`ReadInt`.
@ -459,15 +460,15 @@ ReadReal
--------
Retrieves a floating-point value from a file node.
.. ocv:cfunction:: double cvReadReal( const CvFileNode* node, double defaultValue=0. )
.. ocv:cfunction:: double cvReadReal( const CvFileNode* node, double default_value=0. )
:param node: File node
:param defaultValue: The value that is returned if ``node`` is NULL
:param default_value: The value that is returned if ``node`` is NULL
The function returns a floating-point value
that is represented by the file node. If the file node is NULL, the
``defaultValue``
``default_value``
is returned (thus, it is convenient to call
the function right after
:ocv:cfunc:`GetFileNode`
@ -489,7 +490,7 @@ ReadRealByName
--------------
Finds a file node and returns its value.
.. ocv:cfunction:: double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, double defaultValue=0.)
.. ocv:cfunction:: double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, double default_value=0. )
:param fs: File storage
@ -497,7 +498,7 @@ Finds a file node and returns its value.
:param name: The node name
:param defaultValue: The value that is returned if the file node is not found
:param default_value: The value that is returned if the file node is not found
The function is a simple superposition of
:ocv:cfunc:`GetFileNodeByName`
@ -510,15 +511,15 @@ ReadString
----------
Retrieves a text string from a file node.
.. ocv:cfunction:: const char* cvReadString( const CvFileNode* node, const char* defaultValue=NULL )
.. ocv:cfunction:: const char* cvReadString( const CvFileNode* node, const char* default_value=NULL )
:param node: File node
:param defaultValue: The value that is returned if ``node`` is NULL
:param default_value: The value that is returned if ``node`` is NULL
The function returns a text string that is represented
by the file node. If the file node is NULL, the
``defaultValue``
``default_value``
is returned (thus, it is convenient to call the function right after
:ocv:cfunc:`GetFileNode`
without checking for a NULL pointer). If
@ -533,7 +534,7 @@ ReadStringByName
----------------
Finds a file node by its name and returns its value.
.. ocv:cfunction:: const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, const char* defaultValue=NULL )
.. ocv:cfunction:: const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, const char* name, const char* default_value=NULL )
:param fs: File storage
@ -541,7 +542,7 @@ Finds a file node by its name and returns its value.
:param name: The node name
:param defaultValue: The value that is returned if the file node is not found
:param default_value: The value that is returned if the file node is not found
The function is a simple superposition of
:ocv:cfunc:`GetFileNodeByName`
@ -569,9 +570,9 @@ Release
-------
Releases an object.
.. ocv:cfunction:: void cvRelease( void** structPtr )
.. ocv:cfunction:: void cvRelease( void** struct_ptr )
:param structPtr: Double pointer to the object
:param struct_ptr: Double pointer to the object
The function finds the type of a given object and calls
``release``
@ -593,12 +594,13 @@ Save
----
Saves an object to a file.
.. ocv:cfunction:: void cvSave( const char* filename, const void* structPtr, const char* name=NULL, const char* comment=NULL, CvAttrList attributes=cvAttrList())
.. ocv:cfunction:: void cvSave( const char* filename, const void* struct_ptr, const char* name=NULL, const char* comment=NULL, CvAttrList attributes=cvAttrList() )
.. ocv:pyoldfunction:: cv.Save(filename, structPtr, name=None, comment=None)-> None
:param filename: File name
:param structPtr: Object to save
:param struct_ptr: Object to save
:param name: Optional object name. If it is NULL, the name will be formed from ``filename`` .
@ -659,7 +661,7 @@ StartWriteStruct
----------------
Starts writing a new structure.
.. ocv:cfunction:: void cvStartWriteStruct( CvFileStorage* fs, const char* name, int struct_flags, const char* typeName=NULL, CvAttrList attributes=cvAttrList())
.. ocv:cfunction:: void cvStartWriteStruct( CvFileStorage* fs, const char* name, int struct_flags, const char* type_name=NULL, CvAttrList attributes=cvAttrList() )
:param fs: File storage
@ -675,7 +677,7 @@ Starts writing a new structure.
* **CV_NODE_FLOW** the optional flag that makes sense only for YAML streams. It means that the structure is written as a flow (not as a block), which is more compact. It is recommended to use this flag for structures or arrays whose elements are all scalars.
:param typeName: Optional parameter - the object type name. In
:param type_name: Optional parameter - the object type name. In
case of XML it is written as a ``type_id`` attribute of the
structure opening tag. In the case of YAML it is written after a colon
following the structure name (see the example in :ocv:struct:`CvFileStorage`
@ -692,9 +694,9 @@ TypeOf
------
Returns the type of an object.
.. ocv:cfunction:: CvTypeInfo* cvTypeOf( const void* structPtr )
.. ocv:cfunction:: CvTypeInfo* cvTypeOf( const void* struct_ptr )
:param structPtr: The object pointer
:param struct_ptr: The object pointer
The function finds the type of a given object. It iterates through the list of registered types and calls the ``is_instance`` function/method for every type info structure with that object until one of them returns non-zero or until the whole list has been traversed. In the latter case, the function returns NULL.
@ -703,9 +705,9 @@ UnregisterType
--------------
Unregisters the type.
.. ocv:cfunction:: void cvUnregisterType( const char* typeName )
.. ocv:cfunction:: void cvUnregisterType( const char* type_name )
:param typeName: Name of an unregistered type
:param type_name: Name of an unregistered type
The function unregisters a type with a specified name. If the name is unknown, it is possible to locate the type info by an instance of the type using :ocv:cfunc:`TypeOf` or by iterating the type list, starting from :ocv:cfunc:`FirstType`, and then calling ``cvUnregisterType(info->typeName)``.
@ -774,13 +776,13 @@ WriteComment
------------
Writes a comment.
.. ocv:cfunction:: void cvWriteComment( CvFileStorage* fs, const char* comment, int eolComment)
.. ocv:cfunction:: void cvWriteComment( CvFileStorage* fs, const char* comment, int eol_comment )
:param fs: File storage
:param comment: The written comment, single-line or multi-line
:param eolComment: If non-zero, the function tries to put the comment at the end of current line. If the flag is zero, if the comment is multi-line, or if it does not fit at the end of the current line, the comment starts a new line.
:param eol_comment: If non-zero, the function tries to put the comment at the end of current line. If the flag is zero, if the comment is multi-line, or if it does not fit at the end of the current line, the comment starts a new line.
The function writes a comment into file storage. The comments are skipped when the storage is read.
@ -7,10 +7,11 @@ abs
---
Computes an absolute value of each matrix element.
.. ocv:function:: MatExpr abs(const Mat& src)
.. ocv:function:: MatExpr abs(const MatExpr& src)
.. ocv:function:: MatExpr abs( const Mat& m )
.. ocv:function:: MatExpr abs( const MatExpr& e )
:param src: Matrix or matrix expression.
:param m: Matrix.
:param e: Matrix expression.
``abs`` is a meta-function that is expanded to one of :ocv:func:`absdiff` forms:
@ -373,7 +374,8 @@ Calculates the covariance matrix of a set of vectors.
.. ocv:pyfunction:: cv2.calcCovarMatrix(samples, flags[, covar[, mean[, ctype]]]) -> covar, mean
.. ocv:cfunction:: void cvCalcCovarMatrix( const CvArr** vects, int count, CvArr* covMat, CvArr* avg, int flags)
.. ocv:cfunction:: void cvCalcCovarMatrix( const CvArr** vects, int count, CvArr* cov_mat, CvArr* avg, int flags )
.. ocv:pyoldfunction:: cv.CalcCovarMatrix(vects, covMat, avg, flags)-> None
:param samples: Samples stored either as separate matrices or as rows/columns of a single matrix.
@ -428,7 +430,8 @@ Calculates the magnitude and angle of 2D vectors.
.. ocv:pyfunction:: cv2.cartToPolar(x, y[, magnitude[, angle[, angleInDegrees]]]) -> magnitude, angle
.. ocv:cfunction:: void cvCartToPolar( const CvArr* x, const CvArr* y, CvArr* magnitude, CvArr* angle=NULL, int angleInDegrees=0)
.. ocv:cfunction:: void cvCartToPolar( const CvArr* x, const CvArr* y, CvArr* magnitude, CvArr* angle=NULL, int angle_in_degrees=0 )
.. ocv:pyoldfunction:: cv.CartToPolar(x, y, magnitude, angle=None, angleInDegrees=0)-> None
:param x: Array of x-coordinates. This must be a single-precision or double-precision floating-point array.
@ -458,11 +461,11 @@ checkRange
----------
Checks every element of an input array for invalid values.
.. ocv:function:: bool checkRange(InputArray src, bool quiet=true, Point* pos=0, double minVal=-DBL_MAX, double maxVal=DBL_MAX)
.. ocv:function:: bool checkRange( InputArray a, bool quiet=true, Point* pos=0, double minVal=-DBL_MAX, double maxVal=DBL_MAX )
.. ocv:pyfunction:: cv2.checkRange(a[, quiet[, minVal[, maxVal]]]) -> retval, pt
.. ocv:pyfunction:: cv2.checkRange(a[, quiet[, minVal[, maxVal]]]) -> retval, pos
:param src: Array to check.
:param a: Array to check.
:param quiet: Flag indicating whether the functions quietly return false when the array elements are out of range or they throw an exception.
@ -487,13 +490,13 @@ Performs the per-element comparison of two arrays or an array and scalar value.
.. ocv:pyfunction:: cv2.compare(src1, src2, cmpop[, dst]) -> dst
.. ocv:cfunction:: void cvCmp(const CvArr* src1, const CvArr* src2, CvArr* dst, int cmpOp)
.. ocv:cfunction:: void cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op )
.. ocv:pyoldfunction:: cv.Cmp(src1, src2, dst, cmpOp)-> None
.. ocv:cfunction:: void cvCmpS(const CvArr* src1, double src2, CvArr* dst, int cmpOp)
.. ocv:cfunction:: void cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op )
.. ocv:pyoldfunction:: cv.CmpS(src1, src2, dst, cmpOp)-> None
.. ocv:pyoldfunction:: cv.CmpS(src, value, dst, cmpOp)-> None
:param src1: First source array or a scalar (in the case of ``cvCmp``, ``cv.Cmp``, ``cvCmpS``, ``cv.CmpS`` it is always an array). When it is array, it must have a single channel.
@ -629,20 +632,21 @@ countNonZero
------------
Counts non-zero array elements.
.. ocv:function:: int countNonZero( InputArray mtx )
.. ocv:function:: int countNonZero( InputArray src )
.. ocv:pyfunction:: cv2.countNonZero(src) -> retval
.. ocv:cfunction:: int cvCountNonZero(const CvArr* arr)
.. ocv:pyoldfunction:: cv.CountNonZero(arr)-> int
:param mtx: Single-channel array.
:param src: Single-channel array.
The function returns the number of non-zero elements in ``mtx`` :
The function returns the number of non-zero elements in ``src`` :
.. math::
\sum _{I: \; \texttt{mtx} (I) \ne0 } 1
\sum _{I: \; \texttt{src} (I) \ne0 } 1
.. seealso::
@ -658,9 +662,9 @@ cvarrToMat
----------
Converts ``CvMat``, ``IplImage`` , or ``CvMatND`` to ``Mat``.
.. ocv:function:: Mat cvarrToMat(const CvArr* src, bool copyData=false, bool allowND=true, int coiMode=0)
.. ocv:function:: Mat cvarrToMat( const CvArr* arr, bool copyData=false, bool allowND=true, int coiMode=0 )
:param src: Source ``CvMat``, ``IplImage`` , or ``CvMatND`` .
:param arr: Source ``CvMat``, ``IplImage`` , or ``CvMatND`` .
:param copyData: When it is false (default value), no data is copied and only the new header is created. In this case, the original array should not be deallocated while the new matrix header is used. If the parameter is true, all the data is copied and you may deallocate the original array right after the conversion.
@ -675,7 +679,7 @@ Converts ``CvMat``, ``IplImage`` , or ``CvMatND`` to ``Mat``.
The function ``cvarrToMat`` converts ``CvMat``, ``IplImage`` , or ``CvMatND`` header to
:ocv:class:`Mat` header, and optionally duplicates the underlying data. The constructed header is returned by the function.
When ``copyData=false`` , the conversion is done really fast (in O(1) time) and the newly created matrix header will have ``refcount=0`` , which means that no reference counting is done for the matrix data. In this case, you have to preserve the data until the new header is destructed. Otherwise, when ``copyData=true`` , the new buffer is allocated and managed as if you created a new matrix from scratch and copied the data there. That is, ``cvarrToMat(src, true)`` is equivalent to ``cvarrToMat(src, false).clone()`` (assuming that COI is not set). The function provides a uniform way of supporting
When ``copyData=false`` , the conversion is done really fast (in O(1) time) and the newly created matrix header will have ``refcount=0`` , which means that no reference counting is done for the matrix data. In this case, you have to preserve the data until the new header is destructed. Otherwise, when ``copyData=true`` , the new buffer is allocated and managed as if you created a new matrix from scratch and copied the data there. That is, ``cvarrToMat(arr, true)`` is equivalent to ``cvarrToMat(arr, false).clone()`` (assuming that COI is not set). The function provides a uniform way of supporting
``CvArr`` paradigm in the code that is migrated to use new-style data structures internally. The reverse transformation, from
``Mat`` to
``CvMat`` or
@ -819,7 +823,8 @@ Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-
.. ocv:pyfunction:: cv2.dft(src[, dst[, flags[, nonzeroRows]]]) -> dst
.. ocv:cfunction:: void cvDFT(const CvArr* src, CvArr* dst, int flags, int nonzeroRows=0)
.. ocv:cfunction:: void cvDFT( const CvArr* src, CvArr* dst, int flags, int nonzero_rows=0 )
.. ocv:pyoldfunction:: cv.DFT(src, dst, flags, nonzeroRows=0)-> None
:param src: Source array that could be real or complex.
@ -976,7 +981,7 @@ Performs per-element division of two arrays or a scalar by an array.
.. ocv:pyfunction:: cv2.divide(scale, src2[, dst[, dtype]]) -> dst
.. ocv:cfunction:: void cvDiv(const CvArr* src1, const CvArr* src2, CvArr* dst, double scale=1)
.. ocv:pyoldfunction:: cv.Div(src1, src2, dst, scale)-> None
.. ocv:pyoldfunction:: cv.Div(src1, src2, dst, scale=1) -> None
:param src1: First source array.
@ -1021,8 +1026,9 @@ Returns the determinant of a square floating-point matrix.
.. ocv:pyfunction:: cv2.determinant(mtx) -> retval
.. ocv:cfunction:: double cvDet(const CvArr* mtx)
.. ocv:pyoldfunction:: cv.Det(mtx)-> double
.. ocv:cfunction:: double cvDet( const CvArr* mat )
.. ocv:pyoldfunction:: cv.Det(mat) -> float
:param mtx: Input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size.
@ -1043,18 +1049,17 @@ For symmetric positively-determined matrices, it is also possible to use :ocv:fu
eigen
-----
Computes eigenvalues and eigenvectors of a symmetric matrix.
.. ocv:function:: bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, int highindex=-1)
.. ocv:function:: bool eigen(InputArray src, OutputArray eigenvalues, OutputArray eigenvectors, int lowindex=-1,int highindex=-1)
.. ocv:cfunction:: void cvEigenVV( CvArr* src, CvArr* eigenvectors, CvArr* eigenvalues, double eps=0, int lowindex=-1, int highindex=-1)
.. ocv:pyfunction:: cv2.eigen(src, computeEigenvectors[, eigenvalues[, eigenvectors]]) -> retval, eigenvalues, eigenvectors
.. ocv:pyoldfunction:: cv.EigenVV(src, eigenvectors, eigenvalues, eps, lowindex=-1, highindex=-1)-> None
.. ocv:cfunction:: void cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, double eps=0, int lowindex=-1, int highindex=-1 )
Computes eigenvalues and eigenvectors of a symmetric matrix.
.. ocv:pyfunction:: cv2.eigen(src, computeEigenvectors[, eigenvalues[, eigenvectors[, lowindex[, highindex]]]]) -> retval, eigenvalues, eigenvectors
.. ocv:pyoldfunction:: cv.EigenVV(mat, evects, evals, eps, lowindex=-1, highindex=-1)-> None
:param src: Input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type, square size and be symmetrical (``src`` :sup:`T` == ``src``).
@ -1107,13 +1112,13 @@ extractImageCOI
---------------
Extracts the selected image channel.
.. ocv:function:: void extractImageCOI(const CvArr* src, OutputArray dst, int coi=-1)
.. ocv:function:: void extractImageCOI( const CvArr* arr, OutputArray coiimg, int coi=-1 )
:param src: Source array. It should be a pointer to ``CvMat`` or ``IplImage`` .
:param arr: Source array. It should be a pointer to ``CvMat`` or ``IplImage`` .
:param dst: Destination array with a single channel and the same size and depth as ``src`` .
:param coiimg: Destination array with a single channel and the same size and depth as ``arr`` .
:param coi: If the parameter is ``>=0`` , it specifies the channel to extract. If it is ``<0`` and ``src`` is a pointer to ``IplImage`` with a valid COI set, the selected COI is extracted.
:param coi: If the parameter is ``>=0`` , it specifies the channel to extract. If it is ``<0`` and ``arr`` is a pointer to ``IplImage`` with a valid COI set, the selected COI is extracted.
The function ``extractImageCOI`` is used to extract an image COI from an old-style array and put the result to the new-style C++ matrix. As usual, the destination matrix is reallocated using ``Mat::create`` if needed.
@ -1128,13 +1133,13 @@ insertImageCOI
---------------
Copies the selected image channel from a new-style C++ matrix to the old-style C array.
.. ocv:function:: void insertImageCOI(InputArray src, CvArr* dst, int coi=-1)
.. ocv:function:: void insertImageCOI( InputArray coiimg, CvArr* arr, int coi=-1 )
:param src: Source array with a single channel and the same size and depth as ``dst``.
:param coiimg: Source array with a single channel and the same size and depth as ``arr``.
:param dst: Destination array, it should be a pointer to ``CvMat`` or ``IplImage``.
:param arr: Destination array, it should be a pointer to ``CvMat`` or ``IplImage``.
:param coi: If the parameter is ``>=0`` , it specifies the channel to insert. If it is ``<0`` and ``dst`` is a pointer to ``IplImage`` with a valid COI set, the selected COI is extracted.
:param coi: If the parameter is ``>=0`` , it specifies the channel to insert. If it is ``<0`` and ``arr`` is a pointer to ``IplImage`` with a valid COI set, the selected COI is extracted.
The function ``insertImageCOI`` is used to copy a single-channel new-style C++ matrix into the selected channel of an old-style array.
@ -1164,7 +1169,8 @@ Flips a 2D array around vertical, horizontal, or both axes.
.. ocv:pyfunction:: cv2.flip(src, flipCode[, dst]) -> dst
.. ocv:cfunction:: void cvFlip(const CvArr* src, CvArr* dst=NULL, int flipMode=0)
.. ocv:cfunction:: void cvFlip( const CvArr* src, CvArr* dst=NULL, int flip_mode=0 )
.. ocv:pyoldfunction:: cv.Flip(src, dst=None, flipMode=0)-> None
:param src: Source array.
@ -1208,7 +1214,7 @@ gemm
----
Performs generalized matrix multiplication.
.. ocv:function:: void gemm(InputArray src1, InputArray src2, double alpha, InputArray src3, double beta, OutputArray dst, int flags=0)
.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double gamma, OutputArray dst, int flags=0 )
.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, gamma[, dst[, flags]]) -> dst
@ -1415,9 +1421,9 @@ Finds the inverse or pseudo-inverse of a matrix.
.. ocv:pyfunction:: cv2.invert(src[, dst[, flags]]) -> retval, dst
.. ocv:cfunction:: double cvInvert(const CvArr* src, CvArr* dst, int flags=CV_LU)
.. ocv:cfunction:: double cvInvert( const CvArr* src, CvArr* dst, int method=CV_LU )
.. ocv:pyoldfunction:: cv.Invert(src, dst, flags=CV_LU)-> double
.. ocv:pyoldfunction:: cv.Invert(src, dst, method=CV_LU) -> float
:param src: Source floating-point ``M x N`` matrix.
@ -1487,7 +1493,7 @@ LUT
---
Performs a look-up table transform of an array.
.. ocv:function:: void LUT(InputArray src, InputArray lut, OutputArray dst)
.. ocv:function:: void LUT( InputArray src, InputArray lut, OutputArray dst, int interpolation=0 )
.. ocv:pyfunction:: cv2.LUT(src, lut[, dst[, interpolation]]) -> dst
@ -1552,13 +1558,13 @@ Mahalanobis
-----------
Calculates the Mahalanobis distance between two vectors.
.. ocv:function:: double Mahalanobis(InputArray vec1, InputArray vec2, InputArray icovar)
.. ocv:function:: double Mahalanobis( InputArray v1, InputArray v2, InputArray icovar )
.. ocv:pyfunction:: cv2.Mahalanobis(v1, v2, icovar) -> retval
.. ocv:cfunction:: double cvMahalanobis( const CvArr* vec1, const CvArr* vec2, CvArr* icovar)
.. ocv:cfunction:: double cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat )
.. ocv:pyoldfunction:: cv.Mahalanobis(vec1, vec2, icovar)-> None
.. ocv:pyoldfunction:: cv.Mahalonobis(vec1, vec2, mat) -> None
:param vec1: First 1D source vector.
@ -1582,17 +1588,17 @@ max
---
Calculates per-element maximum of two arrays or an array and a scalar.
.. ocv:function:: MatExpr max(const Mat& src1, const Mat& src2)
.. ocv:function:: MatExpr max( const Mat& a, const Mat& b )
.. ocv:function:: MatExpr max(const Mat& src1, double value)
.. ocv:function:: MatExpr max( const Mat& a, double s )
.. ocv:function:: MatExpr max(double value, const Mat& src1)
.. ocv:function:: MatExpr max( double s, const Mat& a )
.. ocv:function:: void max(InputArray src1, InputArray src2, OutputArray dst)
.. ocv:function:: void max(const Mat& src1, const Mat& src2, Mat& dst)
.. ocv:function:: void max(const Mat& src1, double value, Mat& dst)
.. ocv:function:: void max( const Mat& src1, double src2, Mat& dst )
.. ocv:pyfunction:: cv2.max(src1, src2[, dst]) -> dst
@ -1643,8 +1649,9 @@ Calculates an average (mean) of array elements.
.. ocv:pyfunction:: cv2.mean(src[, mask]) -> retval
.. ocv:cfunction:: CvScalar cvAvg(const CvArr* src, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Avg(src, mask=None)-> CvScalar
.. ocv:cfunction:: CvScalar cvAvg( const CvArr* arr, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Avg(arr, mask=None) -> scalar
:param src: Source array that should have from 1 to 4 channels so that the result can be stored in :ocv:class:`Scalar_` .
@ -1675,8 +1682,9 @@ Calculates a mean and standard deviation of array elements.
.. ocv:pyfunction:: cv2.meanStdDev(src[, mean[, stddev[, mask]]]) -> mean, stddev
.. ocv:cfunction:: void cvAvgSdv(const CvArr* src, CvScalar* mean, CvScalar* stdDev, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.AvgSdv(src, mask=None)-> (mean, stdDev)
.. ocv:cfunction:: void cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.AvgSdv(arr, mask=None) -> (mean, stdDev)
:param src: Source array that should have from 1 to 4 channels so that the results can be stored in :ocv:class:`Scalar_` 's.
@ -1712,7 +1720,7 @@ Composes a multi-channel array from several single-channel arrays.
.. ocv:function:: void merge(const Mat* mv, size_t count, OutputArray dst)
.. ocv:function:: void merge(const vector<Mat>& mv, OutputArray dst)
.. ocv:function:: void merge( InputArrayOfArrays mv, OutputArray dst )
.. ocv:pyfunction:: cv2.merge(mv[, dst]) -> dst
@ -1743,17 +1751,17 @@ min
---
Calculates per-element minimum of two arrays or array and a scalar.
.. ocv:function:: MatExpr min(const Mat& src1, const Mat& src2)
.. ocv:function:: MatExpr min( const Mat& a, const Mat& b )
.. ocv:function:: MatExpr min(const Mat& src1, double value)
.. ocv:function:: MatExpr min( const Mat& a, double s )
.. ocv:function:: MatExpr min(double value, const Mat& src1)
.. ocv:function:: MatExpr min( double s, const Mat& a )
.. ocv:function:: void min(InputArray src1, InputArray src2, OutputArray dst)
.. ocv:function:: void min(const Mat& src1, const Mat& src2, Mat& dst)
.. ocv:function:: void min(const Mat& src1, double value, Mat& dst)
.. ocv:function:: void min( const Mat& src1, double src2, Mat& dst )
.. ocv:pyfunction:: cv2.min(src1, src2[, dst]) -> dst
@ -1834,11 +1842,12 @@ Finds the global minimum and maximum in an array.
.. ocv:function:: void minMaxLoc(InputArray src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray())
.. ocv:function:: void minMaxLoc(const SparseMat& src, double* minVal, double* maxVal, int* minIdx=0, int* maxIdx=0)
.. ocv:function:: void minMaxLoc( const SparseMat& a, double* minVal, double* maxVal, int* minIdx=0, int* maxIdx=0 )
.. ocv:pyfunction:: cv2.minMaxLoc(src[, mask]) -> minVal, maxVal, minLoc, maxLoc
.. ocv:cfunction:: void cvMinMaxLoc(const CvArr* arr, double* minVal, double* maxVal, CvPoint* minLoc=NULL, CvPoint* maxLoc=NULL, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, CvPoint* min_loc=NULL, CvPoint* max_loc=NULL, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MinMaxLoc(arr, mask=None)-> (minVal, maxVal, minLoc, maxLoc)
:param src: Source single-channel array.
@ -1879,22 +1888,23 @@ mixChannels
-----------
Copies specified channels from input arrays to the specified channels of output arrays.
.. ocv:function:: void mixChannels(const Mat* src, int nsrc, Mat* dst, int ndst, const int* fromTo, size_t npairs)
.. ocv:function:: void mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
.. ocv:function:: void mixChannels(const vector<Mat>& src, vector<Mat>& dst, const int* fromTo, int npairs)
.. ocv:function:: void mixChannels( const vector<Mat>& src, vector<Mat>& dst, const int* fromTo, size_t npairs )
.. ocv:pyfunction:: cv2.mixChannels(src, dst, fromTo) -> None
.. ocv:cfunction:: void cvMixChannels(const CvArr** src, int srcCount, CvArr** dst, int dstCount, const int* fromTo, int pairCount)
.. ocv:cfunction:: void cvMixChannels( const CvArr** src, int src_count, CvArr** dst, int dst_count, const int* from_to, int pair_count )
.. ocv:pyoldfunction:: cv.MixChannels(src, dst, fromTo) -> None
:param src: Input array or vector of matrices. All the matrices must have the same size and the same depth.
:param nsrc: Number of matrices in ``src`` .
:param nsrcs: Number of matrices in ``src`` .
:param dst: Output array or vector of matrices. All the matrices *must be allocated* . Their size and depth must be the same as in ``src[0]`` .
:param ndst: Number of matrices in ``dst`` .
:param ndsts: Number of matrices in ``dst`` .
:param fromTo: Array of index pairs specifying which channels are copied and where. ``fromTo[k*2]`` is a 0-based index of the input channel in ``src`` . ``fromTo[k*2+1]`` is an index of the output channel in ``dst`` . The continuous channel numbering is used: the first input image channels are indexed from ``0`` to ``src[0].channels()-1`` , the second input image channels are indexed from ``src[0].channels()`` to ``src[0].channels() + src[1].channels()-1``, and so on. The same scheme is used for the output image channels. As a special case, when ``fromTo[k*2]`` is negative, the corresponding output channel is filled with zero .
@ -1935,7 +1945,7 @@ mulSpectrums
------------
Performs the per-element multiplication of two Fourier spectrums.
.. ocv:function:: void mulSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, bool conj=false)
.. ocv:function:: void mulSpectrums( InputArray a, InputArray b, OutputArray c, int flags, bool conjB=false )
.. ocv:pyfunction:: cv2.mulSpectrums(a, b, flags[, c[, conjB]]) -> c
@ -1950,13 +1960,13 @@ Performs the per-element multiplication of two Fourier spectrums.
:param flags: Operation flags. Currently, the only supported flag is ``DFT_ROWS``, which indicates that each row of ``src1`` and ``src2`` is an independent 1D Fourier spectrum.
:param conj: Optional flag that conjugates the second source array before the multiplication (true) or not (false).
:param conjB: Optional flag that conjugates the second source array before the multiplication (true) or not (false).
The function ``mulSpectrums`` performs the per-element multiplication of the two CCS-packed or complex matrices that are results of a real or complex Fourier transform.
The function, together with
:ocv:func:`dft` and
:ocv:func:`idft` , may be used to calculate convolution (pass ``conj=false`` ) or correlation (pass ``conj=true`` ) of two arrays rapidly. When the arrays are complex, they are simply multiplied (per element) with an optional conjugation of the second-array elements. When the arrays are real, they are assumed to be CCS-packed (see
:ocv:func:`idft` , may be used to calculate convolution (pass ``conjB=false`` ) or correlation (pass ``conjB=true`` ) of two arrays rapidly. When the arrays are complex, they are simply multiplied (per element) with an optional conjugation of the second-array elements. When the arrays are real, they are assumed to be CCS-packed (see
:ocv:func:`dft` for details).
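A condensed sketch of DFT-based convolution of two single-channel float matrices (sizes and padding are chosen for illustration only): ::

    #include <opencv2/core/core.hpp>
    using namespace cv;

    int main()
    {
        Mat A(64, 64, CV_32F), B(16, 16, CV_32F);
        randu(A, Scalar::all(0), Scalar::all(1));
        randu(B, Scalar::all(0), Scalar::all(1));

        // zero-pad to an optimal DFT size that fits the full convolution result
        Size dftSize(getOptimalDFTSize(A.cols + B.cols - 1),
                     getOptimalDFTSize(A.rows + B.rows - 1));
        Mat tempA = Mat::zeros(dftSize, CV_32F), tempB = Mat::zeros(dftSize, CV_32F);
        A.copyTo(tempA(Rect(0, 0, A.cols, A.rows)));
        B.copyTo(tempB(Rect(0, 0, B.cols, B.rows)));

        dft(tempA, tempA, 0, A.rows);
        dft(tempB, tempB, 0, B.rows);
        mulSpectrums(tempA, tempB, tempA, 0);                   // conjB=false -> convolution
        dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, A.rows + B.rows - 1);

        Mat C = tempA(Rect(0, 0, A.cols + B.cols - 1, A.rows + B.rows - 1)).clone();
        return 0;
    }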
@ -1965,12 +1975,12 @@ multiply
--------
Calculates the per-element scaled product of two arrays.
.. ocv:function:: void multiply(InputArray src1, InputArray src2, OutputArray dst, double scale=1)
.. ocv:function:: void multiply( InputArray src1, InputArray src2, OutputArray dst, double scale=1, int dtype=-1 )
.. ocv:pyfunction:: cv2.multiply(src1, src2[, dst[, scale[, dtype]]]) -> dst
.. ocv:cfunction:: void cvMul(const CvArr* src1, const CvArr* src2, CvArr* dst, double scale=1)
.. ocv:pyoldfunction:: cv.Mul(src1, src2, dst, scale)-> None
.. ocv:pyoldfunction:: cv.Mul(src1, src2, dst, scale=1) -> None
:param src1: First source array.
@ -2014,12 +2024,13 @@ mulTransposed
-------------
Calculates the product of a matrix and its transposition.
.. ocv:function:: void mulTransposed(InputArray src, OutputArray dst, bool aTa, InputArray delta=noArray(), double scale=1, int rtype=-1)
.. ocv:function:: void mulTransposed( InputArray src, OutputArray dst, bool aTa, InputArray delta=noArray(), double scale=1, int dtype=-1 )
.. ocv:pyfunction:: cv2.mulTransposed(src, aTa[, dst[, delta[, scale[, dtype]]]]) -> dst
.. ocv:cfunction:: void cvMulTransposed(const CvArr* src, CvArr* dst, int order, const CvArr* delta=NULL, double scale=1.0)
.. ocv:pyoldfunction:: cv.MulTransposed(src, dst, order, delta=None, scale)-> None
.. ocv:cfunction:: void cvMulTransposed( const CvArr* src, CvArr* dst, int order, const CvArr* delta=NULL, double scale=1. )
.. ocv:pyoldfunction:: cv.MulTransposed(src, dst, order, delta=None, scale=1.0) -> None
:param src: Source single-channel matrix. Note that unlike :ocv:func:`gemm`, the function can multiply not only floating-point matrices.
@ -2027,11 +2038,11 @@ Calculates the product of a matrix and its transposition.
:param aTa: Flag specifying the multiplication ordering. See the description below.
:param delta: Optional delta matrix subtracted from ``src`` before the multiplication. When the matrix is empty ( ``delta=noArray()`` ), it is assumed to be zero, that is, nothing is subtracted. If it has the same size as ``src`` , it is simply subtracted. Otherwise, it is "repeated" (see :ocv:func:`repeat` ) to cover the full ``src`` and then subtracted. Type of the delta matrix, when it is not empty, must be the same as the type of created destination matrix. See the ``rtype`` parameter description below.
:param delta: Optional delta matrix subtracted from ``src`` before the multiplication. When the matrix is empty ( ``delta=noArray()`` ), it is assumed to be zero, that is, nothing is subtracted. If it has the same size as ``src`` , it is simply subtracted. Otherwise, it is "repeated" (see :ocv:func:`repeat` ) to cover the full ``src`` and then subtracted. Type of the delta matrix, when it is not empty, must be the same as the type of created destination matrix. See the ``dtype`` parameter description below.
:param scale: Optional scale factor for the matrix product.
:param rtype: Optional type of the destination matrix. When it is negative, the destination matrix will have the same type as ``src`` . Otherwise, it will be ``type=CV_MAT_DEPTH(rtype)`` that should be either ``CV_32F`` or ``CV_64F`` .
:param dtype: Optional type of the destination matrix. When it is negative, the destination matrix will have the same type as ``src`` . Otherwise, it will be ``type=CV_MAT_DEPTH(dtype)`` that should be either ``CV_32F`` or ``CV_64F`` .
The function ``mulTransposed`` calculates the product of ``src`` and its transposition:
@ -2062,15 +2073,16 @@ Calculates an absolute array norm, an absolute difference norm, or a relative di
.. ocv:function:: double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray())
.. ocv:function:: double norm(InputArray src1, InputArray src2, int normType, InputArray mask=noArray())
.. ocv:function:: double norm( InputArray src1, InputArray src2, int normType=NORM_L2, InputArray mask=noArray() )
.. ocv:function:: double norm( const SparseMat& src, int normType )
.. ocv:pyfunction:: cv2.norm(src1[, normType[, mask]]) -> retval
.. ocv:pyfunction:: cv2.norm(src1, src2[, normType[, mask]]) -> retval
.. ocv:cfunction:: double cvNorm(const CvArr* arr1, const CvArr* arr2=NULL, int normType=CV_L2, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Norm(arr1, arr2, normType=CV_L2, mask=None)-> double
.. ocv:cfunction:: double cvNorm( const CvArr* arr1, const CvArr* arr2=NULL, int norm_type=CV_L2, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Norm(arr1, arr2, normType=CV_L2, mask=None) -> float
:param src1: First source array.
@ -2116,7 +2128,7 @@ normalize
---------
Normalizes the norm or value range of an array.
.. ocv:function:: void normalize(const InputArray src, OutputArray dst, double alpha=1, double beta=0, int normType=NORM_L2, int rtype=-1, InputArray mask=noArray())
.. ocv:function:: void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray() )
.. ocv:function:: void normalize(const SparseMat& src, SparseMat& dst, double alpha, int normType)
@ -2132,7 +2144,7 @@ Normalizes the norm or value range of an array.
:param normType: Normalization type. See the details below.
:param rtype: When the parameter is negative, the destination array has the same type as ``src``. Otherwise, it has the same number of channels as ``src`` and the depth ``=CV_MAT_DEPTH(rtype)`` .
:param dtype: When the parameter is negative, the destination array has the same type as ``src``. Otherwise, it has the same number of channels as ``src`` and the depth ``=CV_MAT_DEPTH(dtype)`` .
:param mask: Optional operation mask.
@ -2279,7 +2291,7 @@ Projects vector(s) to the principal component subspace.
.. ocv:function:: void PCA::project(InputArray vec, OutputArray result) const
.. ocv:pyfunction:: cv2.PCAProject(vec, mean, eigenvectors[, result]) -> result
.. ocv:pyfunction:: cv2.PCAProject(data, mean, eigenvectors[, result]) -> result
:param vec: Input vector(s). They must have the same dimensionality and the same layout as the input data used at PCA phase. That is, if ``CV_PCA_DATA_AS_ROW`` are specified, then ``vec.cols==data.cols`` (vector dimensionality) and ``vec.rows`` is the number of vectors to project. The same is true for the ``CV_PCA_DATA_AS_COL`` case.
@ -2297,7 +2309,7 @@ Reconstructs vectors from their PC projections.
.. ocv:function:: void PCA::backProject(InputArray vec, OutputArray result) const
.. ocv:pyfunction:: cv2.PCABackProject(vec, mean, eigenvectors[, result]) -> result
.. ocv:pyfunction:: cv2.PCABackProject(data, mean, eigenvectors[, result]) -> result
:param vec: Coordinates of the vectors in the principal component subspace. The layout and size are the same as of ``PCA::project`` output vectors.
@ -2312,7 +2324,7 @@ perspectiveTransform
--------------------
Performs the perspective matrix transformation of vectors.
.. ocv:function:: void perspectiveTransform(InputArray src, OutputArray dst, InputArray mtx)
.. ocv:function:: void perspectiveTransform( InputArray src, OutputArray dst, InputArray m )
.. ocv:pyfunction:: cv2.perspectiveTransform(src, m[, dst]) -> dst
@ -2323,7 +2335,7 @@ Performs the perspective matrix transformation of vectors.
:param dst: Destination array of the same size and type as ``src`` .
:param mtx: ``3x3`` or ``4x4`` floating-point transformation matrix.
:param m: ``3x3`` or ``4x4`` floating-point transformation matrix.
The function ``perspectiveTransform`` transforms every element of ``src`` by treating it as a 2D or 3D vector, in the following way:
@ -2389,7 +2401,8 @@ Computes x and y coordinates of 2D vectors from their magnitude and angle.
.. ocv:pyfunction:: cv2.polarToCart(magnitude, angle[, x[, y[, angleInDegrees]]]) -> x, y
.. ocv:cfunction:: void cvPolarToCart( const CvArr* magnitude, const CvArr* angle, CvArr* x, CvArr* y, int angleInDegrees=0)
.. ocv:cfunction:: void cvPolarToCart( const CvArr* magnitude, const CvArr* angle, CvArr* x, CvArr* y, int angle_in_degrees=0 )
.. ocv:pyoldfunction:: cv.PolarToCart(magnitude, angle, x, y, angleInDegrees=0)-> None
:param magnitude: Source floating-point array of magnitudes of 2D vectors. It can be an empty matrix ( ``=Mat()`` ). In this case, the function assumes that all the magnitudes are =1. If it is not empty, it must have the same size and type as ``angle`` .
@ -2426,7 +2439,7 @@ pow
---
Raises every array element to a power.
.. ocv:function:: void pow(InputArray src, double p, OutputArray dst)
.. ocv:function:: void pow( InputArray src, double power, OutputArray dst )
.. ocv:pyfunction:: cv2.pow(src, power[, dst]) -> dst
@ -2435,15 +2448,15 @@ Raises every array element to a power.
:param src: Source array.
:param p: Exponent of power.
:param power: Exponent of power.
:param dst: Destination array of the same size and type as ``src`` .
The function ``pow`` raises every element of the input array to ``p`` :
The function ``pow`` raises every element of the input array to ``power`` :
.. math::
\texttt{dst} (I) = \fork{\texttt{src}(I)^p}{if \texttt{p} is integer}{|\texttt{src}(I)|^p}{otherwise}
\texttt{dst} (I) = \fork{\texttt{src}(I)^{\texttt{power}}}{if \texttt{power} is integer}{|\texttt{src}(I)|^{\texttt{power}}}{otherwise}
So, for a non-integer power exponent, the absolute values of input array elements are used. However, it is possible to get true values for negative values using some extra operations. In the example below, computing the 5th root of array ``src`` shows: ::
@ -2452,7 +2465,7 @@ So, for a non-integer power exponent, the absolute values of input array element
subtract(Scalar::all(0), dst, dst, mask);
For some values of ``p`` , such as integer values, 0.5 and -0.5, specialized faster algorithms are used.
For some values of ``power`` , such as integer values, 0.5 and -0.5, specialized faster algorithms are used.
Special values (NaN, Inf) are not handled.
@ -2617,11 +2630,11 @@ Generates a single uniformly-distributed random number or an array of random num
.. ocv:function:: template<typename _Tp> _Tp randu()
.. ocv:function:: void randu(InputOutputArray mtx, InputArray low, InputArray high)
.. ocv:function:: void randu( InputOutputArray dst, InputArray low, InputArray high )
.. ocv:pyfunction:: cv2.randu(dst, low, high) -> None
:param mtx: Output array of random numbers. The array must be pre-allocated.
:param dst: Output array of random numbers. The array must be pre-allocated.
:param low: Inclusive lower boundary of the generated random numbers.
@ -2630,11 +2643,11 @@ Generates a single uniformly-distributed random number or an array of random num
The template functions ``randu`` generate and return the next uniformly-distributed random value of the specified type. ``randu<int>()`` is equivalent to ``(int)theRNG();`` , and so on. See
:ocv:class:`RNG` description.
The second non-template variant of the function fills the matrix ``mtx`` with uniformly-distributed random numbers from the specified range:
The second non-template variant of the function fills the matrix ``dst`` with uniformly-distributed random numbers from the specified range:
.. math::
\texttt{low} _c \leq \texttt{mtx} (I)_c < \texttt{high} _c
\texttt{low} _c \leq \texttt{dst} (I)_c < \texttt{high} _c
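For example, a pre-allocated 8-bit matrix can be filled with uniform random values as follows (``using namespace cv`` is assumed): ::

    Mat m(3, 3, CV_8UC1);                          // must be pre-allocated
    randu(m, Scalar::all(0), Scalar::all(256));    // values drawn uniformly from [0, 256)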
.. seealso::
@ -2648,17 +2661,17 @@ randn
-----
Fills the array with normally distributed random numbers.
.. ocv:function:: void randn(InputOutputArray mtx, InputArray mean, InputArray stddev)
.. ocv:function:: void randn( InputOutputArray dst, InputArray mean, InputArray stddev )
.. ocv:pyfunction:: cv2.randn(dst, mean, stddev) -> None
:param mtx: Output array of random numbers. The array must be pre-allocated and have 1 to 4 channels.
:param dst: Output array of random numbers. The array must be pre-allocated and have 1 to 4 channels.
:param mean: Mean value (expectation) of the generated random numbers.
:param stddev: Standard deviation of the generated random numbers. It can be either a vector (in which case a diagonal standard deviation matrix is assumed) or a square matrix.
The function ``randn`` fills the matrix ``mtx`` with normally distributed random numbers with the specified mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the value range of the destination array data type.
The function ``randn`` fills the matrix ``dst`` with normally distributed random numbers with the specified mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the value range of the destination array data type.
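For example, single-channel Gaussian noise can be generated as follows (``using namespace cv`` is assumed): ::

    Mat noise(240, 320, CV_32FC1);                  // pre-allocated output
    randn(noise, Scalar::all(0), Scalar::all(1));   // zero mean, unit standard deviation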
.. seealso::
@ -2671,17 +2684,17 @@ randShuffle
-----------
Shuffles the array elements randomly.
.. ocv:function:: void randShuffle(InputOutputArray mtx, double iterFactor=1., RNG* rng=0)
.. ocv:function:: void randShuffle( InputOutputArray dst, double iterFactor=1., RNG* rng=0 )
.. ocv:pyfunction:: cv2.randShuffle(src[, dst[, iterFactor]]) -> dst
.. ocv:pyfunction:: cv2.randShuffle(dst[, iterFactor]) -> None
:param mtx: Input/output numerical 1D array.
:param dst: Input/output numerical 1D array.
:param iterFactor: Scale factor that determines the number of random swap operations. See the details below.
:param rng: Optional random number generator used for shuffling. If it is zero, :ocv:func:`theRNG` () is used instead.
The function ``randShuffle`` shuffles the specified 1D array by randomly choosing pairs of elements and swapping them. The number of such swap operations will be ``mtx.rows*mtx.cols*iterFactor`` .
The function ``randShuffle`` shuffles the specified 1D array by randomly choosing pairs of elements and swapping them. The number of such swap operations will be ``dst.rows*dst.cols*iterFactor`` .
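For example, a 1D index array can be permuted in place as follows (``using namespace cv`` is assumed): ::

    Mat idx(1, 10, CV_32SC1);
    for (int i = 0; i < idx.cols; i++)
        idx.at<int>(0, i) = i;        // 0, 1, ..., 9
    randShuffle(idx);                 // random permutation; iterFactor = 1 by default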
.. seealso::
@ -2694,20 +2707,20 @@ reduce
------
Reduces a matrix to a vector.
.. ocv:function:: void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, int dtype=-1)
.. ocv:function:: void reduce( InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1 )
.. ocv:pyfunction:: cv2.reduce(src, dim, rtype[, dst[, dtype]]) -> dst
.. ocv:cfunction:: void cvReduce(const CvArr* src, CvArr* dst, int dim=-1, int op=CV_REDUCE_SUM)
.. ocv:pyoldfunction:: cv.Reduce(src, dst, dim=-1, op=CV_REDUCE_SUM)-> None
:param mtx: Source 2D matrix.
:param src: Source 2D matrix.
:param vec: Destination vector. Its size and type is defined by ``dim`` and ``dtype`` parameters.
:param dst: Destination vector. Its size and type are defined by the ``dim`` and ``dtype`` parameters.
:param dim: Dimension index along which the matrix is reduced. 0 means that the matrix is reduced to a single row. 1 means that the matrix is reduced to a single column.
:param reduceOp: Reduction operation that could be one of the following:
:param rtype: Reduction operation that could be one of the following:
* **CV_REDUCE_SUM** The output is the sum of all rows/columns of the matrix.
@ -2717,7 +2730,7 @@ Reduces a matrix to a vector.
* **CV_REDUCE_MIN** The output is the minimum (column/row-wise) of all rows/columns of the matrix.
:param dtype: When it is negative, the destination vector will have the same type as the source matrix. Otherwise, its type will be ``CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels())`` .
:param dtype: When it is negative, the destination vector will have the same type as the source matrix. Otherwise, its type will be ``CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels())`` .
The function ``reduce`` reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a raster image. In case of ``CV_REDUCE_SUM`` and ``CV_REDUCE_AVG`` , the output may have a larger element bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction modes.
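For example, a vertical projection (per-column sums) can be sketched as follows (``CV_REDUCE_SUM`` comes from the C API headers; ``using namespace cv`` is assumed): ::

    Mat img(4, 5, CV_8UC1, Scalar(1));
    Mat colSum;
    reduce(img, colSum, 0, CV_REDUCE_SUM, CV_32S);   // 1x5 row; every element equals 4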
@ -2731,11 +2744,12 @@ Fills the destination array with repeated copies of the source array.
.. ocv:function:: void repeat(InputArray src, int ny, int nx, OutputArray dst)
.. ocv:function:: Mat repeat(InputArray src, int ny, int nx)
.. ocv:function:: Mat repeat( const Mat& src, int ny, int nx )
.. ocv:pyfunction:: cv2.repeat(src, ny, nx[, dst]) -> dst
.. ocv:cfunction:: void cvRepeat(const CvArr* src, CvArr* dst)
.. ocv:pyoldfunction:: cv.Repeat(src, dst)-> None
:param src: Source array to replicate.
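A minimal sketch tiling a ``1x2`` matrix (``using namespace cv`` is assumed): ::

    Mat tile = (Mat_<uchar>(1, 2) << 1, 2);
    Mat tiled;
    repeat(tile, 3, 2, tiled);   // 3x4 result: the tile repeated 3 times vertically and 2 times horizontally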
@ -2767,7 +2781,7 @@ scaleAdd
--------
Calculates the sum of a scaled array and another array.
.. ocv:function:: void scaleAdd(InputArray src1, double scale, InputArray src2, OutputArray dst)
.. ocv:function:: void scaleAdd( InputArray src1, double alpha, InputArray src2, OutputArray dst )
.. ocv:pyfunction:: cv2.scaleAdd(src1, alpha, src2[, dst]) -> dst
@ -2810,14 +2824,15 @@ setIdentity
-----------
Initializes a scaled identity matrix.
.. ocv:function:: void setIdentity(InputOutputArray dst, const Scalar& value=Scalar(1))
.. ocv:function:: void setIdentity( InputOutputArray mtx, const Scalar& s=Scalar(1) )
.. ocv:pyfunction:: cv2.setIdentity(mtx[, s]) -> None
.. ocv:cfunction:: void cvSetIdentity(CvArr* mat, CvScalar value=cvRealScalar(1))
.. ocv:pyoldfunction:: cv.SetIdentity(mat, value=1)-> None
:param dst: Matrix to initialize (not necessarily square).
:param mtx: Matrix to initialize (not necessarily square).
:param value: Value to assign to diagonal elements.
@ -2826,7 +2841,7 @@ The function
.. math::
\texttt{dst} (i,j)= \fork{\texttt{value}}{ if $i=j$}{0}{otherwise}
\texttt{mtx} (i,j)= \fork{\texttt{value}}{ if $i=j$}{0}{otherwise}
The function can also be emulated using the matrix initializers and the matrix expressions: ::
@ -2898,11 +2913,12 @@ solveCubic
--------------
Finds the real roots of a cubic equation.
.. ocv:function:: void solveCubic(InputArray coeffs, OutputArray roots)
.. ocv:function:: int solveCubic( InputArray coeffs, OutputArray roots )
.. ocv:pyfunction:: cv2.solveCubic(coeffs[, roots]) -> retval, roots
.. ocv:cfunction:: void cvSolveCubic(const CvArr* coeffs, CvArr* roots)
.. ocv:cfunction:: int cvSolveCubic( const CvMat* coeffs, CvMat* roots )
.. ocv:pyoldfunction:: cv.SolveCubic(coeffs, roots)-> None
:param coeffs: Equation coefficients, an array of 3 or 4 elements.
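For example, solving :math:`x^3 - 6 x^2 + 11 x - 6 = 0` (whose roots are 1, 2 and 3) can be sketched as follows (``using namespace cv`` is assumed): ::

    Mat coeffs = (Mat_<double>(1, 4) << 1, -6, 11, -6);
    Mat roots;
    int nroots = solveCubic(coeffs, roots);   // nroots == 3; roots holds 1, 2 and 3 in some order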
@ -2931,7 +2947,7 @@ solvePoly
---------
Finds the real or complex roots of a polynomial equation.
.. ocv:function:: void solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300)
.. ocv:function:: double solvePoly( InputArray coeffs, OutputArray roots, int maxIters=300 )
.. ocv:pyfunction:: cv2.solvePoly(coeffs[, roots[, maxIters]]) -> retval, roots
@ -3022,24 +3038,25 @@ split
-----
Divides a multi-channel array into several single-channel arrays.
.. ocv:function:: void split(const Mat& mtx, Mat* mv)
.. ocv:function:: void split( const Mat& src, Mat* mvbegin )
.. ocv:function:: void split(const Mat& mtx, vector<Mat>& mv)
.. ocv:function:: void split( InputArray m, OutputArrayOfArrays mv )
.. ocv:pyfunction:: cv2.split(m, mv) -> None
.. ocv:pyfunction:: cv2.split(m[, mv]) -> mv
.. ocv:cfunction:: void cvSplit(const CvArr* src, CvArr* dst0, CvArr* dst1, CvArr* dst2, CvArr* dst3)
.. ocv:pyoldfunction:: cv.Split(src, dst0, dst1, dst2, dst3)-> None
:param mtx: Source multi-channel array.
:param src: Source multi-channel array.
:param mv: Destination array or vector of arrays. In the first variant of the function the number of arrays must match ``mtx.channels()`` . The arrays themselves are reallocated, if needed.
:param mv: Destination array or vector of arrays. In the first variant of the function the number of arrays must match ``src.channels()`` . The arrays themselves are reallocated, if needed.
The functions ``split`` split a multi-channel array into separate single-channel arrays:
.. math::
\texttt{mv} [c](I) = \texttt{mtx} (I)_c
\texttt{mv} [c](I) = \texttt{src} (I)_c
If you need to extract a single channel or do some other sophisticated channel permutation, use
:ocv:func:`mixChannels` .
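A minimal sketch splitting a 3-channel matrix into planes (``using namespace cv`` is assumed): ::

    Mat bgr(2, 2, CV_8UC3, Scalar(10, 20, 30));
    std::vector<Mat> planes;
    split(bgr, planes);   // planes[0] is all 10s, planes[1] all 20s, planes[2] all 30s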
@ -3085,12 +3102,12 @@ Calculates the per-element difference between two arrays or array and a scalar.
.. ocv:pyfunction:: cv2.subtract(src1, src2[, dst[, mask[, dtype]]]) -> dst
.. ocv:cfunction:: void cvSub(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvSubRS(const CvArr* src1, CvScalar src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvSubS(const CvArr* src1, CvScalar src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL )
.. ocv:cfunction:: void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Sub(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.SubRS(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.SubS(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.Sub(src1, src2, dst, mask=None) -> None
.. ocv:pyoldfunction:: cv.SubRS(src, value, dst, mask=None) -> None
.. ocv:pyoldfunction:: cv.SubS(src, value, dst, mask=None) -> None
:param src1: First source array or a scalar.
@ -3176,9 +3193,9 @@ The constructors.
.. ocv:function:: SVD::SVD()
.. ocv:function:: SVD::SVD( InputArray A, int flags=0 )
.. ocv:function:: SVD::SVD( InputArray src, int flags=0 )
:param A: Decomposed matrix.
:param src: Decomposed matrix.
:param flags: Operation flags.
@ -3222,9 +3239,9 @@ Performs SVD of a matrix
.. ocv:pyfunction:: cv2.SVDecomp(src[, w[, u[, vt[, flags]]]]) -> w, u, vt
.. ocv:cfunction:: void cvSVD( CvArr* src, CvArr* w, CvArr* u=NULL, CvArr* v=NULL, int flags=0)
.. ocv:cfunction:: void cvSVD( CvArr* A, CvArr* W, CvArr* U=NULL, CvArr* V=NULL, int flags=0 )
.. ocv:pyoldfunction:: cv.SVD(src, w, u=None, v=None, flags=0)-> None
.. ocv:pyoldfunction:: cv.SVD(A, W, U=None, V=None, flags=0) -> None
:param src: Decomposed matrix
@ -3232,7 +3249,7 @@ Performs SVD of a matrix
:param u: Computed left singular vectors
:param v: Computed right singular vectors
:param V: Computed right singular vectors
:param vt: Transposed matrix of right singular values
@ -3272,15 +3289,15 @@ Performs a singular value back substitution.
.. ocv:pyfunction:: cv2.SVBackSubst(w, u, vt, rhs[, dst]) -> dst
.. ocv:cfunction:: void cvSVBkSb( const CvArr* w, const CvArr* u, const CvArr* v, const CvArr* rhs, CvArr* dst, int flags)
.. ocv:cfunction:: void cvSVBkSb( const CvArr* W, const CvArr* U, const CvArr* V, const CvArr* B, CvArr* X, int flags )
.. ocv:pyoldfunction:: cv.SVBkSb(w, u, v, rhs, dst, flags)-> None
.. ocv:pyoldfunction:: cv.SVBkSb(W, U, V, B, X, flags) -> None
:param w: Singular values
:param u: Left singular vectors
:param v: Right singular vectors
:param V: Right singular vectors
:param vt: Transposed matrix of right singular vectors.
@ -3304,12 +3321,13 @@ sum
---
Calculates the sum of array elements.
.. ocv:function:: Scalar sum(InputArray arr)
.. ocv:function:: Scalar sum( InputArray src )
.. ocv:pyfunction:: cv2.sumElems(arr) -> retval
.. ocv:pyfunction:: cv2.sumElems(src) -> retval
.. ocv:cfunction:: CvScalar cvSum(const CvArr* arr)
.. ocv:pyoldfunction:: cv.Sum(arr)-> CvScalar
.. ocv:pyoldfunction:: cv.Sum(arr) -> scalar
:param arr: Source array that must have from 1 to 4 channels.
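A minimal sketch (``using namespace cv`` is assumed): ::

    Mat img(2, 2, CV_8UC3, Scalar(1, 2, 3));
    Scalar s = sum(img);   // s == (4, 8, 12, 0): per-channel sums over the 4 pixels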
@ -3348,12 +3366,13 @@ trace
-----
Returns the trace of a matrix.
.. ocv:function:: Scalar trace(InputArray mat)
.. ocv:function:: Scalar trace( InputArray mtx )
.. ocv:pyfunction:: cv2.trace(mat) -> retval
.. ocv:pyfunction:: cv2.trace(mtx) -> retval
.. ocv:cfunction:: CvScalar cvTrace(const CvArr* mat)
.. ocv:pyoldfunction:: cv.Trace(mat)-> CvScalar
.. ocv:pyoldfunction:: cv.Trace(mat) -> scalar
:param mat: Source matrix.
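A minimal sketch (``using namespace cv`` is assumed): ::

    Mat m = Mat::eye(3, 3, CV_32FC1) * 2;   // 2 on the diagonal
    Scalar t = trace(m);                    // t[0] == 6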
@ -3369,37 +3388,38 @@ transform
---------
Performs the matrix transformation of every array element.
.. ocv:function:: void transform(InputArray src, OutputArray dst, InputArray mtx )
.. ocv:function:: void transform( InputArray src, OutputArray dst, InputArray m )
.. ocv:pyfunction:: cv2.transform(src, mtx [, dst]) -> dst
.. ocv:pyfunction:: cv2.transform(src, m[, dst]) -> dst
.. ocv:cfunction:: void cvTransform(const CvArr* src, CvArr* dst, const CvMat* mtx, const CvMat* shiftvec=NULL)
.. ocv:pyoldfunction:: cv.Transform(src, dst, mtx, shiftvec=None)-> None
.. ocv:cfunction:: void cvTransform( const CvArr* src, CvArr* dst, const CvMat* transmat, const CvMat* shiftvec=NULL )
:param src: Source array that must have as many channels (1 to 4) as ``mtx.cols`` or ``mtx.cols-1``.
.. ocv:pyoldfunction:: cv.Transform(src, dst, transmat, shiftvec=None)-> None
:param dst: Destination array of the same size and depth as ``src`` . It has as many channels as ``mtx.rows`` .
:param src: Source array that must have as many channels (1 to 4) as ``m.cols`` or ``m.cols-1``.
:param mtx: Transformation ``2x2`` or ``2x3`` floating-point matrix.
:param dst: Destination array of the same size and depth as ``src`` . It has as many channels as ``m.rows`` .
:param shiftvec: Optional translation vector (when ``mtx`` is ``2x2``)
:param m: Transformation ``2x2`` or ``2x3`` floating-point matrix.
:param shiftvec: Optional translation vector (when ``m`` is ``2x2``)
The function ``transform`` performs the matrix transformation of every element of the array ``src`` and stores the results in ``dst`` :
.. math::
\texttt{dst} (I) = \texttt{mtx} \cdot \texttt{src} (I)
\texttt{dst} (I) = \texttt{m} \cdot \texttt{src} (I)
(when ``mtx.cols=src.channels()`` ), or
(when ``m.cols=src.channels()`` ), or
.. math::
\texttt{dst} (I) = \texttt{mtx} \cdot [ \texttt{src} (I); 1]
\texttt{dst} (I) = \texttt{m} \cdot [ \texttt{src} (I); 1]
(when ``mtx.cols=src.channels()+1`` )
(when ``m.cols=src.channels()+1`` )
Every element of the ``N`` -channel array ``src`` is interpreted as an ``N`` -element vector that is transformed using
the ``M x N`` or ``M x (N+1)`` matrix ``mtx``
the ``M x N`` or ``M x (N+1)`` matrix ``m``
to an ``M``-element vector - the corresponding element of the destination array ``dst`` .
The function may be used for geometrical transformation of
@ -93,9 +93,9 @@ Computes the cube root of an argument.
.. ocv:pyfunction:: cv2.cubeRoot(val) -> retval
.. ocv:cfunction:: float cvCbrt(float val)
.. ocv:cfunction:: float cvCbrt( float value )
.. ocv:pyoldfunction:: cv.Cbrt(val)-> float
.. ocv:pyoldfunction:: cv.Cbrt(value)-> float
:param val: A function argument.
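A minimal sketch (``using namespace cv`` is assumed): ::

    float r = cubeRoot(-27.f);   // r == -3; negative arguments are handled correctly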
@ -182,7 +182,7 @@ Signals an error and raises an exception.
.. ocv:function:: void error( const Exception& exc )
.. ocv:cfunction:: int cvError( int status, const char* funcName, const char* err_msg, const char* filename, int line )
.. ocv:cfunction:: void cvError( int status, const char* func_name, const char* err_msg, const char* file_name, int line )
:param exc: Exception to throw.
@ -209,7 +209,7 @@ The macro ``CV_Error_`` can be used to construct an error message on-fly to incl
Exception
---------
.. ocv:class:: Exception
.. ocv:class:: Exception : public std::exception
Exception class passed to an error. ::
@ -244,7 +244,8 @@ fastMalloc
--------------
Allocates an aligned memory buffer.
.. ocv:function:: void* fastMalloc(size_t size)
.. ocv:function:: void* fastMalloc( size_t bufSize )
.. ocv:cfunction:: void* cvAlloc( size_t size )
:param size: Allocated buffer size.
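A minimal sketch pairing ``fastMalloc`` with ``fastFree`` (``using namespace cv`` is assumed): ::

    float* buf = (float*)fastMalloc(1024 * sizeof(float));   // aligned buffer
    // ... use buf ...
    fastFree(buf);                                           // buffers from fastMalloc must be released with fastFree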
@ -276,7 +277,7 @@ Returns a text string formatted using the ``printf``\ -like expression.
:param fmt: ``printf`` -compatible formatting specifiers.
The function acts like ``sprintf`` but forms and returns an STL string. It can be used to form an error message in the
:ocv:func:`Exception` constructor.
:ocv:class:`Exception` constructor.
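A minimal sketch (``using namespace cv`` is assumed): ::

    Mat m(2, 4, CV_8UC1);
    std::string msg = format("unexpected matrix size %dx%d", m.rows, m.cols);
    // msg == "unexpected matrix size 2x4"; it can be passed to the Exception constructor or CV_Error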
@ -286,7 +287,7 @@ Returns true if the specified feature is supported by the host hardware.
.. ocv:function:: bool checkHardwareSupport(int feature)
.. ocv:cfunction:: int cvCheckHardwareSupport(int feature)
.. ocv:pyfunction:: checkHardwareSupport(feature) -> Bool
.. ocv:pyfunction:: cv2.checkHardwareSupport(feature) -> retval
:param feature: The feature of interest, one of:
@ -419,13 +420,13 @@ setUseOptimized
-----------------
Enables or disables the optimized code.
.. ocv:function:: void setUseOptimized(bool onoff)
.. ocv:function:: int cvUseOptimized( int on_off )
.. ocv:pyfunction:: cv2.setUseOptimized(onoff) -> None
.. ocv:cfunction:: int cvUseOptimized( int onoff )
.. ocv:cfunction:: int cvUseOptimized( int on_off )
:param onoff: The boolean flag specifying whether the optimized code should be used (``onoff=true``) or not (``onoff=false``).
:param on_off: The boolean flag specifying whether the optimized code should be used (``on_off=true``) or not (``on_off=false``).
The function can be used to dynamically turn on and off optimized code (code that uses SSE2, AVX, and other instructions on the platforms that support it). It sets a global flag that is further checked by OpenCV functions. Since the flag is not checked in the inner OpenCV loops, it is only safe to call the function on the very top level in your application where you can be sure that no other OpenCV function is currently executed.
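For example, to temporarily compare against the plain code paths (``using namespace cv`` is assumed): ::

    setUseOptimized(false);          // switch to the plain C++ branches
    bool enabled = useOptimized();   // now returns false
    // ... run a reference computation ...
    setUseOptimized(true);           // re-enable SSE2/AVX and other optimized branches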
@ -663,7 +663,7 @@ FileNodeIterator::operator +=
-----------------------------
Moves iterator forward by the specified offset.
.. ocv:function:: FileNodeIterator& FileNodeIterator::operator += (int ofs)
.. ocv:function:: FileNodeIterator& FileNodeIterator::operator +=( int ofs )
:param ofs: Offset (possibly negative) to move the iterator.
@ -672,7 +672,7 @@ FileNodeIterator::operator -=
-----------------------------
Moves iterator backward by the specified offset (possibly negative).
.. ocv:function:: FileNodeIterator& FileNodeIterator::operator -= (int ofs)
.. ocv:function:: FileNodeIterator& FileNodeIterator::operator -=( int ofs )
:param ofs: Offset (possibly negative) to move the iterator.
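A minimal sketch skipping ahead in a sequence node; the file name and the node name are hypothetical (``using namespace cv`` is assumed): ::

    FileStorage fs("data.yml", FileStorage::READ);   // hypothetical file
    FileNode values = fs["values"];                  // hypothetical sequence node
    FileNodeIterator it = values.begin();
    it += 2;                                         // skip the first two elements
    double third = (double)*it;                      // read the third one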
@ -897,7 +897,7 @@ class CV_EXPORTS RotatedRect
public:
//! various constructors
RotatedRect();
RotatedRect(const Point2f& _center, const Size2f& _size, float _angle);
RotatedRect(const Point2f& center, const Size2f& size, float angle);
RotatedRect(const CvBox2D& box);
//! returns 4 vertices of the rectangle
@ -1634,22 +1634,22 @@ public:
Mat();
//! constructs 2D matrix of the specified size and type
// (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
Mat(int _rows, int _cols, int _type);
Mat(Size _size, int _type);
Mat(int rows, int cols, int type);
Mat(Size size, int type);
//! constructs 2D matrix and fills it with the specified value s.
Mat(int _rows, int _cols, int _type, const Scalar& _s);
Mat(Size _size, int _type, const Scalar& _s);
Mat(int rows, int cols, int type, const Scalar& s);
Mat(Size size, int type, const Scalar& s);
//! constructs n-dimensional matrix
Mat(int _ndims, const int* _sizes, int _type);
Mat(int _ndims, const int* _sizes, int _type, const Scalar& _s);
Mat(int ndims, const int* sizes, int type);
Mat(int ndims, const int* sizes, int type, const Scalar& s);
//! copy constructor
Mat(const Mat& m);
//! constructor for matrix headers pointing to user-allocated data
Mat(int _rows, int _cols, int _type, void* _data, size_t _step=AUTO_STEP);
Mat(Size _size, int _type, void* _data, size_t _step=AUTO_STEP);
Mat(int _ndims, const int* _sizes, int _type, void* _data, const size_t* _steps=0);
Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP);
Mat(Size size, int type, void* data, size_t step=AUTO_STEP);
Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0);
//! creates a matrix header for a part of the bigger matrix
Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all());
@ -1664,11 +1664,9 @@ public:
//! builds matrix from std::vector with or without copying the data
template<typename _Tp> explicit Mat(const vector<_Tp>& vec, bool copyData=false);
//! builds matrix from cv::Vec; the data is copied by default
template<typename _Tp, int n> explicit Mat(const Vec<_Tp, n>& vec,
bool copyData=true);
template<typename _Tp, int n> explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true);
//! builds matrix from cv::Matx; the data is copied by default
template<typename _Tp, int m, int n> explicit Mat(const Matx<_Tp, m, n>& mtx,
bool copyData=true);
template<typename _Tp, int m, int n> explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true);
//! builds matrix from a 2D point
template<typename _Tp> explicit Mat(const Point_<_Tp>& pt, bool copyData=true);
//! builds matrix from a 3D point
@ -1721,8 +1719,8 @@ public:
Mat& setTo(InputArray value, InputArray mask=noArray());
//! creates alternative matrix header for the same data, with different
// number of channels and/or different number of rows. see cvReshape.
Mat reshape(int _cn, int _rows=0) const;
Mat reshape(int _cn, int _newndims, const int* _newsz) const;
Mat reshape(int cn, int rows=0) const;
Mat reshape(int cn, int newndims, const int* newsz) const;
//! matrix transposition by means of matrix expressions
MatExpr t() const;
@ -1748,9 +1746,9 @@ public:
//! allocates new matrix data unless the matrix already has specified size and type.
// previous data is unreferenced if needed.
void create(int _rows, int _cols, int _type);
void create(Size _size, int _type);
void create(int _ndims, const int* _sizes, int _type);
void create(int rows, int cols, int type);
void create(Size size, int type);
void create(int ndims, const int* sizes, int type);
//! increases the reference counter; use with care to avoid memleaks
void addref();
@ -1966,7 +1964,7 @@ public:
enum { UNIFORM=0, NORMAL=1 };
RNG();
RNG(uint64 _state);
RNG(uint64 state);
//! updates the state and returns the next 32-bit unsigned integer random number
unsigned next();
@ -1976,7 +1974,7 @@ public:
operator short();
operator unsigned();
//! returns a random integer sampled uniformly from [0, N).
unsigned operator()(unsigned N);
unsigned operator ()(unsigned N);
unsigned operator ()();
operator int();
operator float();
@ -4121,9 +4119,9 @@ public:
//! moves iterator to the previous node
FileNodeIterator operator -- (int);
//! moves iterator forward by the specified offset (possibly negative)
FileNodeIterator& operator += (int);
FileNodeIterator& operator += (int ofs);
//! moves iterator backward by the specified offset (possibly negative)
FileNodeIterator& operator -= (int);
FileNodeIterator& operator -= (int ofs);
//! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format
FileNodeIterator& readRaw( const string& fmt, uchar* vec,
FileNodeIterator& readRaw( const string& fmt, uchar* vec,
@ -1130,7 +1130,7 @@ CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z )
/******************************** CvSize's & CvBox **************************************/
typedef struct
typedef struct CvSize
{
int width;
int height;
@ -13,7 +13,7 @@ descriptor extractors inherit the
DescriptorExtractor
-------------------
.. ocv:class:: DescriptorExtractor
.. ocv:class:: DescriptorExtractor : public Algorithm
Abstract base class for computing descriptors for image keypoints. ::
@ -65,25 +65,6 @@ Computes the descriptors for a set of keypoints detected in an image (first vari
:param descriptors: Computed descriptors. In the second variant of the method, ``descriptors[i]`` are the descriptors computed for the keypoints in ``keypoints[i]``. Row ``j`` of ``descriptors`` (or of ``descriptors[i]``) is the descriptor for the ``j``-th keypoint.
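A minimal sketch computing ORB descriptors for keypoints detected on a synthetic image (``using namespace cv`` and ``opencv2/features2d/features2d.hpp`` are assumed): ::

    Mat img(480, 640, CV_8UC1);
    randu(img, Scalar::all(0), Scalar::all(255));      // synthetic test image
    Ptr<FeatureDetector> detector = FeatureDetector::create("ORB");
    Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    detector->detect(img, keypoints);
    extractor->compute(img, keypoints, descriptors);   // one row per kept keypoint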
DescriptorExtractor::read
-----------------------------
Reads the object of a descriptor extractor from a file node.
.. ocv:function:: void DescriptorExtractor::read( const FileNode& fn )
:param fn: File node from which the detector is read.
DescriptorExtractor::write
------------------------------
Writes the object of a descriptor extractor to a file storage.
.. ocv:function:: void DescriptorExtractor::write( FileStorage& fs ) const
:param fs: File storage where the detector is written.
DescriptorExtractor::create
-------------------------------
@ -107,7 +88,7 @@ for example: ``"OpponentSIFT"`` .
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor
.. ocv:class:: OpponentColorDescriptorExtractor : public DescriptorExtractor
Class adapting a descriptor extractor to compute descriptors in the Opponent Color Space
(refer to Van de Sande et al., CGIV 2008 *Color Descriptors for Object Category Recognition*).
@ -132,7 +113,7 @@ them into a single color descriptor. ::
BriefDescriptorExtractor
------------------------
.. ocv:class:: BriefDescriptorExtractor
.. ocv:class:: BriefDescriptorExtractor : public DescriptorExtractor
Class for computing BRIEF descriptors described in a paper of Calonder M., Lepetit V.,
Strecha C., Fua P. *BRIEF: Binary Robust Independent Elementary Features* ,
@ -11,7 +11,7 @@ descriptor matchers inherit the
DMatch
------
.. ocv:class:: DMatch
.. ocv:struct:: DMatch
Class for matching keypoint descriptors: query descriptor index,
train descriptor index, train image index, and distance between descriptors. ::
@ -40,7 +40,7 @@ train descriptor index, train image index, and distance between descriptors. ::
DescriptorMatcher
-----------------
.. ocv:class:: DescriptorMatcher
.. ocv:class:: DescriptorMatcher : public Algorithm
Abstract base class for matching keypoint descriptors. It has two groups
of match methods: for matching descriptors of an image with another image or
@ -227,7 +227,7 @@ DescriptorMatcher::clone
----------------------------
Clones the matcher.
.. ocv:function:: Ptr<DescriptorMatcher> DescriptorMatcher::clone( bool emptyTrainData ) const
.. ocv:function:: Ptr<DescriptorMatcher> DescriptorMatcher::clone( bool emptyTrainData=false )
:param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters but with empty train data.
@ -258,25 +258,25 @@ Creates a descriptor matcher of a given type with the default parameters (using
BFMatcher
-----------------
.. ocv:class::BFMatcher
.. ocv:class:: BFMatcher : public DescriptorMatcher
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets. ::
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets.
BFMatcher::BFMatcher
--------------------
Brute-force matcher constructor.
.. ocv:function:: BFMatcher::BFMatcher( int distanceType, bool crossCheck=false )
.. ocv:function:: BFMatcher::BFMatcher( int normType, bool crossCheck=false )
:param distanceType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
:param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
:param crossCheck: If it is false, this is the default ``BFMatcher`` behaviour: for each query descriptor it finds the k nearest neighbours. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for the ``i``-th query descriptor the ``j``-th descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. This technique usually produces the best results with a minimal number of outliers when there are enough matches. It is an alternative to the ratio test used by D. Lowe in the SIFT paper.
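A minimal sketch matching two binary descriptor sets; ``descriptors1`` and ``descriptors2`` stand for ``CV_8U`` ORB/BRIEF descriptor matrices computed elsewhere (``using namespace cv`` is assumed): ::

    Mat descriptors1, descriptors2;          // assumed to be filled beforehand
    BFMatcher matcher(NORM_HAMMING, true);   // cross-check enabled
    std::vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);   // best consistent match per query descriptor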
FlannBasedMatcher
-----------------
.. ocv:class:: FlannBasedMatcher
.. ocv:class:: FlannBasedMatcher : public DescriptorMatcher
Flann-based descriptor matcher. This matcher trains :ocv:class:`flann::Index_` on a train descriptor collection and calls its nearest search methods to find the best matches. So, this matcher may be faster than the brute force matcher when matching a large train collection. ``FlannBasedMatcher`` does not support masking permissible matches of descriptor sets because ``flann::Index`` does not support this. ::
@ -12,7 +12,7 @@ KeyPoint
--------
.. ocv:class:: KeyPoint
Data structure for salient point detectors.
Data structure for salient point detectors.
.. ocv:member:: Point2f pt
@ -48,7 +48,7 @@ The keypoint constructors
.. ocv:function:: KeyPoint::KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1)
.. ocv:pyfunction:: cv2.KeyPoint(x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]) -> <KeyPoint object>
.. ocv:pyfunction:: cv2.KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) -> <KeyPoint object>
:param x: x-coordinate of the keypoint
@ -69,7 +69,7 @@ The keypoint constructors
FeatureDetector
---------------
.. ocv:class:: FeatureDetector
.. ocv:class:: FeatureDetector : public Algorithm
Abstract base class for 2D image feature detectors. ::
@ -112,22 +112,6 @@ Detects keypoints in an image (first variant) or image set (second variant).
:param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``.
FeatureDetector::read
-------------------------
Reads a feature detector object from a file node.
.. ocv:function:: void FeatureDetector::read( const FileNode& fn )
:param fn: File node from which the detector is read.
FeatureDetector::write
--------------------------
Writes a feature detector object to a file storage.
.. ocv:function:: void FeatureDetector::write( FileStorage& fs ) const
:param fs: File storage where the detector is written.
FeatureDetector::create
---------------------------
Creates a feature detector by its name.
@ -156,7 +140,7 @@ for example: ``"GridFAST"``, ``"PyramidSTAR"`` .
FastFeatureDetector
-------------------
.. ocv:class:: FastFeatureDetector
.. ocv:class:: FastFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:func:`FAST` method. ::
@ -173,7 +157,7 @@ Wrapping class for feature detection using the
GoodFeaturesToTrackDetector
---------------------------
.. ocv:class:: GoodFeaturesToTrackDetector
.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:func:`goodFeaturesToTrack` function. ::
@ -211,7 +195,7 @@ Wrapping class for feature detection using the
MserFeatureDetector
-------------------
.. ocv:class:: MserFeatureDetector
.. ocv:class:: MserFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:class:`MSER` class. ::
@ -233,7 +217,7 @@ Wrapping class for feature detection using the
StarFeatureDetector
-------------------
.. ocv:class:: StarFeatureDetector
.. ocv:class:: StarFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:class:`StarDetector` class. ::
@ -252,7 +236,7 @@ Wrapping class for feature detection using the
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector
.. ocv:class:: DenseFeatureDetector : public FeatureDetector
Class for generation of image features which are distributed densely and regularly over the image. ::
@ -279,7 +263,7 @@ The detector generates several levels (in the amount of ``featureScaleLevels``)
SimpleBlobDetector
-------------------
.. ocv:class:: SimpleBlobDetector
.. ocv:class:: SimpleBlobDetector : public FeatureDetector
Class for extracting blobs from an image. ::
@ -344,7 +328,7 @@ Default values of parameters are tuned to extract dark circular blobs.
GridAdaptedFeatureDetector
--------------------------
.. ocv:class:: GridAdaptedFeatureDetector
.. ocv:class:: GridAdaptedFeatureDetector : public FeatureDetector
Class adapting a detector to partition the source image into a grid and detect points in each cell. ::
@ -369,7 +353,7 @@ Class adapting a detector to partition the source image into a grid and detect p
PyramidAdaptedFeatureDetector
-----------------------------
.. ocv:class:: PyramidAdaptedFeatureDetector
.. ocv:class:: PyramidAdaptedFeatureDetector : public FeatureDetector
Class adapting a detector to detect points over multiple levels of a Gaussian pyramid. Consider using this class for detectors that are not inherently scaled. ::
@ -387,7 +371,7 @@ Class adapting a detector to detect points over multiple levels of a Gaussian py
DynamicAdaptedFeatureDetector
-----------------------------
.. ocv:class:: DynamicAdaptedFeatureDetector
.. ocv:class:: DynamicAdaptedFeatureDetector : public FeatureDetector
Adaptively adjusting detector that iteratively detects features until the desired number is found. ::
@ -431,7 +415,7 @@ DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector
----------------------------------------------------------------
The constructor
.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features, int max_features, int max_iters )
.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 )
:param adjuster: :ocv:class:`AdjusterAdapter` that detects features and adjusts parameters.
@ -443,7 +427,7 @@ The constructor
AdjusterAdapter
---------------
.. ocv:class:: AdjusterAdapter
.. ocv:class:: AdjusterAdapter : public FeatureDetector
Class providing an interface for adjusting parameters of a feature detector. This interface is used by :ocv:class:`DynamicAdaptedFeatureDetector` . It is a wrapper for :ocv:class:`FeatureDetector` that enables adjusting parameters after feature detection. ::
@ -522,7 +506,7 @@ Creates an adjuster adapter by name
FastAdjuster
------------
.. ocv:class:: FastAdjuster
.. ocv:class:: FastAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for :ocv:class:`FastFeatureDetector`. This class decreases or increases the threshold value by 1. ::
@ -535,7 +519,7 @@ FastAdjuster
StarAdjuster
------------
.. ocv:class:: StarAdjuster
.. ocv:class:: StarAdjuster : public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for :ocv:class:`StarFeatureDetector`. This class adjusts the ``responseThreshold`` of ``StarFeatureDetector``. ::
@ -130,7 +130,7 @@ GenericDescriptorMatcher::isMaskSupported
---------------------------------------------
Returns ``true`` if a generic descriptor matcher supports masking permissible matches.
.. ocv:function:: void GenericDescriptorMatcher::isMaskSupported()
.. ocv:function:: bool GenericDescriptorMatcher::isMaskSupported()
@ -231,7 +231,7 @@ GenericDescriptorMatcher::clone
-----------------------------------
Clones the matcher.
.. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData ) const
.. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData=false ) const
:param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies
both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters
@ -240,7 +240,7 @@ Clones the matcher.
VectorDescriptorMatcher
-----------------------
.. ocv:class:: VectorDescriptorMatcher
.. ocv:class:: VectorDescriptorMatcher : public GenericDescriptorMatcher
Class used for matching descriptors that can be described as vectors in a finite-dimensional space. ::
@ -9,7 +9,7 @@ Draws the found matches of keypoints from two images.
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char>>& matchesMask= vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
:param img1: First source image.
@ -65,13 +65,13 @@ drawKeypoints
-----------------
Draws keypoints.
.. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImg, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
:param image: Source image.
:param keypoints: Keypoints from the source image.
:param outImg: Output image. Its content depends on the ``flags`` value defining what is drawn in the output image. See possible ``flags`` bit values below.
:param outImage: Output image. Its content depends on the ``flags`` value defining what is drawn in the output image. See possible ``flags`` bit values below.
:param color: Color of keypoints.
@ -24,7 +24,7 @@ Detects corners using the FAST algorithm by [Rosten06]_.
MSER
----
.. ocv:class:: MSER
.. ocv:class:: MSER : public FeatureDetector
Maximally stable extremal region extractor. ::
@ -50,7 +50,7 @@ http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http:/
ORB
---
.. ocv:class:: ORB
.. ocv:class:: ORB : public Feature2D
Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation).
@ -60,8 +60,6 @@ ORB::ORB
--------
The ORB constructor
.. ocv:function:: ORB::ORB()
.. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31)
:param nfeatures: The maximum number of features to retain.
@ -54,7 +54,7 @@ BOWTrainer::descripotorsCount
---------------------------------
Returns the count of all descriptors stored in the training set.
.. ocv:function:: const vector<Mat>& BOWTrainer::descripotorsCount() const
.. ocv:function:: int BOWTrainer::descripotorsCount() const
@ -72,7 +72,7 @@ The vocabulary consists of cluster centers. So, this method returns the vocabula
BOWKMeansTrainer
----------------
.. ocv:class:: BOWKMeansTrainer
.. ocv:class:: BOWKMeansTrainer : public BOWTrainer
:ocv:func:`kmeans` -based class to train visual vocabulary using the *bag of visual words* approach.
::
@ -276,7 +276,7 @@ public:
enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31,
int firstLevel = 0, int WTA_K=2, int scoreType=0, int patchSize=31 );
int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31 );
// returns the descriptor size in bytes
int descriptorSize() const;
@ -588,13 +588,13 @@ class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector
{
public:
/** \param adjaster an AdjusterAdapter that will do the detection and parameter adjustment
/** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment
* \param max_features the maximum desired number of features
* \param max_iters the maximum number of times to try to adjust the feature detector params
* for the FastAdjuster this can be high, but with Star or Surf this can get time consuming
* \param min_features the minimum desired features
*/
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjaster, int min_features=400, int max_features=500, int max_iters=5 );
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 );
virtual bool empty() const;
@ -1158,9 +1158,9 @@ public:
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
// Reads matcher object from a file node
virtual void read( const FileNode& );
virtual void read( const FileNode& fn );
// Writes matcher object to a file storage
virtual void write( FileStorage& ) const;
virtual void write( FileStorage& fs ) const;
// Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty)
virtual bool empty() const;
@ -289,7 +289,7 @@ Enables the :ocv:class:`gpu::StereoConstantSpaceBP` constructors.
.. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP, int iters = DEFAULT_ITERS, int levels = DEFAULT_LEVELS, int nr_plane = DEFAULT_NR_PLANE, int msg_type = CV_32F)
.. ocv:function:: StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th = 0, int msg_type = CV_32F)
.. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th = 0, int msg_type = CV_32F)
:param ndisp: Number of disparities.
@ -338,7 +338,7 @@ Blocks the current CPU thread until all operations in the stream are complete.
gpu::StreamAccessor
-------------------
.. ocv:class:: gpu::StreamAccessor
.. ocv:struct:: gpu::StreamAccessor
Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is declared in ``stream_accessor.hpp`` because it is the only public header that depends on the CUDA Runtime API. Including it brings a dependency to your code. ::
@ -346,19 +346,19 @@ Detects keypoints and computes descriptors for them.
gpu::ORB_GPU::downloadKeypoints
gpu::ORB_GPU::downloadKeyPoints
-------------------------------------
Downloads keypoints from GPU to CPU memory.
.. ocv:function:: void gpu::ORB_GPU::downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints)
.. ocv:function:: void gpu::ORB_GPU::downloadKeyPoints( GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints )
gpu::ORB_GPU::convertKeypoints
gpu::ORB_GPU::convertKeyPoints
-------------------------------------
Converts keypoints from GPU representation to vector of ``KeyPoint``.
.. ocv:function:: void gpu::ORB_GPU::convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints)
.. ocv:function:: void gpu::ORB_GPU::convertKeyPoints( Mat& d_keypoints, std::vector<KeyPoint>& keypoints )
@ -195,7 +195,7 @@ Creates a normalized 2D box filter.
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createBoxFilter_GPU(int srcType, int dstType, const Size& ksize, const Point& anchor = Point(-1,-1))
.. ocv:function:: Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1))
.. ocv:function:: Ptr<BaseFilter_GPU> gpu::getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1))
:param srcType: Input image type supporting ``CV_8UC1`` and ``CV_8UC4`` .
@ -285,7 +285,9 @@ gpu::erode
--------------
Erodes an image by using a specific structuring element.
.. ocv:function:: void gpu::erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::erode( const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor=Point(-1, -1), int iterations=1 )
.. ocv:function:: void gpu::erode( const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, Point anchor=Point(-1, -1), int iterations=1, Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC1`` and ``CV_8UC4`` types are supported.
@ -309,7 +311,9 @@ gpu::dilate
---------------
Dilates an image by using a specific structuring element.
.. ocv:function:: void gpu::dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::dilate( const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor=Point(-1, -1), int iterations=1 )
.. ocv:function:: void gpu::dilate( const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, Point anchor=Point(-1, -1), int iterations=1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` and ``CV_8UC4`` source types are supported.
@ -333,7 +337,9 @@ gpu::morphologyEx
---------------------
Applies an advanced morphological operation to an image.
.. ocv:function:: void gpu::morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::morphologyEx( const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor=Point(-1, -1), int iterations=1 )
.. ocv:function:: void gpu::morphologyEx( const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, GpuMat& buf1, GpuMat& buf2, Point anchor=Point(-1, -1), int iterations=1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` and ``CV_8UC4`` source types are supported.
@ -371,8 +377,6 @@ Creates a non-separable linear filter.
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
.. ocv:function:: Ptr<BaseFilter_GPU> gpu::getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, const Size& ksize, Point anchor = Point(-1, -1))
:param srcType: Input image type. Supports ``CV_8U`` , ``CV_16U`` and ``CV_32F`` one and four channel image.
:param dstType: Output image type. The same type as ``src`` is supported.
@ -441,7 +445,7 @@ gpu::getLinearRowFilter_GPU
-------------------------------
Creates a primitive row filter with the specified kernel.
.. ocv:function:: Ptr<BaseRowFilter_GPU> gpu::getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel, int anchor = -1, int borderType = BORDER_CONSTANT)
.. ocv:function:: Ptr<BaseRowFilter_GPU> gpu::getLinearRowFilter_GPU( int srcType, int bufType, const Mat& rowKernel, int anchor=-1, int borderType=BORDER_DEFAULT )
:param srcType: Source array type. Only ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -467,7 +471,7 @@ gpu::getLinearColumnFilter_GPU
----------------------------------
Creates a primitive column filter with the specified kernel.
.. ocv:function:: Ptr<BaseColumnFilter_GPU> gpu::getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel, int anchor = -1, int borderType = BORDER_CONSTANT)
.. ocv:function:: Ptr<BaseColumnFilter_GPU> gpu::getLinearColumnFilter_GPU( int bufType, int dstType, const Mat& columnKernel, int anchor=-1, int borderType=BORDER_DEFAULT )
:param bufType: Intermediate buffer type with as many channels as ``dstType`` .
@ -517,7 +521,10 @@ gpu::sepFilter2D
--------------------
Applies a separable 2D linear filter to an image.
.. ocv:function:: void gpu::sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::sepFilter2D( const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, Point anchor=Point(-1,-1), int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::sepFilter2D( const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, GpuMat& buf, Point anchor=Point(-1,-1), int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -569,7 +576,9 @@ gpu::Sobel
--------------
Applies the generalized Sobel operator to an image.
.. ocv:function:: void gpu::Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::Sobel( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize=3, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::Sobel( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, int ksize=3, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -599,7 +608,9 @@ gpu::Scharr
---------------
Calculates the first x- or y- image derivative using the Scharr operator.
.. ocv:function:: void gpu::Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::Scharr( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::Scharr( const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, double scale=1, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
@ -627,15 +638,15 @@ gpu::createGaussianFilter_GPU
---------------------------------
Creates a Gaussian filter engine.
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createGaussianFilter_GPU(int type, Size ksize, double sigmaX, double sigmaY = 0, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1)
.. ocv:function:: Ptr<FilterEngine_GPU> gpu::createGaussianFilter_GPU( int type, Size ksize, double sigma1, double sigma2=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
:param type: Source and destination image type. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` are supported.
:param ksize: Aperture size. See :ocv:func:`getGaussianKernel` for details.
:param sigmaX: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` for details.
:param sigma1: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` for details.
:param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` .
:param sigma2: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigma2}\leftarrow\texttt{sigma1}` .
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
@ -649,17 +660,19 @@ gpu::GaussianBlur
---------------------
Smooths an image using the Gaussian filter.
.. ocv:function:: void gpu::GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigmaX, double sigmaY = 0, int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::GaussianBlur( const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1 )
.. ocv:function:: void gpu::GaussianBlur( const GpuMat& src, GpuMat& dst, Size ksize, GpuMat& buf, double sigma1, double sigma2=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_16SC1`` , ``CV_16SC2`` , ``CV_16SC3`` , ``CV_32SC1`` , ``CV_32FC1`` source types are supported.
:param dst: Destination image with the same size and type as ``src`` .
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. If they are zeros, they are computed from ``sigmaX`` and ``sigmaY`` .
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. If they are zeros, they are computed from ``sigma1`` and ``sigma2`` .
:param sigmaX: Gaussian kernel standard deviation in X direction.
:param sigma1: Gaussian kernel standard deviation in X direction.
:param sigmaY: Gaussian kernel standard deviation in Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize`` , ``sigmaX`` , and ``sigmaY`` .
:param sigma2: Gaussian kernel standard deviation in Y direction. If ``sigma2`` is zero, it is set to be equal to ``sigma1`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modifications of this semantics, it is recommended to specify all of ``ksize`` , ``sigma1`` , and ``sigma2`` .
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
@ -9,7 +9,7 @@ gpu::meanShiftFiltering
---------------------------
Performs mean-shift filtering for each point of the source image.
.. ocv:function:: void gpu::meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1))
.. ocv:function:: void gpu::meanShiftFiltering( const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
@ -29,7 +29,7 @@ gpu::meanShiftProc
----------------------
Performs a mean-shift procedure and stores information about processed points (their colors and positions) in two images.
.. ocv:function:: void gpu::meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1))
.. ocv:function:: void gpu::meanShiftProc( const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
@ -159,7 +159,7 @@ gpu::mulSpectrums
---------------------
Performs a per-element multiplication of two Fourier spectrums.
.. ocv:function:: void gpu::mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false)
.. ocv:function:: void gpu::mulSpectrums( const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream=Stream::Null() )
:param a: First spectrum.
@ -181,7 +181,7 @@ gpu::mulAndScaleSpectrums
-----------------------------
Performs a per-element multiplication of two Fourier spectrums and scales the result.
.. ocv:function:: void gpu::mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false)
.. ocv:function:: void gpu::mulAndScaleSpectrums( const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream=Stream::Null() )
:param a: First spectrum.
@ -205,7 +205,7 @@ gpu::dft
------------
Performs a forward or inverse discrete Fourier transform (1D or 2D) of the floating point matrix.
.. ocv:function:: void gpu::dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0)
.. ocv:function:: void gpu::dft( const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream=Stream::Null() )
:param src: Source matrix (real or complex).
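A minimal sketch (added here for illustration, not part of the reference) of a forward transform of a single-channel floating-point image, assuming a CUDA-enabled build: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat img = cv::imread("input.jpg", CV_LOAD_IMAGE_GRAYSCALE), fimg;
        img.convertTo(fimg, CV_32F);                  // dft operates on 32-bit floating-point data

        cv::gpu::GpuMat d_src(fimg), d_spectrum;
        cv::gpu::dft(d_src, d_spectrum, fimg.size()); // forward transform with default flags

        cv::Mat spectrum;
        d_spectrum.download(spectrum);
        return 0;
    }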
@ -238,7 +238,7 @@ The source matrix should be continuous, otherwise reallocation and data copying
gpu::ConvolveBuf
----------------
.. ocv:class:: gpu::ConvolveBuf
.. ocv:struct:: gpu::ConvolveBuf
Class providing a memory buffer for the :ocv:func:`gpu::convolve` function; it also allows you to adjust some specific parameters. ::
@ -261,7 +261,7 @@ You can use field `user_block_size` to set specific block size for :ocv:func:`gp
gpu::ConvolveBuf::create
------------------------
.. ocv:function:: ConvolveBuf::create(Size image_size, Size templ_size)
.. ocv:function:: gpu::ConvolveBuf::create(Size image_size, Size templ_size)
Constructs a buffer for :ocv:func:`gpu::convolve` function with respective arguments.
@ -272,7 +272,7 @@ Computes a convolution (or cross-correlation) of two images.
.. ocv:function:: void gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr=false)
.. ocv:function:: void gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream &stream = Stream::Null())
.. ocv:function:: void gpu::convolve( const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream=Stream::Null() )
:param image: Source image. Only ``CV_32FC1`` images are supported for now.
@ -282,7 +282,7 @@ Computes a convolution (or cross-correlation) of two images.
:param ccorr: Flags to evaluate cross-correlation instead of convolution.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:class:`gpu::ConvolveBuf`.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::ConvolveBuf`.
:param stream: Stream for the asynchronous version.
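A usage sketch (not part of the reference text) that reuses a ``ConvolveBuf`` across calls; the data here is synthetic and a CUDA-enabled build is assumed: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat image = cv::Mat::ones(512, 512, CV_32FC1);            // only CV_32FC1 is supported
        cv::Mat templ = cv::Mat::ones(31, 31, CV_32FC1) / (31.0 * 31.0);

        cv::gpu::GpuMat d_image(image), d_templ(templ), d_result;
        cv::gpu::ConvolveBuf buf;
        buf.create(image.size(), templ.size());                       // allocate once, reuse across calls

        cv::gpu::convolve(d_image, d_templ, d_result, false, buf);    // false = convolution, not cross-correlation

        cv::Mat result;
        d_result.download(result);
        return 0;
    }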
@ -290,7 +290,7 @@ Computes a convolution (or cross-correlation) of two images.
gpu::MatchTemplateBuf
---------------------
.. ocv:class:: gpu::MatchTemplateBuf
.. ocv:struct:: gpu::MatchTemplateBuf
Class providing memory buffers for the :ocv:func:`gpu::matchTemplate` function; it also allows you to adjust some specific parameters. ::
@ -321,7 +321,7 @@ Computes a proximity map for a raster template and an image where the template i
:param method: Specifies the way to compare the template with the image.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:class:`gpu::MatchTemplateBuf`.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::MatchTemplateBuf`.
:param stream: Stream for the asynchronous version.
@ -346,7 +346,7 @@ gpu::remap
--------------
Applies a generic geometrical transformation to an image.
.. ocv:function:: void gpu::remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, int interpolation, int borderMode = BORDER_CONSTANT, const Scalar& borderValue = Scalar(), Stream& stream = Stream::Null())
.. ocv:function:: void gpu::remap( const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, int interpolation, int borderMode=BORDER_CONSTANT, Scalar borderValue=Scalar(), Stream& stream=Stream::Null() )
:param src: Source image.
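A sketch (not from the reference text) that builds a pair of ``CV_32FC1`` maps mirroring the image horizontally; the file name and build configuration are assumptions: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat src = cv::imread("input.jpg");
        cv::Mat xmap(src.size(), CV_32FC1), ymap(src.size(), CV_32FC1);

        // map each destination pixel to its horizontally mirrored source position
        for (int y = 0; y < src.rows; ++y)
            for (int x = 0; x < src.cols; ++x)
            {
                xmap.at<float>(y, x) = (float)(src.cols - 1 - x);
                ymap.at<float>(y, x) = (float)y;
            }

        cv::gpu::GpuMat d_src(src), d_xmap(xmap), d_ymap(ymap), d_dst;
        cv::gpu::remap(d_src, d_dst, d_xmap, d_ymap, cv::INTER_LINEAR);

        cv::Mat dst;
        d_dst.download(dst);
        return 0;
    }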
@ -477,7 +477,7 @@ gpu::warpAffine
-------------------
Applies an affine transformation to an image.
.. ocv:function:: void gpu::warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::warpAffine( const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, Scalar borderValue=Scalar(), Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U`` , ``CV_16U`` , ``CV_32S`` , or ``CV_32F`` depth and 1, 3, or 4 channels are supported.
@ -499,7 +499,7 @@ gpu::buildWarpAffineMaps
------------------------
Builds transformation maps for affine transformation.
.. ocv:function:: void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
:param M: *2x3* transformation matrix.
@ -521,7 +521,7 @@ gpu::warpPerspective
------------------------
Applies a perspective transformation to an image.
.. ocv:function:: void gpu::warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::warpPerspective( const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, Scalar borderValue=Scalar(), Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U`` , ``CV_16U`` , ``CV_32S`` , or ``CV_32F`` depth and 1, 3, or 4 channels are supported.
@ -543,7 +543,7 @@ gpu::buildWarpPerspectiveMaps
-----------------------------
Builds transformation maps for perspective transformation.
.. ocv:function:: void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null())
:param M: *3x3* transformation matrix.
@ -657,9 +657,9 @@ Calculates a histogram with evenly distributed bins.
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat* hist, int* histSize, int* lowerLevel, int* upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat* hist, GpuMat& buf, int* histSize, int* lowerLevel, int* upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U``, ``CV_16U``, or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
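A minimal sketch (added for illustration) of the buffered overload with 256 evenly spaced bins, assuming a CUDA-enabled build: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat img = cv::imread("input.jpg", CV_LOAD_IMAGE_GRAYSCALE); // CV_8UC1

        cv::gpu::GpuMat d_img(img), d_hist, d_buf;
        cv::gpu::histEven(d_img, d_hist, d_buf, 256, 0, 256);           // 256 bins over [0, 256)

        cv::Mat hist;
        d_hist.download(hist); // one row, 256 columns, CV_32SC1
        return 0;
    }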
@ -685,10 +685,6 @@ Calculates a histogram with bins determined by the ``levels`` array.
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat* hist, const GpuMat* levels, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat* hist, const GpuMat* levels, GpuMat& buf, Stream& stream = Stream::Null())
:param src: Source image. ``CV_8U`` , ``CV_16U`` , or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
:param hist: Destination histogram with one row, ``(levels.cols-1)`` columns, and the ``CV_32SC1`` type.
@ -747,7 +743,7 @@ gpu::buildWarpPlaneMaps
-----------------------
Builds plane warping maps.
.. ocv:function:: void gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, double dist, GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpPlaneMaps( Size src_size, Rect dst_roi, const Mat & K, const Mat& R, const Mat & T, float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream=Stream::Null() )
:param stream: Stream for the asynchronous version.
@ -757,7 +753,7 @@ gpu::buildWarpCylindricalMaps
-----------------------------
Builds cylindrical warping maps.
.. ocv:function:: void gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpCylindricalMaps( Size src_size, Rect dst_roi, const Mat & K, const Mat& R, float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream=Stream::Null() )
:param stream: Stream for the asynchronous version.
@ -767,7 +763,7 @@ gpu::buildWarpSphericalMaps
---------------------------
Builds spherical warping maps.
.. ocv:function:: void gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::buildWarpSphericalMaps( Size src_size, Rect dst_roi, const Mat & K, const Mat& R, float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream=Stream::Null() )
:param stream: Stream for the asynchronous version.

@ -47,7 +47,6 @@ Any subsequent API call to this device will reinitialize the device.
gpu::FeatureSet
---------------
.. ocv:class:: gpu::FeatureSet
Class providing GPU computing features. ::
@ -74,9 +73,9 @@ Class providing a set of static methods to check what NVIDIA* card architecture
The following method checks whether the module was built with the support of the given feature:
.. ocv:function:: static bool gpu::TargetArchs::builtWith(FeatureSet feature)
.. ocv:function:: static bool gpu::TargetArchs::builtWith( FeatureSet feature_set )
:param feature: Feature to be checked. See :ocv:class:`gpu::FeatureSet`.
:param feature_set: Features to be checked. See :ocv:class:`gpu::FeatureSet`.
There is a set of methods to check whether the module contains intermediate (PTX) or binary GPU code for the given architecture(s):
@ -150,7 +149,7 @@ gpu::DeviceInfo::name
-------------------------
Returns the device name.
.. ocv:function:: string gpu::DeviceInfo::name()
.. ocv:function:: string gpu::DeviceInfo::name() const
@ -198,9 +197,9 @@ gpu::DeviceInfo::supports
-----------------------------
Provides information on GPU feature support.
.. ocv:function:: bool gpu::DeviceInfo::supports(FeatureSet feature)
.. ocv:function:: bool gpu::DeviceInfo::supports( FeatureSet feature_set ) const
:param feature: Feature to be checked. See :ocv:class:`gpu::FeatureSet`.
:param feature_set: Features to be checked. See :ocv:class:`gpu::FeatureSet`.
This function returns ``true`` if the device has the specified GPU feature. Otherwise, it returns ``false`` .
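A small sketch (not part of the reference) that enumerates CUDA devices and queries a feature, assuming at least one CUDA-capable device is present: ::

    #include <iostream>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        int count = cv::gpu::getCudaEnabledDeviceCount();
        for (int i = 0; i < count; ++i)
        {
            cv::gpu::DeviceInfo info(i);
            std::cout << info.name() << ": compute 1.3 support = "
                      << info.supports(cv::gpu::FEATURE_SET_COMPUTE_13) << std::endl;
        }
        return 0;
    }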

@ -32,7 +32,7 @@ By default, the OpenCV GPU module includes:
PTX code for compute capabilities 1.1 and 1.3 (controlled by ``CUDA_ARCH_PTX`` in ``CMake``)
This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
:ocv:func:`Exception`. For platforms where JIT compilation is performed first, the run is slow.
:ocv:class:`Exception`. For platforms where JIT compilation is performed first, the run is slow.
On a GPU with CC 1.0, you can still compile the GPU module and most of the functions will run flawlessly. To achieve this, add "1.0" to the list of binaries, for example, ``CUDA_ARCH_BIN="1.0 1.3 2.0"`` . The functions that cannot be run on CC 1.0 GPUs throw an exception.

@ -7,7 +7,7 @@ Object Detection
gpu::HOGDescriptor
------------------
.. ocv:class:: gpu::HOGDescriptor
.. ocv:struct:: gpu::HOGDescriptor
The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detector. ::
@ -235,7 +235,7 @@ gpu::CascadeClassifier_GPU::CascadeClassifier_GPU
-----------------------------------------------------
Loads the classifier from a file.
.. ocv:function:: gpu::CascadeClassifier_GPU(const string& filename)
.. ocv:function:: gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string& filename)
:param filename: Name of the file from which the classifier is loaded. Only the old ``haar`` classifier (trained by the ``haar`` training application) and NVIDIA's ``nvbin`` are supported.

@ -9,7 +9,7 @@ gpu::gemm
------------------
Performs generalized matrix multiplication.
.. ocv:function:: void gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())
:param src1: First multiplied input matrix that should have ``CV_32FC1`` , ``CV_64FC1`` , ``CV_32FC2`` , or ``CV_64FC2`` type.
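A usage sketch (added for illustration) with synthetic ``CV_32FC1`` data; it assumes the GPU module was built with CUBLAS support: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat A = cv::Mat::eye(3, 3, CV_32FC1) * 2.0;
        cv::Mat B = cv::Mat::ones(3, 3, CV_32FC1);
        cv::Mat C = cv::Mat::zeros(3, 3, CV_32FC1);

        cv::gpu::GpuMat d_A(A), d_B(B), d_C(C), d_dst;
        cv::gpu::gemm(d_A, d_B, 1.0, d_C, 0.0, d_dst); // dst = 1.0*A*B + 0.0*C

        cv::Mat dst;
        d_dst.download(dst);
        return 0;
    }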
@ -47,9 +47,9 @@ gpu::transpose
------------------
Transposes a matrix.
.. ocv:function:: void gpu::transpose(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::transpose( const GpuMat& src1, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).
:param src1: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).
:param dst: Destination matrix.
@ -63,11 +63,11 @@ gpu::flip
-------------
Flips a 2D matrix around vertical, horizontal, or both axes.
.. ocv:function:: void gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::flip( const GpuMat& a, GpuMat& b, int flipCode, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.
:param a: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.
:param dst: Destination matrix.
:param b: Destination matrix.
:param flipCode: Flip mode for the source:
@ -143,7 +143,7 @@ gpu::magnitude
------------------
Computes magnitudes of complex matrix elements.
.. ocv:function:: void gpu::magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::magnitude( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())
@ -165,7 +165,7 @@ gpu::magnitudeSqr
---------------------
Computes squared magnitudes of complex matrix elements.
.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::magnitudeSqr( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())

@ -9,15 +9,17 @@ gpu::add
------------
Computes a matrix-matrix or matrix-scalar sum.
.. ocv:function:: void gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::add( const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::add(const GpuMat& src1, const Scalar& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::add( const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param src2: Second source matrix or a scalar to be added to ``src1`` . Matrix should have the same size and type as ``src1`` .
:param b: Second source matrix to be added to ``a`` . Matrix should have the same size and type as ``a`` .
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param sc: A scalar to be added to ``a`` .
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param mask: Optional operation mask, 8-bit single channel array, that specifies elements of the destination array to be changed.
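A minimal sketch (not from the reference text) showing both the matrix-matrix and the matrix-scalar forms on synthetic data: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat a = cv::Mat::ones(4, 4, CV_8UC1) * 100;
        cv::Mat b = cv::Mat::ones(4, 4, CV_8UC1) * 27;

        cv::gpu::GpuMat d_a(a), d_b(b), d_c;
        cv::gpu::add(d_a, d_b, d_c);           // matrix + matrix
        cv::gpu::add(d_a, cv::Scalar(5), d_c); // matrix + scalar

        cv::Mat c;
        d_c.download(c);
        return 0;
    }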
@ -33,15 +35,17 @@ gpu::subtract
-----------------
Computes a matrix-matrix or matrix-scalar difference.
.. ocv:function:: void gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::subtract( const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::subtract(const GpuMat& src1, const Scalar& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::subtract( const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask=GpuMat(), int dtype=-1, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param src2: Second source matrix or a scalar to be added to ``src1`` . Matrix should have the same size and type as ``src1`` .
:param b: Second source matrix to be subtracted from ``a`` . Matrix should have the same size and type as ``a`` .
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param sc: A scalar to be subtracted from ``a`` .
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param mask: Optional operation mask, 8-bit single channel array, that specifies elements of the destination array to be changed.
@ -57,15 +61,17 @@ gpu::multiply
-----------------
Computes a matrix-matrix or matrix-scalar per-element product.
.. ocv:function:: void gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::multiply( const GpuMat& a, const GpuMat& b, GpuMat& c, double scale=1, int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::multiply(const GpuMat& src1, const Scalar& src2, GpuMat& dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::multiply( const GpuMat& a, const Scalar& sc, GpuMat& c, double scale=1, int dtype=-1, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param src2: Second source matrix or a scalar to be multiplied by ``src1`` elements.
:param b: Second source matrix to be multiplied by ``a`` elements.
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param sc: A scalar to be multiplied by ``a`` elements.
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param scale: Optional scale factor.
@ -81,15 +87,19 @@ gpu::divide
-----------
Computes a matrix-matrix or matrix-scalar division.
.. ocv:function:: void gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::divide( const GpuMat& a, const GpuMat& b, GpuMat& c, double scale=1, int dtype=-1, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::divide(double src1, const GpuMat& src2, GpuMat& dst, int dtype = -1, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null())
:param src1: First source matrix or a scalar.
.. ocv:function:: void gpu::divide( double scale, const GpuMat& b, GpuMat& c, int dtype=-1, Stream& stream=Stream::Null() )
:param src2: Second source matrix or a scalar. The ``src1`` elements are divided by it.
:param a: First source matrix or a scalar.
:param dst: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``src1`` depth.
:param b: Second source matrix. The ``a`` elements are divided by it.
:param sc: A scalar by which the elements of the ``a`` matrix are divided.
:param c: Destination matrix that has the same size and number of channels as the input array(s). The depth is defined by ``dtype`` or ``a`` depth.
:param scale: Optional scale factor.
@ -186,11 +196,11 @@ gpu::exp
------------
Computes an exponent of each matrix element.
.. ocv:function:: void gpu::exp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::exp( const GpuMat& a, GpuMat& b, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param a: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param dst: Destination matrix with the same size and type as ``src`` .
:param b: Destination matrix with the same size and type as ``a`` .
:param stream: Stream for the asynchronous version.
@ -202,11 +212,11 @@ gpu::log
------------
Computes a natural logarithm of absolute value of each matrix element.
.. ocv:function:: void gpu::log(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::log( const GpuMat& a, GpuMat& b, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param a: Source matrix. Supports ``CV_8U`` , ``CV_16U`` , ``CV_16S`` and ``CV_32F`` depth.
:param dst: Destination matrix with the same size and type as ``src`` .
:param b: Destination matrix with the same size and type as ``a`` .
:param stream: Stream for the asynchronous version.
@ -242,15 +252,17 @@ gpu::absdiff
----------------
Computes per-element absolute difference of two matrices (or of a matrix and scalar).
.. ocv:function:: void gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::absdiff( const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::absdiff(const GpuMat& src1, const Scalar& src2, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::absdiff( const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream=Stream::Null() )
:param src1: First source matrix.
:param a: First source matrix.
:param src2: Second source matrix or a scalar to be added to ``src1`` .
:param b: Second source matrix, used to compute the per-element absolute difference with ``a`` .
:param dst: Destination matrix with the same size and type as ``src1`` .
:param s: A scalar, used to compute the per-element absolute difference with ``a`` .
:param c: Destination matrix with the same size and type as ``a`` .
:param stream: Stream for the asynchronous version.
@ -262,22 +274,26 @@ gpu::compare
----------------
Compares elements of two matrices.
.. ocv:function:: void gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int cmpop, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::compare( const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream=Stream::Null() )
:param src1: First source matrix.
.. ocv:function:: void gpu::compare(const GpuMat& a, Scalar sc, GpuMat& c, int cmpop, Stream& stream = Stream::Null())
:param src2: Second source matrix with the same size and type as ``src1`` .
:param a: First source matrix.
:param dst: Destination matrix with the same size as ``src1`` and the ``CV_8UC1`` type.
:param b: Second source matrix with the same size and type as ``a`` .
:param sc: A scalar to be compared with ``a`` .
:param c: Destination matrix with the same size as ``a`` and the ``CV_8UC1`` type.
:param cmpop: Flag specifying the relation between the elements to be checked:
* **CMP_EQ:** ``src1(.) == src2(.)``
* **CMP_GT:** ``src1(.) < src2(.)``
* **CMP_GE:** ``src1(.) <= src2(.)``
* **CMP_LT:** ``src1(.) < src2(.)``
* **CMP_LE:** ``src1(.) <= src2(.)``
* **CMP_NE:** ``src1(.) != src2(.)``
* **CMP_EQ:** ``a(.) == b(.)``
* **CMP_GT:** ``a(.) > b(.)``
* **CMP_GE:** ``a(.) >= b(.)``
* **CMP_LT:** ``a(.) < b(.)``
* **CMP_LE:** ``a(.) <= b(.)``
* **CMP_NE:** ``a(.) != b(.)``
:param stream: Stream for the asynchronous version.
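A short sketch (added for illustration) producing an 8-bit mask that is 255 where the first matrix is greater than the second: ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat a = (cv::Mat_<uchar>(1, 4) << 10, 20, 30, 40);
        cv::Mat b = (cv::Mat_<uchar>(1, 4) << 40, 30, 20, 10);

        cv::gpu::GpuMat d_a(a), d_b(b), d_c;
        cv::gpu::compare(d_a, d_b, d_c, cv::CMP_GT); // 255 where a > b, 0 elsewhere

        cv::Mat mask;
        d_c.download(mask);
        return 0;
    }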
@ -362,7 +378,7 @@ gpu::rshift
--------------------
Performs pixel by pixel right shift of an image by a constant value.
.. ocv:function:: void gpu::rshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::rshift( const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1, 3 and 4 channel images with integer elements.
@ -378,7 +394,7 @@ gpu::lshift
--------------------
Performs pixel by pixel left shift of an image by a constant value.
.. ocv:function:: void gpu::lshift(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::lshift( const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U`` , ``CV_16U`` or ``CV_32S`` depth.

@ -431,11 +431,11 @@ CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = S
//! computes magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitude(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null());
CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
//! computes squared magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitudeSqr(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null());
CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
//! computes magnitude of each (x(i), y(i)) vector
//! supports only floating-point source
@ -480,7 +480,7 @@ CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale
//! computes element-wise weighted quotient of matrix and scalar (c = a / s)
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
CV_EXPORTS void divide(double scale, const GpuMat& src2, GpuMat& dst, int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null());
//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
@ -1697,15 +1697,15 @@ public:
class CV_EXPORTS GoodFeaturesToTrackDetector_GPU
{
public:
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners_ = 1000, double qualityLevel_ = 0.01, double minDistance_ = 0.0,
int blockSize_ = 3, bool useHarrisDetector_ = false, double harrisK_ = 0.04)
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04)
{
maxCorners = maxCorners_;
qualityLevel = qualityLevel_;
minDistance = minDistance_;
blockSize = blockSize_;
useHarrisDetector = useHarrisDetector_;
harrisK = harrisK_;
this->maxCorners = maxCorners;
this->qualityLevel = qualityLevel;
this->minDistance = minDistance;
this->blockSize = blockSize;
this->useHarrisDetector = useHarrisDetector;
this->harrisK = harrisK;
}
//! return 1 rows matrix with CV_32FC2 type

@ -61,11 +61,11 @@ setWindowProperty
---------------------
Changes parameters of a window dynamically.
.. ocv:function:: void setWindowProperty(const string& name, int prop_id, double prop_value)
.. ocv:function:: void setWindowProperty( const string& winname, int prop_id, double prop_value )
.. ocv:pyfunction:: cv2.setWindowProperty(winname, prop_id, prop_value) -> None
.. ocv:cfunction:: void cvSetWindowProperty(const char* name, int propId, double propValue)
.. ocv:cfunction:: void cvSetWindowProperty( const char* name, int prop_id, double prop_value )
:param name: Name of the window.
@ -97,11 +97,11 @@ getWindowProperty
---------------------
Provides parameters of a window.
.. ocv:function:: void getWindowProperty(const string& name, int prop_id)
.. ocv:function:: double getWindowProperty( const string& winname, int prop_id )
.. ocv:pyfunction:: cv2.getWindowProperty(winname, prop_id) -> retval
.. ocv:cfunction:: void cvGetWindowProperty(const char* name, int propId)
.. ocv:cfunction:: double cvGetWindowProperty( const char* name, int prop_id )
:param name: Name of the window.
@ -169,15 +169,15 @@ addText
-----------
Creates the font to draw a text on an image.
.. ocv:function:: void addText(const Mat& img, const string& text, Point location, CvFont *font)
.. ocv:function:: void addText( const Mat& img, const string& text, Point org, CvFont font )
.. ocv:cfunction:: void cvAddText(const CvArr* img, const char* text, CvPoint location, CvFont *font)
.. ocv:cfunction:: void cvAddText( const CvArr* img, const char* text, CvPoint org, CvFont * arg2 )
:param img: 8-bit 3-channel image where the text should be drawn.
:param text: Text to write on an image.
:param location: Point(x,y) where the text should start on an image.
:param org: Point(x,y) where the text should start on an image.
:param font: Font to use to draw a text.
@ -195,7 +195,7 @@ displayOverlay
------------------
Displays a text on a window image as an overlay for a specified duration.
.. ocv:function:: void displayOverlay(const string& name, const string& text, int delayms = 0)
.. ocv:function:: void displayOverlay( const string& winname, const string& text, int delayms=0 )
.. ocv:cfunction:: void cvDisplayOverlay(const char* name, const char* text, int delayms = 0)
@ -212,7 +212,7 @@ displayStatusBar
--------------------
Displays a text on the window statusbar during the specified period of time.
.. ocv:function:: void displayStatusBar(const string& name, const string& text, int delayms = 0)
.. ocv:function:: void displayStatusBar( const string& winname, const string& text, int delayms=0 )
.. ocv:cfunction:: void cvDisplayStatusBar(const char* name, const char* text, int delayms = 0)
@ -226,27 +226,21 @@ The function ``displayOverlay`` displays useful information/tips on top of the w
*delayms*
. This information is displayed on the window statusbar (the window must be created with the ``CV_GUI_EXPANDED`` flags).
createOpenGLCallback
setOpenGlDrawCallback
------------------------
Creates a callback function called to draw OpenGL on top the image display by ``windowname``.
Sets a callback function to be called to draw on top of the displayed image.
.. ocv:function:: void createOpenGLCallback( const string& window_name, OpenGLCallback callbackOpenGL, void* userdata =NULL, double angle=-1, double zmin=-1, double zmax=-1)
.. ocv:function:: void setOpenGlDrawCallback( const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata=0 )
.. ocv:cfunction:: void cvCreateOpenGLCallback( const char* windowName, CvOpenGLCallback callbackOpenGL, void* userdata=NULL, double angle=-1, double zmin=-1, double zmax=-1 )
.. ocv:cfunction:: void cvSetOpenGlDrawCallback( const char* window_name, CvOpenGlDrawCallback callback, void* userdata=NULL )
:param window_name: Name of the window.
:param callbackOpenGL: Pointer to the function to be called every frame. This function should be prototyped as ``void Foo(*void);`` .
:param onOpenGlDraw: Pointer to the function to be called every frame. This function should be prototyped as ``void Foo(void*)`` .
:param userdata: Pointer passed to the callback function. *(Optional)*
:param angle: Parameter specifying the field of a view angle, in degrees, in the y direction. Default value is 45 degrees. *(Optional)*
:param zmin: Parameter specifying the distance from the viewer to the near clipping plane (always positive). Default value is 0.01. *(Optional)*
:param zmax: Parameter specifying the distance from the viewer to the far clipping plane (always positive). Default value is 1000. *(Optional)*
The function ``createOpenGLCallback`` can be used to draw 3D data on the window. See the example of callback use below: ::
The function ``setOpenGlDrawCallback`` can be used to draw 3D data on the window. See the example of callback function below: ::
void on_opengl(void* param)
{
@ -282,7 +276,7 @@ saveWindowParameters
------------------------
Saves parameters of the specified window.
.. ocv:function:: void saveWindowParameters(const string& name)
.. ocv:function:: void saveWindowParameters( const string& windowName )
.. ocv:cfunction:: void cvSaveWindowParameters(const char* name)
@ -295,7 +289,7 @@ loadWindowParameters
------------------------
Loads parameters of the specified window.
.. ocv:function:: void loadWindowParameters(const string& name)
.. ocv:function:: void loadWindowParameters( const string& windowName )
.. ocv:cfunction:: void cvLoadWindowParameters(const char* name)
@ -308,9 +302,9 @@ createButton
----------------
Attaches a button to the control panel.
.. ocv:function:: createButton( const string& button_name=NULL, ButtonCallback on_change=NULL, void* userdata=NULL, int button_type=CV_PUSH_BUTTON, int initial_button_state=0 )
.. ocv:function:: int createButton( const string& bar_name, ButtonCallback on_change, void* userdata=NULL, int type=CV_PUSH_BUTTON, bool initial_button_state=0 )
.. ocv:cfunction:: cvCreateButton( const char* buttonName=NULL, CvButtonCallback onChange=NULL, void* userdata=NULL, int buttonType=CV_PUSH_BUTTON, int initialButtonState=0 )
.. ocv:cfunction:: int cvCreateButton( const char* button_name=NULL, CvButtonCallback on_change=NULL, void* userdata=NULL, int button_type=CV_PUSH_BUTTON, int initial_button_state=0 )
:param button_name: Name of the button.
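A minimal sketch (not part of the reference text); it assumes highgui was built with Qt support and that the ``ButtonCallback`` receives the button state and the user data pointer: ::

    #include <cstdio>
    #include <opencv2/opencv.hpp>

    void onButton(int state, void* userdata)
    {
        std::printf("checkbox state: %d\n", state);
    }

    int main()
    {
        cv::namedWindow("demo"); // the control panel is shared by all windows
        cv::createButton("Enable filter", onButton, NULL, CV_CHECKBOX, 0);
        cv::waitKey(0);
        return 0;
    }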

@ -31,9 +31,9 @@ Encodes an image into a memory buffer.
.. ocv:function:: bool imencode( const string& ext, InputArray img, vector<uchar>& buf, const vector<int>& params=vector<int>())
.. ocv:cfunction:: CvMat* cvEncodeImage(const char* ext, const CvArr* image, const int* params=NULL )
.. ocv:cfunction:: CvMat* cvEncodeImage( const char* ext, const CvArr* image, const int* params=0 )
.. ocv:pyfunction:: cv2.imencode(ext, img, buf[, params]) -> retval
.. ocv:pyfunction:: cv2.imencode(ext, img[, params]) -> retval, buf
:param ext: File extension that defines the output format.
@ -57,13 +57,13 @@ Loads an image from a file.
.. ocv:pyfunction:: cv2.imread(filename[, flags]) -> retval
.. ocv:cfunction:: IplImage* cvLoadImage( const char* filename, int flags=CV_LOAD_IMAGE_COLOR )
.. ocv:cfunction:: IplImage* cvLoadImage( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR )
.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int flags=CV_LOAD_IMAGE_COLOR )
.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR )
.. ocv:pyoldfunction:: cv.LoadImage(filename, flags=CV_LOAD_IMAGE_COLOR)->None
.. ocv:pyoldfunction:: cv.LoadImage(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None
.. ocv:pyoldfunction:: cv.LoadImageM(filename, flags=CV_LOAD_IMAGE_COLOR)->None
.. ocv:pyoldfunction:: cv.LoadImageM(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None
:param filename: Name of file to be loaded.
@ -103,11 +103,11 @@ imwrite
-----------
Saves an image to a specified file.
.. ocv:function:: bool imwrite( const string& filename, InputArray image, const vector<int>& params=vector<int>())
.. ocv:function:: bool imwrite( const string& filename, InputArray img, const vector<int>& params=vector<int>() )
.. ocv:pyfunction:: cv2.imwrite(filename, image[, params]) -> retval
.. ocv:pyfunction:: cv2.imwrite(filename, img[, params]) -> retval
.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image )
.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image, const int* params=0 )
.. ocv:pyoldfunction:: cv.SaveImage(filename, image)-> None
@ -225,7 +225,7 @@ VideoCapture constructors.
.. ocv:pyfunction:: cv2.VideoCapture(device) -> <VideoCapture object>
.. ocv:cfunction:: CvCapture* cvCaptureFromCAM( int device )
.. ocv:pyoldfunction:: cv.CaptureFromCAM(device) -> CvCapture
.. ocv:pyoldfunction:: cv.CaptureFromCAM(index) -> CvCapture
.. ocv:cfunction:: CvCapture* cvCaptureFromFile( const char* filename )
.. ocv:pyoldfunction:: cv.CaptureFromFile(filename) -> CvCapture
@ -243,8 +243,8 @@ Open video file or a capturing device for video capturing
.. ocv:function:: bool VideoCapture::open(const string& filename)
.. ocv:function:: bool VideoCapture::open(int device)
.. ocv:pyfunction:: cv2.VideoCapture.open(filename) -> successFlag
.. ocv:pyfunction:: cv2.VideoCapture.open(device) -> successFlag
.. ocv:pyfunction:: cv2.VideoCapture.open(filename) -> retval
.. ocv:pyfunction:: cv2.VideoCapture.open(device) -> retval
:param filename: name of the opened video file
@ -259,7 +259,7 @@ Returns true if video capturing has been initialized already.
.. ocv:function:: bool VideoCapture::isOpened()
.. ocv:pyfunction:: cv2.VideoCapture.isOpened() -> flag
.. ocv:pyfunction:: cv2.VideoCapture.isOpened() -> retval
If the previous call to ``VideoCapture`` constructor or ``VideoCapture::open`` succeeded, the method returns true.
@ -269,7 +269,7 @@ Closes video file or capturing device.
.. ocv:function:: void VideoCapture::release()
.. ocv:pyfunction:: cv2.VideoCapture.release()
.. ocv:pyfunction:: cv2.VideoCapture.release() -> None
.. ocv:cfunction:: void cvReleaseCapture(CvCapture** capture)
@ -284,7 +284,7 @@ Grabs the next frame from video file or capturing device.
.. ocv:function:: bool VideoCapture::grab()
.. ocv:pyfunction:: cv2.VideoCapture.grab() -> successFlag
.. ocv:pyfunction:: cv2.VideoCapture.grab() -> retval
.. ocv:cfunction:: int cvGrabFrame(CvCapture* capture)
@ -303,11 +303,11 @@ Decodes and returns the grabbed video frame.
.. ocv:function:: bool VideoCapture::retrieve(Mat& image, int channel=0)
.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, channel]]) -> successFlag, image
.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, channel]]) -> retval, image
.. ocv:cfunction:: IplImage* cvRetrieveFrame(CvCapture* capture)
.. ocv:cfunction:: IplImage* cvRetrieveFrame( CvCapture* capture, int streamIdx=0 )
.. ocv:pyoldfunction:: cv.RetrieveFrame(capture) -> iplimage
.. ocv:pyoldfunction:: cv.RetrieveFrame(capture) -> image
The methods/functions decode and return the just grabbed frame. If no frames have been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
@ -322,11 +322,11 @@ Grabs, decodes and returns the next video frame.
.. ocv:function:: bool VideoCapture::read(Mat& image)
.. ocv:pyfunction:: cv2.VideoCapture.read([image]) -> successFlag, image
.. ocv:pyfunction:: cv2.VideoCapture.read([image]) -> retval, image
.. ocv:cfunction:: IplImage* cvQueryFrame(CvCapture* capture)
.. ocv:pyoldfunction:: cv.QueryFrame(capture) -> iplimage
.. ocv:pyoldfunction:: cv.QueryFrame(capture) -> image
The methods/functions combine :ocv:func:`VideoCapture::grab` and :ocv:func:`VideoCapture::retrieve` in one call. This is the most convenient method for reading video files or capturing data from a camera: it decodes and returns the just grabbed frame. If no frames have been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
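A typical capture loop (added for illustration) built around ``VideoCapture::read``, using the default camera: ::

    #include <opencv2/opencv.hpp>

    int main()
    {
        cv::VideoCapture cap(0);      // open the default camera
        if (!cap.isOpened())
            return -1;

        cv::Mat frame;
        for (;;)
        {
            if (!cap.read(frame))     // grab + retrieve in one call
                break;                // no more frames or the camera was disconnected
            cv::imshow("frame", frame);
            if (cv::waitKey(30) >= 0)
                break;
        }
        return 0;
    }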
@ -341,9 +341,9 @@ Returns the specified ``VideoCapture`` property
.. ocv:pyfunction:: cv2.VideoCapture.get(propId) -> retval
.. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int propId )
.. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int property_id )
.. ocv:pyoldfunction:: cv.GetCaptureProperty(capture, propId)->double
.. ocv:pyoldfunction:: cv.GetCaptureProperty(capture, property_id) -> float
:param propId: Property identifier. It can be one of the following:
@ -393,13 +393,13 @@ VideoCapture::set
---------------------
Sets a property in the ``VideoCapture``.
.. ocv:function:: bool VideoCapture::set(int propertyId, double value)
.. ocv:function:: bool VideoCapture::set( int propId, double value )
.. ocv:pyfunction:: cv2.VideoCapture.set(propId, value) -> retval
.. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int propId, double value )
.. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int property_id, double value )
.. ocv:pyoldfunction:: cv.SetCaptureProperty(capture, propId, value)->None
.. ocv:pyoldfunction:: cv.SetCaptureProperty(capture, property_id, value) -> retval
:param propId: Property identifier. It can be one of the following:
@ -463,8 +463,8 @@ VideoWriter constructors
.. ocv:pyfunction:: cv2.VideoWriter([filename, fourcc, fps, frameSize[, isColor]]) -> <VideoWriter object>
.. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frameSize, int isColor=1 )
.. ocv:pyoldfunction:: cv.CreateVideoWriter(filename, fourcc, fps, frameSize, isColor) -> CvVideoWriter
.. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 )
.. ocv:pyoldfunction:: cv.CreateVideoWriter(filename, fourcc, fps, frame_size, is_color=true) -> CvVideoWriter
.. ocv:pyfunction:: cv2.VideoWriter.isOpened() -> retval
.. ocv:pyfunction:: cv2.VideoWriter.open(filename, fourcc, fps, frameSize[, isColor]) -> retval

@ -9,7 +9,7 @@ Creates a trackbar and attaches it to the specified window.
.. ocv:function:: int createTrackbar( const string& trackbarname, const string& winname, int* value, int count, TrackbarCallback onChange=0, void* userdata=0)
.. ocv:cfunction:: int cvCreateTrackbar( const char* trackbarName, const char* windowName, int* value, int count, CvTrackbarCallback onChange )
.. ocv:cfunction:: int cvCreateTrackbar( const char* trackbar_name, const char* window_name, int* value, int count, CvTrackbarCallback on_change=NULL )
.. ocv:pyoldfunction:: cv.CreateTrackbar(trackbarName, windowName, value, count, onChange) -> None
:param trackbarname: Name of the created trackbar.
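A small sketch (not from the reference text) wiring a trackbar to a threshold value; the file name is only illustrative: ::

    #include <opencv2/opencv.hpp>

    int g_threshold = 128;
    cv::Mat g_src, g_dst;

    void onTrackbar(int pos, void*)
    {
        cv::threshold(g_src, g_dst, pos, 255, cv::THRESH_BINARY);
        cv::imshow("binary", g_dst);
    }

    int main()
    {
        g_src = cv::imread("input.jpg", CV_LOAD_IMAGE_GRAYSCALE);
        cv::namedWindow("binary");
        cv::createTrackbar("threshold", "binary", &g_threshold, 255, onTrackbar);
        onTrackbar(g_threshold, 0); // draw the initial state
        cv::waitKey(0);
        return 0;
    }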
@ -40,8 +40,8 @@ Returns the trackbar position.
.. ocv:pyfunction:: cv2.getTrackbarPos(trackbarname, winname) -> retval
.. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbarName, const char* windowName )
.. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName)-> None
.. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbar_name, const char* window_name )
.. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName) -> retval
:param trackbarname: Name of the trackbar.
@ -57,12 +57,12 @@ imshow
----------
Displays an image in the specified window.
.. ocv:function:: void imshow( const string& winname, InputArray image )
.. ocv:function:: void imshow( const string& winname, InputArray mat )
.. ocv:pyfunction:: cv2.imshow(winname, image) -> None
.. ocv:pyfunction:: cv2.imshow(winname, mat) -> None
.. ocv:cfunction:: void cvShowImage( const char* winname, const CvArr* image )
.. ocv:pyoldfunction:: cv.ShowImage(winname, image)-> None
.. ocv:cfunction:: void cvShowImage( const char* name, const CvArr* image )
.. ocv:pyoldfunction:: cv.ShowImage(name, image) -> None
:param winname: Name of the window.
@ -81,11 +81,11 @@ namedWindow
---------------
Creates a window.
.. ocv:function:: void namedWindow( const string& winname, int flags )
.. ocv:function:: void namedWindow( const string& winname, int flags=WINDOW_AUTOSIZE )
.. ocv:pyfunction:: cv2.namedWindow(winname[, flags]) -> None
.. ocv:cfunction:: int cvNamedWindow( const char* name, int flags )
.. ocv:cfunction:: int cvNamedWindow( const char* name, int flags=CV_WINDOW_AUTOSIZE )
.. ocv:pyoldfunction:: cv.NamedWindow(name, flags=CV_WINDOW_AUTOSIZE)-> None
:param name: Name of the window in the window caption that may be used as a window identifier.
@ -115,7 +115,7 @@ destroyWindow
-------------
Destroys a window.
.. ocv:function:: void destroyWindow( const string &winname )
.. ocv:function:: void destroyWindow( const string& winname )
.. ocv:pyfunction:: cv2.destroyWindow(winname) -> None
@ -179,12 +179,12 @@ SetMouseCallback
----------------
Sets mouse handler for the specified window
.. ocv:cfunction:: void cvSetMouseCallback( const char* name, CvMouseCallback onMouse, void* param=NULL )
.. ocv:pyoldfunction:: cv.SetMouseCallback(name, onMouse, param) -> None
.. ocv:cfunction:: void cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param=NULL )
.. ocv:pyoldfunction:: cv.SetMouseCallback(windowName, onMouse, param=None) -> None
:param name: Window name
:param window_name: Window name
:param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param on_mouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param param: The optional parameter passed to the callback.
@ -197,7 +197,7 @@ Sets the trackbar position.
.. ocv:pyfunction:: cv2.setTrackbarPos(trackbarname, winname, pos) -> None
.. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbarName, const char* windowName, int pos )
.. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos )
.. ocv:pyoldfunction:: cv.SetTrackbarPos(trackbarName, windowName, pos)-> None
:param trackbarname: Name of the trackbar.
@ -218,7 +218,7 @@ Waits for a pressed key.
.. ocv:function:: int waitKey(int delay=0)
.. ocv:pyfunction:: cv2.waitKey([, delay]) -> retval
.. ocv:pyfunction:: cv2.waitKey([delay]) -> retval
.. ocv:cfunction:: int cvWaitKey( int delay=0 )
.. ocv:pyoldfunction:: cv.WaitKey(delay=0)-> int
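``namedWindow``, ``imshow``, and ``waitKey`` are typically used together; a minimal sketch (added for illustration, file name hypothetical): ::

    #include <opencv2/opencv.hpp>

    int main()
    {
        cv::Mat img = cv::imread("input.jpg");
        if (img.empty())
            return -1;

        cv::namedWindow("preview", cv::WINDOW_AUTOSIZE);
        cv::imshow("preview", img);
        cv::waitKey(0); // wait indefinitely for a key press
        return 0;
    }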

@ -1091,8 +1091,6 @@ videoInput::videoInput(){
formatTypes[VI_SECAM_K1] = AnalogVideo_SECAM_K1;
formatTypes[VI_SECAM_L] = AnalogVideo_SECAM_L;
}
@ -2254,7 +2252,6 @@ int videoInput::getCameraPropertyFromCV(int cv_property){
// see CameraControlProperty in strmif.h
switch (cv_property) {
case CV_CAP_PROP_PAN:
return CameraControl_Pan;
@ -3075,7 +3072,7 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter *
}
Crossbar->Route(pOIndex,pIndex);
}else{
if(verbose)printf("SETUP: Didn't find specified Physical Connection type. Using Defualt. \n");
if(verbose) printf("SETUP: Didn't find specified Physical Connection type. Using Defualt. \n");
}
//we only free the crossbar when we close or restart the device
@ -3087,7 +3084,7 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter *
if(pXBar1)pXBar1 = NULL;
}else{
if(verbose)printf("SETUP: You are a webcam or snazzy firewire cam! No Crossbar needed\n");
if(verbose) printf("SETUP: You are a webcam or snazzy firewire cam! No Crossbar needed\n");
return hr;
}
@ -3110,8 +3107,6 @@ public:
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_DSHOW; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
void init();
@ -3217,60 +3212,29 @@ double CvCaptureCAM_DShow::getProperty( int property_id )
switch( property_id )
{
case CV_CAP_PROP_BRIGHTNESS:
if ( VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BRIGHTNESS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_CONTRAST:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_CONTRAST),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_HUE:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_HUE),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_SATURATION:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SATURATION),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_SHARPNESS:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SHARPNESS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_GAMMA:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAMMA),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_MONOCROME:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_MONOCROME),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_WHITE_BALANCE_BLUE_U),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_BACKLIGHT:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BACKLIGHT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_GAIN:
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAIN),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
}
// camera properties
switch( property_id )
{
case CV_CAP_PROP_BACKLIGHT:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_BACKLIGHT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_PAN:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_PAN),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_TILT:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_TILT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_ROLL:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_ROLL),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_ZOOM:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_BACKLIGHT),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_EXPOSURE:
case CV_CAP_PROP_IRIS:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_IRIS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
case CV_CAP_PROP_FOCUS:
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_FOCUS),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value;
}
@ -3341,67 +3305,32 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
//video Filter properties
switch( property_id )
{
case CV_CAP_PROP_BRIGHTNESS:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BRIGHTNESS),(long)value);
case CV_CAP_PROP_CONTRAST:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_CONTRAST),(long)value);
case CV_CAP_PROP_HUE:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_HUE),(long)value);
case CV_CAP_PROP_SATURATION:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SATURATION),(long)value);
case CV_CAP_PROP_SHARPNESS:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_SHARPNESS),(long)value);
case CV_CAP_PROP_GAMMA:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAMMA),(long)value);
case CV_CAP_PROP_MONOCROME:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_MONOCROME),(long)value);
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_WHITE_BALANCE_BLUE_U),(long)value);
case CV_CAP_PROP_BACKLIGHT:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_BACKLIGHT),(long)value);
case CV_CAP_PROP_GAIN:
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(CV_CAP_PROP_GAIN),(long)value);
default:
;
return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),(long)value);
}
//camera properties
switch( property_id )
{
case CV_CAP_PROP_PAN:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_PAN),(long)value);
case CV_CAP_PROP_TILT:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_TILT),(long)value);
case CV_CAP_PROP_ROLL:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_ROLL),(long)value);
case CV_CAP_PROP_ZOOM:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_ZOOM),(long)value);
case CV_CAP_PROP_EXPOSURE:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_EXPOSURE),(long)value);
case CV_CAP_PROP_IRIS:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_IRIS),(long)value);
case CV_CAP_PROP_FOCUS:
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(CV_CAP_PROP_FOCUS),(long)value);
return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),(long)value);
}
return false;
}
@ -3410,8 +3339,16 @@ CvCapture* cvCreateCameraCapture_DShow( int index )
{
CvCaptureCAM_DShow* capture = new CvCaptureCAM_DShow;
try
{
if( capture->open( index ))
return capture;
}
catch(...)
{
delete capture;
throw;
}
delete capture;
return 0;

@ -521,7 +521,7 @@ void CV_SpecificVideoTest::run(int)
TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
#endif
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && !defined(__APPLE__)
TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); }
TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
#endif

@ -13,8 +13,9 @@ Finds edges in an image using the [Canny86]_ algorithm.
.. ocv:pyfunction:: cv2.Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) -> edges
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int apertureSize=3 )
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, apertureSize=3)-> None
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, aperture_size=3) -> None
:param image: Single-channel 8-bit input image.
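As a quick illustration of the cv2 signature above, a minimal Python sketch (the image file name is hypothetical)::

    import cv2

    img = cv2.imread("building.jpg", 0)                      # load as single-channel 8-bit
    edges = cv2.Canny(img, 50, 150, apertureSize=3, L2gradient=True)
    cv2.imwrite("edges.png", edges)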
@ -37,12 +38,13 @@ cornerEigenValsAndVecs
----------------------
Calculates eigenvalues and eigenvectors of image blocks for corner detection.
.. ocv:function:: void cornerEigenValsAndVecs( InputArray src, OutputArray dst, int blockSize, int apertureSize, int borderType=BORDER_DEFAULT )
.. ocv:function:: void cornerEigenValsAndVecs( InputArray src, OutputArray dst, int blockSize, int ksize, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.cornerEigenValsAndVecs(src, blockSize, ksize[, dst[, borderType]]) -> dst
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int blockSize, int apertureSize=3 )
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, apertureSize=3)-> None
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int block_size, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, aperture_size=3) -> None
:param src: Input single-channel 8-bit or floating-point image.
@ -50,7 +52,7 @@ Calculates eigenvalues and eigenvectors of image blocks for corner detection.
:param blockSize: Neighborhood size (see details below).
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
@ -89,12 +91,13 @@ cornerHarris
------------
Harris edge detector.
.. ocv:function:: void cornerHarris( InputArray src, OutputArray dst, int blockSize, int apertureSize, double k, int borderType=BORDER_DEFAULT )
.. ocv:function:: void cornerHarris( InputArray src, OutputArray dst, int blockSize, int ksize, double k, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.cornerHarris(src, blockSize, ksize, k[, dst[, borderType]]) -> dst
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harrisDst, int blockSize, int apertureSize=3, double k=0.04 )
.. ocv:pyoldfunction:: cv.CornerHarris(image, harrisDst, blockSize, apertureSize=3, k=0.04)-> None
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harris_responce, int block_size, int aperture_size=3, double k=0.04 )
.. ocv:pyoldfunction:: cv.CornerHarris(image, harris_dst, blockSize, aperture_size=3, k=0.04) -> None
:param src: Input single-channel 8-bit or floating-point image.
@ -102,7 +105,7 @@ Harris edge detector.
:param blockSize: Neighborhood size (see the details on :ocv:func:`cornerEigenValsAndVecs` ).
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param k: Harris detector free parameter. See the formula below.
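A minimal Python sketch of the cv2 call above (the file name and the thresholding factor are illustrative)::

    import cv2
    import numpy as np

    img = cv2.imread("chessboard.png", 0)
    dst = cv2.cornerHarris(np.float32(img), 2, 3, 0.04)      # blockSize=2, ksize=3, k=0.04
    img[dst > 0.01 * dst.max()] = 255                        # mark strong responses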
@ -128,13 +131,13 @@ cornerMinEigenVal
-----------------
Calculates the minimal eigenvalue of gradient matrices for corner detection.
.. ocv:function:: void cornerMinEigenVal( InputArray src, OutputArray dst, int blockSize, int apertureSize=3, int borderType=BORDER_DEFAULT )
.. ocv:function:: void cornerMinEigenVal( InputArray src, OutputArray dst, int blockSize, int ksize=3, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.cornerMinEigenVal(src, blockSize[, dst[, ksize[, borderType]]]) -> dst
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int blockSize, int apertureSize=3 )
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int block_size, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, apertureSize=3)-> None
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, aperture_size=3) -> None
:param src: Input single-channel 8-bit or floating-point image.
@ -142,7 +145,7 @@ Calculates the minimal eigenvalue of gradient matrices for corner detection.
:param blockSize: Neighborhood size (see the details on :ocv:func:`cornerEigenValsAndVecs` ).
:param apertureSize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param ksize: Aperture parameter for the :ocv:func:`Sobel` operator.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .
@ -161,9 +164,9 @@ Refines the corner locations.
.. ocv:pyfunction:: cv2.cornerSubPix(image, corners, winSize, zeroZone, criteria) -> None
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize winSize, CvSize zeroZone, CvTermCriteria criteria )
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria )
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, winSize, zeroZone, criteria)-> corners
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, win, zero_zone, criteria) -> corners
:param image: Input image.
@ -223,15 +226,15 @@ Determines strong corners on an image.
.. ocv:pyfunction:: cv2.goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) -> corners
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eigImage, CvArr* tempImage, CvPoint2D32f* corners, int* cornerCount, double qualityLevel, double minDistance, const CvArr* mask=NULL, int blockSize=3, int useHarris=0, double k=0.04 )
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, CvArr* temp_image, CvPoint2D32f* corners, int* corner_count, double quality_level, double min_distance, const CvArr* mask=NULL, int block_size=3, int use_harris=0, double k=0.04 )
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04)-> corners
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04) -> cornerCount
:param image: Input 8-bit or floating-point 32-bit, single-channel image.
:param eigImage: The parameter is ignored.
:param eig_image: The parameter is ignored.
:param tempImage: The parameter is ignored.
:param temp_image: The parameter is ignored.
:param corners: Output vector of detected corners.
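For illustration, a short Python sketch using the cv2 signature above (the file name is hypothetical)::

    import cv2

    img = cv2.imread("scene.png", 0)
    corners = cv2.goodFeaturesToTrack(img, 100, 0.01, 10)    # maxCorners, qualityLevel, minDistance
    if corners is not None:
        for x, y in corners.reshape(-1, 2):
            cv2.circle(img, (int(x), int(y)), 3, 255, -1)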
@ -287,7 +290,7 @@ Finds circles in a grayscale image using the Hough transform.
.. ocv:function:: void HoughCircles( InputArray image, OutputArray circles, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
.. ocv:cfunction:: CvSeq* cvHoughCircles( CvArr* image, CvMemStorage* circleStorage, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
.. ocv:cfunction:: CvSeq* cvHoughCircles( CvArr* image, void* circle_storage, int method, double dp, double min_dist, double param1=100, double param2=100, int min_radius=0, int max_radius=0 )
.. ocv:pyfunction:: cv2.HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles
@ -295,7 +298,7 @@ Finds circles in a grayscale image using the Hough transform.
:param circles: Output vector of found circles. Each vector is encoded as a 3-element floating-point vector :math:`(x, y, radius)` .
:param circleStorage: In C function this is a memory storage that will contain the output sequence of found circles.
:param circle_storage: In C function this is a memory storage that will contain the output sequence of found circles.
:param method: Detection method to use. Currently, the only implemented method is ``CV_HOUGH_GRADIENT`` , which is basically *21HT* , described in [Yuen90]_.
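A hedged Python sketch of the cv2 call above; in the 2.4 bindings the method constant is ``cv2.cv.CV_HOUGH_GRADIENT`` (``cv2.HOUGH_GRADIENT`` in later versions), and the parameter values are only a starting point::

    import cv2

    gray = cv2.imread("coins.png", 0)
    gray = cv2.medianBlur(gray, 5)                           # reduce false circle detections
    circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT, 1, 20,
                               param1=100, param2=30, minRadius=0, maxRadius=0)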
@ -362,7 +365,7 @@ Finds lines in a binary image using the standard Hough transform.
.. ocv:pyfunction:: cv2.HoughLines(image, rho, theta, threshold[, lines[, srn[, stn]]]) -> lines
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* line_storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
.. ocv:pyoldfunction:: cv.HoughLines2(image, storage, method, rho, theta, threshold, param1=0, param2=0)-> lines
@ -501,18 +504,19 @@ preCornerDetect
---------------
Calculates a feature map for corner detection.
.. ocv:function:: void preCornerDetect( InputArray src, OutputArray dst, int apertureSize, int borderType=BORDER_DEFAULT )
.. ocv:function:: void preCornerDetect( InputArray src, OutputArray dst, int ksize, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.preCornerDetect(src, ksize[, dst[, borderType]]) -> dst
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int apertureSize=3 )
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.PreCornerDetect(image, corners, apertureSize=3)-> None
:param src: Source single-channel 8-bit or floating-point image.
:param dst: Output image that has the type ``CV_32F`` and the same size as ``src`` .
:param apertureSize: Aperture size of the :ocv:func:`Sobel` .
:param ksize: Aperture size of the :ocv:func:`Sobel` .
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` .


@ -523,7 +523,7 @@ buildPyramid
----------------
Constructs the Gaussian pyramid for an image.
.. ocv:function:: void buildPyramid( InputArray src, OutputArrayOfArrays dst, int maxlevel )
.. ocv:function:: void buildPyramid( InputArray src, OutputArrayOfArrays dst, int maxlevel, int borderType=BORDER_DEFAULT )
:param src: Source image. Check :ocv:func:`pyrDown` for the list of supported types.
@ -672,15 +672,15 @@ createGaussianFilter
------------------------
Returns an engine for smoothing images with the Gaussian filter.
.. ocv:function:: Ptr<FilterEngine> createGaussianFilter( int type, Size ksize, double sigmaX, double sigmaY=0, int borderType=BORDER_DEFAULT)
.. ocv:function:: Ptr<FilterEngine> createGaussianFilter( int type, Size ksize, double sigma1, double sigma2=0, int borderType=BORDER_DEFAULT )
:param type: Source and destination image type.
:param ksize: Aperture size. See :ocv:func:`getGaussianKernel` .
:param sigmaX: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` .
:param sigma1: Gaussian sigma in the horizontal direction. See :ocv:func:`getGaussianKernel` .
:param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` .
:param sigma2: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigma2}\leftarrow\texttt{sigma1}` .
:param borderType: Border type to use. See :ocv:func:`borderInterpolate` .
@ -701,7 +701,7 @@ createLinearFilter
----------------------
Creates a non-separable linear filter engine.
.. ocv:function:: Ptr<FilterEngine> createLinearFilter(int srcType, int dstType, InputArray kernel, Point _anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar())
.. ocv:function:: Ptr<FilterEngine> createLinearFilter( int srcType, int dstType, InputArray kernel, Point _anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar() )
.. ocv:function:: Ptr<BaseFilter> getLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor=Point(-1,-1), double delta=0, int bits=0)
@ -737,13 +737,13 @@ createMorphologyFilter
--------------------------
Creates an engine for non-separable morphological operations.
.. ocv:function:: Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray element, Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, int columnBorderType=-1, const Scalar& borderValue=morphologyDefaultBorderValue())
.. ocv:function:: Ptr<FilterEngine> createMorphologyFilter( int op, int type, InputArray kernel, Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, int columnBorderType=-1, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray element, Point anchor=Point(-1,-1))
.. ocv:function:: Ptr<BaseFilter> getMorphologyFilter( int op, int type, InputArray kernel, Point anchor=Point(-1,-1) )
.. ocv:function:: Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int esize, int anchor=-1)
.. ocv:function:: Ptr<BaseRowFilter> getMorphologyRowFilter( int op, int type, int ksize, int anchor=-1 )
.. ocv:function:: Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int esize, int anchor=-1)
.. ocv:function:: Ptr<BaseColumnFilter> getMorphologyColumnFilter( int op, int type, int ksize, int anchor=-1 )
.. ocv:function:: Scalar morphologyDefaultBorderValue()
@ -751,9 +751,9 @@ Creates an engine for non-separable morphological operations.
:param type: Input/output image type. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
:param element: 2D 8-bit structuring element for a morphological operation. Non-zero elements indicate the pixels that belong to the element.
:param kernel: 2D 8-bit structuring element for a morphological operation. Non-zero elements indicate the pixels that belong to the element.
:param esize: Horizontal or vertical structuring element size for separable morphological operations.
:param ksize: Horizontal or vertical structuring element size for separable morphological operations.
:param anchor: Anchor position within the structuring element. Negative values mean that the anchor is at the kernel center.
@ -783,11 +783,11 @@ createSeparableLinearFilter
-------------------------------
Creates an engine for a separable linear filter.
.. ocv:function:: Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar())
.. ocv:function:: Ptr<FilterEngine> createSeparableLinearFilter( int srcType, int dstType, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int rowBorderType=BORDER_DEFAULT, int columnBorderType=-1, const Scalar& borderValue=Scalar() )
.. ocv:function:: Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType, InputArray columnKernel, int anchor, int symmetryType, double delta=0, int bits=0)
.. ocv:function:: Ptr<BaseColumnFilter> getLinearColumnFilter( int bufType, int dstType, InputArray kernel, int anchor, int symmetryType, double delta=0, int bits=0 )
.. ocv:function:: Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType, InputArray rowKernel, int anchor, int symmetryType)
.. ocv:function:: Ptr<BaseRowFilter> getLinearRowFilter( int srcType, int bufType, InputArray kernel, int anchor, int symmetryType )
:param srcType: Source array type.
@ -831,7 +831,7 @@ dilate
----------
Dilates an image by using a specific structuring element.
.. ocv:function:: void dilate( InputArray src, OutputArray dst, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: void dilate( InputArray src, OutputArray dst, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:pyfunction:: cv2.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
@ -871,7 +871,7 @@ erode
---------
Erodes an image by using a specific structuring element.
.. ocv:function:: void erode( InputArray src, OutputArray dst, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: void erode( InputArray src, OutputArray dst, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:pyfunction:: cv2.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
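For illustration, a small Python sketch applying erosion and dilation with a simple 3x3 kernel (the file name is hypothetical)::

    import cv2
    import numpy as np

    img = cv2.imread("binary.png", 0)
    kernel = np.ones((3, 3), np.uint8)
    eroded  = cv2.erode(img, kernel, iterations=2)
    dilated = cv2.dilate(img, kernel, iterations=2)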
@ -916,7 +916,8 @@ Convolves an image with the kernel.
.. ocv:pyfunction:: cv2.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1, -1))
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1,-1) )
.. ocv:pyoldfunction:: cv.Filter2D(src, dst, kernel, anchor=(-1, -1))-> None
:param src: Source image.
@ -1095,7 +1096,7 @@ Returns a structuring element of the specified size and shape for morphological
.. ocv:pyfunction:: cv2.getStructuringElement(shape, ksize[, anchor]) -> retval
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchorX, int anchorY, int shape, int* values=NULL )
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchor_x, int anchor_y, int shape, int* values=NULL )
.. ocv:pyoldfunction:: cv.CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)-> kernel
@ -1125,9 +1126,9 @@ Returns a structuring element of the specified size and shape for morphological
:param anchor: Anchor position within the element. The default value :math:`(-1, -1)` means that the anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just regulates how much the result of the morphological operation is shifted.
:param anchorX: x-coordinate of the anchor
:param anchor_x: x-coordinate of the anchor
:param anchorY: y-coordinate of the anchor
:param anchor_y: y-coordinate of the anchor
:param values: integer array of ``cols``*``rows`` elements that specifies the custom shape of the structuring element, when ``shape=CV_SHAPE_CUSTOM``.
@ -1170,7 +1171,7 @@ morphologyEx
----------------
Performs advanced morphological transformations.
.. ocv:function:: void morphologyEx( InputArray src, OutputArray dst, int op, InputArray element, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:function:: void morphologyEx( InputArray src, OutputArray dst, int op, InputArray kernel, Point anchor=Point(-1,-1), int iterations=1, int borderType=BORDER_CONSTANT, const Scalar& borderValue=morphologyDefaultBorderValue() )
.. ocv:pyfunction:: cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
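A minimal Python sketch combining :ocv:func:`getStructuringElement` with the call above (the file name is hypothetical)::

    import cv2

    img = cv2.imread("mask.png", 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)   # remove small white speckles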
@ -1242,7 +1243,6 @@ Any of the operations can be done in-place. In case of multi-channel images, eac
:ocv:func:`createMorphologyFilter`
Laplacian
-------------
Calculates the Laplacian of an image.
@ -1251,9 +1251,9 @@ Calculates the Laplacian of an image.
.. ocv:pyfunction:: cv2.Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int ksize=3)
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Laplace(src, dst, ksize=3)-> None
.. ocv:pyoldfunction:: cv.Laplace(src, dst, apertureSize=3) -> None
:param src: Source image.
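For illustration, a minimal Python sketch of the cv2 call above (the file name is hypothetical)::

    import cv2

    img = cv2.imread("lena.png", 0)
    lap = cv2.Laplacian(img, cv2.CV_64F, ksize=3)            # float depth keeps negative values
    lap8 = cv2.convertScaleAbs(lap)                          # back to 8-bit for display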
@ -1293,9 +1293,9 @@ pyrDown
-----------
Smoothes an image and downsamples it.
.. ocv:function:: void pyrDown( InputArray src, OutputArray dst, const Size& dstsize=Size())
.. ocv:function:: void pyrDown( InputArray src, OutputArray dst, const Size& dstsize=Size(), int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.pyrDown(src[, dst[, dstsize]]) -> dst
.. ocv:pyfunction:: cv2.pyrDown(src[, dst[, dstsize[, borderType]]]) -> dst
.. ocv:cfunction:: void cvPyrDown( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
@ -1326,9 +1326,9 @@ pyrUp
---------
Upsamples an image and then smoothes it.
.. ocv:function:: void pyrUp( InputArray src, OutputArray dst, const Size& dstsize=Size())
.. ocv:function:: void pyrUp( InputArray src, OutputArray dst, const Size& dstsize=Size(), int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.pyrUp(src[, dst[, dstsize]]) -> dst
.. ocv:pyfunction:: cv2.pyrUp(src[, dst[, dstsize[, borderType]]]) -> dst
.. ocv:cfunction:: void cvPyrUp( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
@ -1353,13 +1353,13 @@ pyrMeanShiftFiltering
---------------------
Performs initial step of meanshift segmentation of an image.
.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) )
.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) )
.. ocv:pyfunction:: cv2.pyrMeanShiftFiltering(src, sp, sr[, dst[, maxLevel[, termcrit]]]) -> dst
.. ocv:cfunction:: void cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst, double sp, double sr, int max_level=1, CvTermCriteria termcrit= cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1))
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, maxLevel=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1))-> None
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, max_level=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1)) -> None
:param src: The source 8-bit, 3-channel image.
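A short Python sketch of the cv2 call above (the file name and the ``sp``/``sr`` values are illustrative)::

    import cv2

    img = cv2.imread("fruits.jpg")
    flattened = cv2.pyrMeanShiftFiltering(img, 20, 40, maxLevel=1)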
@ -1402,7 +1402,7 @@ sepFilter2D
---------------
Applies a separable linear filter to an image.
.. ocv:function:: void sepFilter2D( InputArray src, OutputArray dst, int ddepth, InputArray rowKernel, InputArray columnKernel, Point anchor=Point(-1,-1), double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:function:: void sepFilter2D( InputArray src, OutputArray dst, int ddepth, InputArray kernelX, InputArray kernelY, Point anchor=Point(-1,-1), double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.sepFilter2D(src, ddepth, kernelX, kernelY[, dst[, anchor[, delta[, borderType]]]]) -> dst
@ -1418,9 +1418,9 @@ Applies a separable linear filter to an image.
when ``ddepth=-1``, the destination image will have the same depth as the source.
:param rowKernel: Coefficients for filtering each row.
:param kernelX: Coefficients for filtering each row.
:param columnKernel: Coefficients for filtering each column.
:param kernelY: Coefficients for filtering each column.
:param anchor: Anchor position within the kernel. The default value :math:`(-1, -1)` means that the anchor is at the kernel center.
@ -1428,7 +1428,7 @@ Applies a separable linear filter to an image.
:param borderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
The function applies a separable linear filter to the image. That is, first, every row of ``src`` is filtered with the 1D kernel ``rowKernel`` . Then, every column of the result is filtered with the 1D kernel ``columnKernel`` . The final result shifted by ``delta`` is stored in ``dst`` .
The function applies a separable linear filter to the image. That is, first, every row of ``src`` is filtered with the 1D kernel ``kernelX`` . Then, every column of the result is filtered with the 1D kernel ``kernelY`` . The final result shifted by ``delta`` is stored in ``dst`` .
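For illustration, a minimal Python sketch that builds the row and column kernels with :ocv:func:`getGaussianKernel` (the file name is hypothetical)::

    import cv2

    img = cv2.imread("lena.png", 0)
    kx = cv2.getGaussianKernel(5, -1)                        # 5-tap row kernel, sigma derived from ksize
    ky = cv2.getGaussianKernel(5, -1)                        # 5-tap column kernel
    blurred = cv2.sepFilter2D(img, -1, kx, ky)               # equivalent to cv2.GaussianBlur(img, (5, 5), 0)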
.. seealso::
@ -1444,7 +1444,7 @@ Smooth
------
Smooths the image in one of several ways.
.. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int param1=3, int param2=0, double param3=0, double param4=0)
.. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int size1=3, int size2=0, double sigma1=0, double sigma2=0 )
.. ocv:pyoldfunction:: cv.Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)-> None
@ -1454,32 +1454,32 @@ Smooths the image in one of several ways.
:param smoothtype: Type of the smoothing:
* **CV_BLUR_NO_SCALE** linear convolution with :math:`\texttt{param1}\times\texttt{param2}` box kernel (all 1's). If you want to smooth different pixels with different-size box kernels, you can use the integral image that is computed using :ocv:func:`integral`
* **CV_BLUR_NO_SCALE** linear convolution with :math:`\texttt{size1}\times\texttt{size2}` box kernel (all 1's). If you want to smooth different pixels with different-size box kernels, you can use the integral image that is computed using :ocv:func:`integral`
* **CV_BLUR** linear convolution with :math:`\texttt{param1}\times\texttt{param2}` box kernel (all 1's) with subsequent scaling by :math:`1/(\texttt{param1}\cdot\texttt{param2})`
* **CV_BLUR** linear convolution with :math:`\texttt{size1}\times\texttt{size2}` box kernel (all 1's) with subsequent scaling by :math:`1/(\texttt{size1}\cdot\texttt{size2})`
* **CV_GAUSSIAN** linear convolution with a :math:`\texttt{param1}\times\texttt{param2}` Gaussian kernel
* **CV_GAUSSIAN** linear convolution with a :math:`\texttt{size1}\times\texttt{size2}` Gaussian kernel
* **CV_MEDIAN** median filter with a :math:`\texttt{param1}\times\texttt{param1}` square aperture
* **CV_MEDIAN** median filter with a :math:`\texttt{size1}\times\texttt{size1}` square aperture
* **CV_BILATERAL** bilateral filter with a :math:`\texttt{param1}\times\texttt{param1}` square aperture, color sigma= ``param3`` and spatial sigma= ``param4`` . If ``param1=0`` , the aperture square side is set to ``cvRound(param4*1.5)*2+1`` . Information about bilateral filtering can be found at http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.html
* **CV_BILATERAL** bilateral filter with a :math:`\texttt{size1}\times\texttt{size1}` square aperture, color sigma= ``sigma1`` and spatial sigma= ``sigma2`` . If ``size1=0`` , the aperture square side is set to ``cvRound(sigma2*1.5)*2+1`` . Information about bilateral filtering can be found at http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.html
:param param1: The first parameter of the smoothing operation, the aperture width. Must be a positive odd number (1, 3, 5, ...)
:param size1: The first parameter of the smoothing operation, the aperture width. Must be a positive odd number (1, 3, 5, ...)
:param param2: The second parameter of the smoothing operation, the aperture height. Ignored by ``CV_MEDIAN`` and ``CV_BILATERAL`` methods. In the case of simple scaled/non-scaled and Gaussian blur if ``param2`` is zero, it is set to ``param1`` . Otherwise it must be a positive odd number.
:param size2: The second parameter of the smoothing operation, the aperture height. Ignored by ``CV_MEDIAN`` and ``CV_BILATERAL`` methods. In the case of simple scaled/non-scaled and Gaussian blur if ``size2`` is zero, it is set to ``size1`` . Otherwise it must be a positive odd number.
:param param3: In the case of a Gaussian parameter this parameter may specify Gaussian :math:`\sigma` (standard deviation). If it is zero, it is calculated from the kernel size:
:param sigma1: In the case of a Gaussian parameter this parameter may specify Gaussian :math:`\sigma` (standard deviation). If it is zero, it is calculated from the kernel size:
.. math::
\sigma = 0.3 (n/2 - 1) + 0.8 \quad \text{where} \quad n= \begin{array}{l l} \mbox{\texttt{param1} for horizontal kernel} \\ \mbox{\texttt{param2} for vertical kernel} \end{array}
\sigma = 0.3 (n/2 - 1) + 0.8 \quad \text{where} \quad n= \begin{array}{l l} \mbox{\texttt{size1} for horizontal kernel} \\ \mbox{\texttt{size2} for vertical kernel} \end{array}
Using standard sigma for small kernels ( :math:`3\times 3` to :math:`7\times 7` ) gives better speed. If ``param3`` is not zero, while ``param1`` and ``param2`` are zeros, the kernel size is calculated from the sigma (to provide accurate enough operation).
Using standard sigma for small kernels ( :math:`3\times 3` to :math:`7\times 7` ) gives better speed. If ``sigma1`` is not zero, while ``size1`` and ``size2`` are zeros, the kernel size is calculated from the sigma (to provide accurate enough operation).
The function smooths an image using one of several methods. Each of the methods has some features and restrictions listed below:
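For reference, a hedged Python sketch of the cv2 counterparts of the ``cvSmooth`` modes (the file name and parameter values are illustrative)::

    import cv2

    img = cv2.imread("lena.png")
    box       = cv2.blur(img, (5, 5))                        # CV_BLUR
    gauss     = cv2.GaussianBlur(img, (5, 5), 0)             # CV_GAUSSIAN
    median    = cv2.medianBlur(img, 5)                       # CV_MEDIAN
    bilateral = cv2.bilateralFilter(img, 9, 75, 75)          # CV_BILATERAL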
@ -1496,11 +1496,12 @@ Sobel
---------
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
.. ocv:function:: void Sobel( InputArray src, OutputArray dst, int ddepth, int xorder, int yorder, int ksize=3, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:function:: void Sobel( InputArray src, OutputArray dst, int ddepth, int dx, int dy, int ksize=3, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
.. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int apertureSize=3 )
.. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Sobel(src, dst, xorder, yorder, apertureSize=3)-> None
:param src: Source image.
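A minimal Python sketch of the cv2 call above; a floating-point destination depth is used so that negative derivatives are not clipped (the file name is hypothetical)::

    import cv2

    img = cv2.imread("lena.png", 0)
    grad_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
    magnitude = cv2.magnitude(grad_x, grad_y)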
@ -1582,7 +1583,7 @@ Scharr
----------
Calculates the first x- or y- image derivative using Scharr operator.
.. ocv:function:: void Scharr( InputArray src, OutputArray dst, int ddepth, int xorder, int yorder, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:function:: void Scharr( InputArray src, OutputArray dst, int ddepth, int dx, int dy, double scale=1, double delta=0, int borderType=BORDER_DEFAULT )
.. ocv:pyfunction:: cv2.Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst
@ -1592,9 +1593,9 @@ Calculates the first x- or y- image derivative using Scharr operator.
:param ddepth: Destination image depth. See :ocv:func:`Sobel` for the list of supported combination of ``src.depth()`` and ``ddepth``.
:param xorder: Order of the derivative x.
:param dx: Order of the derivative x.
:param yorder: Order of the derivative y.
:param dy: Order of the derivative y.
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
@ -1606,13 +1607,13 @@ The function computes the first x- or y- spatial image derivative using the Scha
.. math::
\texttt{Scharr(src, dst, ddepth, xorder, yorder, scale, delta, borderType)}
\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}
is equivalent to
.. math::
\texttt{Sobel(src, dst, ddepth, xorder, yorder, CV\_SCHARR, scale, delta, borderType)} .
\texttt{Sobel(src, dst, ddepth, dx, dy, CV\_SCHARR, scale, delta, borderType)} .
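The equivalence can be checked from Python; ``ksize=-1`` (``CV_SCHARR``) selects the 3x3 Scharr kernel in :ocv:func:`Sobel` (the file name is hypothetical)::

    import cv2

    img = cv2.imread("lena.png", 0)
    dx_scharr = cv2.Scharr(img, cv2.CV_32F, 1, 0)
    dx_sobel  = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=-1)   # CV_SCHARR == -1
    print cv2.norm(dx_scharr, dx_sobel, cv2.NORM_INF)        # expected to print 0.0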
.. seealso::


@ -77,11 +77,13 @@ getAffineTransform
----------------------
Calculates an affine transform from three pairs of the corresponding points.
.. ocv:function:: Mat getAffineTransform( const Point2f* src, const Point2f* dst )
.. ocv:function:: Mat getAffineTransform( InputArray src, InputArray dst )
.. ocv:function:: Mat getAffineTransform( const Point2f src[], const Point2f dst[] )
.. ocv:pyfunction:: cv2.getAffineTransform(src, dst) -> retval
.. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* mapMatrix )
.. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f * src, const CvPoint2D32f * dst, CvMat * map_matrix )
.. ocv:pyoldfunction:: cv.GetAffineTransform(src, dst, mapMatrix)-> None
@ -114,11 +116,13 @@ getPerspectiveTransform
---------------------------
Calculates a perspective transform from four pairs of the corresponding points.
.. ocv:function:: Mat getPerspectiveTransform( const Point2f* src, const Point2f* dst )
.. ocv:function:: Mat getPerspectiveTransform( InputArray src, InputArray dst )
.. ocv:function:: Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] )
.. ocv:pyfunction:: cv2.getPerspectiveTransform(src, dst) -> retval
.. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* mapMatrix )
.. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetPerspectiveTransform(src, dst, mapMatrix)-> None
@ -151,7 +155,7 @@ getRectSubPix
-----------------
Retrieves a pixel rectangle from an image with sub-pixel accuracy.
.. ocv:function:: void getRectSubPix( InputArray image, Size patchSize, Point2f center, OutputArray dst, int patchType=-1 )
.. ocv:function:: void getRectSubPix( InputArray image, Size patchSize, Point2f center, OutputArray patch, int patchType=-1 )
.. ocv:pyfunction:: cv2.getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch
@ -196,7 +200,7 @@ Calculates an affine matrix of 2D rotation.
.. ocv:pyfunction:: cv2.getRotationMatrix2D(center, angle, scale) -> retval
.. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* mapMatrix )
.. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetRotationMatrix2D(center, angle, scale, mapMatrix)-> None
@ -206,7 +210,7 @@ Calculates an affine matrix of 2D rotation.
:param scale: Isotropic scale factor.
:param mapMatrix: The output affine transformation, 2x3 floating-point matrix.
:param map_matrix: The output affine transformation, 2x3 floating-point matrix.
The function calculates the following matrix:
@ -428,10 +432,12 @@ Applies an affine transformation to an image.
.. ocv:pyfunction:: cv2.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
.. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* mapMatrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.WarpAffine(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
.. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* mapMatrix )
.. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetQuadrangleSubPix(src, dst, mapMatrix)-> None
:param src: Source image.
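A minimal Python sketch combining :ocv:func:`getRotationMatrix2D` with the call above (the file name and rotation angle are illustrative)::

    import cv2

    img = cv2.imread("lena.png")
    h, w = img.shape[:2]
    M = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), 30, 1.0) # rotate 30 degrees about the center
    rotated = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_LINEAR)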
@ -477,7 +483,8 @@ Applies a perspective transformation to an image.
.. ocv:pyfunction:: cv2.warpPerspective(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
.. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* mapMatrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.WarpPerspective(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
:param src: Source image.
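A minimal Python sketch combining :ocv:func:`getPerspectiveTransform` with the call above; the corner coordinates and file name are hypothetical::

    import cv2
    import numpy as np

    img = cv2.imread("document.jpg")
    src = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
    dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, (300, 300))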
@ -524,8 +531,8 @@ Computes the undistortion and rectification transformation map.
.. ocv:pyfunction:: cv2.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type[, map1[, map2]]) -> map1, map2
.. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* cameraMatrix, const CvMat* distCoeffs, const CvMat* R, const CvMat* newCameraMatrix, CvArr* map1, CvArr* map2 )
.. ocv:cfunction:: void cvInitUndistortMap( const CvMat* cameraMatrix, const CvMat* distCoeffs, CvArr* map1, CvArr* map2 )
.. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat * R, const CvMat* new_camera_matrix, CvArr* mapx, CvArr* mapy )
.. ocv:cfunction:: void cvInitUndistortMap( const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvArr* mapx, CvArr* mapy )
.. ocv:pyoldfunction:: cv.InitUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, map1, map2)-> None
.. ocv:pyoldfunction:: cv.InitUndistortMap(cameraMatrix, distCoeffs, map1, map2)-> None
@ -621,7 +628,7 @@ Transforms an image to compensate for lens distortion.
.. ocv:pyfunction:: cv2.undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]) -> dst
.. ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* cameraMatrix, const CvMat* distCoeffs, const CvMat* newCameraMatrix=NULL )
.. ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* camera_matrix, const CvMat* distortion_coeffs, const CvMat* new_camera_matrix=0 )
.. ocv:pyoldfunction:: cv.Undistort2(src, dst, cameraMatrix, distCoeffs)-> None
@ -660,7 +667,7 @@ Computes the ideal point coordinates from the observed point coordinates.
.. ocv:function:: void undistortPoints( InputArray src, OutputArray dst, InputArray cameraMatrix, InputArray distCoeffs, InputArray R=noArray(), InputArray P=noArray())
.. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* cameraMatrix, const CvMat* distCoeffs, const CvMat* R=NULL, const CvMat* P=NULL)
.. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat* R=0, const CvMat* P=0 )
.. ocv:pyoldfunction:: cv.UndistortPoints(src, dst, cameraMatrix, distCoeffs, R=None, P=None)-> None
:param src: Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).


@ -9,22 +9,22 @@ calcHist
------------
Calculates a histogram of a set of arrays.
.. ocv:function:: void calcHist( const Mat* arrays, int narrays, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
.. ocv:function:: void calcHist( const Mat* images, int nimages, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
.. ocv:function:: void calcHist( const Mat* arrays, int narrays, const int* channels, InputArray mask, SparseMat& hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
.. ocv:function:: void calcHist( const Mat* images, int nimages, const int* channels, InputArray mask, SparseMat& hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
.. ocv:pyfunction:: cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) -> hist
.. ocv:cfunction:: void cvCalcHist( IplImage** image, CvHistogram* hist, int accumulate=0, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.CalcHist(image, hist, accumulate=0, mask=None)-> None
:param arrays: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
:param narrays: Number of source arrays.
:param nimages: Number of source images.
:param channels: List of the ``dims`` channels used to compute the histogram. The first array channels are numerated from 0 to ``arrays[0].channels()-1`` , the second array channels are counted from ``arrays[0].channels()`` to ``arrays[0].channels() + arrays[1].channels()-1``, and so on.
:param channels: List of the ``dims`` channels used to compute the histogram. The first array channels are numerated from 0 to ``images[0].channels()-1`` , the second array channels are counted from ``images[0].channels()`` to ``images[0].channels() + images[1].channels()-1``, and so on.
:param mask: Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as ``arrays[i]`` . The non-zero mask elements mark the array elements counted in the histogram.
:param mask: Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as ``images[i]`` . The non-zero mask elements mark the array elements counted in the histogram.
:param hist: Output histogram, which is a dense or sparse ``dims`` -dimensional array.
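For illustration, a short Python sketch computing a 2D hue-saturation histogram (the file name is hypothetical)::

    import cv2

    img = cv2.imread("lena.png")
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1], None, [30, 32], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)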
@ -106,24 +106,24 @@ calcBackProject
-------------------
Calculates the back projection of a histogram.
.. ocv:function:: void calcBackProject( const Mat* arrays, int narrays, const int* channels, InputArray hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
.. ocv:function:: void calcBackProject( const Mat* images, int nimages, const int* channels, InputArray hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
.. ocv:function:: void calcBackProject( const Mat* arrays, int narrays, const int* channels, const SparseMat& hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
.. ocv:function:: void calcBackProject( const Mat* images, int nimages, const int* channels, const SparseMat& hist, OutputArray backProject, const float** ranges, double scale=1, bool uniform=true )
.. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges[, dst[, scale]]) -> dst
.. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges, scale[, dst]) -> dst
.. ocv:cfunction:: void cvCalcBackProject( IplImage** image, CvArr* backProject, const CvHistogram* hist )
.. ocv:pyoldfunction:: cv.CalcBackProject(image, backProject, hist)-> None
.. ocv:pyoldfunction:: cv.CalcBackProject(image, back_project, hist) -> None
:param arrays: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
:param narrays: Number of source arrays.
:param nimages: Number of source images.
:param channels: The list of channels used to compute the back projection. The number of channels must match the histogram dimensionality. The first array channels are numerated from 0 to ``arrays[0].channels()-1`` , the second array channels are counted from ``arrays[0].channels()`` to ``arrays[0].channels() + arrays[1].channels()-1``, and so on.
:param channels: The list of channels used to compute the back projection. The number of channels must match the histogram dimensionality. The first array channels are numerated from 0 to ``images[0].channels()-1`` , the second array channels are counted from ``images[0].channels()`` to ``images[0].channels() + images[1].channels()-1``, and so on.
:param hist: Input histogram that can be dense or sparse.
:param backProject: Destination back projection array that is a single-channel array of the same size and depth as ``arrays[0]`` .
:param backProject: Destination back projection array that is a single-channel array of the same size and depth as ``images[0]`` .
:param ranges: Array of arrays of the histogram bin boundaries in each dimension. See :ocv:func:`calcHist` .
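A hedged Python sketch of the cv2 call above: a hue-saturation histogram of a target image is back-projected onto another frame (both file names are hypothetical)::

    import cv2

    target = cv2.imread("target.png")
    frame  = cv2.imread("frame.png")
    hsv_t = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    hsv_f = cv2.cvtColor(frame,  cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv_t], [0, 1], None, [30, 32], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    backproj = cv2.calcBackProject([hsv_f], [0, 1], hist, [0, 180, 0, 256], 1)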
@ -225,8 +225,9 @@ Computes the "minimal work" distance between two weighted point configurations.
.. ocv:function:: float EMD( InputArray signature1, InputArray signature2, int distType, InputArray cost=noArray(), float* lowerBound=0, OutputArray flow=noArray() )
.. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distType, CvDistanceFunction distFunc=NULL, const CvArr* cost=NULL, CvArr* flow=NULL, float* lowerBound=NULL, void* userdata=NULL )
.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distType, distFunc=None, cost=None, flow=None, lowerBound=None, userdata=None) -> float
.. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distance_type, CvDistanceFunction distance_func=NULL, const CvArr* cost_matrix=NULL, CvArr* flow=NULL, float* lower_bound=NULL, void* userdata=NULL )
.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distance_type, distance_func=None, cost_matrix=None, flow=None, lower_bound=None, userdata=None) -> float
:param signature1: First signature, a :math:`\texttt{size1}\times \texttt{dims}+1` floating-point matrix. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a single column (weights only) if the user-defined cost matrix is used.
@ -234,7 +235,7 @@ Computes the "minimal work" distance between two weighted point configurations.
:param distType: Used metric. ``CV_DIST_L1, CV_DIST_L2`` , and ``CV_DIST_C`` stand for one of the standard metrics. ``CV_DIST_USER`` means that a pre-calculated cost matrix ``cost`` is used.
:param distFunc: Custom distance function supported by the old interface. ``CvDistanceFunction`` is defined as: ::
:param distance_func: Custom distance function supported by the old interface. ``CvDistanceFunction`` is defined as: ::
typedef float (CV_CDECL * CvDistanceFunction)( const float* a,
const float* b, void* userdata );
@ -301,7 +302,7 @@ Locates a template within an image by using a histogram comparison.
.. ocv:cfunction:: void cvCalcBackProjectPatch( IplImage** images, CvArr* dst, CvSize patch_size, CvHistogram* hist, int method, double factor )
.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patchSize, hist, method, factor)-> None
.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patch_size, hist, method, factor)-> None
:param images: Source images (though, you may pass CvMat** as well).
@ -324,15 +325,15 @@ CalcProbDensity
---------------
Divides one histogram by another.
.. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dsthist, double scale=255 )
.. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dst_hist, double scale=255 )
.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dsthist, scale=255)-> None
.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dst_hist, scale=255) -> None
:param hist1: First histogram (the divisor).
:param hist2: Second histogram.
:param dsthist: Destination histogram.
:param dst_hist: Destination histogram.
:param scale: Scale factor for the destination histogram.
@ -375,7 +376,7 @@ Creates a histogram.
.. ocv:cfunction:: CvHistogram* cvCreateHist( int dims, int* sizes, int type, float** ranges=NULL, int uniform=1 )
.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges, uniform=1) -> hist
.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges=None, uniform=1) -> hist
:param dims: Number of histogram dimensions.
@ -405,51 +406,13 @@ Creates a histogram.
The function creates a histogram of the specified size and returns a pointer to the created histogram. If the array ``ranges`` is 0, the histogram bin ranges must be specified later via the function :ocv:cfunc:`SetHistBinRanges`. Though :ocv:cfunc:`CalcHist` and :ocv:cfunc:`CalcBackProject` may process 8-bit images without setting bin ranges, they assume the bins are equally spaced over the 0 to 255 range.
GetHistValue\_?D
----------------
Returns a pointer to the histogram bin.
.. ocv:cfunction:: float* cvGetHistValue_1D(CvHistogram* hist, int idx0)
.. ocv:cfunction:: float* cvGetHistValue_2D(CvHistogram* hist, int idx0, int idx1)
.. ocv:cfunction:: float* cvGetHistValue_3D(CvHistogram* hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float* cvGetHistValue_nD(CvHistogram* hist, int* idx)
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
::
#define cvGetHistValue_1D( hist, idx0 )
((float*)(cvPtr1D( (hist)->bins, (idx0), 0 )))
#define cvGetHistValue_2D( hist, idx0, idx1 )
((float*)(cvPtr2D( (hist)->bins, (idx0), (idx1), 0 )))
#define cvGetHistValue_3D( hist, idx0, idx1, idx2 )
((float*)(cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0 )))
#define cvGetHistValue_nD( hist, idx )
((float*)(cvPtrND( (hist)->bins, (idx), 0 )))
..
The macros ``GetHistValue`` return a pointer to the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function creates a new bin and sets it to 0, unless it exists already.
GetMinMaxHistValue
------------------
Finds the minimum and maximum histogram bins.
.. ocv:cfunction:: void cvGetMinMaxHistValue( const CvHistogram* hist, float* min_value, float* max_value, int* min_idx=NULL, int* max_idx=NULL )
.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (minValue, maxValue, minIdx, maxIdx)
.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (min_value, max_value, min_idx, max_idx)
:param hist: Histogram.
@ -498,32 +461,6 @@ Normalizes the histogram.
The function normalizes the histogram bins by scaling them so that the sum of the bins becomes equal to ``factor``.
QueryHistValue*D
----------------
Queries the value of the histogram bin.
.. ocv:cfunction:: float cvQueryHistValue_1D(CvHistogram* hist, int idx0)
.. ocv:cfunction:: float cvQueryHistValue_2D(CvHistogram* hist, int idx0, int idx1)
.. ocv:cfunction:: float cvQueryHistValue_3D(CvHistogram* hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvQueryHistValue_nD(CvHistogram* hist, const int* idx)
.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float
.. ocv:pyoldfunction:: cv.QueryHistValueND(hist, idx) -> float
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
The macros return the value of the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the function returns 0. If the bin is not present in the histogram, no new bin is created.
ReleaseHist
-----------
Releases the histogram.
@ -555,7 +492,7 @@ ThreshHist
Thresholds the histogram.
.. ocv:cfunction:: void cvThreshHist( CvHistogram* hist, double threshold )
.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold)-> None
.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold) -> None
:param hist: Pointer to the histogram.
@ -564,28 +501,4 @@ Thresholds the histogram.
The function clears histogram bins that are below the specified threshold.
CalcPGH
-------
Calculates a pair-wise geometrical histogram for a contour.
.. ocv:cfunction:: void cvCalcPGH( const CvSeq* contour, CvHistogram* hist )
.. ocv:pyoldfunction:: cv.CalcPGH(contour, hist)-> None
:param contour: Input contour. Currently, only integer point coordinates are allowed.
:param hist: Calculated histogram. It must be two-dimensional.
The function calculates a 2D pair-wise geometrical histogram (PGH), described in [Iivarinen97]_ for the contour. The algorithm considers every pair of contour
edges. The angle between the edges and the minimum/maximum distances
are determined for every pair. To do this, each of the edges in turn
is taken as the base, while the function loops through all the other
edges. When the base edge and any other edge are considered, the minimum
and maximum distances from the points on the non-base edge and line of
the base edge are selected. The angle between the edges defines the row
of the histogram in which all the bins that correspond to the distance
between the calculated minimum and maximum distances are incremented
(that is, the histogram is transposed relatively to the definition in the original paper). The histogram can be used for contour matching.
.. [RubnerSept98] Y. Rubner, C. Tomasi, L.J. Guibas. *The Earth Mover's Distance as a Metric for Image Retrieval*. Technical Report STAN-CS-TN-98-86, Department of Computer Science, Stanford University, September 1998.
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Särelä, and Ari Visa. *Comparison of Combined Shape Descriptors for Irregular Objects*, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html


@ -12,8 +12,9 @@ Applies an adaptive threshold to an array.
.. ocv:pyfunction:: cv2.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst
.. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double maxValue, int adaptiveMethod=CV_ADAPTIVE_THRESH_MEAN_C, int thresholdType=CV_THRESH_BINARY, int blockSize=3, double param1=5 )
.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptiveMethod=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None
.. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, int adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, int threshold_type=CV_THRESH_BINARY, int block_size=3, double param1=5 )
.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None
:param src: Source 8-bit single-channel image.
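A minimal Python sketch of the cv2 call above (the file name, block size and constant ``C`` are illustrative)::

    import cv2

    img = cv2.imread("page.png", 0)
    binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 11, 2)  # blockSize=11, C=2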
@ -414,11 +415,11 @@ Calculates the distance to the closest zero pixel for each pixel of the source i
.. ocv:function:: void distanceTransform( InputArray src, OutputArray dst, OutputArray labels, int distanceType, int maskSize, int labelType=DIST_LABEL_CCOMP )
.. ocv:pyfunction:: cv2.distanceTransform(src, distanceType, maskSize[, dst[, labels[, labelType=cv2.DIST_LABEL_CCOMP]]]) -> dst, labels
.. ocv:pyfunction:: cv2.distanceTransform(src, distanceType, maskSize[, dst]) -> dst
.. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distanceType=CV_DIST_L2, int maskSize=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP )
.. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distance_type=CV_DIST_L2, int mask_size=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP )
.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distanceType=CV_DIST_L2, maskSize=3, mask=None, labels=None)-> None
.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distance_type=CV_DIST_L2, mask_size=3, mask=None, labels=None) -> None
:param src: 8-bit, single-channel (binary) source image.
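For illustration, a hedged Python sketch of the simpler cv2 variant; in the 2.4 bindings the metric constant lives in ``cv2.cv`` (``cv2.DIST_L2`` in later versions), and the file name is hypothetical::

    import cv2

    img = cv2.imread("shapes.png", 0)
    _, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    dist = cv2.distanceTransform(binary, cv2.cv.CV_DIST_L2, 5)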
@ -483,14 +484,14 @@ floodFill
-------------
Fills a connected component with the given color.
.. ocv:function:: int floodFill( InputOutputArray image, Point seed, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
.. ocv:function:: int floodFill( InputOutputArray image, Point seedPoint, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
.. ocv:function:: int floodFill( InputOutputArray image, InputOutputArray mask, Point seed, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
.. ocv:function:: int floodFill( InputOutputArray image, InputOutputArray mask, Point seedPoint, Scalar newVal, Rect* rect=0, Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), int flags=4 )
.. ocv:pyfunction:: cv2.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]]) -> retval, rect
.. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seedPoint, CvScalar newVal, CvScalar loDiff=cvScalarAll(0), CvScalar upDiff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.FloodFill(image, seedPoint, newVal, loDiff=(0, 0, 0, 0), upDiff=(0, 0, 0, 0), flags=4, mask=None)-> comp
.. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seed_point, CvScalar new_val, CvScalar lo_diff=cvScalarAll(0), CvScalar up_diff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.FloodFill(image, seed_point, new_val, lo_diff=(0, 0, 0, 0), up_diff=(0, 0, 0, 0), flags=4, mask=None)-> comp
:param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below.
@ -498,7 +499,7 @@ Fills a connected component with the given color.
.. note:: Since the mask is larger than the filled image, a pixel :math:`(x, y)` in ``image`` corresponds to the pixel :math:`(x+1, y+1)` in the ``mask`` .
:param seed: Starting point.
:param seedPoint: Starting point.
:param newVal: New value of the repainted domain pixels.
@ -528,7 +529,7 @@ The functions ``floodFill`` fill a connected component starting from the seed po
.. math::
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)+ \texttt{upDiff}
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}
in case of a grayscale image and fixed range
@ -555,17 +556,17 @@ The functions ``floodFill`` fill a connected component starting from the seed po
.. math::
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_r+ \texttt{upDiff} _r,
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,
.. math::
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_g+ \texttt{upDiff} _g
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g
and
.. math::
\texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seed} .x, \texttt{seed} .y)_b+ \texttt{upDiff} _b
\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b
in case of a color image and fixed range
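For illustration, a minimal C++ sketch of the fixed-range grayscale case described above might look as follows (the input file name, seed point, tolerances, and fill value are arbitrary placeholders): ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <iostream>

    using namespace cv;

    int main()
    {
        Mat img = imread("input.png", 0);      // hypothetical 8-bit grayscale image
        if( img.empty() )
            return -1;

        Point seedPoint(100, 100);
        Rect filledRect;

        // fill every pixel connected to seedPoint whose value stays within
        // [img(seedPoint) - loDiff, img(seedPoint) + upDiff] (fixed range)
        int area = floodFill(img, seedPoint, Scalar(255), &filledRect,
                             Scalar(20), Scalar(20), 4 | FLOODFILL_FIXED_RANGE);

        std::cout << "filled " << area << " pixels" << std::endl;
        imshow("filled", img);
        waitKey(0);
        return 0;
    }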
@ -588,11 +589,11 @@ integral
------------
Calculates the integral of an image.
.. ocv:function:: void integral( InputArray image, OutputArray sum, int sdepth=-1 )
.. ocv:function:: void integral( InputArray src, OutputArray sum, int sdepth=-1 )
.. ocv:function:: void integral( InputArray image, OutputArray sum, OutputArray sqsum, int sdepth=-1 )
.. ocv:function:: void integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth=-1 )
.. ocv:function:: void integral( InputArray image, OutputArray sum, OutputArray sqsum, OutputArray tilted, int sdepth=-1 )
.. ocv:function:: void integral( InputArray src, OutputArray sum, OutputArray sqsum, OutputArray tilted, int sdepth=-1 )
.. ocv:pyfunction:: cv2.integral(src[, sum[, sdepth]]) -> sum
@ -600,7 +601,8 @@ Calculates the integral of an image.
.. ocv:pyfunction:: cv2.integral3(src[, sum[, sqsum[, tilted[, sdepth]]]]) -> sum, sqsum, tilted
.. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tiltedSum=NULL )
.. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tilted_sum=NULL )
.. ocv:pyoldfunction:: cv.Integral(image, sum, sqsum=None, tiltedSum=None)-> None
:param image: Source image as :math:`W \times H` , 8-bit or floating-point (32f or 64f).
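As a quick illustration of how the summed-area table is laid out (one extra zero row and column, so ``sum`` has size :math:`(W+1) \times (H+1)`), a small self-contained sketch: ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <iostream>

    using namespace cv;

    int main()
    {
        // tiny synthetic 8-bit image, just for illustration
        Mat img = (Mat_<uchar>(3, 3) << 1, 2, 3,
                                        4, 5, 6,
                                        7, 8, 9);

        Mat sum;
        integral(img, sum, CV_32S);            // sum is 4x4, CV_32S

        // sum of the 2x2 block with the top-left corner at (0,0):
        int S = sum.at<int>(2, 2) - sum.at<int>(0, 2)
              - sum.at<int>(2, 0) + sum.at<int>(0, 0);

        std::cout << "block sum = " << S << std::endl;   // 1 + 2 + 4 + 5 = 12
        return 0;
    }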
@ -647,11 +649,12 @@ threshold
-------------
Applies a fixed-level threshold to each array element.
.. ocv:function:: double threshold( InputArray src, OutputArray dst, double thresh, double maxVal, int thresholdType )
.. ocv:function:: double threshold( InputArray src, OutputArray dst, double thresh, double maxval, int type )
.. ocv:pyfunction:: cv2.threshold(src, thresh, maxval, type[, dst]) -> retval, dst
.. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double maxValue, int thresholdType )
.. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double max_value, int threshold_type )
.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None
:param src: Source array (single-channel, 8-bit or 32-bit floating point).
@ -660,9 +663,9 @@ Applies a fixed-level threshold to each array element.
:param thresh: Threshold value.
:param maxVal: Maximum value to use with the ``THRESH_BINARY`` and ``THRESH_BINARY_INV`` thresholding types.
:param maxval: Maximum value to use with the ``THRESH_BINARY`` and ``THRESH_BINARY_INV`` thresholding types.
:param thresholdType: Thresholding type (see the details below).
:param type: Thresholding type (see the details below).
The function applies fixed-level thresholding
to a single-channel array. The function is typically used to get a
@ -670,19 +673,19 @@ bi-level (binary) image out of a grayscale image (
:ocv:func:`compare` could
also be used for this purpose) or for removing noise, that is, filtering
out pixels with too small or too large values. There are several
types of thresholding supported by the function. They are determined by ``thresholdType`` :
types of thresholding supported by the function. They are determined by ``type`` :
* **THRESH_BINARY**
.. math::
\texttt{dst} (x,y) = \fork{\texttt{maxVal}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
\texttt{dst} (x,y) = \fork{\texttt{maxval}}{if $\texttt{src}(x,y) > \texttt{thresh}$}{0}{otherwise}
* **THRESH_BINARY_INV**
.. math::
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{maxVal}}{otherwise}
\texttt{dst} (x,y) = \fork{0}{if $\texttt{src}(x,y) > \texttt{thresh}$}{\texttt{maxval}}{otherwise}
* **THRESH_TRUNC**
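A minimal sketch of the ``THRESH_BINARY`` case above (the file name and the chosen ``thresh``/``maxval`` values are arbitrary): ::

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>

    using namespace cv;

    int main()
    {
        Mat src = imread("input.png", 0);      // hypothetical 8-bit grayscale image
        if( src.empty() )
            return -1;

        Mat dst;
        // dst(x,y) = maxval where src(x,y) > thresh, 0 elsewhere
        threshold(src, dst, 128, 255, THRESH_BINARY);

        imshow("binary", dst);
        waitKey(0);
        return 0;
    }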
@ -748,11 +751,11 @@ grabCut
-------
Runs the GrabCut algorithm.
.. ocv:function:: void grabCut(InputArray image, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel, int iterCount, int mode )
.. ocv:function:: void grabCut( InputArray img, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel, int iterCount, int mode=GC_EVAL )
.. ocv:pyfunction:: cv2.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount[, mode]) -> None
:param image: Input 8-bit 3-channel image.
:param img: Input 8-bit 3-channel image.
:param mask: Input/output 8-bit single-channel mask. The mask is initialized by the function when ``mode`` is set to ``GC_INIT_WITH_RECT``. Its elements may have one of following values:
@ -11,8 +11,9 @@ Adds an image to the accumulator.
.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> None
.. ocv:cfunction:: void cvAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Acc(src, dst, mask=None)-> None
.. ocv:cfunction:: void cvAcc( const CvArr* image, CvArr* sum, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Acc(image, sum, mask=None) -> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
@ -46,8 +47,9 @@ Adds the square of a source image to the accumulator.
.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> None
.. ocv:cfunction:: void cvSquareAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.SquareAcc(src, dst, mask=None)-> None
.. ocv:cfunction:: void cvSquareAcc( const CvArr* image, CvArr* sqsum, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.SquareAcc(image, sqsum, mask=None) -> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
@ -79,8 +81,9 @@ Adds the per-element product of two input images to the accumulator.
.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> None
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MultiplyAcc(src1, src2, dst, mask=None)-> None
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MultiplyAcc(image1, image2, acc, mask=None)-> None
:param src1: First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
@ -114,8 +117,8 @@ Updates a running average.
.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> None
.. ocv:cfunction:: void cvRunningAvg( const CvArr* src, CvArr* dst, double alpha, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.RunningAvg(src, dst, alpha, mask=None)-> None
.. ocv:cfunction:: void cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.RunningAvg(image, acc, alpha, mask=None)-> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
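A sketch of the typical running-average use, for example as a crude background model (the camera index and ``alpha`` are arbitrary; any video source would do): ::

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>

    using namespace cv;

    int main()
    {
        VideoCapture cap(0);                       // hypothetical camera / video source
        if( !cap.isOpened() )
            return -1;

        Mat frame, avg, shown;
        for(;;)
        {
            cap >> frame;
            if( frame.empty() )
                break;
            if( avg.empty() )
                frame.convertTo(avg, CV_32F);      // the accumulator must be floating point

            // avg = (1 - alpha)*avg + alpha*frame
            accumulateWeighted(frame, avg, 0.05);

            avg.convertTo(shown, CV_8U);
            imshow("running average", shown);
            if( waitKey(30) >= 0 )
                break;
        }
        return 0;
    }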
@ -7,7 +7,7 @@ matchTemplate
-----------------
Compares a template against overlapped image regions.
.. ocv:function:: void matchTemplate( InputArray image, InputArray temp, OutputArray result, int method )
.. ocv:function:: void matchTemplate( InputArray image, InputArray templ, OutputArray result, int method )
.. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result
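The usual pattern, locating the best match with :ocv:func:`minMaxLoc`, might look like this (the file names are placeholders): ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>

    using namespace cv;

    int main()
    {
        Mat image = imread("scene.png"), templ = imread("patch.png");   // hypothetical files
        if( image.empty() || templ.empty() )
            return -1;

        Mat result;
        matchTemplate(image, templ, result, CV_TM_CCOEFF_NORMED);

        // the best match is the global maximum of the normalized correlation map
        double maxVal; Point maxLoc;
        minMaxLoc(result, 0, &maxVal, 0, &maxLoc);

        rectangle(image, maxLoc, maxLoc + Point(templ.cols, templ.rows), Scalar(0, 255, 0), 2);
        imshow("best match", image);
        waitKey(0);
        return 0;
    }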
@ -11,8 +11,9 @@ Calculates all of the moments up to the third order of a polygon or rasterized s
.. ocv:pyfunction:: cv2.moments(array[, binaryImage]) -> retval
.. ocv:cfunction:: void cvMoments( const CvArr* array, CvMoments* moments, int binary=0 )
.. ocv:pyoldfunction:: cv.Moments(array, binary=0) -> moments
.. ocv:cfunction:: void cvMoments( const CvArr* arr, CvMoments* moments, int binary=0 )
.. ocv:pyoldfunction:: cv.Moments(arr, binary=0) -> moments
:param array: Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( :math:`1 \times N` or :math:`N \times 1` ) of 2D points (``Point`` or ``Point2f`` ).
@ -89,11 +90,13 @@ HuMoments
-------------
Calculates seven Hu invariants.
.. ocv:function:: void HuMoments( const Moments& moments, double* hu )
.. ocv:function:: void HuMoments( const Moments& m, OutputArray hu )
.. ocv:pyfunction:: cv2.HuMoments(m) -> hu
.. ocv:function:: void HuMoments( const Moments& moments, double hu[7] )
.. ocv:cfunction:: void cvGetHuMoments( const CvMoments* moments, CvHuMoments* hu )
.. ocv:pyfunction:: cv2.HuMoments(m[, hu]) -> hu
.. ocv:cfunction:: void cvGetHuMoments( CvMoments* moments, CvHuMoments* hu_moments )
.. ocv:pyoldfunction:: cv.GetHuMoments(moments) -> hu
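A short sketch combining :ocv:func:`moments` and :ocv:func:`HuMoments` on a synthetic binary shape: ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <iostream>

    using namespace cv;

    int main()
    {
        // a simple filled rectangle as a rasterized shape
        Mat shape = Mat::zeros(100, 100, CV_8U);
        rectangle(shape, Point(20, 30), Point(80, 70), Scalar(255), CV_FILLED);

        Moments m = moments(shape, true);          // treat the image as binary
        double hu[7];
        HuMoments(m, hu);                          // seven translation/scale/rotation invariants

        std::cout << "m00 (pixel count) = " << m.m00 << std::endl;
        for( int i = 0; i < 7; i++ )
            std::cout << "hu[" << i << "] = " << hu[i] << std::endl;
        return 0;
    }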
@ -126,9 +129,9 @@ Finds contours in a binary image.
.. ocv:pyfunction:: cv2.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** firstContour, int headerSize=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0, 0) )
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) )
.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> cvseq
.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> contours
:param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours.
@ -163,87 +166,6 @@ The function retrieves contours from the binary image using the algorithm
.. note:: If you use the new Python interface then the ``CV_`` prefix has to be omitted in contour retrieval mode and contour approximation method parameters (for example, use ``cv2.RETR_LIST`` and ``cv2.CHAIN_APPROX_NONE`` parameters). If you use the old Python interface then these parameters have the ``CV_`` prefix (for example, use ``cv.CV_RETR_LIST`` and ``cv.CV_CHAIN_APPROX_NONE``).
drawContours
----------------
Draws contour outlines or filled contours.
.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() )
.. ocv:pyfunction:: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> None
.. ocv:cfunction:: void cvDrawContours( CvArr *img, CvSeq* contour, CvScalar externalColor, CvScalar holeColor, int maxLevel, int thickness=1, int lineType=8 )
.. ocv:pyoldfunction:: cv.DrawContours(img, contour, externalColor, holeColor, maxLevel, thickness=1, lineType=8, offset=(0, 0))-> None
:param image: Destination image.
:param contours: All the input contours. Each contour is stored as a point vector.
:param contourIdx: Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
:param color: Color of the contours.
:param thickness: Thickness of lines the contours are drawn with. If it is negative (for example, ``thickness=CV_FILLED`` ), the contour interiors are
drawn.
:param lineType: Line connectivity. See :ocv:func:`line` for details.
:param hierarchy: Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see ``maxLevel`` ).
:param maxLevel: Maximal level for drawn contours. If it is 0, only
the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is ``hierarchy`` available.
:param offset: Optional contour shift parameter. Shift all the drawn contours by the specified :math:`\texttt{offset}=(dx,dy)` .
:param contour: Pointer to the first contour.
:param externalColor: Color of external contours.
:param holeColor: Color of internal contours (holes).
The function draws contour outlines in the image if
:math:`\texttt{thickness} \ge 0` or fills the area bounded by the contours if
:math:`\texttt{thickness}<0` . The example below shows how to retrieve connected components from the binary image and label them: ::
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main( int argc, char** argv )
{
Mat src;
// the first command-line parameter must be a filename of the binary
// (black-n-white) image
if( argc != 2 || !(src=imread(argv[1], 0)).data)
return -1;
Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
src = src > 1;
namedWindow( "Source", 1 );
imshow( "Source", src );
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours( src, contours, hierarchy,
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
// iterate through all the top-level contours,
// draw each connected component with its own random color
int idx = 0;
for( ; idx >= 0; idx = hierarchy[idx][0] )
{
Scalar color( rand()&255, rand()&255, rand()&255 );
drawContours( dst, contours, idx, color, CV_FILLED, 8, hierarchy );
}
namedWindow( "Components", 1 );
imshow( "Components", dst );
waitKey(0);
}
approxPolyDP
----------------
@ -253,7 +175,7 @@ Approximates a polygonal curve(s) with the specified precision.
.. ocv:pyfunction:: cv2.approxPolyDP(curve, epsilon, closed[, approxCurve]) -> approxCurve
.. ocv:cfunction:: CvSeq* cvApproxPoly( const void* curve, int headerSize, CvMemStorage* storage, int method, double epsilon, int recursive=0 )
.. ocv:cfunction:: CvSeq* cvApproxPoly( const void* src_seq, int header_size, CvMemStorage* storage, int method, double eps, int recursive=0 )
:param curve: Input vector of 2D points stored in:
@ -269,7 +191,7 @@ Approximates a polygonal curve(s) with the specified precision.
:param closed: If true, the approximated curve is closed (its first and last vertices are connected). Otherwise, it is not closed.
:param headerSize: Header size of the approximated curve. Normally, ``sizeof(CvContour)`` is used.
:param header_size: Header size of the approximated curve. Normally, ``sizeof(CvContour)`` is used.
:param storage: Memory storage where the approximated curve is stored.
@ -287,11 +209,11 @@ ApproxChains
-------------
Approximates Freeman chain(s) with a polygonal curve.
.. ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* chain, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimalPerimeter=0, int recursive=0 )
.. ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* src_seq, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimal_perimeter=0, int recursive=0 )
.. ocv:pyoldfunction:: cv.ApproxChains(chain, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimalPerimeter=0, recursive=0)-> contours
.. ocv:pyoldfunction:: cv.ApproxChains(src_seq, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimal_perimeter=0, recursive=0)-> contours
:param chain: Pointer to the approximated Freeman chain that can refer to other chains.
:param src_seq: Pointer to the approximated Freeman chain that can refer to other chains.
:param storage: Storage location for the resulting polylines.
@ -299,7 +221,7 @@ Approximates Freeman chain(s) with a polygonal curve.
:param parameter: Method parameter (not used now).
:param minimalPerimeter: Approximates only those contours whose perimeters are not less than ``minimal_perimeter`` . Other chains are removed from the resulting structure.
:param minimal_perimeter: Approximates only those contours whose perimeters are not less than ``minimal_perimeter`` . Other chains are removed from the resulting structure.
:param recursive: Recursion flag. If it is non-zero, the function approximates all chains that can be obtained from ``chain`` by using the ``h_next`` or ``v_next`` links. Otherwise, the single input chain is approximated.
@ -314,8 +236,9 @@ Calculates a contour perimeter or a curve length.
.. ocv:pyfunction:: cv2.arcLength(curve, closed) -> retval
.. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int isClosed=-1 )
.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1)-> double
.. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int is_closed=-1 )
.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1) -> float
:param curve: Input vector of 2D points, stored in ``std::vector`` or ``Mat``.
@ -351,9 +274,9 @@ Calculates a contour area.
.. ocv:pyfunction:: cv2.contourArea(contour[, oriented]) -> retval
.. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ )
.. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ, int oriented=0 )
.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ)-> double
.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ) -> float
:param contour: Input vector of 2D points (contour vertices), stored in ``std::vector`` or ``Mat``.
@ -390,17 +313,17 @@ Finds the convex hull of a point set.
.. ocv:function:: void convexHull( InputArray points, OutputArray hull, bool clockwise=false, bool returnPoints=true )
.. ocv:pyfunction:: cv2.convexHull(points[, hull[, returnPoints[, clockwise]]]) -> hull
.. ocv:pyfunction:: cv2.convexHull(points[, hull[, clockwise[, returnPoints]]]) -> hull
.. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* storage=NULL, int orientation=CV_CLOCKWISE, int returnPoints=0 )
.. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0 )
.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, returnPoints=0)-> convexHull
.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, return_points=0) -> convexHull
:param points: Input 2D point set, stored in ``std::vector`` or ``Mat``.
:param hull: Output convex hull. It is either an integer vector of indices or vector of points. In the first case, the ``hull`` elements are 0-based indices of the convex hull points in the original array (since the set of convex hull points is a subset of the original point set). In the second case, ``hull`` elements are the convex hull points themselves.
:param storage: Output memory storage in the old API (``cvConvexHull2`` returns a sequence containing the convex hull points or their indices).
:param hull_storage: Output memory storage in the old API (``cvConvexHull2`` returns a sequence containing the convex hull points or their indices).
:param clockwise: Orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The usual screen coordinate system is assumed so that the origin is at the top-left corner, x axis is oriented to the right, and y axis is oriented downwards.
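A small self-contained sketch of the C++ interface, requesting the hull both as indices and as points: ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <iostream>
    #include <vector>

    using namespace cv;

    int main()
    {
        // a small synthetic point set
        std::vector<Point> points;
        points.push_back(Point(0, 0));
        points.push_back(Point(10, 0));
        points.push_back(Point(10, 10));
        points.push_back(Point(0, 10));
        points.push_back(Point(5, 5));     // interior point, not part of the hull

        std::vector<int> hullIdx;
        convexHull(points, hullIdx, false, false);   // 0-based indices of the hull points

        std::vector<Point> hullPts;
        convexHull(points, hullPts);                 // the hull points themselves

        std::cout << "hull size: " << hullPts.size() << std::endl;   // 4 for this set
        return 0;
    }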
@ -420,7 +343,7 @@ Finds the convexity defects of a contour.
.. ocv:function:: void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects )
.. ocv:pyfunction:: cv2.ConvexityDefects(contour, convexhull)-> convexityDefects
.. ocv:pyfunction:: cv2.convexityDefects(contour, convexhull[, convexityDefects]) -> convexityDefects
.. ocv:cfunction:: CvSeq* cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, CvMemStorage* storage=NULL )
@ -475,10 +398,11 @@ Fits a line to a 2D or 3D point set.
.. ocv:function:: void fitLine( InputArray points, OutputArray line, int distType, double param, double reps, double aeps )
.. ocv:pyfunction:: cv2.fitLine(points, distType, param, reps, aeps) -> line
.. ocv:pyfunction:: cv2.fitLine(points, distType, param, reps, aeps[, line]) -> line
.. ocv:cfunction:: void cvFitLine( const CvArr* points, int distType, double param, double reps, double aeps, float* line )
.. ocv:pyoldfunction:: cv.FitLine(points, distType, param, reps, aeps) -> line
.. ocv:cfunction:: void cvFitLine( const CvArr* points, int dist_type, double param, double reps, double aeps, float* line )
.. ocv:pyoldfunction:: cv.FitLine(points, dist_type, param, reps, aeps) -> line
:param points: Input vector of 2D or 3D points, stored in ``std::vector<>`` or ``Mat``.
@ -575,7 +499,7 @@ Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
.. ocv:cfunction:: CvBox2D cvMinAreaRect2( const CvArr* points, CvMemStorage* storage=NULL )
.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None)-> CvBox2D
.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None) -> Box2D
:param points: Input vector of 2D points, stored in:
@ -595,7 +519,7 @@ Finds a circle of the minimum area enclosing a 2D point set.
.. ocv:function:: void minEnclosingCircle( InputArray points, Point2f& center, float& radius )
.. ocv:pyfunction:: cv2.minEnclosingCircle(points, center, radius) -> None
.. ocv:pyfunction:: cv2.minEnclosingCircle(points) -> center, radius
.. ocv:cfunction:: int cvMinEnclosingCircle( const CvArr* points, CvPoint2D32f* center, float* radius )
@ -621,12 +545,12 @@ matchShapes
---------------
Compares two shapes.
.. ocv:function:: double matchShapes( InputArray object1, InputArray object2, int method, double parameter=0 )
.. ocv:function:: double matchShapes( InputArray contour1, InputArray contour2, int method, double parameter )
.. ocv:pyfunction:: cv2.matchShapes(contour1, contour2, method, parameter) -> retval
.. ocv:cfunction:: double cvMatchShapes( const void* object1, const void* object2, int method, double parameter=0 )
.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0)-> None
.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0) -> float
:param object1: First contour or grayscale image.
@ -680,8 +604,8 @@ Performs a point-in-contour test.
.. ocv:pyfunction:: cv2.pointPolygonTest(contour, pt, measureDist) -> retval
.. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measureDist )
.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measureDist)-> double
.. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measure_dist )
.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measure_dist) -> float
:param contour: Input contour.
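A minimal sketch with a synthetic square contour, using ``measureDist=true`` to obtain signed distances (positive inside, negative outside, zero on an edge; with ``measureDist=false`` only the sign is returned): ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <iostream>
    #include <vector>

    using namespace cv;

    int main()
    {
        // a 10x10 square contour
        std::vector<Point2f> contour;
        contour.push_back(Point2f(0, 0));
        contour.push_back(Point2f(10, 0));
        contour.push_back(Point2f(10, 10));
        contour.push_back(Point2f(0, 10));

        double inside  = pointPolygonTest(contour, Point2f(5, 5), true);    //  5
        double outside = pointPolygonTest(contour, Point2f(15, 5), true);   // -5

        std::cout << "inside: " << inside << ", outside: " << outside << std::endl;
        return 0;
    }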
@ -309,16 +309,16 @@ CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
//! returns the separable linear filter engine
CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
InputArray rowKernel, InputArray columnKernel,
Point _anchor=Point(-1,-1), double delta=0,
int _rowBorderType=BORDER_DEFAULT,
int _columnBorderType=-1,
const Scalar& _borderValue=Scalar());
Point anchor=Point(-1,-1), double delta=0,
int rowBorderType=BORDER_DEFAULT,
int columnBorderType=-1,
const Scalar& borderValue=Scalar());
//! returns the non-separable linear filter engine
CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
InputArray kernel, Point _anchor=Point(-1,-1),
double delta=0, int _rowBorderType=BORDER_DEFAULT,
int _columnBorderType=-1, const Scalar& _borderValue=Scalar());
double delta=0, int rowBorderType=BORDER_DEFAULT,
int columnBorderType=-1, const Scalar& borderValue=Scalar());
//! returns the Gaussian kernel with the specified parameters
CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );
@ -371,9 +371,9 @@ static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX
//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray kernel,
Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT,
int _columnBorderType=-1,
const Scalar& _borderValue=morphologyDefaultBorderValue());
Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT,
int columnBorderType=-1,
const Scalar& borderValue=morphologyDefaultBorderValue());
//! shape of the structuring element
enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };
@ -392,7 +392,7 @@ CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );
//! smooths the image using Gaussian filter.
CV_EXPORTS_W void GaussianBlur( InputArray src,
OutputArray dst, Size ksize,
double sigma1, double sigma2=0,
double sigmaX, double sigmaY=0,
int borderType=BORDER_DEFAULT );
//! smooths the image using bilateral filter
CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
@ -351,8 +351,8 @@ CVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader );
a tree of polygonal curves (contours) */
CVAPI(CvSeq*) cvApproxPoly( const void* src_seq,
int header_size, CvMemStorage* storage,
int method, double parameter,
int parameter2 CV_DEFAULT(0));
int method, double eps,
int recursive CV_DEFAULT(0));
/* Calculates perimeter of a contour or length of a part of contour */
CVAPI(double) cvArcLength( const void* curve,
@ -57,8 +57,6 @@ int icvIntersectLines( double x1, double dx1, double y1, double dy1,
double* t2 );
void icvCreateCenterNormalLine( CvSubdiv2DEdge edge, double* a, double* b, double* c );
void icvIntersectLines3( double* a0, double* b0, double* c0,
double* a1, double* b1, double* c1,
CvPoint2D32f* point );
@ -589,9 +589,8 @@ class ClassInfo(object):
self.jname = m[1:]
self.base = ''
if decl[1]:
self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
#self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
self.base = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "")
class ArgInfo(object):
def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ]
@ -3,6 +3,7 @@ allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d"
verbose = False
show_warnings = True
show_errors = True
show_critical_errors = True
params_blacklist = {
"fromarray" : ("object", "allowND"), # python only function
@ -14,6 +15,18 @@ params_blacklist = {
"gpu::swapChannels" : ("dstOrder") # parameter is not parsed correctly by the hdr_parser
}
ERROR_001_SECTIONFAILURE = 1
WARNING_002_HDRWHITESPACE = 2
ERROR_003_PARENTHESES = 3
WARNING_004_TABS = 4
ERROR_005_REDEFENITIONPARAM = 5
ERROR_006_REDEFENITIONFUNC = 6
WARNING_007_UNDOCUMENTEDPARAM = 7
WARNING_008_MISSINGPARAM = 8
WARNING_009_HDRMISMATCH = 9
ERROR_010_NOMODULE = 10
ERROR_011_EOLEXPECTED = 11
params_mapping = {
"composeRT" : {
"dr3dr1" : "d*d*",
@ -121,15 +134,16 @@ class RstParser(object):
self.parse_section(module_name, section_name, file_name, lineno, lines)
except AssertionError, args:
if show_errors:
print >> sys.stderr, "RST parser error: assertion in \"%s\" File: %s (line %s)" % (section_name, file_name, lineno)
print >> sys.stderr, "RST parser error E%03d: assertion in \"%s\" at %s:%s" % (ERROR_001_SECTIONFAILURE, section_name, file_name, lineno)
print >> sys.stderr, " Details: %s" % args
def parse_section(self, module_name, section_name, file_name, lineno, lines):
self.sections_total += 1
# skip sections having whitespace in name
if section_name.find(" ") >= 0 and section_name.find("::operator") < 0:
#if section_name.find(" ") >= 0 and section_name.find("::operator") < 0:
if section_name.find(" ") >= 0 and not bool(re.match(r"(\w+::)*operator\s*(\w+|>>|<<|\(\)|->|\+\+|--|=|==|\+=|-=)", section_name)):
if show_errors:
print "SKIPPED: \"%s\" File: %s (line %s)" % (section_name, file_name, lineno)
print >> sys.stderr, "RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno)
self.sections_skipped += 1
return
@ -271,8 +285,8 @@ class RstParser(object):
# endfor l in lines
if fdecl.balance != 0:
if show_errors:
print >> sys.stderr, "RST parser error: invalid parentheses balance in \"%s\" File: %s (line %s)" % (section_name, file_name, lineno)
if show_critical_errors:
print >> sys.stderr, "RST parser error E%03d: invalid parentheses balance in \"%s\" at %s:%s" % (ERROR_003_PARENTHESES, section_name, file_name, lineno)
return
# save last parameter if needed
@ -308,7 +322,7 @@ class RstParser(object):
if l.find("\t") >= 0:
whitespace_warnings += 1
if whitespace_warnings <= max_whitespace_warnings and show_warnings:
print >> sys.stderr, "RST parser warning: tab symbol instead of space is used at file %s (line %s)" % (doc, lineno)
print >> sys.stderr, "RST parser warning W%03d: tab symbol instead of space is used at %s:%s" % (WARNING_004_TABS, doc, lineno)
l = l.replace("\t", " ")
# handle first line
@ -343,12 +357,15 @@ class RstParser(object):
return section_name
def add_new_fdecl(self, func, decl):
if decl.fdecl.endswith(";"):
print >> sys.stderr, "RST parser error E%03d: unexpected semicolon at the end of declaration in \"%s\" at %s:%s" \
% (ERROR_011_EOLEXPECTED, func["name"], func["file"], func["line"])
decls = func.get("decls",[])
if (decl.lang == "C++" or decl.lang == "C"):
rst_decl = self.cpp_parser.parse_func_decl_no_wrap(decl.fdecl)
decls.append( (decl.lang, decl.fdecl, rst_decl) )
decls.append( [decl.lang, decl.fdecl, rst_decl] )
else:
decls.append( (decl.lang, decl.fdecl) )
decls.append( [decl.lang, decl.fdecl] )
func["decls"] = decls
def add_new_pdecl(self, func, decl):
@ -357,8 +374,8 @@ class RstParser(object):
if show_errors:
#check black_list
if decl.name not in params_blacklist.get(func["name"], []):
print >> sys.stderr, "RST parser error: redefinition of parameter \"%s\" in \"%s\" File: %s (line %s)" \
% (decl.name, func["name"], func["file"], func["line"])
print >> sys.stderr, "RST parser error E%03d: redefinition of parameter \"%s\" in \"%s\" at %s:%s" \
% (ERROR_005_REDEFENITIONPARAM, decl.name, func["name"], func["file"], func["line"])
else:
params[decl.name] = decl.comment
func["params"] = params
@ -368,7 +385,7 @@ class RstParser(object):
if skipped:
print >> out, "SKIPPED DEFINITION:"
print >> out, "name: %s" % (func.get("name","~empty~"))
print >> out, "file: %s (line %s)" % (func.get("file","~empty~"), func.get("line","~empty~"))
print >> out, "file: %s:%s" % (func.get("file","~empty~"), func.get("line","~empty~"))
print >> out, "is class: %s" % func.get("isclass",False)
print >> out, "is struct: %s" % func.get("isstruct",False)
print >> out, "module: %s" % func.get("module","~unknown~")
@ -395,8 +412,8 @@ class RstParser(object):
return False
if func["name"] in self.definitions:
if show_errors:
print >> sys.stderr, "RST parser error: \"%s\" from file: %s (line %s) is already documented in file: %s (line %s)" \
% (func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"])
print >> sys.stderr, "RST parser error E%03d: \"%s\" from: %s:%s is already documented at %s:%s" \
% (ERROR_006_REDEFENITIONFUNC, func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"])
return False
return self.validateParams(func)
@ -416,13 +433,13 @@ class RstParser(object):
# 1. all params are documented
for p in params:
if p not in documentedParams and show_warnings:
print >> sys.stderr, "RST parser warning: parameter \"%s\" of \"%s\" is undocumented. File: %s (line %s)" % (p, func["name"], func["file"], func["line"])
print >> sys.stderr, "RST parser warning W%03d: parameter \"%s\" of \"%s\" is undocumented. %s:%s" % (WARNING_007_UNDOCUMENTEDPARAM, p, func["name"], func["file"], func["line"])
# 2. only real params are documented
for p in documentedParams:
if p not in params and show_warnings:
if p not in params_blacklist.get(func["name"], []):
print >> sys.stderr, "RST parser warning: unexisting parameter \"%s\" of \"%s\" is documented. File: %s (line %s)" % (p, func["name"], func["file"], func["line"])
print >> sys.stderr, "RST parser warning W%03d: unexisting parameter \"%s\" of \"%s\" is documented at %s:%s" % (WARNING_008_MISSINGPARAM, p, func["name"], func["file"], func["line"])
return True
def normalize(self, func):
@ -489,11 +506,11 @@ class RstParser(object):
fname = fname.replace(".", "::")
if fname.startswith("cv::cv"):
if fname[6:] == func.get("name", ""):
if fname[6:] == func.get("name", "").replace("*", "_n"):
func["name"] = fname[4:]
func["method"] = fname[4:]
elif show_warnings:
print >> sys.stderr, "\"%s\" - section name is \"%s\" instead of \"%s\". File: %s (line %s)" % (fname, func["name"], fname[6:], func["file"], func["line"])
print >> sys.stderr, "RST parser warning W%03d: \"%s\" - section name is \"%s\" instead of \"%s\" at %s:%s" % (WARNING_009_HDRMISMATCH, fname, func["name"], fname[6:], func["file"], func["line"])
#self.print_info(func)
def normalizeText(self, s):
@ -710,7 +727,7 @@ if __name__ == "__main__":
module = sys.argv[1]
if module != "all" and not os.path.isdir(os.path.join(rst_parser_dir, "../" + module)):
print "Module \"" + module + "\" could not be found."
print "RST parser error E%03d: module \"%s\" could not be found." % (ERROR_010_NOMODULE, module)
exit(1)
parser = RstParser(hdr_parser.CppHeaderParser())
@ -13,7 +13,7 @@ descriptor extractors inherit the
CalonderDescriptorExtractor
---------------------------
.. ocv:class:: CalonderDescriptorExtractor
.. ocv:class:: CalonderDescriptorExtractor : public DescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`RTreeClassifier` class. ::
@ -5,7 +5,7 @@ Common Interfaces of Generic Descriptor Matchers
OneWayDescriptorMatcher
-----------------------
.. ocv:class:: OneWayDescriptorMatcher
.. ocv:class:: OneWayDescriptorMatcher : public GenericDescriptorMatcher
Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`OneWayDescriptorBase` class. ::
@ -64,7 +64,7 @@ Wrapping class for computing, matching, and classifying descriptors using the
FernDescriptorMatcher
---------------------
.. ocv:class:: FernDescriptorMatcher
.. ocv:class:: FernDescriptorMatcher : public GenericDescriptorMatcher
Wrapping class for computing, matching, and classifying descriptors using the
:ocv:class:`FernClassifier` class. ::
@ -8,7 +8,7 @@ This section describes obsolete ``C`` interface of EM algorithm. Details of the
CvEMParams
----------
.. ocv:class:: CvEMParams
.. ocv:struct:: CvEMParams
Parameters of the EM algorithm. All parameters are public. You can initialize them by a constructor and then override some of them directly if you want.
@ -18,7 +18,7 @@ The constructors
.. ocv:function:: CvEMParams::CvEMParams()
.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=CvEM::COV_MAT_DIAGONAL, int start_step=CvEM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )
.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=EM::COV_MAT_DIAGONAL, int start_step=EM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )
:param nclusters: The number of mixture components in the Gaussian mixture model. Some of EM implementation could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
@ -62,7 +62,7 @@ With another constructor it is possible to override a variety of parameters from
CvEM
----
.. ocv:class:: CvEM
.. ocv:class:: CvEM : public CvStatModel
The class implements the EM algorithm as described in the beginning of the section :ref:`ML_Expectation Maximization`.
@ -71,15 +71,13 @@ CvEM::train
-----------
Estimates the Gaussian mixture parameters from a sample set.
.. ocv:function:: void CvEM::train( const Mat& samples, const Mat& sample_idx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )
.. ocv:function:: bool CvEM::train( const Mat& samples, const Mat& sampleIdx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )
.. ocv:function:: bool CvEM::train( const CvMat* samples, const CvMat* sampleIdx=0, CvEMParams params=CvEMParams(), CvMat* labels=0 )
.. ocv:pyfunction:: cv2.EM.train(samples[, sampleIdx[, params]]) -> retval, labels
:param samples: Samples from which the Gaussian mixture model will be estimated.
:param sample_idx: Mask of samples to use. All samples are used by default.
:param sampleIdx: Mask of samples to use. All samples are used by default.
:param params: Parameters of the EM algorithm.
@ -107,8 +105,6 @@ Returns a mixture component index of a sample.
.. ocv:function:: float CvEM::predict( const CvMat* sample, CvMat* probs ) const
.. ocv:pyfunction:: cv2.EM.predict(sample) -> retval, probs
:param sample: A sample for classification.
:param probs: If it is not null then the method will write posterior probabilities of each component given the sample data to this parameter.
@ -122,8 +118,6 @@ Returns the number of mixture components :math:`M` in the Gaussian mixture model
.. ocv:function:: int CvEM::get_nclusters() const
.. ocv:pyfunction:: cv2.EM.getNClusters() -> retval
CvEM::getMeans
------------------
@ -133,8 +127,6 @@ Returns mixture means :math:`a_k`.
.. ocv:function:: const CvMat* CvEM::get_means() const
.. ocv:pyfunction:: cv2.EM.getMeans() -> means
CvEM::getCovs
-------------
@ -144,8 +136,6 @@ Returns mixture covariance matrices :math:`S_k`.
.. ocv:function:: const CvMat** CvEM::get_covs() const
.. ocv:pyfunction:: cv2.EM.getCovs([covs]) -> covs
CvEM::getWeights
----------------
@ -155,8 +145,6 @@ Returns mixture weights :math:`\pi_k`.
.. ocv:function:: const CvMat* CvEM::get_weights() const
.. ocv:pyfunction:: cv2.EM.getWeights() -> weights
CvEM::getProbs
--------------
@ -166,8 +154,6 @@ Returns vectors of probabilities for each training sample.
.. ocv:function:: const CvMat* CvEM::get_probs() const
.. ocv:pyfunction:: cv2.EM.getProbs() -> probs
For each training sample :math:`i` (that have been passed to the constructor or to :ocv:func:`CvEM::train`) returns probabilities :math:`p_{i,k}` to belong to a mixture component :math:`k`.
@ -179,8 +165,6 @@ Returns logarithm of likelihood.
.. ocv:function:: double CvEM::get_log_likelihood() const
.. ocv:pyfunction:: cv2.EM.getLikelihood() -> likelihood
CvEM::write
-----------
@ -81,9 +81,9 @@ RandomizedTree::train
-------------------------
Trains a randomized tree using an input set of keypoints.
.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
.. ocv:function:: void RandomizedTree::train( vector<BaseKeypoint> const& base_set, RNG & rng, int depth, int views, size_t reduced_num_dim, int num_quant_bits )
.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
.. ocv:function:: void RandomizedTree::train( vector<BaseKeypoint> const& base_set, RNG & rng, PatchGenerator & make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits )
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
@ -105,9 +105,9 @@ RandomizedTree::read
------------------------
Reads a pre-saved randomized tree from a file or stream.
.. ocv:function:: read(const char* file_name, int num_quant_bits)
.. ocv:function:: RandomizedTree::read(const char* file_name, int num_quant_bits)
.. ocv:function:: read(std::istream &is, int num_quant_bits)
.. ocv:function:: RandomizedTree::read(std::istream &is, int num_quant_bits)
:param file_name: Name of the file that contains randomized tree data.
@ -121,9 +121,9 @@ RandomizedTree::write
-------------------------
Writes the current randomized tree to a file or stream.
.. ocv:function:: void write(const char* file_name) const
.. ocv:function:: void RandomizedTree::write(const char* file_name) const
.. ocv:function:: void write(std::ostream &os) const
.. ocv:function:: void RandomizedTree::write(std::ostream &os) const
:param file_name: Name of the file where randomized tree data is stored.
@ -133,7 +133,7 @@ Writes the current randomized tree to a file or stream.
RandomizedTree::applyQuantization
-------------------------------------
.. ocv:function:: void applyQuantization(int num_quant_bits)
.. ocv:function:: void RandomizedTree::applyQuantization(int num_quant_bits)
Applies quantization to the current randomized tree.
@ -142,7 +142,7 @@ RandomizedTree::applyQuantization
RTreeNode
---------
.. ocv:class:: RTreeNode
.. ocv:struct:: RTreeNode
Class containing a base structure for ``RandomizedTree``. ::
@ -240,9 +240,9 @@ RTreeClassifier::train
--------------------------
Trains a randomized tree classifier using an input set of keypoints.
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
.. ocv:function:: void RTreeClassifier::train( vector<BaseKeypoint> const& base_set, RNG & rng, int num_trees=RTreeClassifier::DEFAULT_TREES, int depth=RandomizedTree::DEFAULT_DEPTH, int views=RandomizedTree::DEFAULT_VIEWS, size_t reduced_num_dim=RandomizedTree::DEFAULT_REDUCED_NUM_DIM, int num_quant_bits=DEFAULT_NUM_QUANT_BITS )
.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
.. ocv:function:: void RTreeClassifier::train( vector<BaseKeypoint> const& base_set, RNG & rng, PatchGenerator & make_patch, int num_trees=RTreeClassifier::DEFAULT_TREES, int depth=RandomizedTree::DEFAULT_DEPTH, int views=RandomizedTree::DEFAULT_VIEWS, size_t reduced_num_dim=RandomizedTree::DEFAULT_REDUCED_NUM_DIM, int num_quant_bits=DEFAULT_NUM_QUANT_BITS )
:param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
@ -260,17 +260,14 @@ Trains a randomized tree classifier using an input set of keypoints.
:param num_quant_bits: Number of bits used for quantization.
:param print_status: Current status of training printed on the console.
RTreeClassifier::getSignature
---------------------------------
Returns a signature for an image patch.
.. ocv:function:: void getSignature(IplImage *patch, uchar *sig)
.. ocv:function:: void RTreeClassifier::getSignature(IplImage *patch, uchar *sig)
.. ocv:function:: void getSignature(IplImage *patch, float *sig)
.. ocv:function:: void RTreeClassifier::getSignature(IplImage *patch, float *sig)
:param patch: Image patch to calculate the signature for.
:param sig: Output signature (array dimension is ``reduced_num_dim)`` .
@ -281,7 +278,7 @@ RTreeClassifier::getSparseSignature
---------------------------------------
Returns a sparse signature for an image patch
.. ocv:function:: void getSparseSignature(IplImage *patch, float *sig, float thresh)
.. ocv:function:: void RTreeClassifier::getSparseSignature(IplImage *patch, float *sig, float thresh)
:param patch: Image patch to calculate the signature for.
@ -296,7 +293,7 @@ RTreeClassifier::countNonZeroElements
-----------------------------------------
Returns the number of non-zero elements in an input array.
.. ocv:function:: static int countNonZeroElements(float *vec, int n, double tol=1e-10)
.. ocv:function:: static int RTreeClassifier::countNonZeroElements(float *vec, int n, double tol=1e-10)
:param vec: Input vector containing float elements.
@ -310,9 +307,9 @@ RTreeClassifier::read
-------------------------
Reads a pre-saved ``RTreeClassifier`` from a file or stream.
.. ocv:function:: read(const char* file_name)
.. ocv:function:: void RTreeClassifier::read(const char* file_name)
.. ocv:function:: read(std::istream& is)
.. ocv:function:: void RTreeClassifier::read( std::istream & is )
:param file_name: Name of the file that contains randomized tree data.
@ -324,9 +321,9 @@ RTreeClassifier::write
--------------------------
Writes the current ``RTreeClassifier`` to a file or stream.
.. ocv:function:: void write(const char* file_name) const
.. ocv:function:: void RTreeClassifier::write(const char* file_name) const
.. ocv:function:: void write(std::ostream &os) const
.. ocv:function:: void RTreeClassifier::write(std::ostream &os) const
:param file_name: Name of the file where randomized tree data is stored.
@ -338,7 +335,7 @@ RTreeClassifier::setQuantization
------------------------------------
Applies quantization to the current randomized tree.
.. ocv:function:: void setQuantization(int num_quant_bits)
.. ocv:function:: void RTreeClassifier::setQuantization(int num_quant_bits)
:param num_quant_bits: Number of bits used for quantization.
@ -0,0 +1,95 @@
Histograms
==========
.. highlight:: cpp
CalcPGH
-------
Calculates a pair-wise geometrical histogram for a contour.
.. ocv:cfunction:: void cvCalcPGH( const CvSeq* contour, CvHistogram* hist )
:param contour: Input contour. Currently, only integer point coordinates are allowed.
:param hist: Calculated histogram. It must be two-dimensional.
The function calculates a 2D pair-wise geometrical histogram (PGH), described in [Iivarinen97]_ for the contour. The algorithm considers every pair of contour
edges. The angle between the edges and the minimum/maximum distances
are determined for every pair. To do this, each of the edges in turn
is taken as the base, while the function loops through all the other
edges. When the base edge and any other edge are considered, the minimum
and maximum distances from the points on the non-base edge to the line of
the base edge are selected. The angle between the edges defines the row
of the histogram in which all the bins that correspond to the distance
between the calculated minimum and maximum distances are incremented
(that is, the histogram is transposed relatively to the definition in the original paper). The histogram can be used for contour matching.
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Särelä, and Ari Visa. *Comparison of Combined Shape Descriptors for Irregular Objects*, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html
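A rough usage sketch (legacy C API). The include paths, the histogram size, and the angle/distance bin ranges below are assumptions and may need adjusting: ::

    #include <opencv2/legacy/legacy.hpp>        // assumed location of cvCalcPGH in 2.4
    #include <opencv2/imgproc/imgproc_c.h>
    #include <opencv2/highgui/highgui_c.h>

    int main()
    {
        IplImage* img = cvLoadImage("shape.png", CV_LOAD_IMAGE_GRAYSCALE);  // hypothetical file
        if( !img )
            return -1;
        cvThreshold(img, img, 128, 255, CV_THRESH_BINARY);

        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* contour = 0;
        cvFindContours(img, storage, &contour, sizeof(CvContour),
                       CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

        // 2D histogram: rows = angle between edges, cols = distance between edges
        int size[] = { 16, 16 };
        float angleRange[] = { 0, 180 }, distRange[] = { 0, 100 };   // assumed ranges
        float* ranges[] = { angleRange, distRange };
        CvHistogram* hist = cvCreateHist(2, size, CV_HIST_ARRAY, ranges, 1);

        if( contour )
            cvCalcPGH(contour, hist);

        cvReleaseHist(&hist);
        cvReleaseMemStorage(&storage);
        cvReleaseImage(&img);
        return 0;
    }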
QueryHistValue*D
----------------
Queries the value of the histogram bin.
.. ocv:cfunction:: float cvQueryHistValue_1D(CvHistogram hist, int idx0)
.. ocv:cfunction:: float cvQueryHistValue_2D(CvHistogram hist, int idx0, int idx1)
.. ocv:cfunction:: float cvQueryHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvQueryHistValue_nD(CvHistogram hist, const int* idx)
.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_nD(hist, idx) -> float
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
The macros return the value of the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the macros return 0 if the bin is not present, and no new bin is created.
GetHistValue\_?D
----------------
Returns a pointer to the histogram bin.
.. ocv:cfunction:: float cvGetHistValue_1D(CvHistogram hist, int idx0)
.. ocv:cfunction:: float cvGetHistValue_2D(CvHistogram hist, int idx0, int idx1)
.. ocv:cfunction:: float cvGetHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvGetHistValue_nD(CvHistogram hist, int idx)
:param hist: Histogram.
:param idx0: 0-th index.
:param idx1: 1-st index.
:param idx2: 2-nd index.
:param idx: Array of indices.
::
#define cvGetHistValue_1D( hist, idx0 )
((float*)(cvPtr1D( (hist)->bins, (idx0), 0 )))
#define cvGetHistValue_2D( hist, idx0, idx1 )
((float*)(cvPtr2D( (hist)->bins, (idx0), (idx1), 0 )))
#define cvGetHistValue_3D( hist, idx0, idx1, idx2 )
((float*)(cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0 )))
#define cvGetHistValue_nD( hist, idx )
((float*)(cvPtrND( (hist)->bins, (idx), 0 )))
..
The macros ``GetHistValue`` return a pointer to the specified bin of the 1D, 2D, 3D, or N-D histogram. In case of a sparse histogram, the macros create a new bin and set it to 0, unless it exists already.
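A tiny sketch contrasting the two families of macros on a dense 2D histogram (the include path is an assumption for a 2.4 build): ::

    #include <opencv2/imgproc/imgproc_c.h>
    #include <stdio.h>

    int main()
    {
        int size[] = { 8, 8 };
        CvHistogram* hist = cvCreateHist(2, size, CV_HIST_ARRAY, 0, 1);
        cvClearHist(hist);

        // GetHistValue returns a pointer to the bin, so it can be used to write
        *cvGetHistValue_2D(hist, 3, 5) = 7.f;

        // QueryHistValue returns the bin value itself
        printf("bin(3,5) = %f\n", cvQueryHistValue_2D(hist, 3, 5));

        cvReleaseHist(&hist);
        return 0;
    }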
@ -9,6 +9,7 @@ legacy. Deprecated stuff
motion_analysis
expectation_maximization
histograms
planar_subdivisions
feature_detection_and_description
common_interfaces_of_descriptor_extractors
@ -8,42 +8,42 @@ CalcOpticalFlowBM
-----------------
Calculates the optical flow for two images by using the block matching method.
.. ocv:cfunction:: void cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, CvSize blockSize, CvSize shiftSize, CvSize maxRange, int usePrevious, CvArr* velx, CvArr* vely )
.. ocv:cfunction:: void cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, CvSize block_size, CvSize shift_size, CvSize max_range, int use_previous, CvArr* velx, CvArr* vely )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowBM(prev, curr, blockSize, shiftSize, maxRange, usePrevious, velx, vely)-> None
.. ocv:pyoldfunction:: cv.CalcOpticalFlowBM(prev, curr, blockSize, shiftSize, max_range, usePrevious, velx, vely)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel
:param blockSize: Size of basic blocks that are compared
:param block_size: Size of basic blocks that are compared
:param shiftSize: Block coordinate increments
:param shift_size: Block coordinate increments
:param maxRange: Size of the scanned neighborhood in pixels around the block
:param max_range: Size of the scanned neighborhood in pixels around the block
:param usePrevious: Flag that specifies whether to use the input velocity as initial approximations or not.
:param use_previous: Flag that specifies whether to use the input velocity as initial approximations or not.
:param velx: Horizontal component of the optical flow of
.. math::
\left \lfloor \frac{\texttt{prev->width} - \texttt{blockSize.width}}{\texttt{shiftSize.width}} \right \rfloor \times \left \lfloor \frac{\texttt{prev->height} - \texttt{blockSize.height}}{\texttt{shiftSize.height}} \right \rfloor
\left \lfloor \frac{\texttt{prev->width} - \texttt{block_size.width}}{\texttt{shift_size.width}} \right \rfloor \times \left \lfloor \frac{\texttt{prev->height} - \texttt{block_size.height}}{\texttt{shift_size.height}} \right \rfloor
size, 32-bit floating-point, single-channel
:param vely: Vertical component of the optical flow of the same size ``velx`` , 32-bit floating-point, single-channel
The function calculates the optical flow for overlapped blocks ``blockSize.width x blockSize.height`` pixels each, thus the velocity fields are smaller than the original images. For every block in ``prev``
the function tries to find a similar block in ``curr`` in some neighborhood of the original block, or of the block shifted by ``(velx(x0,y0), vely(x0,y0))`` as calculated by the previous function call (if ``usePrevious=1``)
The function calculates the optical flow for overlapped blocks ``block_size.width x block_size.height`` pixels each, thus the velocity fields are smaller than the original images. For every block in ``prev``
the function tries to find a similar block in ``curr`` in some neighborhood of the original block, or of the block shifted by ``(velx(x0,y0), vely(x0,y0))`` as calculated by the previous function call (if ``use_previous=1``)
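A sketch that allocates the velocity fields with the size given by the formula above; the file names and the block/shift/range sizes are placeholders, and the header is the legacy module where this function lives in 2.4: ::

    #include <opencv2/legacy/legacy.hpp>
    #include <opencv2/highgui/highgui_c.h>

    int main()
    {
        // hypothetical consecutive frames, 8-bit single channel
        IplImage* prev = cvLoadImage("frame0.png", CV_LOAD_IMAGE_GRAYSCALE);
        IplImage* curr = cvLoadImage("frame1.png", CV_LOAD_IMAGE_GRAYSCALE);
        if( !prev || !curr )
            return -1;

        CvSize block = cvSize(16, 16), shift = cvSize(8, 8), range = cvSize(4, 4);

        // the velocity fields must use the size given by the formula above
        int vw = (prev->width  - block.width)  / shift.width;
        int vh = (prev->height - block.height) / shift.height;
        CvMat* velx = cvCreateMat(vh, vw, CV_32FC1);
        CvMat* vely = cvCreateMat(vh, vw, CV_32FC1);

        cvCalcOpticalFlowBM(prev, curr, block, shift, range, 0, velx, vely);

        cvReleaseMat(&velx);
        cvReleaseMat(&vely);
        cvReleaseImage(&prev);
        cvReleaseImage(&curr);
        return 0;
    }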
CalcOpticalFlowHS
-----------------
Calculates the optical flow for two images using Horn-Schunck algorithm.
.. ocv:cfunction:: void cvCalcOpticalFlowHS(const CvArr* prev, const CvArr* curr, int usePrevious, CvArr* velx, CvArr* vely, double lambda, CvTermCriteria criteria)
.. ocv:cfunction:: void cvCalcOpticalFlowHS(const CvArr* prev, const CvArr* curr, int use_previous, CvArr* velx, CvArr* vely, double lambda, CvTermCriteria criteria)
.. ocv:pyoldfunction:: cv.CalcOpticalFlowHS(prev, curr, usePrevious, velx, vely, lambda, criteria)-> None
@ -51,7 +51,7 @@ Calculates the optical flow for two images using Horn-Schunck algorithm.
:param curr: Second image, 8-bit, single-channel
:param usePrevious: Flag that specifies whether to use the input velocity as initial approximations or not.
:param use_previous: Flag that specifies whether to use the input velocity as initial approximations or not.
:param velx: Horizontal component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
@ -69,7 +69,7 @@ CalcOpticalFlowLK
Calculates the optical flow for two images using Lucas-Kanade algorithm.
.. ocv:cfunction:: void cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, CvSize winSize, CvArr* velx, CvArr* vely )
.. ocv:cfunction:: void cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, CvSize win_size, CvArr* velx, CvArr* vely )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowLK(prev, curr, winSize, velx, vely)-> None
@ -77,7 +77,7 @@ Calculates the optical flow for two images using Lucas-Kanade algorithm.
:param curr: Second image, 8-bit, single-channel
:param winSize: Size of the averaging window used for grouping pixels
:param win_size: Size of the averaging window used for grouping pixels
:param velx: Horizontal component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
@ -145,7 +145,7 @@ CreateSubdivDelaunay2D
Creates an empty Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage)-> emptyDelaunayTriangulation
.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage) -> CvSubdiv2D
:param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision.
@ -910,7 +910,7 @@ CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage);
typedef unsigned char CvBool;
typedef struct
typedef struct Cv3dTracker2dTrackedObject
{
int id;
CvPoint2D32f p; // pgruebele: So we do not loose precision, this needs to be float
@ -924,7 +924,7 @@ CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2
return r;
}
typedef struct
typedef struct Cv3dTrackerTrackedObject
{
int id;
CvPoint3D32f p; // location of the tracked object
@ -938,7 +938,7 @@ CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f
return r;
}
typedef struct
typedef struct Cv3dTrackerCameraInfo
{
CvBool valid;
float mat[4][4]; /* maps camera coordinates to world coordinates */
@ -946,7 +946,7 @@ typedef struct
/* has all the info we need */
} Cv3dTrackerCameraInfo;
typedef struct
typedef struct Cv3dTrackerCameraIntrinsics
{
CvPoint2D32f principal_point;
float focal_length[2];
@ -1768,8 +1768,8 @@ protected:
struct CV_EXPORTS_W_MAP CvEMParams
{
CvEMParams();
CvEMParams( int nclusters, int cov_mat_type=1/*CvEM::COV_MAT_DIAGONAL*/,
int start_step=0/*CvEM::START_AUTO_STEP*/,
CvEMParams( int nclusters, int cov_mat_type=cv::EM::COV_MAT_DIAGONAL,
int start_step=cv::EM::START_AUTO_STEP,
CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON),
const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 );
@ -65,10 +65,21 @@ training examples are recomputed at each training iteration. Examples deleted at
CvBoostParams
-------------
.. ocv:class:: CvBoostParams
.. ocv:struct:: CvBoostParams : public CvDTreeParams
Boosting training parameters.
There is one structure member that you can set directly:
.. ocv:member:: int split_criteria
Splitting criteria used to choose optimal splits during a weak tree construction. Possible values are:
* **CvBoost::DEFAULT** Use the default for the particular boosting method, see below.
* **CvBoost::GINI** Use Gini index. This is the default option for Real AdaBoost; it may also be used for Discrete AdaBoost.
* **CvBoost::MISCLASS** Use misclassification rate. This is the default option for Discrete AdaBoost; it may also be used for Real AdaBoost.
* **CvBoost::SQERR** Use least squares criteria. This is the default and the only option for LogitBoost and Gentle AdaBoost.
The structure is derived from :ocv:class:`CvDTreeParams` but not all of the decision tree parameters are supported. In particular, cross-validation is not supported.
All parameters are public. You can initialize them by a constructor and then override some of them directly if you want.
@ -96,17 +107,6 @@ The constructors.
See :ocv:func:`CvDTreeParams::CvDTreeParams` for description of other parameters.
Also there is one structure member that you can set directly:
.. ocv:member:: int split_criteria
Splitting criteria used to choose optimal splits during a weak tree construction. Possible values are:
* **CvBoost::DEFAULT** Use the default for the particular boosting method, see below.
* **CvBoost::GINI** Use Gini index. This is default option for Real AdaBoost; may be also used for Discrete AdaBoost.
* **CvBoost::MISCLASS** Use misclassification rate. This is default option for Discrete AdaBoost; may be also used for Real AdaBoost.
* **CvBoost::SQERR** Use least squares criteria. This is default and the only option for LogitBoost and Gentle AdaBoost.
Default parameters are:
::
@ -122,7 +122,7 @@ Default parameters are:
CvBoostTree
-----------
.. ocv:class:: CvBoostTree
.. ocv:class:: CvBoostTree : public CvDTree
The weak tree classifier, a component of the boosted tree classifier :ocv:class:`CvBoost`, is a derivative of :ocv:class:`CvDTree`. Normally, there is no need to use the weak classifiers directly. However, they can be accessed as elements of the sequence :ocv:member:`CvBoost::weak`, retrieved by :ocv:func:`CvBoost::get_weak_predictors`.
@ -130,7 +130,7 @@ The weak tree classifier, a component of the boosted tree classifier :ocv:class:
CvBoost
-------
.. ocv:class:: CvBoost
.. ocv:class:: CvBoost : public CvStatModel
Boosted tree classifier derived from :ocv:class:`CvStatModel`.
@ -144,7 +144,7 @@ Default and training constructors.
.. ocv:function:: CvBoost::CvBoost( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvBoostParams params=CvBoostParams() )
.. ocv:pyfunction:: cv2.Boost(trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]) -> <Boost object>
.. ocv:pyfunction:: cv2.Boost([trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]]) -> <Boost object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameter descriptions.
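
A hedged sketch of constructing and training a boosted classifier from Python on toy data; the parameter dictionary keys are assumed to map onto the ``CvBoostParams``/``CvDTreeParams`` members exposed to Python, and all data and values are illustrative: ::

    import cv2
    import numpy as np

    # toy data: 100 two-dimensional samples with binary class labels
    samples = np.random.rand(100, 2).astype(np.float32)
    responses = (samples[:, 0] > samples[:, 1]).astype(np.int32)
    var_types = np.array([cv2.CV_VAR_NUMERICAL] * 2 + [cv2.CV_VAR_CATEGORICAL],
                         np.uint8)

    boost = cv2.Boost()
    boost.train(samples, cv2.CV_ROW_SAMPLE, responses, varType=var_types,
                params=dict(weak_count=50, max_depth=2))
    prediction = boost.predict(samples[:1])
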

View File

@ -57,28 +57,28 @@ Importance of each variable is computed over all the splits on this variable in
CvDTreeSplit
------------
.. ocv:class:: CvDTreeSplit
.. ocv:struct:: CvDTreeSplit
The structure represents a possible decision tree node split. It has public members:
The structure represents a possible decision tree node split. It has public members:
.. ocv:member:: int var_idx
.. ocv:member:: int var_idx
Index of variable on which the split is created.
.. ocv:member:: int inversed
.. ocv:member:: int inversed
If it is not null, then the inverse split rule is used; that is, the left and right branches are exchanged in the rule expressions below.
.. ocv:member:: float quality
.. ocv:member:: float quality
The split quality, a positive number. It is used to choose the best primary split, then to choose and sort the surrogate splits. After the tree is constructed, it is also used to compute variable importance.
.. ocv:member:: CvDTreeSplit* next
.. ocv:member:: CvDTreeSplit* next
Pointer to the next split in the node list of splits.
.. ocv:member:: int[] subset
.. ocv:member:: int[] subset
Bit array indicating the value subset in case of split on a categorical variable. The rule is: ::
@ -86,7 +86,7 @@ The structure represents a possible decision tree node split. It has public memb
then next_node <- left
else next_node <- right
.. ocv:member:: float ord::c
.. ocv:member:: float ord::c
The threshold value in case of split on an ordered variable. The rule is: ::
@ -94,50 +94,50 @@ The structure represents a possible decision tree node split. It has public memb
then next_node<-left
else next_node<-right
.. ocv:member:: int ord::split_point
.. ocv:member:: int ord::split_point
Used internally by the training algorithm.
CvDTreeNode
-----------
.. ocv:class:: CvDTreeNode
.. ocv:struct:: CvDTreeNode
The structure represents a node in a decision tree. It has public members:
The structure represents a node in a decision tree. It has public members:
.. ocv:member:: int class_idx
.. ocv:member:: int class_idx
Class index normalized to 0..class_count-1 range and assigned to the node. It is used internally in classification trees and tree ensembles.
.. ocv:member:: int Tn
.. ocv:member:: int Tn
Tree index in an ordered sequence of pruned trees. The indices are used during and after the pruning procedure. The root node has the maximum value ``Tn`` of the whole tree, child nodes have ``Tn`` less than or equal to the parent's ``Tn``, and nodes with :math:`Tn \leq CvDTree::pruned\_tree\_idx` are not used at the prediction stage (the corresponding branches are considered as cut-off), even if they have not been physically deleted from the tree at the pruning stage.
.. ocv:member:: double value
.. ocv:member:: double value
Value at the node: a class label in case of classification or estimated function value in case of regression.
.. ocv:member:: CvDTreeNode* parent
.. ocv:member:: CvDTreeNode* parent
Pointer to the parent node.
.. ocv:member:: CvDTreeNode* left
.. ocv:member:: CvDTreeNode* left
Pointer to the left child node.
.. ocv:member:: CvDTreeNode* right
.. ocv:member:: CvDTreeNode* right
Pointer to the right child node.
.. ocv:member:: CvDTreeSplit* split
.. ocv:member:: CvDTreeSplit* split
Pointer to the first (primary) split in the node list of splits.
.. ocv:member:: int sample_count
.. ocv:member:: int sample_count
The number of samples that fall into the node at the training stage. It is used to resolve the difficult cases - when the variable for the primary split is missing and all the variables for other surrogate splits are missing too. In this case the sample is directed to the left if ``left->sample_count > right->sample_count`` and to the right otherwise.
.. ocv:member:: int depth
.. ocv:member:: int depth
Depth of the node. The root node depth is 0; the depth of a child node is its parent's depth plus 1.
@ -145,7 +145,7 @@ Other numerous fields of ``CvDTreeNode`` are used internally at the training sta
CvDTreeParams
-------------
.. ocv:class:: CvDTreeParams
.. ocv:struct:: CvDTreeParams
The structure contains all the decision tree training parameters. You can initialize it with the default constructor and then override any parameters directly before training, or the structure may be fully initialized using the advanced variant of the constructor.
@ -187,7 +187,7 @@ The default constructor initializes all the parameters with the default values t
CvDTreeTrainData
----------------
.. ocv:class:: CvDTreeTrainData
.. ocv:struct:: CvDTreeTrainData
Decision tree training data and shared data for tree ensembles. The structure is mostly used internally for storing both standalone trees and tree ensembles efficiently. Basically, it contains the following types of information:
@ -212,7 +212,7 @@ There are two ways of using this structure. In simple cases (for example, a stan
CvDTree
-------
.. ocv:class:: CvDTree
.. ocv:class:: CvDTree : public CvStatModel
The class implements a decision tree as described in the beginning of this section.
@ -221,7 +221,7 @@ CvDTree::train
--------------
Trains a decision tree.
.. ocv:function:: bool CvDTree::train( const Mat& train_data, int tflag, const Mat& responses, const Mat& var_idx=Mat(), const Mat& sample_idx=Mat(), const Mat& var_type=Mat(), const Mat& missing_mask=Mat(), CvDTreeParams params=CvDTreeParams() )
.. ocv:function:: bool CvDTree::train( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvDTreeParams params=CvDTreeParams() )
.. ocv:function:: bool CvDTree::train( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvDTreeParams params=CvDTreeParams() )
@ -290,7 +290,7 @@ Returns the variable importance array.
.. ocv:function:: const CvMat* CvDTree::get_var_importance()
.. ocv:pyfunction:: cv2.DTree.getVarImportance() -> importanceVector
.. ocv:pyfunction:: cv2.DTree.getVarImportance() -> retval
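
For illustration, a hedged sketch of training a single tree from Python and reading the variable importance back; it assumes the ``cv2.DTree`` wrapper exposes ``train`` in the same form as the C++ overloads above, and the data and parameter values are made up: ::

    import cv2
    import numpy as np

    samples = np.random.rand(100, 3).astype(np.float32)
    responses = (samples[:, 0] + samples[:, 1] > 1).astype(np.int32)
    var_types = np.array([cv2.CV_VAR_NUMERICAL] * 3 + [cv2.CV_VAR_CATEGORICAL],
                         np.uint8)

    tree = cv2.DTree()
    tree.train(samples, cv2.CV_ROW_SAMPLE, responses, varType=var_types,
               params=dict(max_depth=4, min_sample_count=5))
    importance = tree.getVarImportance()
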
CvDTree::get_root
-----------------
@ -311,7 +311,7 @@ CvDTree::get_data
-----------------
Returns used train data of the decision tree.
.. ocv:function:: const CvDTreeTrainData* CvDTree::get_data() const
.. ocv:function:: CvDTreeTrainData* CvDTree::get_data() const
Example: building a tree for classifying mushrooms. See the ``mushroom.cpp`` sample that demonstrates how to build and use the
decision tree.

View File

@ -10,6 +10,6 @@ Extremely randomized trees have been introduced by Pierre Geurts, Damien Ernst a
CvERTrees
----------
.. ocv:class:: CvERTrees
.. ocv:class:: CvERTrees : public CvRTrees
The class implements the Extremely randomized trees algorithm. ``CvERTrees`` is inherited from :ocv:class:`CvRTrees` and has the same interface, so see description of :ocv:class:`CvRTrees` class to get details. To set the training parameters of Extremely randomized trees the same class :ocv:class:`CvRTParams` is used.
The class implements the Extremely randomized trees algorithm. ``CvERTrees`` is inherited from :ocv:class:`CvRTrees` and has the same interface, so see description of :ocv:class:`CvRTrees` class to get details. To set the training parameters of Extremely randomized trees the same class :ocv:struct:`CvRTParams` is used.

View File

@ -91,7 +91,7 @@ already a good enough approximation).
EM
--
.. ocv:class:: EM
.. ocv:class:: EM : public Algorithm
The class implements the EM algorithm as described in the beginning of this section. It is inherited from :ocv:class:`Algorithm`.
@ -102,6 +102,7 @@ The constructor of the class
.. ocv:function:: EM::EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON) )
.. ocv:pyfunction:: cv2.EM([nclusters[, covMatType[, termCrit]]]) -> <EM object>
:param nclusters: The number of mixture components in the Gaussian mixture model. Default value of the parameter is ``EM::DEFAULT_NCLUSTERS=5``. Some EM implementations can determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
@ -125,6 +126,12 @@ Estimates the Gaussian mixture parameters from a samples set.
.. ocv:function:: bool EM::trainM(InputArray samples, InputArray probs0, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:pyfunction:: cv2.EM.train(samples[, logLikelihoods[, labels[, probs]]]) -> retval, logLikelihoods, labels, probs
.. ocv:pyfunction:: cv2.EM.trainE(samples, means0[, covs0[, weights0[, logLikelihoods[, labels[, probs]]]]]) -> retval, logLikelihoods, labels, probs
.. ocv:pyfunction:: cv2.EM.trainM(samples, probs0[, logLikelihoods[, labels[, probs]]]) -> retval, logLikelihoods, labels, probs
:param samples: Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have ``CV_64F`` type, it will be converted to an internal matrix of that type for further computation.
:param means0: Initial means :math:`a_k` of mixture components. It is a one-channel matrix of :math:`nclusters \times dims` size. If the matrix does not have ``CV_64F`` type, it will be converted to an internal matrix of that type for further computation.
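
A minimal sketch of the Python usage documented in this section, with randomly generated two-component data (all values illustrative): ::

    import cv2
    import numpy as np

    # two Gaussian blobs in 2-D
    samples = np.vstack([np.random.randn(100, 2),
                         np.random.randn(100, 2) + (5, 5)]).astype(np.float32)

    em = cv2.EM(2)  # nclusters=2, default covariance type and termination criteria
    retval, log_likelihoods, labels, probs = em.train(samples)

    # retval packs the likelihood logarithm and the most probable component index
    retval, probs = em.predict(np.float32([[4.5, 5.2]]))
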
@ -166,7 +173,9 @@ EM::predict
-----------
Returns a likelihood logarithm value and an index of the most probable mixture component for the given sample.
.. ocv:function:: Vec2d predict(InputArray sample, OutputArray probs=noArray()) const
.. ocv:function:: Vec2d EM::predict(InputArray sample, OutputArray probs=noArray()) const
.. ocv:pyfunction:: cv2.EM.predict(sample[, probs]) -> retval, probs
:param sample: A sample for classification. It should be a one-channel matrix of :math:`1 \times dims` or :math:`dims \times 1` size.
@ -180,6 +189,8 @@ Returns ``true`` if the Gaussian mixture model was trained.
.. ocv:function:: bool EM::isTrained() const
.. ocv:pyfunction:: cv2.EM.isTrained() -> retval
EM::read, EM::write
-------------------
See :ocv:func:`Algorithm::read` and :ocv:func:`Algorithm::write`.
@ -195,4 +206,5 @@ See :ocv:func:`Algorithm::get` and :ocv:func:`Algorithm::set`. The following par
* ``"weights"`` *(read-only)*
* ``"means"`` *(read-only)*
* ``"covs"`` *(read-only)*
..

View File

@ -104,7 +104,7 @@ For classification problems, the result is :math:`\arg\max_{i=1..K}(f_i(x))`.
CvGBTreesParams
---------------
.. ocv:class:: CvGBTreesParams
.. ocv:struct:: CvGBTreesParams : public CvDTreeParams
GBT training parameters.
@ -149,7 +149,7 @@ By default the following constructor is used:
CvGBTrees
---------
.. ocv:class:: CvGBTrees
.. ocv:class:: CvGBTrees : public CvStatModel
The class implements the Gradient boosted tree model as described in the beginning of this section.

View File

@ -7,7 +7,7 @@ The algorithm caches all training samples and predicts the response for a new sa
CvKNearest
----------
.. ocv:class:: CvKNearest
.. ocv:class:: CvKNearest : public CvStatModel
The class implements K-Nearest Neighbors model as described in the beginning of this section.
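
Since the class is small, a short sketch of the typical Python round trip (toy data, ``k`` chosen arbitrarily): ::

    import cv2
    import numpy as np

    train = np.random.rand(50, 2).astype(np.float32)
    responses = (train[:, 0] > 0.5).astype(np.float32)

    knn = cv2.KNearest()
    knn.train(train, responses)

    sample = np.float32([[0.7, 0.3]])
    retval, results, neighbours, dists = knn.find_nearest(sample, 3)
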

View File

@ -237,7 +237,7 @@ The method returns a map that converts string class labels to the numerical clas
CvTrainTestSplit
----------------
.. ocv:class:: CvTrainTestSplit
.. ocv:struct:: CvTrainTestSplit
Structure setting the split of a data set read by :ocv:class:`CvMLData`.
::

View File

@ -97,39 +97,39 @@ The second (default) one is a batch RPROP algorithm.
CvANN_MLP_TrainParams
---------------------
.. ocv:class:: CvANN_MLP_TrainParams
.. ocv:struct:: CvANN_MLP_TrainParams
Parameters of the MLP training algorithm. You can initialize the structure by a constructor or the individual parameters can be adjusted after the structure is created.
Parameters of the MLP training algorithm. You can initialize the structure by a constructor or the individual parameters can be adjusted after the structure is created.
The back-propagation algorithm parameters:
The back-propagation algorithm parameters:
.. ocv:member:: double bp_dw_scale
.. ocv:member:: double bp_dw_scale
Strength of the weight gradient term. The recommended value is about 0.1.
.. ocv:member:: double bp_moment_scale
.. ocv:member:: double bp_moment_scale
Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. A value of about 0.1 is good enough.
The RPROP algorithm parameters (see [RPROP93]_ for details):
The RPROP algorithm parameters (see [RPROP93]_ for details):
.. ocv:member:: double rp_dw0
.. ocv:member:: double rp_dw0
Initial value :math:`\Delta_0` of update-values :math:`\Delta_{ij}`.
.. ocv:member:: double rp_dw_plus
.. ocv:member:: double rp_dw_plus
Increase factor :math:`\eta^+`. It must be >1.
.. ocv:member:: double rp_dw_minus
.. ocv:member:: double rp_dw_minus
Decrease factor :math:`\eta^-`. It must be <1.
.. ocv:member:: double rp_dw_min
.. ocv:member:: double rp_dw_min
Update-values lower limit :math:`\Delta_{min}`. It must be positive.
.. ocv:member:: double rp_dw_max
.. ocv:member:: double rp_dw_max
Update-values upper limit :math:`\Delta_{max}`. It must be >1.
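
For reference, a hedged sketch of how these fields are usually passed from Python as a parameter dictionary; the network topology, the termination criteria and the learning-rate values are illustrative: ::

    import cv2
    import numpy as np

    samples = np.random.rand(100, 2).astype(np.float32)
    targets = (samples[:, 0] + samples[:, 1] > 1).astype(np.float32).reshape(-1, 1)

    net = cv2.ANN_MLP(np.int32([2, 8, 1]))   # input, hidden and output layer sizes

    params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT, 300, 0.01),
                  train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                  bp_dw_scale=0.001,
                  bp_moment_scale=0.1)
    niterations = net.train(samples, targets, None, params=params)
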
@ -169,7 +169,7 @@ By default the RPROP algorithm is used:
CvANN_MLP
---------
.. ocv:class:: CvANN_MLP
.. ocv:class:: CvANN_MLP : public CvStatModel
MLP model.
@ -184,7 +184,7 @@ The constructors.
.. ocv:function:: CvANN_MLP::CvANN_MLP( const CvMat* layerSizes, int activateFunc=CvANN_MLP::SIGMOID_SYM, double fparam1=0, double fparam2=0 )
.. ocv:pyfunction:: cv2.ANN_MLP(layerSizes[, activateFunc[, fparam1[, fparam2]]]) -> <ANN_MLP object>
.. ocv:pyfunction:: cv2.ANN_MLP([layerSizes[, activateFunc[, fparam1[, fparam2]]]]) -> <ANN_MLP object>
The advanced constructor allows you to create an MLP with the specified topology. See :ocv:func:`CvANN_MLP::create` for details.
@ -216,7 +216,7 @@ Trains/updates MLP.
.. ocv:function:: int CvANN_MLP::train( const CvMat* inputs, const CvMat* outputs, const CvMat* sampleWeights, const CvMat* sampleIdx=0, CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags=0 )
.. ocv:pyfunction:: cv2.ANN_MLP.train(inputs, outputs, sampleWeights[, sampleIdx[, params[, flags]]]) -> niterations
.. ocv:pyfunction:: cv2.ANN_MLP.train(inputs, outputs, sampleWeights[, sampleIdx[, params[, flags]]]) -> retval
:param inputs: Floating-point matrix of input vectors, one vector per row.
@ -249,7 +249,7 @@ Predicts responses for input samples.
.. ocv:function:: float CvANN_MLP::predict( const CvMat* inputs, CvMat* outputs ) const
.. ocv:pyfunction:: cv2.ANN_MLP.predict(inputs, outputs) -> retval
.. ocv:pyfunction:: cv2.ANN_MLP.predict(inputs[, outputs]) -> retval, outputs
:param inputs: Input samples.

View File

@ -11,7 +11,7 @@ This simple classification model assumes that feature vectors from each class ar
CvNormalBayesClassifier
-----------------------
.. ocv:class:: CvNormalBayesClassifier
.. ocv:class:: CvNormalBayesClassifier : public CvStatModel
Bayes classifier for normally distributed data.
@ -25,7 +25,7 @@ Default and training constructors.
.. ocv:function:: CvNormalBayesClassifier::CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0 )
.. ocv:pyfunction:: cv2.NormalBayesClassifier(trainData, responses[, varIdx[, sampleIdx]]) -> <NormalBayesClassifier object>
.. ocv:pyfunction:: cv2.NormalBayesClassifier([trainData, responses[, varIdx[, sampleIdx]]]) -> <NormalBayesClassifier object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameter descriptions.
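
A hedged sketch of the Python usage on toy data, assuming ``predict`` is wrapped with an output ``results`` matrix: ::

    import cv2
    import numpy as np

    samples = np.random.rand(60, 2).astype(np.float32)
    responses = (samples[:, 0] > 0.5).astype(np.int32)

    nb = cv2.NormalBayesClassifier()
    nb.train(samples, responses)

    retval, results = nb.predict(np.float32([[0.8, 0.2]]))
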

View File

@ -42,7 +42,7 @@ For the random trees usage example, please, see letter_recog.cpp sample in OpenC
CvRTParams
----------
.. ocv:class:: CvRTParams
.. ocv:struct:: CvRTParams : public CvDTreeParams
Training parameters of random trees.
@ -94,7 +94,7 @@ The default constructor sets all parameters to default values which are differen
CvRTrees
--------
.. ocv:class:: CvRTrees
.. ocv:class:: CvRTrees : public CvStatModel
The class implements the random forest predictor as described in the beginning of this section.
@ -118,7 +118,7 @@ CvRTrees::predict
-----------------
Predicts the output for an input sample.
.. ocv:function:: double CvRTrees::predict( const Mat& sample, const Mat& missing=Mat() ) const
.. ocv:function:: float CvRTrees::predict( const Mat& sample, const Mat& missing=Mat() ) const
.. ocv:function:: float CvRTrees::predict( const CvMat* sample, const CvMat* missing = 0 ) const
@ -156,7 +156,7 @@ Returns the variable importance array.
.. ocv:function:: const CvMat* CvRTrees::get_var_importance()
.. ocv:pyfunction:: cv2.RTrees.getVarImportance() -> importanceVector
.. ocv:pyfunction:: cv2.RTrees.getVarImportance() -> retval
The method returns the variable importance vector, computed at the training stage when ``CvRTParams::calc_var_importance`` is set to true. If this flag was set to false, the ``NULL`` pointer is returned. This differs from the decision trees where variable importance can be computed anytime after the training.
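
A hedged Python sketch that trains a small forest, predicts on a sample, and reads the importance vector back; ``calc_var_importance`` is assumed to be accepted through the parameter dictionary, and all data and values are illustrative: ::

    import cv2
    import numpy as np

    samples = np.random.rand(100, 4).astype(np.float32)
    responses = (samples[:, 0] > 0.5).astype(np.int32)
    var_types = np.array([cv2.CV_VAR_NUMERICAL] * 4 + [cv2.CV_VAR_CATEGORICAL],
                         np.uint8)

    forest = cv2.RTrees()
    forest.train(samples, cv2.CV_ROW_SAMPLE, responses, varType=var_types,
                 params=dict(max_depth=10, calc_var_importance=True))
    prediction = forest.predict(samples[:1])
    importance = forest.getVarImportance()   # None if calc_var_importance was false
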
@ -181,7 +181,7 @@ CvRTrees::calc_error
--------------------
Returns error of the random forest.
.. ocv:function:: float CvRTrees::calc_error( CvMLData* data, int type, std::vector<float> *resp = 0 )
.. ocv:function:: float CvRTrees::calc_error( CvMLData* data, int type, std::vector<float>* resp=0 )
The method is identical to :ocv:func:`CvDTree::calc_error` but uses the random forest as predictor.

View File

@ -52,7 +52,7 @@ CvStatModel::CvStatModel(...)
-----------------------------
The training constructor.
.. ocv:function:: CvStatModel::CvStatModel( const Mat& train_data ... )
.. ocv:function:: CvStatModel::CvStatModel()
Most ML classes provide single-step construct-and-train constructors. Such a constructor is equivalent to the default constructor, followed by the :ocv:func:`CvStatModel::train` method with the parameters that are passed to the constructor.

View File

@ -16,19 +16,19 @@ SVM implementation in OpenCV is based on [LibSVM]_.
CvParamGrid
-----------
.. ocv:class:: CvParamGrid
.. ocv:struct:: CvParamGrid
The structure represents the logarithmic grid range of statmodel parameters. It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate being computed by cross-validation.
The structure represents the logarithmic grid range of statmodel parameters. It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate being computed by cross-validation.
.. ocv:member:: double CvParamGrid::min_val
.. ocv:member:: double CvParamGrid::min_val
Minimum value of the statmodel parameter.
.. ocv:member:: double CvParamGrid::max_val
.. ocv:member:: double CvParamGrid::max_val
Maximum value of the statmodel parameter.
.. ocv:member:: double CvParamGrid::step
.. ocv:member:: double CvParamGrid::step
Logarithmic step for iterating the statmodel parameter.
@ -77,7 +77,7 @@ Returns ``true`` if the grid is valid and ``false`` otherwise. The grid is valid
CvSVMParams
-----------
.. ocv:class:: CvSVMParams
.. ocv:struct:: CvSVMParams
SVM training parameters.
@ -146,7 +146,7 @@ The default constructor initialize the structure with following values:
CvSVM
-----
.. ocv:class:: CvSVM
.. ocv:class:: CvSVM : public CvStatModel
Support Vector Machines.
@ -160,7 +160,7 @@ Default and training constructors.
.. ocv:function:: CvSVM::CvSVM( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, CvSVMParams params=CvSVMParams() )
.. ocv:pyfunction:: cv2.SVM(trainData, responses[, varIdx[, sampleIdx[, params]]]) -> <SVM object>
.. ocv:pyfunction:: cv2.SVM([trainData, responses[, varIdx[, sampleIdx[, params]]]]) -> <SVM object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameter descriptions.
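
A minimal Python sketch on toy data; the kernel choice and the ``C``/``gamma`` values are illustrative, and the training parameters are passed as the dictionary accepted by the wrapper: ::

    import cv2
    import numpy as np

    samples = np.random.rand(100, 2).astype(np.float32)
    responses = (samples[:, 0] > samples[:, 1]).astype(np.float32)

    svm = cv2.SVM()
    svm.train(samples, responses,
              params=dict(svm_type=cv2.SVM_C_SVC,
                          kernel_type=cv2.SVM_RBF,
                          C=2.5, gamma=0.5))

    result = svm.predict(np.float32([[0.3, 0.7]]))
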
@ -285,7 +285,7 @@ Retrieves a number of support vectors and the particular vector.
.. ocv:function:: const float* CvSVM::get_support_vector(int i) const
.. ocv:pyfunction:: cv2.SVM.get_support_vector_count() -> nsupportVectors
.. ocv:pyfunction:: cv2.SVM.get_support_vector_count() -> retval
:param i: Index of the particular support vector.
@ -297,4 +297,4 @@ Returns the number of used features (variables count).
.. ocv:function:: int CvSVM::get_var_count() const
.. ocv:pyfunction:: cv2.SVM.get_var_count() -> nvars
.. ocv:pyfunction:: cv2.SVM.get_var_count() -> retval

View File

@ -170,10 +170,10 @@ struct CV_EXPORTS_W_MAP CvParamGrid
min_val = max_val = step = 0;
}
CvParamGrid( double _min_val, double _max_val, double log_step )
CvParamGrid( double min_val, double max_val, double log_step )
{
min_val = _min_val;
max_val = _max_val;
this->min_val = min_val;
this->max_val = max_val;
step = log_step;
}
//CvParamGrid( int param_id );
@ -291,10 +291,10 @@ protected:
struct CV_EXPORTS_W_MAP CvSVMParams
{
CvSVMParams();
CvSVMParams( int _svm_type, int _kernel_type,
double _degree, double _gamma, double _coef0,
double Cvalue, double _nu, double _p,
CvMat* _class_weights, CvTermCriteria _term_crit );
CvSVMParams( int svm_type, int kernel_type,
double degree, double gamma, double coef0,
double Cvalue, double nu, double p,
CvMat* class_weights, CvTermCriteria term_crit );
CV_PROP_RW int svm_type;
CV_PROP_RW int kernel_type;
@ -569,8 +569,7 @@ public:
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+
TermCriteria::EPS,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
virtual ~EM();
@ -1026,7 +1025,7 @@ public:
virtual float get_proximity( const CvMat* sample1, const CvMat* sample2,
const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const;
virtual float calc_error( CvMLData* _data, int type , std::vector<float> *resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
virtual float calc_error( CvMLData* data, int type , std::vector<float>* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
virtual float get_train_error();

View File

@ -3,7 +3,7 @@ Feature Detection and Description
SIFT
----
.. ocv:class:: SIFT
.. ocv:class:: SIFT : public Feature2D
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D. Lowe [Lowe04]_.
@ -31,9 +31,9 @@ SIFT::operator ()
-----------------
Extracts features and computes their descriptors using the SIFT algorithm
.. ocv:function:: void SIFT::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:function:: void SIFT::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
:param image: Input 8-bit grayscale image
:param img: Input 8-bit grayscale image
:param mask: Optional input mask that marks the regions where we should detect features.
@ -46,9 +46,9 @@ Extract features and computes their descriptors using SIFT algorithm
SURF
----
.. ocv:class:: SURF
.. ocv:class:: SURF : public Feature2D
Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from ``CvSURFParams`` structure, which specifies the algorithm parameters:
Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from the ``CvSURFParams`` structure, which specifies the algorithm parameters:
.. ocv:member:: int extended
@ -82,9 +82,9 @@ The SURF extractor constructors.
.. ocv:function:: SURF::SURF()
.. ocv:function:: SURF::SURF(double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=false, bool upright=false)
.. ocv:function:: SURF::SURF( double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=true, bool upright=false )
.. ocv:pyfunction:: cv2.SURF(_hessianThreshold[, _nOctaves[, _nOctaveLayers[, _extended[, _upright]]]]) -> <SURF object>
.. ocv:pyfunction:: cv2.SURF([hessianThreshold[, nOctaves[, nOctaveLayers[, extended[, upright]]]]]) -> <SURF object>
:param hessianThreshold: Threshold for hessian keypoint detector used in SURF.
@ -101,11 +101,11 @@ SURF::operator()
----------------
Detects keypoints and computes SURF descriptors for them.
.. ocv:function:: void SURF::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const
.. ocv:function:: void SURF::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints) const
.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:pyfunction:: cv2.SURF.detect(img, mask) -> keypoints
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, useProvidedKeypoints]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
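
A short sketch of the Python call shown above; the image file is illustrative, and depending on the version the returned descriptors may need to be reshaped to ``(len(keypoints), surf.descriptorSize())``: ::

    import cv2

    img = cv2.imread("building.jpg", 0)   # load as 8-bit grayscale

    surf = cv2.SURF(400)                  # hessianThreshold = 400 (illustrative)
    keypoints, descriptors = surf.detect(img, None, useProvidedKeypoints=False)
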

View File

@ -58,9 +58,9 @@ namespace cv
class CV_EXPORTS_W SIFT : public Feature2D
{
public:
explicit SIFT( int _nfeatures=0, int _nOctaveLayers=3,
double _contrastThreshold=0.04, double _edgeThreshold=10,
double _sigma=1.6);
explicit SIFT( int nfeatures=0, int nOctaveLayers=3,
double contrastThreshold=0.04, double edgeThreshold=10,
double sigma=1.6);
//! returns the descriptor size in floats (128)
int descriptorSize() const;
@ -108,23 +108,23 @@ class CV_EXPORTS_W SURF : public Feature2D
{
public:
//! the default constructor
SURF();
CV_WRAP SURF();
//! the full constructor taking all the necessary parameters
explicit SURF(double _hessianThreshold,
int _nOctaves=4, int _nOctaveLayers=2,
bool _extended=true, bool _upright=false);
explicit CV_WRAP SURF(double hessianThreshold,
int nOctaves=4, int nOctaveLayers=2,
bool extended=true, bool upright=false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
CV_WRAP int descriptorSize() const;
//! returns the descriptor type
int descriptorType() const;
CV_WRAP int descriptorType() const;
//! finds the keypoints using fast hessian detector used in SURF
void operator()(InputArray img, InputArray mask,
CV_WRAP_AS(detect) void operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
CV_WRAP_AS(detect) void operator()(InputArray img, InputArray mask,
CV_OUT vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false) const;

View File

@ -131,7 +131,7 @@ FeatureEvaluator::create
----------------------------
Constructs the feature evaluator.
.. ocv:function:: static Ptr<FeatureEvaluator> FeatureEvaluator::create(int type)
.. ocv:function:: Ptr<FeatureEvaluator> FeatureEvaluator::create(int type)
:param type: Type of features evaluated by cascade (``HAAR`` or ``LBP`` for now).
@ -148,7 +148,7 @@ Loads a classifier from a file.
.. ocv:function:: CascadeClassifier::CascadeClassifier(const string& filename)
.. ocv:pyfunction:: cv2.CascadeClassifier(filename) -> <CascadeClassifier object>
.. ocv:pyfunction:: cv2.CascadeClassifier([filename]) -> <CascadeClassifier object>
:param filename: Name of the file from which the classifier is loaded.
@ -193,9 +193,9 @@ Detects objects of different sizes in the input image. The detected objects are
.. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize]]]]]) -> objects
.. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image, rejectLevels, levelWeights[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize[, outputRejectLevels]]]]]]) -> objects
.. ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scaleFactor=1.1, int minNeighbors=3, int flags=0, CvSize minSize=cvSize(0, 0), CvSize maxSize=cvSize(0, 0) )
.. ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scale_factor=1.1, int min_neighbors=3, int flags=0, CvSize min_size=cvSize(0,0), CvSize max_size=cvSize(0,0) )
.. ocv:pyoldfunction:: cv.HaarDetectObjects(image, cascade, storage, scaleFactor=1.1, minNeighbors=3, flags=0, minSize=(0, 0))-> detectedObjects
.. ocv:pyoldfunction:: cv.HaarDetectObjects(image, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, min_size=(0, 0)) -> detectedObjects
:param cascade: Haar classifier cascade (OpenCV 1.x API only). It can be loaded from XML or YAML file using :ocv:cfunc:`Load`. When the cascade is not needed anymore, release it using ``cvReleaseHaarClassifierCascade(&cascade)``.
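
A minimal Python sketch of the typical detection loop; the cascade file and the input image are illustrative: ::

    import cv2

    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    gray = cv2.equalizeHist(cv2.imread("people.jpg", 0))

    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3,
                                     minSize=(30, 30))
    for (x, y, w, h) in faces:
        cv2.rectangle(gray, (x, y), (x + w, y + h), 255, 2)
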
@ -222,7 +222,7 @@ Sets an image for detection.
.. ocv:function:: bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& feval, const Mat& image )
.. ocv:cfunction:: void cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, const CvArr* sum, const CvArr* sqsum, const CvArr* tiltedSum, double scale )
.. ocv:cfunction:: void cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, const CvArr* sum, const CvArr* sqsum, const CvArr* tilted_sum, double scale )
:param cascade: Haar classifier cascade (OpenCV 1.x API only). See :ocv:func:`CascadeClassifier::detectMultiScale` for more information.
@ -239,9 +239,9 @@ CascadeClassifier::runAt
----------------------------
Runs the detector at the specified point.
.. ocv:function:: int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& feval, Point pt )
.. ocv:function:: int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& feval, Point pt, double& weight )
.. ocv:cfunction:: int cvRunHaarClassifierCascade( CvHaarClassifierCascade* cascade, CvPoint pt, int startStage=0 )
.. ocv:cfunction:: int cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, CvPoint pt, int start_stage=0 )
:param cascade: Haar classifier cascade (OpenCV 1.x API only). See :ocv:func:`CascadeClassifier::detectMultiScale` for more information.

View File

@ -45,7 +45,7 @@ CvLSVMFilterPosition
--------------------
.. ocv:struct:: CvLSVMFilterPosition
Structure describes the position of the filter in the feature pyramid.
The structure describes the position of the filter in the feature pyramid.
.. ocv:member:: unsigned int l
@ -64,7 +64,7 @@ CvLSVMFilterObject
------------------
.. ocv:struct:: CvLSVMFilterObject
Description of the filter, which corresponds to the part of the object.
Description of the filter, which corresponds to a part of the object.
.. ocv:member:: CvLSVMFilterPosition V
@ -96,7 +96,7 @@ CvLatentSvmDetector
-------------------
.. ocv:struct:: CvLatentSvmDetector
Structure contains internal representation of trained Latent SVM detector.
The structure contains the internal representation of a trained Latent SVM detector.
.. ocv:member:: int num_filters
@ -127,7 +127,7 @@ CvObjectDetection
-----------------
.. ocv:struct:: CvObjectDetection
Structure contains the bounding box and confidence level for detected object.
The structure contains the bounding box and confidence level for a detected object.
.. ocv:member:: CvRect rect
@ -161,7 +161,7 @@ cvLatentSvmDetectObjects
Find rectangular regions in the given image that are likely to contain objects
and corresponding confidence levels.
.. ocv:function:: CvSeq* cvLatentSvmDetectObjects(IplImage* image, CvLatentSvmDetector* detector, CvMemStorage* storage, float overlap_threshold, int numThreads)
.. ocv:function:: CvSeq* cvLatentSvmDetectObjects( IplImage* image, CvLatentSvmDetector* detector, CvMemStorage* storage, float overlap_threshold=0.5f, int numThreads=-1 )
:param image: image
:param detector: LatentSVM detector in internal representation
@ -181,9 +181,9 @@ using them.
LatentSvmDetector::ObjectDetection
----------------------------------
.. ocv:class:: LatentSvmDetector::ObjectDetection
.. ocv:struct:: LatentSvmDetector::ObjectDetection
Structure contains the detection information.
The structure contains the detection information.
.. ocv:member:: Rect rect
@ -228,7 +228,7 @@ LatentSvmDetector::load
-----------------------
Load the trained models from given ``.xml`` files and return ``true`` if at least one model was loaded.
.. ocv:function:: bool LatentSvmDetector::load(const vector<string>& filenames, const vector<string>& classNames)
.. ocv:function:: bool LatentSvmDetector::load( const vector<string>& filenames, const vector<string>& classNames=vector<string>() )
:param filenames: A set of filenames storing the trained detectors (models). Each file contains one model. See examples of such files here /opencv_extra/testdata/cv/latentsvmdetector/models_VOC2007/.
@ -239,7 +239,7 @@ LatentSvmDetector::detect
Find rectangular regions in the given image that are likely to contain objects of loaded classes (models)
and corresponding confidence levels.
.. ocv:function:: void LatentSvmDetector::detect( const Mat& image, vector<ObjectDetection>& objectDetections, float overlapThreshold=0.5, int numThreads=-1 )
.. ocv:function:: void LatentSvmDetector::detect( const Mat& image, vector<ObjectDetection>& objectDetections, float overlapThreshold=0.5f, int numThreads=-1 )
:param image: An image.
:param objectDetections: The detections: rectangulars, scores and class IDs.
@ -256,7 +256,7 @@ LatentSvmDetector::getClassCount
--------------------------------
Return a count of loaded models (classes).
.. ocv:function:: size_t getClassCount() const
.. ocv:function:: size_t LatentSvmDetector::getClassCount() const
.. [Felzenszwalb2010] Felzenszwalb, P. F. and Girshick, R. B. and McAllester, D. and Ramanan, D. *Object Detection with Discriminatively Trained Part Based Models*. PAMI, vol. 32, no. 9, pp. 1627-1645, September 2010

View File

@ -160,7 +160,7 @@ CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,
// Structure describes the position of the filter in the feature pyramid
// l - level in the feature pyramid
// (x, y) - coordinate in level l
typedef struct
typedef struct CvLSVMFilterPosition
{
int x;
int y;
@ -181,7 +181,7 @@ typedef struct
// used formula H[(j * sizeX + i) * p + k], where
// k - component of feature vector in cell (i, j)
// END OF FILTER DESCRIPTION
typedef struct{
typedef struct CvLSVMFilterObject{
CvLSVMFilterPosition V;
float fineFunction[4];
int sizeX;
@ -346,7 +346,7 @@ public:
virtual Ptr<FeatureEvaluator> clone() const;
virtual int getFeatureType() const;
virtual bool setImage(const Mat&, Size origWinSize);
virtual bool setImage(const Mat& img, Size origWinSize);
virtual bool setWindow(Point p);
virtual double calcOrd(int featureIdx) const;
@ -425,8 +425,8 @@ protected:
template<class FEval>
friend int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
bool setImage( Ptr<FeatureEvaluator>&, const Mat& );
virtual int runAt( Ptr<FeatureEvaluator>&, Point, double& weight );
bool setImage( Ptr<FeatureEvaluator>& feval, const Mat& image);
virtual int runAt( Ptr<FeatureEvaluator>& feval, Point pt, double& weight );
class Data
{

View File

@ -9,9 +9,9 @@ Restores the selected region in an image using the region neighborhood.
.. ocv:function:: void inpaint( InputArray src, InputArray inpaintMask, OutputArray dst, double inpaintRadius, int flags )
.. ocv:pyfunction:: cv2.inpaint(src, inpaintMask, inpaintRange, flags[, dst]) -> dst
.. ocv:pyfunction:: cv2.inpaint(src, inpaintMask, inpaintRadius, flags[, dst]) -> dst
.. ocv:cfunction:: void cvInpaint( const CvArr* src, const CvArr* mask, CvArr* dst, double inpaintRadius, int flags)
.. ocv:cfunction:: void cvInpaint( const CvArr* src, const CvArr* inpaint_mask, CvArr* dst, double inpaintRange, int flags )
.. ocv:pyoldfunction:: cv.Inpaint(src, mask, dst, inpaintRadius, flags) -> None
:param src: Input 8-bit 1-channel or 3-channel image.
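
A minimal Python sketch; the image and mask file names are illustrative, and non-zero mask pixels mark the area to be restored: ::

    import cv2

    img = cv2.imread("damaged.jpg")
    mask = cv2.imread("damage_mask.png", 0)

    restored = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
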

View File

@ -1669,7 +1669,7 @@ ApproxPoly /doconly
int method
double parameter 0.0
int parameter2 0
CalcEMD2 /doconly
CalcEMD2 float /doconly
CvArr signature1
CvArr signature2
int distance_type
@ -1688,7 +1688,7 @@ CalcOpticalFlowPyrLK currFeatures,status,track_error /doconly
int level
CvTermCriteria criteria
int flags
CvPoint2D32f* guesses
CvPoint2D32f* guesses NULL
CvPoint2D32f currFeatures /O
char status /O
float track_error /O
@ -1726,7 +1726,7 @@ CreateMatND CvMatND /doconly
ints dims
int type
CreateMemStorage CvMemStorage /doconly
int blockSize
int blockSize 0
CreateTrackbar /doconly
char* trackbarName
char* windowName
@ -1738,7 +1738,7 @@ FindChessboardCorners corners /doconly
CvSize patternSize
CvPoint2D32fs corners /O
int flags CV_CALIB_CB_ADAPTIVE_THRESH
FindContours /doconly
FindContours CvSeq /doconly
CvArr image
CvMemStorage storage
int mode CV_RETR_LIST
@ -1751,14 +1751,14 @@ FitLine line /doconly
double reps
double aeps
PyObject* line /O
GetDims /doconly
GetDims dim1,dim2,... /doconly
CvArr arr
GetHuMoments hu /doconly
CvMoments moments
PyObject* hu /O
GetImage /doconly
GetImage iplimage /doconly
CvMat arr
GetMat /doconly
GetMat CvMat /doconly
IplImage arr
int allowND 0
GetMinMaxHistValue min_value,max_value,min_idx,max_idx /doconly
@ -1780,14 +1780,14 @@ LoadImageM /doconly
LoadImage /doconly
char* filename
int iscolor CV_LOAD_IMAGE_COLOR
ReshapeMatND /doconly
ReshapeMatND CvMat /doconly
CvMat arr
int newCn
ints newDims
Reshape /doconly
Reshape CvMat /doconly
CvArr arr
int newCn
int newRows
int newRows 0
SetData /doconly
CvArr arr
PyObject* data
@ -1801,5 +1801,5 @@ Subdiv2DLocate loc,where /doconly
CvPoint2D32f pt
int loc /O
edgeorpoint where /O
WaitKey /doconly
WaitKey int /doconly
int delay 0

View File

@ -3844,7 +3844,7 @@ static double cppKMeans(const CvArr* _samples, int cluster_count, CvArr* _labels
static PyMethodDef old_methods[] = {
#if PYTHON_USE_NUMPY
{"fromarray", (PyCFunction)pycvfromarray, METH_KEYWORDS, "fromarray(array) -> cvmatnd"},
{"fromarray", (PyCFunction)pycvfromarray, METH_KEYWORDS, "fromarray(array [, allowND]) -> CvMat"},
#endif
{"FindDataMatrix", pyfinddatamatrix, METH_VARARGS},

View File

@ -146,7 +146,7 @@ class CppHeaderParser(object):
arg_type += "_and_"
elif w == ">":
if angle_stack[0] == 0:
print "Error at %d: template has no arguments" % (self.lineno,)
print "Error at %s:%d: template has no arguments" % (self.hname, self.lineno)
sys.exit(-1)
if angle_stack[0] > 1:
arg_type += "_end_"
@ -242,7 +242,20 @@ class CppHeaderParser(object):
bases = ll[2:]
return classname, bases, modlist
def parse_func_decl_no_wrap(self, decl_str):
def parse_func_decl_no_wrap(self, decl_str, static_method = False):
decl_str = (decl_str or "").strip()
virtual_method = False
explicit_method = False
if decl_str.startswith("explicit"):
decl_str = decl_str[len("explicit"):].lstrip()
explicit_method = True
if decl_str.startswith("virtual"):
decl_str = decl_str[len("virtual"):].lstrip()
virtual_method = True
if decl_str.startswith("static"):
decl_str = decl_str[len("static"):].lstrip()
static_method = True
fdecl = decl_str.replace("CV_OUT", "").replace("CV_IN_OUT", "")
fdecl = fdecl.strip().replace("\t", " ")
while " " in fdecl:
@ -268,14 +281,21 @@ class CppHeaderParser(object):
apos = fdecl.find("(")
if fname.endswith("operator"):
fname += "()"
fname += " ()"
apos = fdecl.find("(", apos+1)
fname = "cv." + fname.replace("::", ".")
decl = [fname, rettype, [], []]
# inline constructor implementation
implmatch = re.match(r"(\(.*?\))\s*:\s*(\w+\(.*?\),?\s*)+", fdecl[apos:])
if bool(implmatch):
fdecl = fdecl[:apos] + implmatch.group(1)
args0str = fdecl[apos+1:fdecl.rfind(")")].strip()
if args0str != "":
if args0str != "" and args0str != "void":
args0str = re.sub(r"\([^)]*\)", lambda m: m.group(0).replace(',', "@comma@"), args0str)
args0 = args0str.split(",")
args = []
@ -293,9 +313,19 @@ class CppHeaderParser(object):
defval = ""
if dfpos >= 0:
defval = arg[dfpos+1:].strip()
else:
dfpos = arg.find("CV_DEFAULT")
if dfpos >= 0:
defval, pos3 = self.get_macro_arg(arg, dfpos)
else:
dfpos = arg.find("CV_WRAP_DEFAULT")
if dfpos >= 0:
defval, pos3 = self.get_macro_arg(arg, dfpos)
if dfpos >= 0:
defval = defval.replace("@comma@", ",")
arg = arg[:dfpos].strip()
pos = len(arg)-1
while pos >= 0 and (arg[pos] == "_" or arg[pos].isalpha() or arg[pos].isdigit()):
while pos >= 0 and (arg[pos] in "_[]" or arg[pos].isalpha() or arg[pos].isdigit()):
pos -= 1
if pos >= 0:
aname = arg[pos+1:].strip()
@ -306,8 +336,24 @@ class CppHeaderParser(object):
else:
atype = arg
aname = "param"
if aname.endswith("]"):
bidx = aname.find('[')
atype += aname[bidx:]
aname = aname[:bidx]
decl[3].append([atype, aname, defval, []])
if static_method:
decl[2].append("/S")
if virtual_method:
decl[2].append("/V")
if explicit_method:
decl[2].append("/E")
if bool(re.match(r".*\)\s*(const)?\s*=\s*0", decl_str)):
decl[2].append("/A")
if bool(re.match(r".*\)\s*const(\s*=\s*0)?", decl_str)):
decl[2].append("/C")
if "virtual" in decl_str:
print decl_str
return decl
def parse_func_decl(self, decl_str):
@ -328,7 +374,7 @@ class CppHeaderParser(object):
return []
# ignore old API in the documentation check (for now)
if "CVAPI(" in decl_str:
if "CVAPI(" in decl_str and self.wrap_mode:
return []
top = self.block_stack[-1]
@ -377,6 +423,13 @@ class CppHeaderParser(object):
print "Error at %d: no args in '%s'" % (self.lineno, decl_str)
sys.exit(-1)
decl_start = decl_str[:args_begin].strip()
# TODO: normalize all type of operators
if decl_start.endswith("()"):
decl_start = decl_start[0:-2].rstrip() + " ()"
# constructor/destructor case
if bool(re.match(r'(\w+::)*(?P<x>\w+)::~?(?P=x)', decl_start)):
decl_start = "void " + decl_start
rettype, funcname, modlist, argno = self.parse_arg(decl_start, -1)
@ -385,7 +438,15 @@ class CppHeaderParser(object):
if rettype == classname or rettype == "~" + classname:
rettype, funcname = "", rettype
else:
print "Error at %d. the function/method name is missing: '%s'" % (self.lineno, decl_start)
if bool(re.match('\w+\s+\(\*\w+\)\s*\(.*\)', decl_str)):
return [] # function typedef
elif bool(re.match('[A-Z_]+', decl_start)):
return [] # it seems to be a macro instantiation
elif "__declspec" == decl_start:
return []
else:
#print rettype, funcname, modlist, argno
print "Error at %s:%d the function/method name is missing: '%s'" % (self.hname, self.lineno, decl_start)
sys.exit(-1)
if self.wrap_mode and (("::" in funcname) or funcname.startswith("~")):
@ -399,7 +460,7 @@ class CppHeaderParser(object):
funcname = self.get_dotted_name(funcname)
if not self.wrap_mode:
decl = self.parse_func_decl_no_wrap(decl_str)
decl = self.parse_func_decl_no_wrap(decl_str, static_method)
decl[0] = funcname
return decl
@ -515,7 +576,7 @@ class CppHeaderParser(object):
sys.exit(-1)
if block_name:
n += block_name + "."
return n + name
return n + name.replace("::", ".")
def parse_stmt(self, stmt, end_token):
"""
@ -543,7 +604,7 @@ class CppHeaderParser(object):
break
w = stmt[:colon_pos].strip()
if w in ["public", "protected", "private"]:
if w == "public":
if w == "public" or (not self.wrap_mode and w == "protected"):
stack_top[self.PUBLIC_SECTION] = True
else:
stack_top[self.PUBLIC_SECTION] = False
@ -555,14 +616,33 @@ class CppHeaderParser(object):
return stmt_type, "", False, None
if end_token == "{":
if stmt.startswith("class") or stmt.startswith("struct"):
stmt_type = stmt.split()[0]
classname, bases, modlist = self.parse_class_decl(stmt)
decl = []
if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode and ("CV_EXPORTS" in stmt)):
if not self.wrap_mode and stmt.startswith("typedef struct"):
stmt_type = "struct"
try:
classname, bases, modlist = self.parse_class_decl(stmt[len("typedef "):])
except:
print "Error at %s:%d" % (self.hname, self.lineno)
exit(1)
if classname.startswith("_Ipl"):
classname = classname[1:]
decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
if bases:
decl[1] = ": " + " ".join(bases)
decl[1] = ": " + ", ".join([b if "::" in b else self.get_dotted_name(b).replace(".","::") for b in bases])
return stmt_type, classname, True, decl
if stmt.startswith("class") or stmt.startswith("struct"):
stmt_type = stmt.split()[0]
if stmt.strip() != stmt_type:
try:
classname, bases, modlist = self.parse_class_decl(stmt)
except:
print "Error at %s:%d" % (self.hname, self.lineno)
exit(1)
decl = []
if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)):
decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
if bases:
decl[1] = ": " + ", ".join([b if "::" in b else self.get_dotted_name(b).replace(".","::") for b in bases])
return stmt_type, classname, True, decl
if stmt.startswith("enum"):
@ -570,6 +650,8 @@ class CppHeaderParser(object):
if stmt.startswith("namespace"):
stmt_list = stmt.split()
if len(stmt_list) < 2:
stmt_list.append("<unnamed>")
return stmt_list[0], stmt_list[1], True, None
if stmt.startswith("extern") and "\"C\"" in stmt:
return "namespace", "", True, None
@ -633,6 +715,7 @@ class CppHeaderParser(object):
The main method. Parses the input file.
Returns the list of declarations (that can be print using print_decls)
"""
self.hname = hname
decls = []
f = open(hname, "rt")
linelist = list(f.readlines())

View File

@ -7,7 +7,7 @@ detail::focalsFromHomography
----------------------------
Tries to estimate focal lengths from the given homography under the assumption that the camera undergoes rotations around its centre only.
.. ocv:function:: void focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok)
.. ocv:function:: void detail::focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok)
:param H: Homography.
@ -23,7 +23,7 @@ detail::estimateFocal
---------------------
Estimates focal lengths for each given camera.
.. ocv:function:: void estimateFocal(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, std::vector<double> &focals)
.. ocv:function:: void detail::estimateFocal(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, std::vector<double> &focals)
:param features: Features of images.

Some files were not shown because too many files have changed in this diff.