Removed Sphinx generation commands from cmake scripts

Maksim Shabunin 2014-12-24 18:41:01 +03:00
parent d01bedbc61
commit bebf6c47c6
6 changed files with 9 additions and 1240 deletions

@@ -473,14 +473,10 @@ include(cmake/OpenCVFindLibsPerf.cmake)
# Detect other 3rd-party libraries/tools
# ----------------------------------------------------------------------------
# --- LATEX for pdf documentation ---
unset(HAVE_DOXYGEN CACHE)
# --- Doxygen for documentation ---
unset(DOXYGEN_FOUND CACHE)
if(BUILD_DOCS)
include(cmake/OpenCVFindLATEX.cmake)
find_host_program(DOXYGEN_BUILD doxygen)
if (DOXYGEN_BUILD)
set(HAVE_DOXYGEN 1)
endif (DOXYGEN_BUILD)
find_package(Doxygen)
endif(BUILD_DOCS)
# --- Python Support ---
@@ -1083,15 +1079,7 @@ endif()
if(BUILD_DOCS)
status("")
status(" Documentation:")
if(HAVE_SPHINX)
status(" Build Documentation:" PDFLATEX_COMPILER THEN YES ELSE "YES (only HTML and without math expressions)")
else()
status(" Build Documentation:" NO)
endif()
status(" Sphinx:" HAVE_SPHINX THEN "${SPHINX_BUILD} (ver ${SPHINX_VERSION})" ELSE NO)
status(" PdfLaTeX compiler:" PDFLATEX_COMPILER THEN "${PDFLATEX_COMPILER}" ELSE NO)
status(" PlantUML:" PLANTUML THEN "${PLANTUML}" ELSE NO)
status(" Doxygen:" HAVE_DOXYGEN THEN "YES (${DOXYGEN_BUILD})" ELSE NO)
status(" Doxygen:" DOXYGEN_FOUND THEN "${DOXYGEN_EXECUTABLE} (ver ${DOXYGEN_VERSION})" ELSE NO)
endif()
# ========================== samples and tests ==========================
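
Note: the hunks above swap OpenCV's hand-rolled Doxygen lookup (find_host_program plus a manually maintained HAVE_DOXYGEN flag) for CMake's standard FindDoxygen module, which also lets the configuration summary report the tool's version. As a rough, standalone sketch of how that module is consumed (not OpenCV's full logic, just the variables it provides):

    option(BUILD_DOCS "Build documentation" ON)

    if(BUILD_DOCS)
      find_package(Doxygen)  # defines DOXYGEN_FOUND, DOXYGEN_EXECUTABLE, DOXYGEN_VERSION
    endif()

    if(BUILD_DOCS AND DOXYGEN_FOUND)
      message(STATUS "Doxygen: ${DOXYGEN_EXECUTABLE} (ver ${DOXYGEN_VERSION})")
    endif()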

@@ -237,21 +237,3 @@ if(PYTHON2INTERP_FOUND)
set(PYTHON_DEFAULT_AVAILABLE "TRUE")
set(PYTHON_DEFAULT_EXECUTABLE "${PYTHON2_EXECUTABLE}")
endif()
unset(HAVE_SPHINX CACHE)
if(BUILD_DOCS)
find_host_program(SPHINX_BUILD sphinx-build)
find_host_program(PLANTUML plantuml)
if(SPHINX_BUILD)
execute_process(COMMAND "${SPHINX_BUILD}"
OUTPUT_QUIET
ERROR_VARIABLE SPHINX_OUTPUT
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(SPHINX_OUTPUT MATCHES "Sphinx v([0-9][^ \n]*)")
set(SPHINX_VERSION "${CMAKE_MATCH_1}")
set(HAVE_SPHINX 1)
message(STATUS "Found Sphinx ${SPHINX_VERSION}: ${SPHINX_BUILD}")
endif()
endif()
endif(BUILD_DOCS)
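
Note: the block deleted above was the usual manual-detection recipe: find sphinx-build via find_host_program (OpenCV's wrapper that keeps searching host-side tools when cross-compiling), run it, and scrape the version out of its banner. It read ERROR_VARIABLE rather than OUTPUT_VARIABLE, apparently because sphinx-build printed that banner to stderr when invoked without arguments. A generic sketch of the same recipe for an arbitrary tool (tool name and version regex are illustrative):

    find_program(TOOL_EXECUTABLE some-tool)
    if(TOOL_EXECUTABLE)
      execute_process(COMMAND "${TOOL_EXECUTABLE}" --version
                      OUTPUT_VARIABLE TOOL_OUTPUT
                      OUTPUT_STRIP_TRAILING_WHITESPACE)
      if(TOOL_OUTPUT MATCHES "v([0-9][^ \n]*)")
        set(TOOL_VERSION "${CMAKE_MATCH_1}")
      endif()
    endif()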

@@ -2,7 +2,7 @@
# CMake file for OpenCV docs
#-----------------------
set(HAVE_DOC_GENERATOR BUILD_DOCS AND (HAVE_SPHINX OR HAVE_DOXYGEN))
set(HAVE_DOC_GENERATOR BUILD_DOCS AND DOXYGEN_FOUND)
if(HAVE_DOC_GENERATOR)
project(opencv_docs)
@@ -34,120 +34,9 @@ if(HAVE_DOC_GENERATOR)
set(OPTIONAL_DOC_LIST "")
endif(HAVE_DOC_GENERATOR)
# ========= Sphinx docs =========
if(BUILD_DOCS AND HAVE_SPHINX)
if(NOT INSTALL_CREATE_DISTRIB)
list(APPEND DOC_LIST "${OpenCV_SOURCE_DIR}/doc/haartraining.htm")
endif()
# build lists of documentation files and generate table of contents for reference manual
set(DOC_FAKE_ROOT "${CMAKE_CURRENT_BINARY_DIR}/fake-root")
set(DOC_FAKE_ROOT_FILES "")
function(ocv_doc_add_file_to_fake_root source destination)
add_custom_command(
OUTPUT "${DOC_FAKE_ROOT}/${destination}"
COMMAND "${CMAKE_COMMAND}" -E copy "${source}" "${DOC_FAKE_ROOT}/${destination}"
DEPENDS "${source}"
COMMENT "Copying ${destination} to fake root..."
VERBATIM
)
list(APPEND DOC_FAKE_ROOT_FILES "${DOC_FAKE_ROOT}/${destination}")
set(DOC_FAKE_ROOT_FILES "${DOC_FAKE_ROOT_FILES}" PARENT_SCOPE)
endfunction()
function(ocv_doc_add_to_fake_root source)
if(ARGC GREATER 1)
set(destination "${ARGV1}")
else()
file(RELATIVE_PATH destination "${OpenCV_SOURCE_DIR}" "${source}")
endif()
if(IS_DIRECTORY "${source}")
file(GLOB_RECURSE files RELATIVE "${source}" "${source}/*")
foreach(file ${files})
ocv_doc_add_file_to_fake_root("${source}/${file}" "${destination}/${file}")
endforeach()
else()
ocv_doc_add_file_to_fake_root("${source}" "${destination}")
endif()
set(DOC_FAKE_ROOT_FILES "${DOC_FAKE_ROOT_FILES}" PARENT_SCOPE)
endfunction()
set(OPENCV_REFMAN_TOC "")
foreach(mod ${BASE_MODULES} ${EXTRA_MODULES})
if(EXISTS "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/${mod}.rst")
ocv_doc_add_to_fake_root("${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc" modules/${mod}/doc)
set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${mod}/doc/${mod}.rst\n")
endif()
endforeach()
configure_file("${OpenCV_SOURCE_DIR}/modules/refman.rst.in" "${DOC_FAKE_ROOT}/modules/refman.rst" @ONLY)
ocv_doc_add_to_fake_root("${OpenCV_SOURCE_DIR}/index.rst")
ocv_doc_add_to_fake_root("${OpenCV_SOURCE_DIR}/doc")
ocv_doc_add_to_fake_root("${OpenCV_SOURCE_DIR}/platforms/android")
ocv_doc_add_to_fake_root("${OpenCV_SOURCE_DIR}/samples")
set(BUILD_PLANTUML "")
if(PLANTUML)
set(BUILD_PLANTUML "-tplantuml")
endif()
if(PDFLATEX_COMPILER)
add_custom_target(docs
COMMAND ${SPHINX_BUILD} ${BUILD_PLANTUML} -b latex -c "${CMAKE_CURRENT_SOURCE_DIR}" "${DOC_FAKE_ROOT}" .
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty ${CMAKE_CURRENT_BINARY_DIR}
COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/patch_refman_latex.py" opencv2refman.tex
COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/patch_refman_latex.py" opencv2manager.tex
COMMAND ${CMAKE_COMMAND} -E echo "Generating opencv2refman.pdf"
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv2refman.tex
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv2refman.tex
COMMAND ${CMAKE_COMMAND} -E echo "Generating opencv2manager.pdf"
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv2manager.tex
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv2manager.tex
COMMAND ${CMAKE_COMMAND} -E echo "Generating opencv_user.pdf"
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv_user.tex
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv_user.tex
COMMAND ${CMAKE_COMMAND} -E echo "Generating opencv_tutorials.pdf"
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv_tutorials.tex
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode opencv_tutorials.tex
COMMAND ${CMAKE_COMMAND} -E echo "Generating opencv_cheatsheet.pdf"
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode "${CMAKE_CURRENT_SOURCE_DIR}/opencv_cheatsheet.tex"
COMMAND ${PDFLATEX_COMPILER} -interaction=batchmode "${CMAKE_CURRENT_SOURCE_DIR}/opencv_cheatsheet.tex"
DEPENDS ${DOC_FAKE_ROOT_FILES}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating the PDF Manuals"
)
LIST(APPEND OPTIONAL_DOC_LIST "${CMAKE_BINARY_DIR}/doc/opencv2refman.pdf" "${CMAKE_BINARY_DIR}/doc/opencv2manager.pdf"
"${CMAKE_BINARY_DIR}/doc/opencv_user.pdf" "${CMAKE_BINARY_DIR}/doc/opencv_tutorials.pdf" "${CMAKE_BINARY_DIR}/doc/opencv_cheatsheet.pdf")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(docs PROPERTIES FOLDER "documentation")
endif()
endif()
add_custom_target(html_docs
COMMAND "${SPHINX_BUILD}" ${BUILD_PLANTUML} -b html -c "${CMAKE_CURRENT_SOURCE_DIR}" "${DOC_FAKE_ROOT}" ./_html
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${DOC_FAKE_ROOT_FILES}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating Online Documentation"
)
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(html_docs PROPERTIES FOLDER "documentation")
endif()
endif()
# ========= Doxygen docs =========
if(BUILD_DOCS AND HAVE_DOXYGEN)
if(BUILD_DOCS AND DOXYGEN_FOUND)
# not documented modules list
list(APPEND blacklist "ts" "java" "python2" "python3" "world")
@@ -252,7 +141,7 @@ if(BUILD_DOCS AND HAVE_DOXYGEN)
COMMAND "${CMAKE_COMMAND}" -E copy_directory "${CMAKE_SOURCE_DIR}/samples" "${CMAKE_DOXYGEN_OUTPUT_PATH}/html/samples"
COMMAND "${CMAKE_COMMAND}" -E copy "${CMAKE_CURRENT_SOURCE_DIR}/pattern.png" "${CMAKE_DOXYGEN_OUTPUT_PATH}/html"
COMMAND "${CMAKE_COMMAND}" -E copy "${CMAKE_CURRENT_SOURCE_DIR}/acircles_pattern.png" "${CMAKE_DOXYGEN_OUTPUT_PATH}/html"
COMMAND ${DOXYGEN_BUILD} ${doxyfile}
COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps}
)
endif()
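
Note: with Sphinx removed, the docs build keeps only the Doxygen path: the generator gate collapses to BUILD_DOCS AND DOXYGEN_FOUND, and the custom command invokes the executable located by FindDoxygen. The remaining pattern is the usual configure-a-Doxyfile-and-run-it target, roughly like this (target and file names here are illustrative, not OpenCV's exact ones):

    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in"
                   "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile" @ONLY)
    add_custom_target(doxygen
      COMMAND "${DOXYGEN_EXECUTABLE}" "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile"
      WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
      COMMENT "Generating Doxygen documentation")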

@@ -32,8 +32,6 @@ endforeach()
# scripts
set(scripts_gen_java "${CMAKE_CURRENT_SOURCE_DIR}/generator/gen_java.py")
set(scripts_hdr_parser "${CMAKE_CURRENT_SOURCE_DIR}/../python/src2/hdr_parser.py")
set(scripts_gen_javadoc "${CMAKE_CURRENT_SOURCE_DIR}/generator/gen_javadoc.py")
set(scripts_rst_parser "${CMAKE_CURRENT_SOURCE_DIR}/generator/rst_parser.py")
# handwritten C/C++ and Java sources
file(GLOB handwrittren_h_sources "${CMAKE_CURRENT_SOURCE_DIR}/generator/src/cpp/*.h" "${CMAKE_CURRENT_SOURCE_DIR}/generator/src/cpp/*.hpp")
@@ -75,13 +73,6 @@ foreach(module ${OPENCV_JAVA_MODULES})
endif()
endforeach()
# rst documentation used for javadoc generation
set(javadoc_rst_sources "")
foreach(module ${OPENCV_JAVA_MODULES})
file(GLOB_RECURSE refman_rst_headers "${OPENCV_MODULE_opencv_${module}_LOCATION}/*.rst")
list(APPEND javadoc_rst_sources ${refman_rst_headers})
endforeach()
# generated cpp files
set(generated_cpp_sources "")
foreach(module ${OPENCV_JAVA_MODULES})
@@ -112,13 +103,6 @@ foreach(module ${OPENCV_JAVA_MODULES})
list(APPEND generated_java_sources ${generated_java_sources_${module}})
endforeach()
# generated java files with javadoc
set(documented_java_files "")
foreach(java_file ${generated_java_sources} ${handwrittren_java_sources})
get_filename_component(java_file_name "${java_file}" NAME_WE)
list(APPEND documented_java_files "${CMAKE_CURRENT_BINARY_DIR}/${java_file_name}-jdoc.java")
endforeach()
######################################################################################################################################
# step 1: generate .cpp/.java from OpenCV headers
@@ -132,18 +116,8 @@ foreach(module ${OPENCV_JAVA_MODULES})
)
endforeach()
# step 2: generate javadoc comments
set(step2_depends ${step1_depends} ${scripts_gen_javadoc} ${scripts_rst_parser} ${javadoc_rst_sources} ${generated_java_sources} ${handwrittren_java_sources})
string(REPLACE ";" "," OPENCV_JAVA_MODULES_STR "${OPENCV_JAVA_MODULES}")
add_custom_command(OUTPUT ${documented_java_files}
COMMAND ${PYTHON_DEFUALT_EXECUTABLE} "${scripts_gen_javadoc}" --modules ${OPENCV_JAVA_MODULES_STR} "${CMAKE_CURRENT_SOURCE_DIR}/generator/src/java" "${CMAKE_CURRENT_BINARY_DIR}" 2> "${CMAKE_CURRENT_BINARY_DIR}/get_javadoc_errors.log"
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${step2_depends}
VERBATIM
)
# step 3: copy files to destination
set(step3_input_files ${documented_java_files} ${handwrittren_aidl_sources})
set(step3_input_files ${generated_java_sources} ${handwrittren_java_sources} ${handwrittren_aidl_sources})
set(copied_files "")
foreach(java_file ${step3_input_files})
get_filename_component(java_file_name "${java_file}" NAME)
@@ -154,7 +128,7 @@ foreach(java_file ${step3_input_files})
add_custom_command(OUTPUT "${output_name}"
COMMAND ${CMAKE_COMMAND} -E copy "${java_file}" "${output_name}"
MAIN_DEPENDENCY "${java_file}"
DEPENDS ${step2_depends}
DEPENDS ${step1_depends} ${generated_java_sources} ${handwrittren_java_sources}
COMMENT "Generating src/org/opencv/${java_file_name}"
)
list(APPEND copied_files "${output_name}")
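
Note: dropping the javadoc-injection pass shortens the Java wrapper pipeline from three steps to two, so the copy step now consumes the generated and handwritten .java sources directly instead of the *-jdoc.java intermediates. Schematically, with illustrative placeholder paths standing in for the loop variables used above:

    # Before: headers -> gen_java.py -> gen_javadoc.py/rst_parser.py -> copy to src/org/opencv
    # After:  headers -> gen_java.py -> copy to src/org/opencv
    set(java_file   "${CMAKE_CURRENT_BINARY_DIR}/Core.java")          # illustrative input
    set(output_name "${CMAKE_BINARY_DIR}/src/org/opencv/Core.java")   # illustrative destination
    add_custom_command(OUTPUT "${output_name}"
      COMMAND ${CMAKE_COMMAND} -E copy "${java_file}" "${output_name}"
      MAIN_DEPENDENCY "${java_file}")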

@@ -1,290 +0,0 @@
#!/usr/bin/env python
import os, sys, re, string, glob
from optparse import OptionParser
# Black list for classes and methods that does not implemented in Java API
# Created to exclude referencies to them in @see tag
JAVADOC_ENTITY_BLACK_LIST = set(["org.opencv.core.Core#abs", \
"org.opencv.core.Core#theRNG", \
"org.opencv.core.Core#extractImageCOI", \
"org.opencv.core.PCA", \
"org.opencv.core.SVD", \
"org.opencv.core.RNG", \
"org.opencv.imgproc.Imgproc#createMorphologyFilter", \
"org.opencv.imgproc.Imgproc#createLinearFilter", \
"org.opencv.imgproc.Imgproc#createSeparableLinearFilter", \
"org.opencv.imgproc.FilterEngine"])
class JavadocGenerator(object):
def __init__(self, definitions = {}, modules= [], javadoc_marker = "//javadoc:"):
self.definitions = definitions
self.javadoc_marker = javadoc_marker
self.markers_processed = 0
self.markers_documented = 0
self.params_documented = 0
self.params_undocumented = 0
self.known_modules = modules
self.verbose = False
self.show_warnings = True
self.show_errors = True
def parceJavadocMarker(self, line):
assert line.lstrip().startswith(self.javadoc_marker)
offset = line[:line.find(self.javadoc_marker)]
line = line.strip()[len(self.javadoc_marker):]
args_start = line.rfind("(")
args_end = line.rfind(")")
assert args_start * args_end > 0
if args_start >= 0:
assert args_start < args_end
name = line[:args_start].strip()
if name.startswith("java"):
name = name[4:]
return (name, offset, filter(None, list(arg.strip() for arg in line[args_start+1:args_end].split(","))))
name = line.strip()
if name.startswith("java"):
name = name[4:]
return (name, offset, [])
def document(self, infile, outfile):
inf = open(infile, "rt")
outf = open(outfile, "wt")
module = os.path.splitext(os.path.basename(infile))[0].split("+")[0]
if module not in self.known_modules:
module = "unknown"
try:
for l in inf.readlines():
org = l
l = l.replace(" ", "").replace("\t", "")#remove all whitespace
if l.startswith(self.javadoc_marker):
marker = self.parceJavadocMarker(l)
self.markers_processed += 1
decl = self.definitions.get(marker[0],None)
if decl:
javadoc = self.makeJavadoc(decl, marker[2])
if self.verbose:
print
print("Javadoc for \"%s\" File: %s line %s" % (decl["name"], decl["file"], decl["line"]))
print(javadoc)
for line in javadoc.split("\n"):
outf.write(marker[1] + line + "\n")
self.markers_documented += 1
elif self.show_errors:
sys.stderr.write("gen_javadoc error: could not find documentation for %s (module: %s)" % (l.lstrip()[len(self.javadoc_marker):-1].strip(), module))
else:
outf.write(org.replace("\t", " ").rstrip()+"\n")
except:
inf.close()
outf.close()
os.remove(outfile)
raise
else:
inf.close()
outf.close()
def FinishParagraph(self, text):
return text[:-1] + "</p>\n"
def ReformatForJavadoc(self, s):
out = ""
in_paragraph = False
in_list = False
for term in s.split("\n"):
in_list_item = False
if term.startswith("*"):
in_list_item = True
if in_paragraph:
out = self.FinishParagraph(out)
in_paragraph = False
if not in_list:
out += " * <ul>\n"
in_list = True
term = " <li>" + term[1:]
if term.startswith("#."):
in_list_item = True
if in_paragraph:
out = self.FinishParagraph(out)
in_paragraph = False
if not in_list:
out += " * <ul>\n"
in_list = True
term = " <li>" + term[2:]
if not term:
if in_paragraph:
out = self.FinishParagraph(out)
in_paragraph = False
out += " *\n"
else:
if in_list and not in_list_item:
in_list = False
if out.endswith(" *\n"):
out = out[:-3] + " * </ul>\n *\n"
else:
out += " * </ul>\n"
pos_start = 0
pos_end = min(77, len(term)-1)
while pos_start < pos_end:
if pos_end - pos_start == 77:
while pos_end >= pos_start+60:
if not term[pos_end].isspace():
pos_end -= 1
else:
break
if pos_end < pos_start+60:
pos_end = min(pos_start + 77, len(term)-1)
while pos_end < len(term):
if not term[pos_end].isspace():
pos_end += 1
else:
break
if in_paragraph or term.startswith("@") or in_list_item:
out += " * "
else:
in_paragraph = True
out += " * <p>"
out += term[pos_start:pos_end+1].rstrip() + "\n"
pos_start = pos_end + 1
pos_end = min(pos_start + 77, len(term)-1)
if in_paragraph:
out = self.FinishParagraph(out)
if in_list:
out += " * </ul>\n"
return out
def getJavaName(self, decl, methodSeparator = "."):
name = "org.opencv."
name += decl["module"]
if "class" in decl:
name += "." + decl["class"]
else:
name += "." + decl["module"].capitalize()
if "method" in decl:
name += methodSeparator + decl["method"]
return name
def getDocURL(self, decl):
url = "http://docs.opencv.org/modules/"
url += decl["module"]
url += "/doc/"
url += os.path.basename(decl["file"]).replace(".rst",".html")
url += "#" + decl["name"].replace("::","-").replace("()","").replace("=","").strip().rstrip("_").replace(" ","-").replace("_","-").lower()
return url
def makeJavadoc(self, decl, args = None):
doc = ""
prefix = "/**\n"
if decl.get("isclass", False):
decl_type = "class"
elif decl.get("isstruct", False):
decl_type = "struct"
elif "class" in decl:
decl_type = "method"
else:
decl_type = "function"
# brief goes first
if "brief" in decl:
doc += prefix + self.ReformatForJavadoc(decl["brief"])
prefix = " *\n"
elif "long" not in decl:
if self.show_warnings:
print >> sys.stderr, "gen_javadoc warning: no description for " + decl_type + " \"%s\" File: %s (line %s)" % (func["name"], func["file"], func["line"])
doc += prefix + self.ReformatForJavadoc("This " + decl_type + " is undocumented")
prefix = " *\n"
# long goes after brief
if "long" in decl:
doc += prefix + self.ReformatForJavadoc(decl["long"])
prefix = " *\n"
# @param tags
if args and (decl_type == "method" or decl_type == "function"):
documented_params = decl.get("params",{})
for arg in args:
arg_doc = documented_params.get(arg, None)
if not arg_doc:
arg_doc = "a " + arg
if self.show_warnings:
sys.stderr.write("gen_javadoc warning: parameter \"%s\" of \"%s\" is undocumented. File: %s (line %s)" % (arg, decl["name"], decl["file"], decl["line"]))
self.params_undocumented += 1
else:
self.params_documented += 1
doc += prefix + self.ReformatForJavadoc("@param " + arg + " " + arg_doc)
prefix = ""
prefix = " *\n"
# @see tags
# always link to documentation
doc += prefix + " * @see <a href=\"" + self.getDocURL(decl) + "\">" + self.getJavaName(decl) + "</a>\n"
prefix = ""
# other links
if "seealso" in decl:
for see in decl["seealso"]:
seedecl = self.definitions.get(see,None)
if seedecl:
javadoc_name = self.getJavaName(seedecl, "#")
if (javadoc_name not in JAVADOC_ENTITY_BLACK_LIST):
doc += prefix + " * @see " + javadoc_name + "\n"
prefix = " *\n"
#doc += prefix + " * File: " + decl["file"] + " (line " + str(decl["line"]) + ")\n"
return (doc + " */").replace("::",".")
def printSummary(self):
print("Javadoc Generator Summary:")
print(" Total markers: %s" % self.markers_processed)
print( " Undocumented markers: %s" % (self.markers_processed - self.markers_documented))
print( " Generated comments: %s" % self.markers_documented)
print
print(" Documented params: %s" % self.params_documented)
print(" Undocumented params: %s" % self.params_undocumented)
print
if __name__ == "__main__":
selfpath = os.path.dirname(os.path.abspath(sys.argv[0]))
hdr_parser_path = os.path.join(selfpath, "../../python/src2")
sys.path.append(selfpath)
sys.path.append(hdr_parser_path)
import hdr_parser
import rst_parser
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", help="Print verbose log to stdout", action="store_true", default=False)
parser.add_option("", "--no-warnings", dest="warnings", help="Hide warning messages", action="store_false", default=True)
parser.add_option("", "--no-errors", dest="errors", help="Hide error messages", action="store_false", default=True)
parser.add_option("", "--modules", dest="modules", help="comma-separated list of modules to generate comments", metavar="MODS", default=",".join(rst_parser.allmodules))
(options, args) = parser.parse_args(sys.argv)
options.modules = options.modules.split(",")
if len(args) < 2 or len(options.modules) < 1:
parser.print_help()
exit(0)
parser = rst_parser.RstParser(hdr_parser.CppHeaderParser())
for m in options.modules:
parser.parse(m, os.path.join(selfpath, "../../" + m))
parser.printSummary()
generator = JavadocGenerator(parser.definitions, options.modules)
generator.verbose = options.verbose
generator.show_warnings = options.warnings
generator.show_errors = options.errors
for path in args:
folder = os.path.abspath(path)
for jfile in [f for f in glob.glob(os.path.join(folder,"*.java")) if not f.endswith("-jdoc.java")]:
outfile = os.path.abspath(os.path.basename(jfile).replace(".java", "-jdoc.java"))
generator.document(jfile, outfile)
generator.printSummary()

@@ -1,774 +0,0 @@
#!/usr/bin/env python
from __future__ import print_function
import os, sys, re, string, fnmatch
allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "videostab", "softcascade", "superres"]
verbose = False
show_warnings = True
show_errors = True
show_critical_errors = True
params_blacklist = {
"fromarray" : ("object", "allowND"), # python only function
"reprojectImageTo3D" : ("ddepth"), # python only argument
"composeRT" : ("d*d*"), # wildchards in parameter names are not supported by this parser
"error" : "args", # parameter of supporting macro
"getConvertElem" : ("from", "cn", "to", "beta", "alpha"), # arguments of returned functions
"gpu::swapChannels" : ("dstOrder") # parameter is not parsed correctly by the hdr_parser
}
ERROR_001_SECTIONFAILURE = 1
WARNING_002_HDRWHITESPACE = 2
ERROR_003_PARENTHESES = 3
WARNING_004_TABS = 4
ERROR_005_REDEFENITIONPARAM = 5
ERROR_006_REDEFENITIONFUNC = 6
WARNING_007_UNDOCUMENTEDPARAM = 7
WARNING_008_MISSINGPARAM = 8
WARNING_009_HDRMISMATCH = 9
ERROR_010_NOMODULE = 10
ERROR_011_EOLEXPECTED = 11
params_mapping = {
"composeRT" : {
"dr3dr1" : "d*d*",
"dr3dr2" : "d*d*",
"dr3dt1" : "d*d*",
"dr3dt2" : "d*d*",
"dt3dr1" : "d*d*",
"dt3dr2" : "d*d*",
"dt3dt1" : "d*d*",
"dt3dt2" : "d*d*"
},
"CvSVM::train_auto" : {
"coeffGrid" : "\\*Grid",
"degreeGrid" : "\\*Grid",
"gammaGrid" : "\\*Grid",
"nuGrid" : "\\*Grid",
"pGrid" : "\\*Grid"
}
}
known_text_sections_names = ["Appendix", "Results", "Prerequisites", "Introduction", "Description"]
class DeclarationParser(object):
def __init__(self, line=None):
if line is None:
self.fdecl = ""
self.lang = ""
self.balance = 0
return
self.lang = self.getLang(line)
assert self.lang is not None
self.fdecl = line[line.find("::")+2:].strip()
self.balance = self.fdecl.count("(") - self.fdecl.count(")")
assert self.balance >= 0
def append(self, line):
self.fdecl += line
self.balance = self.fdecl.count("(") - self.fdecl.count(")")
def isready(self):
return self.balance == 0
@classmethod
def getLang(cls, line):
if line.startswith(".. ocv:function::"):
return "C++"
if line.startswith(".. ocv:cfunction::"):
return "C"
if line.startswith(".. ocv:pyfunction::"):
return "Python2"
if line.startswith(".. ocv:jfunction::"):
return "Java"
return None
def hasDeclaration(self, line):
return self.getLang(line) is not None
class ParamParser(object):
def __init__(self, line=None):
if line is None:
self.prefix = ""
self.name = ""
self.comment = ""
self.active = False
return
offset = line.find(":param")
assert offset > 0
self.prefix = line[:offset]
assert self.prefix == " "*len(self.prefix), ":param definition should be prefixed with spaces"
line = line[offset + 6:].lstrip()
name_end = line.find(":")
assert name_end > 0
self.name = line[:name_end]
self.comment = line[name_end+1:].lstrip()
self.active = True
def append(self, line):
assert self.active
if (self.hasDeclaration(line)):
self.active = False
elif line.startswith(self.prefix) or not line:
self.comment += "\n" + line.lstrip()
else:
self.active = False
@classmethod
def hasDeclaration(cls, line):
return line.lstrip().startswith(":param")
class RstParser(object):
def __init__(self, cpp_parser):
self.cpp_parser = cpp_parser
self.definitions = {}
self.sections_parsed = 0
self.sections_total = 0
self.sections_skipped = 0
def parse(self, module_name, module_path=None):
if module_path is None:
module_path = "../" + module_name
doclist = []
for root, dirs, files in os.walk(os.path.join(module_path,"doc")):
for filename in fnmatch.filter(files, "*.rst"):
doclist.append(os.path.join(root, filename))
for doc in doclist:
self.parse_rst_file(module_name, doc)
def parse_section_safe(self, module_name, section_name, file_name, lineno, lines):
try:
self.parse_section(module_name, section_name, file_name, lineno, lines)
except AssertionError as args:
if show_errors:
print("RST parser error E%03d: assertion in \"%s\" at %s:%s" % (ERROR_001_SECTIONFAILURE, section_name, file_name, lineno), file=sys.stderr)
print(" Details: %s" % args, file=sys.stderr)
def parse_section(self, module_name, section_name, file_name, lineno, lines):
self.sections_total += 1
# skip sections having whitespace in name
#if section_name.find(" ") >= 0 and section_name.find("::operator") < 0:
if (section_name.find(" ") >= 0 and not bool(re.match(r"(\w+::)*operator\s*(\w+|>>|<<|\(\)|->|\+\+|--|=|==|\+=|-=)", section_name)) ) or section_name.endswith(":"):
if show_errors:
print("RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno), file=sys.stderr)
self.sections_skipped += 1
return
func = {}
func["name"] = section_name
func["file"] = file_name
func["line"] = lineno
func["module"] = module_name
# parse section name
section_name = self.parse_namespace(func, section_name)
class_separator_idx = section_name.find("::")
if class_separator_idx > 0:
func["class"] = section_name[:class_separator_idx]
func["method"] = section_name[class_separator_idx+2:]
else:
func["method"] = section_name
capturing_seealso = False
skip_code_lines = False
expected_brief = True
was_code_line = False
fdecl = DeclarationParser()
pdecl = ParamParser()
ll = None
for l in lines:
# read tail of function/method declaration if needed
if not fdecl.isready():
fdecl.append(ll)
if fdecl.isready():
self.add_new_fdecl(func, fdecl)
continue
# continue capture seealso
if capturing_seealso:
if not l or l.startswith(" "):
seealso = func.get("seealso", [])
seealso.extend(l.split(","))
func["seealso"] = seealso
continue
else:
capturing_seealso = False
ll = l.strip()
if ll == "..":
expected_brief = False
skip_code_lines = False
continue
# skip lines if line-skipping mode is activated
if skip_code_lines:
if not l:
continue
if not l.startswith(" "):
skip_code_lines = False
if ll.startswith(".. code-block::") or ll.startswith(".. image::"):
skip_code_lines = True
continue
# todo: parse structure members; skip them for now
if ll.startswith(".. ocv:member::"):
#print ll
skip_code_lines = True
continue
#ignore references (todo: collect them)
if l.startswith(".. ["):
continue
if ll.startswith(".. "):
expected_brief = False
elif ll.endswith("::"):
# turn on line-skipping mode for code fragments
#print ll
skip_code_lines = True
ll = ll[:len(ll)-2]
# continue param parsing (process params after processing .. at the beginning of the line and :: at the end)
if pdecl.active:
pdecl.append(l)
if pdecl.active:
continue
else:
self.add_new_pdecl(func, pdecl)
# do not continue - current line can contain next parameter definition
# parse ".. seealso::" blocks
if ll.startswith(".. seealso::"):
if ll.endswith(".. seealso::"):
capturing_seealso = True
else:
seealso = func.get("seealso", [])
seealso.extend(ll[ll.find("::")+2:].split(","))
func["seealso"] = seealso
continue
# skip ".. index::"
if ll.startswith(".. index::"):
continue
# parse class & struct definitions
if ll.startswith(".. ocv:class::"):
func["class"] = ll[ll.find("::")+2:].strip()
if "method" in func:
del func["method"]
func["isclass"] = True
expected_brief = True
continue
if ll.startswith(".. ocv:struct::"):
func["class"] = ll[ll.find("::")+2:].strip()
if "method" in func:
del func["method"]
func["isstruct"] = True
expected_brief = True
continue
# parse function/method definitions
if fdecl.hasDeclaration(ll):
fdecl = DeclarationParser(ll)
if fdecl.isready():
self.add_new_fdecl(func, fdecl)
continue
# parse parameters
if pdecl.hasDeclaration(l):
pdecl = ParamParser(l)
continue
# record brief description
if expected_brief:
func["brief"] = func.get("brief", "") + "\n" + ll
if skip_code_lines:
expected_brief = False # force end brief if code block begins
continue
# record other lines as long description
if (skip_code_lines):
ll = ll.replace("/*", "/ *")
ll = ll.replace("*/", "* /")
if (was_code_line):
func["long"] = func.get("long", "") + "\n" + ll + "\n"
else:
was_code_line = True
func["long"] = func.get("long", "") + ll +"\n<code>\n\n // C++ code:\n\n"
else:
if (was_code_line):
func["long"] = func.get("long", "") + "\n" + ll + "\n</code>\n"
was_code_line = False
else:
func["long"] = func.get("long", "") + "\n" + ll
# endfor l in lines
if fdecl.balance != 0:
if show_critical_errors:
print("RST parser error E%03d: invalid parentheses balance in \"%s\" at %s:%s" % (ERROR_003_PARENTHESES, section_name, file_name, lineno), file=sys.stderr)
return
# save last parameter if needed
if pdecl.active:
self.add_new_pdecl(func, pdecl)
# add definition to list
func = self.normalize(func)
if self.validate(func):
self.definitions[func["name"]] = func
self.sections_parsed += 1
if verbose:
self.print_info(func)
elif func:
if func["name"] in known_text_sections_names:
if show_errors:
print("RST parser warning W%03d: SKIPPED: \"%s\" File: %s:%s" % (WARNING_002_HDRWHITESPACE, section_name, file_name, lineno), file=sys.stderr)
self.sections_skipped += 1
elif show_errors:
self.print_info(func, True, sys.stderr)
def parse_rst_file(self, module_name, doc):
doc = os.path.abspath(doc)
lineno = 0
whitespace_warnings = 0
max_whitespace_warnings = 10
lines = []
flineno = 0
fname = ""
prev_line = None
df = open(doc, "rt")
for l in df.readlines():
lineno += 1
# handle tabs
if l.find("\t") >= 0:
whitespace_warnings += 1
if whitespace_warnings <= max_whitespace_warnings and show_warnings:
print("RST parser warning W%03d: tab symbol instead of space is used at %s:%s" % (WARNING_004_TABS, doc, lineno), file=sys.stderr)
l = l.replace("\t", " ")
# handle first line
if prev_line == None:
prev_line = l.rstrip()
continue
ll = l.rstrip()
if len(prev_line) > 0 and len(ll) >= len(prev_line) and (ll == "-" * len(ll) or ll == "+" * len(ll) or ll == "=" * len(ll)):
# new function candidate
if len(lines) > 1:
self.parse_section_safe(module_name, fname, doc, flineno, lines[:len(lines)-1])
lines = []
flineno = lineno-1
fname = prev_line.strip()
elif flineno > 0:
lines.append(ll)
prev_line = ll
df.close()
# don't forget about the last function section in file!!!
if len(lines) > 1:
self.parse_section_safe(module_name, fname, doc, flineno, lines)
@classmethod
def parse_namespace(cls, func, section_name):
known_namespaces = ["cv", "gpu", "flann", "superres"]
l = section_name.strip()
for namespace in known_namespaces:
if l.startswith(namespace + "::"):
func["namespace"] = namespace
return l[len(namespace)+2:]
return section_name
def add_new_fdecl(self, func, decl):
if decl.fdecl.endswith(";"):
print("RST parser error E%03d: unexpected semicolon at the end of declaration in \"%s\" at %s:%s" \
% (ERROR_011_EOLEXPECTED, func["name"], func["file"], func["line"]), file=sys.stderr)
decls = func.get("decls", [])
if (decl.lang == "C++" or decl.lang == "C"):
rst_decl = self.cpp_parser.parse_func_decl_no_wrap(decl.fdecl)
decls.append( [decl.lang, decl.fdecl, rst_decl] )
else:
decls.append( [decl.lang, decl.fdecl] )
func["decls"] = decls
@classmethod
def add_new_pdecl(cls, func, decl):
params = func.get("params", {})
if decl.name in params:
if show_errors:
#check black_list
if decl.name not in params_blacklist.get(func["name"], []):
print("RST parser error E%03d: redefinition of parameter \"%s\" in \"%s\" at %s:%s" \
% (ERROR_005_REDEFENITIONPARAM, decl.name, func["name"], func["file"], func["line"]), file=sys.stderr)
else:
params[decl.name] = decl.comment
func["params"] = params
def print_info(self, func, skipped=False, out = sys.stdout):
print(file=out)
if skipped:
print("SKIPPED DEFINITION:", file=out)
print("name: %s" % (func.get("name","~empty~")), file=out)
print("file: %s:%s" % (func.get("file","~empty~"), func.get("line","~empty~")), file=out)
print("is class: %s" % func.get("isclass", False), file=out)
print("is struct: %s" % func.get("isstruct", False), file=out)
print("module: %s" % func.get("module","~unknown~"), file=out)
print("namespace: %s" % func.get("namespace", "~empty~"), file=out)
print("class: %s" % (func.get("class","~empty~")), file=out)
print("method: %s" % (func.get("method","~empty~")), file=out)
print("brief: %s" % (func.get("brief","~empty~")), file=out)
if "decls" in func:
print("declarations:", file=out)
for d in func["decls"]:
print(" %7s: %s" % (d[0], re.sub(r"[ ]+", " ", d[1])), file=out)
if "seealso" in func:
print("seealso: ", func["seealso"], file=out)
if "params" in func:
print("parameters:", file=out)
for name, comment in func["params"].items():
print("%23s: %s" % (name, comment), file=out)
print("long: %s" % (func.get("long","~empty~")), file=out)
print(file=out)
def validate(self, func):
if func.get("decls", None) is None:
if not func.get("isclass", False) and not func.get("isstruct", False):
return False
if func["name"] in self.definitions:
if show_errors:
print("RST parser error E%03d: \"%s\" from: %s:%s is already documented at %s:%s" \
% (ERROR_006_REDEFENITIONFUNC, func["name"], func["file"], func["line"], self.definitions[func["name"]]["file"], self.definitions[func["name"]]["line"]), file=sys.stderr)
return False
return self.validateParams(func)
def validateParams(self, func):
documentedParams = list(func.get("params", {}).keys())
params = []
for decl in func.get("decls", []):
if len(decl) > 2:
args = decl[2][3] # decl[2] -> [ funcname, return_ctype, [modifiers], [args] ]
for arg in args:
# arg -> [ ctype, name, def val, [mod], argno ]
if arg[0] != "...":
params.append(arg[1])
params = list(set(params))#unique
# 1. all params are documented
for p in params:
if p not in documentedParams and show_warnings:
print("RST parser warning W%03d: parameter \"%s\" of \"%s\" is undocumented. %s:%s" % (WARNING_007_UNDOCUMENTEDPARAM, p, func["name"], func["file"], func["line"]), file=sys.stderr)
# 2. only real params are documented
for p in documentedParams:
if p not in params and show_warnings:
if p not in params_blacklist.get(func["name"], []):
print("RST parser warning W%03d: unexisting parameter \"%s\" of \"%s\" is documented at %s:%s" % (WARNING_008_MISSINGPARAM, p, func["name"], func["file"], func["line"]), file=sys.stderr)
return True
def normalize(self, func):
if not func:
return func
fnname = func["name"]
fnname = self.normalizeText(fnname)
fnname = re.sub(r'_\?D$', "_nD", fnname) # tailing _?D can be mapped to _nD
fnname = re.sub(r'\?D$', "ND", fnname) # tailing ?D can be mapped to ND
fnname = re.sub(r'\(s\)$', "s", fnname) # tailing (s) can be mapped to s
func["name"] = fnname
if "method" in func:
func["method"] = self.normalizeText(func["method"])
if "class" in func:
func["class"] = self.normalizeText(func["class"])
if "brief" in func:
func["brief"] = self.normalizeText(func.get("brief", None))
if not func["brief"]:
del func["brief"]
if "long" in func:
func["long"] = self.normalizeText(func.get("long", None))
if not func["long"]:
del func["long"]
if "decls" in func:
func["decls"].sort()
if "params" in func:
params = {}
for name, comment in func["params"].items():
cmt = self.normalizeText(comment)
if cmt:
params[name] = cmt
# expand some wellknown params
pmap = params_mapping.get(fnname)
if pmap:
for name, alias in pmap.items():
params[name] = params[alias]
func["params"] = params
if "seealso" in func:
seealso = []
for see in func["seealso"]:
item = self.normalizeText(see.rstrip(".")).strip("\"")
if item and (item.find(" ") < 0 or item.find("::operator") > 0):
seealso.append(item)
func["seealso"] = list(set(seealso))
if not func["seealso"]:
del func["seealso"]
# special case for old C functions - section name should omit "cv" prefix
if not func.get("isclass", False) and not func.get("isstruct", False):
self.fixOldCFunctionName(func)
return func
def fixOldCFunctionName(self, func):
if not "decls" in func:
return
fname = None
for decl in func["decls"]:
if decl[0] != "C" and decl[0] != "Python1":
return
if decl[0] == "C":
fname = decl[2][0]
if fname is None:
return
fname = fname.replace(".", "::")
if fname.startswith("cv::cv"):
if fname[6:] == func.get("name", "").replace("*", "_n"):
func["name"] = fname[4:]
func["method"] = fname[4:]
elif show_warnings:
print("RST parser warning W%03d: \"%s\" - section name is \"%s\" instead of \"%s\" at %s:%s" % (WARNING_009_HDRMISMATCH, fname, func["name"], fname[6:], func["file"], func["line"]), file=sys.stderr)
#self.print_info(func)
def normalizeText(self, s):
if s is None:
return s
s = re.sub(r"\.\. math::[ \r]*\n+((.|\n)*?)(\n[ \r]*\n|$)", mathReplace2, s)
s = re.sub(r":math:`([^`]+?)`", mathReplace, s)
s = re.sub(r" *:sup:", "^", s)
s = s.replace(":ocv:class:", "")
s = s.replace(":ocv:struct:", "")
s = s.replace(":ocv:func:", "")
s = s.replace(":ocv:cfunc:","")
s = s.replace(":c:type:", "")
s = s.replace(":c:func:", "")
s = s.replace(":ref:", "")
s = s.replace(":math:", "")
s = s.replace(":func:", "")
s = s.replace("]_", "]")
s = s.replace(".. note::", "Note:")
s = s.replace(".. table::", "")
s = s.replace(".. ocv:function::", "")
s = s.replace(".. ocv:cfunction::", "")
# remove ".. identifier:" lines
s = re.sub(r"(^|\n)\.\. [a-zA-Z_0-9]+(::[a-zA-Z_0-9]+)?:(\n|$)", "\n ", s)
# unwrap urls
s = re.sub(r"`([^`<]+ )<(https?://[^>]+)>`_", "\\1(\\2)", s)
# remove tailing ::
s = re.sub(r"::(\n|$)", "\\1", s)
# normalize line endings
s = re.sub(r"\r\n", "\n", s)
# remove extra line breaks before/after _ or ,
s = re.sub(r"\n[ ]*([_,])\n", r"\1 ", s)
# remove extra line breaks after `
#s = re.sub(r"`\n", "` ", s)
# remove extra space after ( and before .,)
s = re.sub(r"\([\n ]+", "(", s)
s = re.sub(r"[\n ]+(\.|,|\))", "\\1", s)
# remove extra line breaks after ".. note::"
s = re.sub(r"\.\. note::\n+", ".. note:: ", s)
# remove extra line breaks before *
s = re.sub(r"\n+\*", "\n*", s)
# remove extra line breaks after *
s = re.sub(r"\n\*\n+", "\n* ", s)
# remove extra line breaks before #.
s = re.sub(r"\n+#\.", "\n#.", s)
# remove extra line breaks after #.
s = re.sub(r"\n#\.\n+", "\n#. ", s)
# remove extra line breaks before `
#s = re.sub(r"\n[ ]*`", " `", s)
# remove trailing whitespaces
s = re.sub(r"[ ]+$", "", s)
# remove .. for references
#s = re.sub(r"\.\. \[", "[", s)
# unescape
s = re.sub(r"\\(.)", "\\1", s)
# remove whitespace before .
s = re.sub(r"[ ]+\.", ".", s)
# remove tailing whitespace
s = re.sub(r" +(\n|$)", "\\1", s)
# remove leading whitespace
s = re.sub(r"(^|\n) +", "\\1", s)
# compress line breaks
s = re.sub(r"\n\n+", "\n\n", s)
# remove other newlines
s = re.sub(r"([^.\n\\=])\n([^*#\n]|\*[^ ])", "\\1 \\2", s)
# compress whitespace
s = re.sub(r" +", " ", s)
# restore math
s = re.sub(r" *<BR> *", "\n", s)
# remove extra space before .
s = re.sub(r"[\n ]+\.", ".", s)
s = s.replace("**", "")
s = re.sub(r"``([^\n]+?)``", "<code>\\1</code>", s)
s = s.replace("``", "\"")
s = s.replace("`", "\"")
s = s.replace("\"\"", "\"")
s = s.strip()
return s
def printSummary(self):
print("RST Parser Summary:")
print(" Total sections: %s" % self.sections_total)
print(" Skipped sections: %s" % self.sections_skipped)
print(" Parsed sections: %s" % self.sections_parsed)
print(" Invalid sections: %s" % (self.sections_total - self.sections_parsed - self.sections_skipped))
# statistic by language
stat = {}
classes = 0
structs = 0
for name, d in self.definitions.items():
if d.get("isclass", False):
classes += 1
elif d.get("isstruct", False):
structs += 1
else:
for decl in d.get("decls", []):
stat[decl[0]] = stat.get(decl[0], 0) + 1
print()
print(" classes documented: %s" % classes)
print(" structs documented: %s" % structs)
for lang in sorted(stat.items()):
print(" %7s functions documented: %s" % lang)
print()
def mathReplace2(match):
m = mathReplace(match)
#print "%s ===> %s" % (match.group(0), m)
return "\n\n"+m+"<BR><BR>"
def hdotsforReplace(match):
return '... '*int(match.group(1))
def matrixReplace(match):
m = match.group(2)
m = re.sub(r" *& *", " ", m)
return m
def mathReplace(match):
m = match.group(1)
m = m.replace("\n", "<BR>")
m = m.replace("<", "&lt")
m = m.replace(">", "&gt")
m = re.sub(r"\\text(tt|rm)?{(.*?)}", "\\2", m)
m = re.sub(r"\\mbox{(.*?)}", "\\1", m)
m = re.sub(r"\\mathrm{(.*?)}", "\\1", m)
m = re.sub(r"\\vecthree{(.*?)}{(.*?)}{(.*?)}", "[\\1 \\2 \\3]", m)
m = re.sub(r"\\bar{(.*?)}", "\\1`", m)
m = re.sub(r"\\sqrt\[(\d)*\]{(.*?)}", "sqrt\\1(\\2)", m)
m = re.sub(r"\\sqrt{(.*?)}", "sqrt(\\1)", m)
m = re.sub(r"\\frac{(.*?)}{(.*?)}", "(\\1)/(\\2)", m)
m = re.sub(r"\\fork{(.*?)}{(.*?)}{(.*?)}{(.*?)}", "\\1 \\2; \\3 \\4", m)
m = re.sub(r"\\forkthree{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}", "\\1 \\2; \\3 \\4; \\5 \\6", m)
m = re.sub(r"\\stackrel{(.*?)}{(.*?)}", "\\1 \\2", m)
m = re.sub(r"\\sum _{(.*?)}", "sum{by: \\1}", m)
m = re.sub(r" +", " ", m)
m = re.sub(r"\\begin{(?P<gtype>array|bmatrix)}(?:{[\|lcr\. ]+})? *(.*?)\\end{(?P=gtype)}", matrixReplace, m)
m = re.sub(r"\\hdotsfor{(\d+)}", hdotsforReplace, m)
m = re.sub(r"\\vecthreethree{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}{(.*?)}", "<BR>|\\1 \\2 \\3|<BR>|\\4 \\5 \\6|<BR>|\\7 \\8 \\9|<BR>", m)
m = re.sub(r"\\left[ ]*\\lfloor[ ]*", "[", m)
m = re.sub(r"[ ]*\\right[ ]*\\rfloor", "]", m)
m = re.sub(r"\\left[ ]*\([ ]*", "(", m)
m = re.sub(r"[ ]*\\right[ ]*\)", ")", m)
m = re.sub(r"([^\\])\$", "\\1", m)
m = m.replace("\\times", "x")
m = m.replace("\\pm", "+-")
m = m.replace("\\cdot", "*")
m = m.replace("\\sim", "~")
m = m.replace("\\leftarrow", "<-")
m = m.replace("\\rightarrow", "->")
m = m.replace("\\leftrightarrow", "<->")
m = re.sub(r" *\\neg *", " !", m)
m = re.sub(r" *\\neq? *", " != ", m)
m = re.sub(r" *\\geq? *", " >= ", m)
m = re.sub(r" *\\leq? *", " <= ", m)
m = re.sub(r" *\\vee *", " V ", m)
m = re.sub(r" *\\oplus *", " (+) ", m)
m = re.sub(r" *\\mod *", " mod ", m)
m = re.sub(r"( *)\\partial *", "\\1d", m)
m = re.sub(r"( *)\\quad *", "\\1 ", m)
m = m.replace("\\,", " ")
m = m.replace("\\:", " ")
m = m.replace("\\;", " ")
m = m.replace("\\!", "")
m = m.replace("\\\\", "<BR>")
m = m.replace("\\wedge", "/\\\\")
m = re.sub(r"\\(.)", "\\1", m)
m = re.sub(r"\([ ]+", "(", m)
m = re.sub(r"[ ]+(\.|,|\))(<BR>| |$)", "\\1\\2", m)
m = re.sub(r" +\|[ ]+([a-zA-Z0-9_(])", " |\\1", m)
m = re.sub(r"([a-zA-Z0-9_)}])[ ]+(\(|\|)", "\\1\\2", m)
m = re.sub(r"{\((-?[a-zA-Z0-9_]+)\)}", "\\1", m)
m = re.sub(r"{(-?[a-zA-Z0-9_]+)}", "(\\1)", m)
m = re.sub(r"\(([0-9]+)\)", "\\1", m)
m = m.replace("{", "(")
m = m.replace("}", ")")
#print "%s ===> %s" % (match.group(0), m)
return "<em>" + m + "</em>"
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage:\n", os.path.basename(sys.argv[0]), " <module path>")
exit(0)
if len(sys.argv) >= 3:
if sys.argv[2].lower() == "verbose":
verbose = True
rst_parser_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
hdr_parser_path = os.path.join(rst_parser_dir, "../../python/src2")
sys.path.append(hdr_parser_path)
import hdr_parser
module = sys.argv[1]
if module != "all" and not os.path.isdir(os.path.join(rst_parser_dir, "../../" + module)):
print("RST parser error E%03d: module \"%s\" could not be found." % (ERROR_010_NOMODULE, module))
exit(1)
parser = RstParser(hdr_parser.CppHeaderParser())
if module == "all":
for m in allmodules:
parser.parse(m, os.path.join(rst_parser_dir, "../../" + m))
else:
parser.parse(module, os.path.join(rst_parser_dir, "../../" + module))
# summary
parser.printSummary()