Stripped carriage returns and set the svn:eol-style property to native.

Christopher Dunn 2007-06-14 21:01:26 +00:00
parent f1a49467cf
commit f986423955
77 changed files with 1203 additions and 1203 deletions
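Note: under Subversion, the svn:eol-style=native property is what lets the client normalize line endings per platform; a typical command (the file name here is just one of the files in this commit) is:

svn propset svn:eol-style native AUTHORS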

View File

@ -1 +1 @@
Baptiste Lepilleur <blep@users.sourceforge.net>

View File

@ -1,44 +1,44 @@
* Introduction:

JSON (JavaScript Object Notation) is a lightweight data-interchange format.
It can represent integers, real numbers, strings, an ordered sequence of
values, and a collection of name/value pairs.

JsonCpp is a simple API for manipulating JSON values and handling
serialization to and from strings.
It can also preserve existing comments through the
deserialization/serialization steps, making it a convenient format for
storing user input files.
Parsing is user-friendly and provides precise error reports.

* Building/Testing:

JsonCpp uses SCons (http://www.scons.org) as a build system. SCons requires
Python to be installed (http://www.python.org).

Download the scons-local distribution from the following URL:
http://sourceforge.net/project/showfiles.php?group_id=30337&package_id=67375

Unzip it in the directory where you found this README file. scons.py should
be at the same level as README.

python scons.py platform=PLTFRM [TARGET]

where PLTFRM may be one of:
    suncc      Sun C++ (Solaris)
    vacpp      Visual Age C++ (AIX)
    mingw
    msvc6      Microsoft Visual Studio 6 service pack 5-6
    msvc70     Microsoft Visual Studio 2002
    msvc71     Microsoft Visual Studio 2003
    msvc80     Microsoft Visual Studio 2005
    linux-gcc  GNU C++ (Linux; also reported to work for Mac OS X)

Adding a platform is fairly simple: you need to modify the SConstruct file
to do so.

and TARGET may be:
    check:    build library and run unit tests.
    doc:      build documentation.
    doc-dist: build documentation tarball.
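For example, to build the library and run the unit tests with GNU C++ on Linux:

python scons.py platform=linux-gcc check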

View File

@ -1,205 +1,205 @@
# Big issue:
# The emitter depends on the doxyfile, which is generated from doxyfile.in,
# so the build fails after cleaning and relaunching the build.
import os
import os.path
import glob
from fnmatch import fnmatch
def DoxyfileParse(file_contents):
"""
Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings.
"""
data = {}
import shlex
lex = shlex.shlex(instream = file_contents, posix = True)
lex.wordchars += "*+./-:"
lex.whitespace = lex.whitespace.replace("\n", "")
lex.escape = ""
lineno = lex.lineno
last_backslash_lineno = lineno
token = lex.get_token()
key = token # the first token should be a key
last_token = ""
key_token = False
next_key = False
new_data = True
def append_data(data, key, new_data, token):
if new_data or len(data[key]) == 0:
data[key].append(token)
else:
data[key][-1] += token
while token:
if token in ['\n']:
if last_token not in ['\\']:
key_token = True
elif token in ['\\']:
pass
elif key_token:
key = token
key_token = False
else:
if token == "+=":
if not data.has_key(key):
data[key] = list()
elif token == "=":
data[key] = list()
else:
append_data( data, key, new_data, token )
new_data = True
last_token = token
token = lex.get_token()
if last_token == '\\' and token != '\n':
new_data = False
append_data( data, key, new_data, '\\' )
# compress lists of len 1 into single strings
for (k, v) in data.items():
if len(v) == 0:
data.pop(k)
# items in the following list will be kept as lists and not converted to strings
if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
continue
if len(v) == 1:
data[k] = v[0]
return data
def DoxySourceScan(node, env, path):
"""
Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files.
"""
default_file_patterns = [
'*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
'*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
'*.py',
]
default_exclude_patterns = [
'*~',
]
sources = []
data = DoxyfileParse(node.get_contents())
if data.get("RECURSIVE", "NO") == "YES":
recursive = True
else:
recursive = False
file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
doxyfile_dir = str( node.dir )
## print 'running from', os.getcwd()
for node in data.get("INPUT", []):
node_real_path = os.path.normpath( os.path.join( doxyfile_dir, node ) )
if os.path.isfile(node_real_path):
## print str(node), 'is a file'
sources.append(node)
elif os.path.isdir(node_real_path):
## print str(node), 'is a directory'
if recursive:
for root, dirs, files in os.walk(node):
for f in files:
filename = os.path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
                        exclude_check = reduce(lambda x, y: x or fnmatch(filename, y), exclude_patterns, False) # exclude if any exclude pattern matches
if pattern_check and not exclude_check:
sources.append(filename)
## print ' adding source', os.path.abspath( filename )
else:
for pattern in file_patterns:
sources.extend(glob.glob(os.path.join( node, pattern)))
## else:
## print str(node), 'is neither a file nor a directory'
sources = map( lambda path: env.File(path), sources )
return sources
def DoxySourceScanCheck(node, env):
"""Check if we should scan this file"""
return os.path.isfile(node.path)
def DoxyEmitter(source, target, env):
"""Doxygen Doxyfile emitter"""
# possible output formats and their default values and output locations
output_formats = {
"HTML": ("YES", "html"),
"LATEX": ("YES", "latex"),
"RTF": ("NO", "rtf"),
"MAN": ("YES", "man"),
"XML": ("NO", "xml"),
}
## print '#### DoxyEmitter:', source[0].abspath, os.path.exists( source[0].abspath )
data = DoxyfileParse(source[0].get_contents())
targets = []
out_dir = data.get("OUTPUT_DIRECTORY", ".")
# add our output locations
for (k, v) in output_formats.items():
if data.get("GENERATE_" + k, v[0]) == "YES":
targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
# don't clobber targets
for node in targets:
env.Precious(node)
# set up cleaning stuff
for node in targets:
clean_cmd = env.Clean(node, node)
env.Depends( clean_cmd, source )
return (targets, source)
def generate(env):
"""
Add builders and construction variables for the
Doxygen tool. This is currently for Doxygen 1.4.6.
"""
doxyfile_scanner = env.Scanner(
DoxySourceScan,
"DoxySourceScan",
scan_check = DoxySourceScanCheck,
)
doxyfile_builder = env.Builder(
action = env.Action("cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}",
varlist=['$SOURCES']),
emitter = DoxyEmitter,
target_factory = env.fs.Entry,
single_source = True,
source_scanner = doxyfile_scanner,
)
env.Append(BUILDERS = {
'Doxygen': doxyfile_builder,
})
env.AppendUnique(
DOXYGEN = 'doxygen',
)
def exists(env):
"""
Make sure doxygen exists.
"""
return env.Detect("doxygen")
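A minimal usage sketch for the tool above (an assumed SConstruct fragment; the toolpath and the Doxyfile location are illustrative, not taken from this commit):

# Assumed SConstruct fragment: load the 'doxygen' tool, then declare a doc target.
env = Environment(tools=['default', 'doxygen'], toolpath=['scons-tools'])
env.Doxygen('doc/doxyfile')  # Doxyfile path is an assumption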

View File

@ -1,179 +1,179 @@
import os
import os.path
import glob
from fnmatch import fnmatch
import targz
##def DoxyfileParse(file_contents):
## """
## Parse a Doxygen source file and return a dictionary of all the values.
## Values will be strings and lists of strings.
## """
## data = {}
##
## import shlex
## lex = shlex.shlex(instream = file_contents, posix = True)
## lex.wordchars += "*+./-:"
## lex.whitespace = lex.whitespace.replace("\n", "")
## lex.escape = ""
##
## lineno = lex.lineno
## last_backslash_lineno = lineno
## token = lex.get_token()
## key = token # the first token should be a key
## last_token = ""
## key_token = False
## next_key = False
## new_data = True
##
## def append_data(data, key, new_data, token):
## if new_data or len(data[key]) == 0:
## data[key].append(token)
## else:
## data[key][-1] += token
##
## while token:
## if token in ['\n']:
## if last_token not in ['\\']:
## key_token = True
## elif token in ['\\']:
## pass
## elif key_token:
## key = token
## key_token = False
## else:
## if token == "+=":
## if not data.has_key(key):
## data[key] = list()
## elif token == "=":
## data[key] = list()
## else:
## append_data( data, key, new_data, token )
## new_data = True
##
## last_token = token
## token = lex.get_token()
##
## if last_token == '\\' and token != '\n':
## new_data = False
## append_data( data, key, new_data, '\\' )
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
## if len(v) == 0:
## data.pop(k)
##
## # items in the following list will be kept as lists and not converted to strings
## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
## continue
##
## if len(v) == 1:
## data[k] = v[0]
##
## return data
##
##def DoxySourceScan(node, env, path):
## """
## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
## any files used to generate docs to the list of source files.
## """
## default_file_patterns = [
## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
## '*.py',
## ]
##
## default_exclude_patterns = [
## '*~',
## ]
##
## sources = []
##
## data = DoxyfileParse(node.get_contents())
##
## if data.get("RECURSIVE", "NO") == "YES":
## recursive = True
## else:
## recursive = False
##
## file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
##
## for node in data.get("INPUT", []):
## if os.path.isfile(node):
## sources.add(node)
## elif os.path.isdir(node):
## if recursive:
## for root, dirs, files in os.walk(node):
## for f in files:
## filename = os.path.join(root, f)
##
## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
##
## if pattern_check and not exclude_check:
## sources.append(filename)
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
## sources = map( lambda path: env.File(path), sources )
## return sources
##
##
##def DoxySourceScanCheck(node, env):
## """Check if we should scan this file"""
## return os.path.isfile(node.path)
def srcDistEmitter(source, target, env):
## """Doxygen Doxyfile emitter"""
## # possible output formats and their default values and output locations
## output_formats = {
## "HTML": ("YES", "html"),
## "LATEX": ("YES", "latex"),
## "RTF": ("NO", "rtf"),
## "MAN": ("YES", "man"),
## "XML": ("NO", "xml"),
## }
##
## data = DoxyfileParse(source[0].get_contents())
##
## targets = []
## out_dir = data.get("OUTPUT_DIRECTORY", ".")
##
## # add our output locations
## for (k, v) in output_formats.items():
## if data.get("GENERATE_" + k, v[0]) == "YES":
## targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
##
## # don't clobber targets
## for node in targets:
## env.Precious(node)
##
## # set up cleaning stuff
## for node in targets:
## env.Clean(node, node)
##
## return (targets, source)
return (target,source)
def generate(env):
"""
Add builders and construction variables for the
SrcDist tool.
"""
## doxyfile_scanner = env.Scanner(
## DoxySourceScan,
## "DoxySourceScan",
## scan_check = DoxySourceScanCheck,
## )
srcdist_builder = targz.makeBuilder( srcDistEmitter )
env['BUILDERS']['SrcDist'] = srcdist_builder
def exists(env):
"""
Make sure srcdist exists.
"""
return True
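A minimal usage sketch for the SrcDist builder above (an assumed SConstruct fragment; the targz tool is loaded as well so the TARGZ_* variables are defined, and all file names are illustrative):

# Assumed SConstruct fragment: package selected sources into a tarball.
env = Environment(tools=['default', 'targz', 'srcdist'], toolpath=['scons-tools'])
env.SrcDist('jsoncpp-src.tar.gz', ['README', 'SConstruct'])  # names are assumptions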

View File

@ -1,79 +1,79 @@
import re
from SCons.Script import * # the usual scons stuff you get in a SConscript
def generate(env):
"""
Add builders and construction variables for the
SubstInFile tool.
Adds SubstInFile builder, which substitutes the keys->values of SUBST_DICT
from the source to the target.
The values of SUBST_DICT first have any construction variables expanded
(its keys are not expanded).
If a value of SUBST_DICT is a python callable function, it is called and
the result is expanded as the value.
If there's more than one source and more than one target, each target gets
substituted from the corresponding source.
"""
def do_subst_in_file(targetfile, sourcefile, dict):
"""Replace all instances of the keys of dict with their values.
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
try:
f = open(sourcefile, 'rb')
contents = f.read()
f.close()
except:
raise SCons.Errors.UserError, "Can't read source file %s"%sourcefile
for (k,v) in dict.items():
contents = re.sub(k, v, contents)
try:
f = open(targetfile, 'wb')
f.write(contents)
f.close()
except:
raise SCons.Errors.UserError, "Can't write target file %s"%targetfile
return 0 # success
def subst_in_file(target, source, env):
if not env.has_key('SUBST_DICT'):
raise SCons.Errors.UserError, "SubstInFile requires SUBST_DICT to be set."
d = dict(env['SUBST_DICT']) # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v()).replace('\\','\\\\')
elif SCons.Util.is_String(v):
d[k] = env.subst(v).replace('\\','\\\\')
else:
raise SCons.Errors.UserError, "SubstInFile: key %s: %s must be a string or callable"%(k, repr(v))
        for (t,s) in zip(target, source):
            do_subst_in_file(str(t), str(s), d) # raises UserError on failure
        return 0 # success
def subst_in_file_string(target, source, env):
"""This is what gets printed on the console."""
return '\n'.join(['Substituting vars from %s into %s'%(str(s), str(t))
for (t,s) in zip(target, source)])
def subst_emitter(target, source, env):
"""Add dependency from substituted SUBST_DICT to target.
Returns original target, source tuple unchanged.
"""
d = env['SUBST_DICT'].copy() # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v())
elif SCons.Util.is_String(v):
d[k]=env.subst(v)
Depends(target, SCons.Node.Python.Value(d))
return target, source
    ## env.Append(TOOLS = 'substinfile') # this should be automatically done by SCons?!
subst_action = SCons.Action.Action( subst_in_file, subst_in_file_string )
env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
"""
Make sure tool exists.
"""
return True
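A minimal usage sketch for the SubstInFile builder above (an assumed SConstruct fragment; file names and the substitution key are illustrative):

# Assumed SConstruct fragment: expand %VERSION% from a template into a generated file.
env = Environment(tools=['default', 'substinfile'], toolpath=['scons-tools'])
env.SubstInFile('version.h', 'version.h.in', SUBST_DICT={'%VERSION%': '0.1'})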

View File

@ -3,75 +3,75 @@
Tool-specific initialization for tarball.
"""
## Commands to tackle a command based implementation:
##to unpack on the fly...
##gunzip < FILE.tar.gz | tar xvf -
##to pack on the fly...
##tar cvf - FILE-LIST | gzip -c > FILE.tar.gz
import os.path
import SCons.Builder
import SCons.Node.FS
import SCons.Util
try:
import gzip
import tarfile
internal_targz = 1
except ImportError:
internal_targz = 0
TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
if internal_targz:
def targz(target, source, env):
def archive_name( path ):
path = os.path.normpath( os.path.abspath( path ) )
common_path = os.path.commonprefix( (base_dir, path) )
archive_name = path[len(common_path):]
return archive_name
def visit(tar, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
tar.add(path, archive_name(path) )
compression = env.get('TARGZ_COMPRESSION_LEVEL',TARGZ_DEFAULT_COMPRESSION_LEVEL)
base_dir = os.path.normpath( env.get('TARGZ_BASEDIR', env.Dir('.')).abspath )
target_path = str(target[0])
fileobj = gzip.GzipFile( target_path, 'wb', compression )
tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
for source in source:
source_path = str(source)
if source.isdir():
os.path.walk(source_path, visit, tar)
else:
tar.add(source_path, archive_name(source_path) ) # filename, arcname
tar.close()
targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])
def makeBuilder( emitter = None ):
    return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
                                 source_factory = SCons.Node.FS.Entry,
                                 source_scanner = SCons.Defaults.DirScanner,
                                 suffix = '$TARGZ_SUFFIX',
                                 multi = 1,
                                 emitter = emitter) # pass the optional emitter through; it was previously ignored
TarGzBuilder = makeBuilder()
def generate(env):
    """Add Builders and construction variables for tar.gz archives to an Environment.
       The following environment variables may be set:
        TARGZ_COMPRESSION_LEVEL: integer, [0-9]. 0: no compression, 9: best compression (same as gzip compression level).
        TARGZ_BASEDIR: base directory used to determine the archive name (this allows the archive
                       name to be relative to something other than the top directory).
"""
env['BUILDERS']['TarGz'] = TarGzBuilder
env['TARGZ_COM'] = targzAction
env['TARGZ_COMPRESSION_LEVEL'] = TARGZ_DEFAULT_COMPRESSION_LEVEL # range 0-9
env['TARGZ_SUFFIX'] = '.tar.gz'
    env['TARGZ_BASEDIR'] = env.Dir('.') # Source archive names are made relative to this directory.
def exists(env):
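A minimal usage sketch for the TarGz builder above (an assumed SConstruct fragment; target and source names are illustrative):

# Assumed SConstruct fragment: archive two directories into a .tar.gz.
env = Environment(tools=['default', 'targz'], toolpath=['scons-tools'])
env.TarGz('dist/jsoncpp.tar.gz', [env.Dir('src'), env.Dir('include')])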

View File

@ -1,187 +1,187 @@
#include <json/json.h>
#include <algorithm> // sort
#include <stdio.h>
#if defined(_MSC_VER) && _MSC_VER >= 1310
# pragma warning( disable: 4996 ) // disable fopen deprecation warning
#endif
static std::string
readInputTestFile( const char *path )
{
FILE *file = fopen( path, "rb" );
if ( !file )
return std::string("");
fseek( file, 0, SEEK_END );
long size = ftell( file );
fseek( file, 0, SEEK_SET );
std::string text;
char *buffer = new char[size+1];
buffer[size] = 0;
if ( fread( buffer, 1, size, file ) == (unsigned long)size )
text = buffer;
fclose( file );
delete[] buffer;
return text;
}
static void
printValueTree( FILE *fout, Json::Value &value, const std::string &path = "." )
{
switch ( value.type() )
{
case Json::nullValue:
fprintf( fout, "%s=null\n", path.c_str() );
break;
case Json::intValue:
fprintf( fout, "%s=%d\n", path.c_str(), value.asInt() );
break;
case Json::uintValue:
fprintf( fout, "%s=%u\n", path.c_str(), value.asUInt() );
break;
case Json::realValue:
fprintf( fout, "%s=%.16g\n", path.c_str(), value.asDouble() );
break;
case Json::stringValue:
fprintf( fout, "%s=\"%s\"\n", path.c_str(), value.asString().c_str() );
break;
case Json::booleanValue:
fprintf( fout, "%s=%s\n", path.c_str(), value.asBool() ? "true" : "false" );
break;
case Json::arrayValue:
{
fprintf( fout, "%s=[]\n", path.c_str() );
int size = value.size();
for ( int index =0; index < size; ++index )
{
static char buffer[16];
sprintf( buffer, "[%d]", index );
printValueTree( fout, value[index], path + buffer );
}
}
break;
case Json::objectValue:
{
fprintf( fout, "%s={}\n", path.c_str() );
Json::Value::Members members( value.getMemberNames() );
std::sort( members.begin(), members.end() );
std::string suffix = *(path.end()-1) == '.' ? "" : ".";
for ( Json::Value::Members::iterator it = members.begin();
it != members.end();
++it )
{
const std::string &name = *it;
printValueTree( fout, value[name], path + suffix + name );
}
}
break;
default:
break;
}
}
static int
parseAndSaveValueTree( const std::string &input,
const std::string &actual,
const std::string &kind,
Json::Value &root )
{
Json::Reader reader;
bool parsingSuccessful = reader.parse( input, root );
if ( !parsingSuccessful )
{
printf( "Failed to parse %s file: \n%s\n",
kind.c_str(),
reader.getFormatedErrorMessages().c_str() );
return 1;
}
FILE *factual = fopen( actual.c_str(), "wt" );
if ( !factual )
{
printf( "Failed to create %s actual file.\n", kind.c_str() );
return 2;
}
printValueTree( factual, root );
fclose( factual );
return 0;
}
static int
rewriteValueTree( const std::string &rewritePath,
const Json::Value &root,
std::string &rewrite )
{
//Json::FastWriter writer;
//writer.enableYAMLCompatibility();
Json::StyledWriter writer;
rewrite = writer.write( root );
FILE *fout = fopen( rewritePath.c_str(), "wt" );
if ( !fout )
{
printf( "Failed to create rewrite file: %s\n", rewritePath.c_str() );
return 2;
}
fprintf( fout, "%s\n", rewrite.c_str() );
fclose( fout );
return 0;
}
static std::string
removeSuffix( const std::string &path,
const std::string &extension )
{
if ( extension.length() >= path.length() )
return std::string("");
std::string suffix = path.substr( path.length() - extension.length() );
if ( suffix != extension )
return std::string("");
return path.substr( 0, path.length() - extension.length() );
}
int main( int argc, const char *argv[] )
{
if ( argc != 2 )
{
printf( "Usage: %s input-json-file", argv[0] );
return 3;
}
std::string input = readInputTestFile( argv[1] );
if ( input.empty() )
{
printf( "Failed to read input or empty input: %s\n", argv[1] );
return 3;
}
std::string basePath = removeSuffix( argv[1], ".json" );
if ( basePath.empty() )
{
      printf( "Bad input path. Path does not end with '.json':\n%s\n", argv[1] );
return 3;
}
std::string actualPath = basePath + ".actual";
std::string rewritePath = basePath + ".rewrite";
std::string rewriteActualPath = basePath + ".actual-rewrite";
Json::Value root;
int exitCode = parseAndSaveValueTree( input, actualPath, "input", root );
if ( exitCode == 0 )
{
std::string rewrite;
exitCode = rewriteValueTree( rewritePath, root, rewrite );
if ( exitCode == 0 )
{
Json::Value rewriteRoot;
exitCode = parseAndSaveValueTree( rewrite, rewriteActualPath, "rewrite", rewriteRoot );
}
}
return exitCode;
}
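The runner above reads a single JSON test file, writes the .actual, .rewrite, and .actual-rewrite files next to it, and returns a non-zero exit code on any failure. A hypothetical invocation (executable name and data path are assumptions):

jsontestrunner test/data/test_array_01.json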

View File

@ -1,6 +1,6 @@
Import( 'env_testing buildJSONTests' )
buildJSONTests( env_testing, Split( """
main.cpp
""" ),
'jsontestrunner' )

View File

@ -1,10 +1,10 @@
# removes all files created during testing
import glob
import os
paths = []
for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
paths += glob.glob( pattern )
for path in paths:
os.unlink( path )

View File

@ -1,11 +1,11 @@
import glob
import os.path
for path in glob.glob( '*.json' ):
text = file(path,'rt').read()
target = os.path.splitext(path)[0] + '.expected'
if os.path.exists( target ):
print 'skipping:', target
else:
print 'creating:', target
file(target,'wt').write(text)

View File

@ -1,64 +1,64 @@
# Simple implementation of a JSON test runner that runs the tests against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
    print "Usage: %s input-json-file" % sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
        assert False, "Unexpected value type"
def parseAndSaveValueTree( input, actual_path ):
root = json.read( input )
fout = file( actual_path, 'wt' )
valueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
rewrite = json.write( value )
    rewrite = rewrite[1:-1] # Somehow the string is quoted! A json-py bug?
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( json.write( root ), rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 )
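A hypothetical invocation, mirroring the usage string above (the script name and data file are assumptions):

python pyjsontestrunner.py test/data/test_array_01.json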

View File

@ -1,91 +1,91 @@
import sys
import os
import os.path
import glob
def compareOutputs( expected, actual, message ):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
max_line_to_compare = min( len(expected), len(actual) )
for index in xrange(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
break
if diff_line == 0 and len(expected) != len(actual):
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
def safeGetLine( lines, index ):
index += -1
if index >= len(lines):
return ''
return lines[index].strip()
return """ Difference in %s at line %d:
Expected: '%s'
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
safeGetLine(actual,diff_line) )
def safeReadFile( path ):
try:
return file( path, 'rt' ).read()
except IOError, e:
return '<File "%s" is missing: %s>' % (path,e)
def runAllTests( jsontest_executable_path, input_dir = None ):
if not input_dir:
input_dir = os.getcwd()
tests = glob.glob( os.path.join( input_dir, '*.json' ) )
failed_tests = []
for input_path in tests:
print 'TESTING:', input_path,
pipe = os.popen( "%s %s" % (jsontest_executable_path, input_path) )
process_output = pipe.read()
status = pipe.close()
base_path = os.path.splitext(input_path)[0]
actual_output = safeReadFile( base_path + '.actual' )
actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
file(base_path + '.process-output','wt').write( process_output )
if status:
print 'parsing failed'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
expected_output = file( expected_output_path, 'rt' ).read()
detail = ( compareOutputs( expected_output, actual_output, 'input' )
or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
if detail:
print 'FAILED'
failed_tests.append( (input_path, detail) )
else:
print 'OK'
if failed_tests:
print
print 'Failure details:'
for failed_test in failed_tests:
print '* Test', failed_test[0]
print failed_test[1]
print
print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
len(failed_tests) )
return 1
else:
print 'All %d tests passed.' % len(tests)
return 0
if __name__ == '__main__':
    if len(sys.argv) < 2 or len(sys.argv) > 3:
print "Usage: %s jsontest-executable-path [input-testcase-directory]" % sys.argv[0]
sys.exit( 1 )
jsontest_executable_path = os.path.normpath( os.path.abspath( sys.argv[1] ) )
if len(sys.argv) > 2:
input_path = os.path.normpath( os.path.abspath( sys.argv[2] ) )
else:
input_path = None
status = runAllTests( jsontest_executable_path, input_path )
sys.exit( status )
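A hypothetical invocation, per the usage string above (both paths are assumptions):

python runjsontests.py buildscons/linux-gcc/bin/jsontestrunner test/data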

View File

@ -1 +1 @@
.=[]

View File

@ -1 +1 @@
[]

View File

@ -1,2 +1,2 @@
.=[]
.[0]=1

View File

@ -1 +1 @@
[1]

View File

@ -1,6 +1,6 @@
.=[]
.[0]=1
.[1]=2
.[2]=3
.[3]=4
.[4]=5

View File

@ -1 +1 @@
[ 1, 2 , 3,4,5]

View File

@ -1,5 +1,5 @@
.=[]
.[0]=1
.[1]="abc"
.[2]=12.3
.[3]=-4

View File

@ -1 +1 @@
[1, "abc" , 12.3, -4]

View File

@ -1,100 +1,100 @@
.=[]
.[0]=1
.[1]=2
.[2]=3
.[3]=4
.[4]=5
.[5]=6
.[6]=7
.[7]=8
.[8]=9
.[9]=10
.[10]=11
.[11]=12
.[12]=13
.[13]=14
.[14]=15
.[15]=16
.[16]=17
.[17]=18
.[18]=19
.[19]=20
.[20]=21
.[21]=22
.[22]=23
.[23]=24
.[24]=25
.[25]=26
.[26]=27
.[27]=28
.[28]=29
.[29]=30
.[30]=31
.[31]=32
.[32]=33
.[33]=34
.[34]=35
.[35]=36
.[36]=37
.[37]=38
.[38]=39
.[39]=40
.[40]=41
.[41]=42
.[42]=43
.[43]=44
.[44]=45
.[45]=46
.[46]=47
.[47]=48
.[48]=49
.[49]=50
.[50]=51
.[51]=52
.[52]=53
.[53]=54
.[54]=55
.[55]=56
.[56]=57
.[57]=58
.[58]=59
.[59]=60
.[60]=61
.[61]=62
.[62]=63
.[63]=64
.[64]=65
.[65]=66
.[66]=67
.[67]=68
.[68]=69
.[69]=70
.[70]=71
.[71]=72
.[72]=73
.[73]=74
.[74]=75
.[75]=76
.[76]=77
.[77]=78
.[78]=79
.[79]=80
.[80]=81
.[81]=82
.[82]=83
.[83]=84
.[84]=85
.[85]=86
.[86]=87
.[87]=88
.[88]=89
.[89]=90
.[90]=91
.[91]=92
.[92]=93
.[93]=94
.[94]=95
.[95]=96
.[96]=97
.[97]=98
.[98]=99

View File

@ -1,5 +1,5 @@
.=[]
.[0]="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
.[1]="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
.[2]="ccccccccccccccccccccccc"
.[3]="dddddddddddddddddddddddddddddddddddddddddddddddddddd"

View File

@ -1,4 +1,4 @@
[ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"ccccccccccccccccccccccc",
"dddddddddddddddddddddddddddddddddddddddddddddddddddd" ]

View File

@ -1 +1 @@
.=123456789

View File

@ -1 +1 @@
0123456789

View File

@ -1 +1 @@
.=-123456789

View File

@ -1 +1 @@
-0123456789

View File

@ -1,3 +1,3 @@
.=1.2345678

View File

@ -1,3 +1,3 @@
1.2345678

View File

@ -1,2 +1,2 @@
.="abcdef"

View File

@ -1,2 +1,2 @@
"abcdef"

View File

@ -1,2 +1,2 @@
.=null

View File

@ -1,2 +1,2 @@
null

View File

@ -1,2 +1,2 @@
.=true

View File

@ -1,2 +1,2 @@
true

View File

@ -1,2 +1,2 @@
.=false

View File

@ -1,2 +1,2 @@
false

View File

@ -1,2 +1,2 @@
.=null

View File

@ -1,3 +1,3 @@
// C++ style comment
null

View File

@ -1,2 +1,2 @@
.=null

View File

@ -1,4 +1,4 @@
/* C style comment
*/
null

View File

@ -1,20 +1,20 @@
.={}
.attribute=[]
.attribute[0]="random"
.attribute[1]="short"
.attribute[2]="bold"
.attribute[3]=12
.attribute[4]={}
.attribute[4].height=7
.attribute[4].width=64
.count=1234
.name={}
.name.aka="T.E.S.T."
.name.id=123987
.test={}
.test.1={}
.test.1.2={}
.test.1.2.3={}
.test.1.2.3.coord=[]
.test.1.2.3.coord[0]=1
.test.1.2.3.coord[1]=2

View File

@ -1,17 +1,17 @@
{
"count" : 1234,
"name" : { "aka" : "T.E.S.T.", "id" : 123987 },
"attribute" : [
"random",
"short",
"bold",
12,
{ "height" : 7, "width" : 64 }
],
"test": { "1" :
{ "2" :
{ "3" : { "coord" : [ 1,2] }
}
}
}
}

View File

@ -1 +1 @@
.=2147483647

View File

@ -1,2 +1,2 @@
// Max signed integer
2147483647

View File

@ -1 +1 @@
.=-2147483648

View File

@ -1,2 +1,2 @@
// Min signed integer
-2147483648

View File

@ -1 +1 @@
.=4294967295

View File

@ -1,2 +1,2 @@
// Max unsigned integer
4294967295

View File

@ -1,2 +1,2 @@
.=0

View File

@ -1,3 +1,3 @@
// Min unsigned integer
0

View File

@ -1,2 +1,2 @@
.=1

View File

@ -1,2 +1,2 @@
1

View File

@ -1 +1 @@
.={}

View File

@ -1 +1 @@
{}

View File

@ -1,2 +1,2 @@
.={}
.count=1234

View File

@ -1 +1 @@
{ "count" : 1234 }

View File

@ -1,4 +1,4 @@
.={}
.attribute="random"
.count=1234
.name="test"

View File

@ -1,5 +1,5 @@
{
"count" : 1234,
"name" : "test",
"attribute" : "random"
}

View File

@ -1,2 +1,2 @@
.={}
.=1234

View File

@ -1,3 +1,3 @@
{
"" : 1234
}

View File

@ -1,3 +1,3 @@
.={}
.first=1
.second=2

View File

@ -1,14 +1,14 @@
/* A comment
at the beginning of the file.
*/
{
"first" : 1, // comment after 'first' on the same line
/* Comment before 'second'
*/
"second" : 2
}
/* A comment at
the end of the file.
*/

View File

@ -1,2 +1,2 @@
.=8589934592

View File

@ -1,3 +1,3 @@
// 2^33 => out of integer range, switch to double
8589934592

View File

@ -1,2 +1,2 @@
.=-4294967295

View File

@ -1,3 +1,3 @@
// -2^32 => out of signed integer range, switch to double
-4294967295

View File

@ -1,2 +1,2 @@
.=-4294967295

View File

@ -1,3 +1,3 @@
// -2^32 => out of signed integer range, switch to double
-4294967295

View File

@ -1,2 +1,2 @@
.=1.2345678

View File

@ -1,3 +1,3 @@
// 1.2345678
12345678e-7

View File

@ -1,3 +1,3 @@
.=1234567.8

View File

@ -1,3 +1,3 @@
// 1234567.8
0.12345678e7

View File

@ -1,3 +1,3 @@
.=-1.2345678

View File

@ -1,3 +1,3 @@
// -1.2345678
-12345678e-7

View File

@ -1,3 +1,3 @@
.=-1234567.8

View File

@ -1,3 +1,3 @@
// -1234567.8
-0.12345678e7