Stripped carriage return and added eol-style native prop.

This commit is contained in:
Christopher Dunn 2007-06-14 21:01:26 +00:00
parent f1a49467cf
commit f986423955
77 changed files with 1203 additions and 1203 deletions

View File

@ -1 +1 @@
Baptiste Lepilleur <blep@users.sourceforge.net> Baptiste Lepilleur <blep@users.sourceforge.net>

View File

@ -1,44 +1,44 @@
* Introduction: * Introduction:
JSON (JavaScript Object Notation) is a lightweight data-interchange format. JSON (JavaScript Object Notation) is a lightweight data-interchange format.
It can represent integer, real number, string, an ordered sequence of It can represent integer, real number, string, an ordered sequence of
value, and a collection of name/value pairs. value, and a collection of name/value pairs.
JsonCpp is a simple API to manipulate JSON value, and handle serialization JsonCpp is a simple API to manipulate JSON value, and handle serialization
and unserialization to string. and unserialization to string.
It can also preserve existing comment in unserialization/serialization steps, It can also preserve existing comment in unserialization/serialization steps,
making it a convenient format to store user input files. making it a convenient format to store user input files.
Unserialization parsing is user friendly and provides precise error reports. Unserialization parsing is user friendly and provides precise error reports.
* Building/Testing: * Building/Testing:
JsonCpp uses Scons (http://www.scons.org) as a build system. Scons requires JsonCpp uses Scons (http://www.scons.org) as a build system. Scons requires
python to be installed (http://www.python.org). python to be installed (http://www.python.org).
You can download the scons-local distribution from the following url:
http://sourceforge.net/project/showfiles.php?group_id=30337&package_id=67375
Unzip it in the directory where you found this README file. scons.py should be
at the same level as README.
python scons.py platform=PLTFRM [TARGET] python scons.py platform=PLTFRM [TARGET]
where PLTFRM may be one of: where PLTFRM may be one of:
suncc Sun C++ (Solaris) suncc Sun C++ (Solaris)
vacpp Visual Age C++ (AIX) vacpp Visual Age C++ (AIX)
mingw mingw
msvc6 Microsoft Visual Studio 6 service pack 5-6 msvc6 Microsoft Visual Studio 6 service pack 5-6
msvc70 Microsoft Visual Studio 2002 msvc70 Microsoft Visual Studio 2002
msvc71 Microsoft Visual Studio 2003 msvc71 Microsoft Visual Studio 2003
msvc80 Microsoft Visual Studio 2005 msvc80 Microsoft Visual Studio 2005
linux-gcc Gnu C++ (linux, also reported to work for Mac OS X) linux-gcc Gnu C++ (linux, also reported to work for Mac OS X)
adding platform is fairly simple. You need to change the Sconstruct file adding platform is fairly simple. You need to change the Sconstruct file
to do so. to do so.
and TARGET may be: and TARGET may be:
check: build library and run unit tests. check: build library and run unit tests.
doc: build documentation doc: build documentation
doc-dist: build documentation tarball doc-dist: build documentation tarball

View File

@ -1,205 +1,205 @@
# Big issue: # Big issue:
# emitter depends on doxyfile which is generated from doxyfile.in. # emitter depends on doxyfile which is generated from doxyfile.in.
# build fails after cleaning and relaunching the build. # build fails after cleaning and relaunching the build.
import os import os
import os.path import os.path
import glob import glob
from fnmatch import fnmatch from fnmatch import fnmatch
def DoxyfileParse(file_contents): def DoxyfileParse(file_contents):
""" """
Parse a Doxygen source file and return a dictionary of all the values. Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings. Values will be strings and lists of strings.
""" """
data = {} data = {}
import shlex import shlex
lex = shlex.shlex(instream = file_contents, posix = True) lex = shlex.shlex(instream = file_contents, posix = True)
lex.wordchars += "*+./-:" lex.wordchars += "*+./-:"
lex.whitespace = lex.whitespace.replace("\n", "") lex.whitespace = lex.whitespace.replace("\n", "")
lex.escape = "" lex.escape = ""
lineno = lex.lineno lineno = lex.lineno
last_backslash_lineno = lineno last_backslash_lineno = lineno
token = lex.get_token() token = lex.get_token()
key = token # the first token should be a key key = token # the first token should be a key
last_token = "" last_token = ""
key_token = False key_token = False
next_key = False next_key = False
new_data = True new_data = True
def append_data(data, key, new_data, token): def append_data(data, key, new_data, token):
if new_data or len(data[key]) == 0: if new_data or len(data[key]) == 0:
data[key].append(token) data[key].append(token)
else: else:
data[key][-1] += token data[key][-1] += token
while token: while token:
if token in ['\n']: if token in ['\n']:
if last_token not in ['\\']: if last_token not in ['\\']:
key_token = True key_token = True
elif token in ['\\']: elif token in ['\\']:
pass pass
elif key_token: elif key_token:
key = token key = token
key_token = False key_token = False
else: else:
if token == "+=": if token == "+=":
if not data.has_key(key): if not data.has_key(key):
data[key] = list() data[key] = list()
elif token == "=": elif token == "=":
data[key] = list() data[key] = list()
else: else:
append_data( data, key, new_data, token ) append_data( data, key, new_data, token )
new_data = True new_data = True
last_token = token last_token = token
token = lex.get_token() token = lex.get_token()
if last_token == '\\' and token != '\n': if last_token == '\\' and token != '\n':
new_data = False new_data = False
append_data( data, key, new_data, '\\' ) append_data( data, key, new_data, '\\' )
# compress lists of len 1 into single strings # compress lists of len 1 into single strings
for (k, v) in data.items(): for (k, v) in data.items():
if len(v) == 0: if len(v) == 0:
data.pop(k) data.pop(k)
# items in the following list will be kept as lists and not converted to strings # items in the following list will be kept as lists and not converted to strings
if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]: if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
continue continue
if len(v) == 1: if len(v) == 1:
data[k] = v[0] data[k] = v[0]
return data return data
def DoxySourceScan(node, env, path):
    """
    Doxygen Doxyfile source scanner.  This should scan the Doxygen file and add
    any files used to generate docs to the list of source files.
    """
    default_file_patterns = [
        '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
        '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
        '*.py',
    ]
    default_exclude_patterns = [
        '*~',
    ]
    sources = []
    data = DoxyfileParse(node.get_contents())
    recursive = data.get("RECURSIVE", "NO") == "YES"
    file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
    exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
    doxyfile_dir = str(node.dir)
    for node in data.get("INPUT", []):
        # INPUT entries are relative to the Doxyfile's directory
        node_real_path = os.path.normpath(os.path.join(doxyfile_dir, node))
        if os.path.isfile(node_real_path):
            sources.append(node)
        elif os.path.isdir(node_real_path):
            if recursive:
                # NOTE(review): walks 'node' (relative path), not
                # node_real_path, matching the original -- confirm intended
                for root, dirs, files in os.walk(node):
                    for f in files:
                        filename = os.path.join(root, f)
                        # BUG FIX: builtin reduce() was removed in Python 3;
                        # any()/all() express the same pattern checks
                        pattern_check = any(fnmatch(filename, p) for p in file_patterns)
                        exclude_check = all(fnmatch(filename, p) for p in exclude_patterns)
                        if pattern_check and not exclude_check:
                            sources.append(filename)
            else:
                for pattern in file_patterns:
                    sources.extend(glob.glob(os.path.join(node, pattern)))
    # BUG FIX: map() returns an iterator on Python 3; SCons expects a list
    sources = [env.File(p) for p in sources]
    return sources
def DoxySourceScanCheck(node, env):
    """Check if we should scan this file"""
    # only real files on disk can be parsed as Doxyfiles
    return os.path.isfile(node.path)
def DoxyEmitter(source, target, env):
    """Doxygen Doxyfile emitter"""
    # possible output formats and their default generation flag and output dir
    output_formats = {
        "HTML": ("YES", "html"),
        "LATEX": ("YES", "latex"),
        "RTF": ("NO", "rtf"),
        "MAN": ("YES", "man"),
        "XML": ("NO", "xml"),
    }
    data = DoxyfileParse(source[0].get_contents())
    out_dir = data.get("OUTPUT_DIRECTORY", ".")
    # add our output locations
    targets = []
    for fmt, (default, subdir) in output_formats.items():
        if data.get("GENERATE_" + fmt, default) == "YES":
            targets.append(env.Dir(os.path.join(out_dir, data.get(fmt + "_OUTPUT", subdir))))
    # don't clobber targets
    for node in targets:
        env.Precious(node)
    # set up cleaning stuff
    for node in targets:
        clean_cmd = env.Clean(node, node)
        env.Depends(clean_cmd, source)
    return (targets, source)
def generate(env):
    """
    Add builders and construction variables for the
    Doxygen tool.  This is currently for Doxygen 1.4.6.
    """
    doxyfile_scanner = env.Scanner(
        DoxySourceScan,
        "DoxySourceScan",
        scan_check=DoxySourceScanCheck,
    )
    # run doxygen from the Doxyfile's directory so relative paths resolve
    doxyfile_builder = env.Builder(
        action=env.Action("cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}",
                          varlist=['$SOURCES']),
        emitter=DoxyEmitter,
        target_factory=env.fs.Entry,
        single_source=True,
        source_scanner=doxyfile_scanner,
    )
    env.Append(BUILDERS={'Doxygen': doxyfile_builder})
    env.AppendUnique(DOXYGEN='doxygen')
def exists(env): def exists(env):
""" """
Make sure doxygen exists. Make sure doxygen exists.
""" """
return env.Detect("doxygen") return env.Detect("doxygen")

View File

@ -1,179 +1,179 @@
import os import os
import os.path import os.path
import glob import glob
from fnmatch import fnmatch from fnmatch import fnmatch
import targz import targz
##def DoxyfileParse(file_contents): ##def DoxyfileParse(file_contents):
## """ ## """
## Parse a Doxygen source file and return a dictionary of all the values. ## Parse a Doxygen source file and return a dictionary of all the values.
## Values will be strings and lists of strings. ## Values will be strings and lists of strings.
## """ ## """
## data = {} ## data = {}
## ##
## import shlex ## import shlex
## lex = shlex.shlex(instream = file_contents, posix = True) ## lex = shlex.shlex(instream = file_contents, posix = True)
## lex.wordchars += "*+./-:" ## lex.wordchars += "*+./-:"
## lex.whitespace = lex.whitespace.replace("\n", "") ## lex.whitespace = lex.whitespace.replace("\n", "")
## lex.escape = "" ## lex.escape = ""
## ##
## lineno = lex.lineno ## lineno = lex.lineno
## last_backslash_lineno = lineno ## last_backslash_lineno = lineno
## token = lex.get_token() ## token = lex.get_token()
## key = token # the first token should be a key ## key = token # the first token should be a key
## last_token = "" ## last_token = ""
## key_token = False ## key_token = False
## next_key = False ## next_key = False
## new_data = True ## new_data = True
## ##
## def append_data(data, key, new_data, token): ## def append_data(data, key, new_data, token):
## if new_data or len(data[key]) == 0: ## if new_data or len(data[key]) == 0:
## data[key].append(token) ## data[key].append(token)
## else: ## else:
## data[key][-1] += token ## data[key][-1] += token
## ##
## while token: ## while token:
## if token in ['\n']: ## if token in ['\n']:
## if last_token not in ['\\']: ## if last_token not in ['\\']:
## key_token = True ## key_token = True
## elif token in ['\\']: ## elif token in ['\\']:
## pass ## pass
## elif key_token: ## elif key_token:
## key = token ## key = token
## key_token = False ## key_token = False
## else: ## else:
## if token == "+=": ## if token == "+=":
## if not data.has_key(key): ## if not data.has_key(key):
## data[key] = list() ## data[key] = list()
## elif token == "=": ## elif token == "=":
## data[key] = list() ## data[key] = list()
## else: ## else:
## append_data( data, key, new_data, token ) ## append_data( data, key, new_data, token )
## new_data = True ## new_data = True
## ##
## last_token = token ## last_token = token
## token = lex.get_token() ## token = lex.get_token()
## ##
## if last_token == '\\' and token != '\n': ## if last_token == '\\' and token != '\n':
## new_data = False ## new_data = False
## append_data( data, key, new_data, '\\' ) ## append_data( data, key, new_data, '\\' )
## ##
## # compress lists of len 1 into single strings ## # compress lists of len 1 into single strings
## for (k, v) in data.items(): ## for (k, v) in data.items():
## if len(v) == 0: ## if len(v) == 0:
## data.pop(k) ## data.pop(k)
## ##
## # items in the following list will be kept as lists and not converted to strings ## # items in the following list will be kept as lists and not converted to strings
## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]: ## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
## continue ## continue
## ##
## if len(v) == 1: ## if len(v) == 1:
## data[k] = v[0] ## data[k] = v[0]
## ##
## return data ## return data
## ##
##def DoxySourceScan(node, env, path): ##def DoxySourceScan(node, env, path):
## """ ## """
## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add ## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
## any files used to generate docs to the list of source files. ## any files used to generate docs to the list of source files.
## """ ## """
## default_file_patterns = [ ## default_file_patterns = [
## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx', ## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++', ## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm', ## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
## '*.py', ## '*.py',
## ] ## ]
## ##
## default_exclude_patterns = [ ## default_exclude_patterns = [
## '*~', ## '*~',
## ] ## ]
## ##
## sources = [] ## sources = []
## ##
## data = DoxyfileParse(node.get_contents()) ## data = DoxyfileParse(node.get_contents())
## ##
## if data.get("RECURSIVE", "NO") == "YES": ## if data.get("RECURSIVE", "NO") == "YES":
## recursive = True ## recursive = True
## else: ## else:
## recursive = False ## recursive = False
## ##
## file_patterns = data.get("FILE_PATTERNS", default_file_patterns) ## file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns) ## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
## ##
## for node in data.get("INPUT", []): ## for node in data.get("INPUT", []):
## if os.path.isfile(node): ## if os.path.isfile(node):
## sources.add(node) ## sources.add(node)
## elif os.path.isdir(node): ## elif os.path.isdir(node):
## if recursive: ## if recursive:
## for root, dirs, files in os.walk(node): ## for root, dirs, files in os.walk(node):
## for f in files: ## for f in files:
## filename = os.path.join(root, f) ## filename = os.path.join(root, f)
## ##
## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False) ## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True) ## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
## ##
## if pattern_check and not exclude_check: ## if pattern_check and not exclude_check:
## sources.append(filename) ## sources.append(filename)
## else: ## else:
## for pattern in file_patterns: ## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern]))) ## sources.extend(glob.glob("/".join([node, pattern])))
## sources = map( lambda path: env.File(path), sources ) ## sources = map( lambda path: env.File(path), sources )
## return sources ## return sources
## ##
## ##
##def DoxySourceScanCheck(node, env): ##def DoxySourceScanCheck(node, env):
## """Check if we should scan this file""" ## """Check if we should scan this file"""
## return os.path.isfile(node.path) ## return os.path.isfile(node.path)
def srcDistEmitter(source, target, env):
    """SrcDist emitter: pass the (target, source) pair through unchanged.

    The Doxygen-style emitter logic that used to live here as a large block
    of commented-out code has been removed; this emitter is intentionally a
    no-op placeholder required by the builder interface.
    """
    return (target, source)
def generate(env):
    """
    Add builders and construction variables for the
    SrcDist tool.
    """
    # register a tar.gz builder that runs sources through srcDistEmitter
    env['BUILDERS']['SrcDist'] = targz.makeBuilder(srcDistEmitter)
def exists(env):
    """Make sure srcdist exists."""
    # no external tool is needed; the builder is pure Python
    return True

View File

@ -1,79 +1,79 @@
import re import re
from SCons.Script import * # the usual scons stuff you get in a SConscript from SCons.Script import * # the usual scons stuff you get in a SConscript
def generate(env):
    """
    Add builders and construction variables for the
    SubstInFile tool.

    Adds SubstInFile builder, which substitutes the keys->values of SUBST_DICT
    from the source to the target.
    The values of SUBST_DICT first have any construction variables expanded
    (its keys are not expanded).
    If a value of SUBST_DICT is a python callable function, it is called and
    the result is expanded as the value.
    If there's more than one source and more than one target, each target gets
    substituted from the corresponding source.
    """
    def do_subst_in_file(targetfile, sourcefile, dict):
        """Replace all instances of the keys of dict with their values.
        For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
        then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
        """
        try:
            # NOTE(review): binary mode preserves the original behavior; on
            # Python 3 this yields bytes, so SUBST_DICT keys/values would need
            # to be bytes too -- confirm against callers
            f = open(sourcefile, 'rb')
            contents = f.read()
            f.close()
        except EnvironmentError:
            # BUG FIX: Python 2-only "raise Class, arg" syntax replaced
            raise SCons.Errors.UserError("Can't read source file %s" % sourcefile)
        for (k, v) in dict.items():
            contents = re.sub(k, v, contents)
        try:
            f = open(targetfile, 'wb')
            f.write(contents)
            f.close()
        except EnvironmentError:
            raise SCons.Errors.UserError("Can't write target file %s" % targetfile)
        return 0  # success

    def subst_in_file(target, source, env):
        # BUG FIX: env.has_key() does not exist on Python 3; use 'in'
        if 'SUBST_DICT' not in env:
            raise SCons.Errors.UserError("SubstInFile requires SUBST_DICT to be set.")
        d = dict(env['SUBST_DICT'])  # copy it
        for (k, v) in d.items():
            if callable(v):
                d[k] = env.subst(v()).replace('\\', '\\\\')
            elif SCons.Util.is_String(v):
                d[k] = env.subst(v).replace('\\', '\\\\')
            else:
                raise SCons.Errors.UserError("SubstInFile: key %s: %s must be a string or callable" % (k, repr(v)))
        # BUG FIX: the original returned inside the loop, so only the FIRST
        # (target, source) pair was ever substituted, contradicting the
        # docstring; substitute every pair (do_subst_in_file raises on error)
        for (t, s) in zip(target, source):
            do_subst_in_file(str(t), str(s), d)
        return 0  # success

    def subst_in_file_string(target, source, env):
        """This is what gets printed on the console."""
        return '\n'.join(['Substituting vars from %s into %s' % (str(s), str(t))
                          for (t, s) in zip(target, source)])

    def subst_emitter(target, source, env):
        """Add dependency from substituted SUBST_DICT to target.
        Returns original target, source tuple unchanged.
        """
        d = env['SUBST_DICT'].copy()  # copy it
        for (k, v) in d.items():
            if callable(v):
                d[k] = env.subst(v())
            elif SCons.Util.is_String(v):
                d[k] = env.subst(v)
        Depends(target, SCons.Node.Python.Value(d))
        return target, source

    subst_action = SCons.Action.Action(subst_in_file, subst_in_file_string)
    env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
    """Make sure tool exists."""
    # pure-Python tool: always available
    return True

View File

@ -3,75 +3,75 @@
Tool-specific initialization for tarball. Tool-specific initialization for tarball.
""" """
## Commands to tackle a command based implementation: ## Commands to tackle a command based implementation:
##to unpack on the fly... ##to unpack on the fly...
##gunzip < FILE.tar.gz | tar xvf - ##gunzip < FILE.tar.gz | tar xvf -
##to pack on the fly... ##to pack on the fly...
##tar cvf - FILE-LIST | gzip -c > FILE.tar.gz ##tar cvf - FILE-LIST | gzip -c > FILE.tar.gz
import os.path import os.path
import SCons.Builder import SCons.Builder
import SCons.Node.FS import SCons.Node.FS
import SCons.Util import SCons.Util
try:
    import gzip
    import tarfile
    internal_targz = 1
except ImportError:
    # gzip/tarfile unavailable: the TarGz builder is simply not defined
    internal_targz = 0

TARGZ_DEFAULT_COMPRESSION_LEVEL = 9

if internal_targz:
    def targz(target, source, env):
        """SCons action: pack all sources into the tar.gz file target[0]."""
        def archive_name(path):
            # entry name inside the archive: path made relative to base_dir
            path = os.path.normpath(os.path.abspath(path))
            common_path = os.path.commonprefix((base_dir, path))
            archive_name = path[len(common_path):]
            return archive_name
        compression = env.get('TARGZ_COMPRESSION_LEVEL', TARGZ_DEFAULT_COMPRESSION_LEVEL)
        base_dir = os.path.normpath(env.get('TARGZ_BASEDIR', env.Dir('.')).abspath)
        target_path = str(target[0])
        fileobj = gzip.GzipFile(target_path, 'wb', compression)
        tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
        # BUG FIX: the loop variable used to shadow the 'source' parameter
        for src in source:
            source_path = str(src)
            if src.isdir():
                # BUG FIX: os.path.walk was removed in Python 3; use os.walk
                for dirname, _dirs, names in os.walk(source_path):
                    for name in names:
                        path = os.path.join(dirname, name)
                        if os.path.isfile(path):
                            tar.add(path, archive_name(path))
            else:
                tar.add(source_path, archive_name(source_path))  # filename, arcname
        tar.close()
        # BUG FIX: TarFile.close() does not close an externally supplied
        # fileobj, and GzipFile only writes its trailer on close -- without
        # this the archive could be left truncated/corrupt
        fileobj.close()
    targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL', 'TARGZ_BASEDIR'])

    def makeBuilder(emitter=None):
        """Return a tar.gz Builder using the optional emitter.

        BUG FIX: the 'emitter' argument was previously accepted but silently
        ignored, so srcdist's srcDistEmitter was never attached.
        """
        return SCons.Builder.Builder(action=SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
                                     source_factory=SCons.Node.FS.Entry,
                                     source_scanner=SCons.Defaults.DirScanner,
                                     emitter=emitter,
                                     suffix='$TARGZ_SUFFIX',
                                     multi=1)

    TarGzBuilder = makeBuilder()
def generate(env):
    """Add Builders and construction variables for zip to an Environment.

    The following environment variables may be set:
     TARGZ_COMPRESSION_LEVEL: integer, [0-9]. 0: no compression, 9: best compression (same as gzip compression level).
     TARGZ_BASEDIR: base-directory used to determine archive name (this allow archive name to be relative
                    to something other than top-dir).
    """
    env['BUILDERS']['TarGz'] = TarGzBuilder
    env['TARGZ_COM'] = targzAction
    env['TARGZ_COMPRESSION_LEVEL'] = TARGZ_DEFAULT_COMPRESSION_LEVEL  # range 0-9
    env['TARGZ_SUFFIX'] = '.tar.gz'
    env['TARGZ_BASEDIR'] = env.Dir('.')  # Sources archive name are made relative to that directory.
def exists(env): def exists(env):

View File

@ -1,187 +1,187 @@
#include <json/json.h> #include <json/json.h>
#include <algorithm> // sort #include <algorithm> // sort
#include <stdio.h> #include <stdio.h>
#if defined(_MSC_VER) && _MSC_VER >= 1310 #if defined(_MSC_VER) && _MSC_VER >= 1310
# pragma warning( disable: 4996 ) // disable fopen deprecation warning # pragma warning( disable: 4996 ) // disable fopen deprecation warning
#endif #endif
static std::string static std::string
readInputTestFile( const char *path ) readInputTestFile( const char *path )
{ {
FILE *file = fopen( path, "rb" ); FILE *file = fopen( path, "rb" );
if ( !file ) if ( !file )
return std::string(""); return std::string("");
fseek( file, 0, SEEK_END ); fseek( file, 0, SEEK_END );
long size = ftell( file ); long size = ftell( file );
fseek( file, 0, SEEK_SET ); fseek( file, 0, SEEK_SET );
std::string text; std::string text;
char *buffer = new char[size+1]; char *buffer = new char[size+1];
buffer[size] = 0; buffer[size] = 0;
if ( fread( buffer, 1, size, file ) == (unsigned long)size ) if ( fread( buffer, 1, size, file ) == (unsigned long)size )
text = buffer; text = buffer;
fclose( file ); fclose( file );
delete[] buffer; delete[] buffer;
return text; return text;
} }
static void static void
printValueTree( FILE *fout, Json::Value &value, const std::string &path = "." ) printValueTree( FILE *fout, Json::Value &value, const std::string &path = "." )
{ {
switch ( value.type() ) switch ( value.type() )
{ {
case Json::nullValue: case Json::nullValue:
fprintf( fout, "%s=null\n", path.c_str() ); fprintf( fout, "%s=null\n", path.c_str() );
break; break;
case Json::intValue: case Json::intValue:
fprintf( fout, "%s=%d\n", path.c_str(), value.asInt() ); fprintf( fout, "%s=%d\n", path.c_str(), value.asInt() );
break; break;
case Json::uintValue: case Json::uintValue:
fprintf( fout, "%s=%u\n", path.c_str(), value.asUInt() ); fprintf( fout, "%s=%u\n", path.c_str(), value.asUInt() );
break; break;
case Json::realValue: case Json::realValue:
fprintf( fout, "%s=%.16g\n", path.c_str(), value.asDouble() ); fprintf( fout, "%s=%.16g\n", path.c_str(), value.asDouble() );
break; break;
case Json::stringValue: case Json::stringValue:
fprintf( fout, "%s=\"%s\"\n", path.c_str(), value.asString().c_str() ); fprintf( fout, "%s=\"%s\"\n", path.c_str(), value.asString().c_str() );
break; break;
case Json::booleanValue: case Json::booleanValue:
fprintf( fout, "%s=%s\n", path.c_str(), value.asBool() ? "true" : "false" ); fprintf( fout, "%s=%s\n", path.c_str(), value.asBool() ? "true" : "false" );
break; break;
case Json::arrayValue: case Json::arrayValue:
{ {
fprintf( fout, "%s=[]\n", path.c_str() ); fprintf( fout, "%s=[]\n", path.c_str() );
int size = value.size(); int size = value.size();
for ( int index =0; index < size; ++index ) for ( int index =0; index < size; ++index )
{ {
static char buffer[16]; static char buffer[16];
sprintf( buffer, "[%d]", index ); sprintf( buffer, "[%d]", index );
printValueTree( fout, value[index], path + buffer ); printValueTree( fout, value[index], path + buffer );
} }
} }
break; break;
case Json::objectValue: case Json::objectValue:
{ {
fprintf( fout, "%s={}\n", path.c_str() ); fprintf( fout, "%s={}\n", path.c_str() );
Json::Value::Members members( value.getMemberNames() ); Json::Value::Members members( value.getMemberNames() );
std::sort( members.begin(), members.end() ); std::sort( members.begin(), members.end() );
std::string suffix = *(path.end()-1) == '.' ? "" : "."; std::string suffix = *(path.end()-1) == '.' ? "" : ".";
for ( Json::Value::Members::iterator it = members.begin(); for ( Json::Value::Members::iterator it = members.begin();
it != members.end(); it != members.end();
++it ) ++it )
{ {
const std::string &name = *it; const std::string &name = *it;
printValueTree( fout, value[name], path + suffix + name ); printValueTree( fout, value[name], path + suffix + name );
} }
} }
break; break;
default: default:
break; break;
} }
} }
static int static int
parseAndSaveValueTree( const std::string &input, parseAndSaveValueTree( const std::string &input,
const std::string &actual, const std::string &actual,
const std::string &kind, const std::string &kind,
Json::Value &root ) Json::Value &root )
{ {
Json::Reader reader; Json::Reader reader;
bool parsingSuccessful = reader.parse( input, root ); bool parsingSuccessful = reader.parse( input, root );
if ( !parsingSuccessful ) if ( !parsingSuccessful )
{ {
printf( "Failed to parse %s file: \n%s\n", printf( "Failed to parse %s file: \n%s\n",
kind.c_str(), kind.c_str(),
reader.getFormatedErrorMessages().c_str() ); reader.getFormatedErrorMessages().c_str() );
return 1; return 1;
} }
FILE *factual = fopen( actual.c_str(), "wt" ); FILE *factual = fopen( actual.c_str(), "wt" );
if ( !factual ) if ( !factual )
{ {
printf( "Failed to create %s actual file.\n", kind.c_str() ); printf( "Failed to create %s actual file.\n", kind.c_str() );
return 2; return 2;
} }
printValueTree( factual, root ); printValueTree( factual, root );
fclose( factual ); fclose( factual );
return 0; return 0;
} }
static int static int
rewriteValueTree( const std::string &rewritePath, rewriteValueTree( const std::string &rewritePath,
const Json::Value &root, const Json::Value &root,
std::string &rewrite ) std::string &rewrite )
{ {
//Json::FastWriter writer; //Json::FastWriter writer;
//writer.enableYAMLCompatibility(); //writer.enableYAMLCompatibility();
Json::StyledWriter writer; Json::StyledWriter writer;
rewrite = writer.write( root ); rewrite = writer.write( root );
FILE *fout = fopen( rewritePath.c_str(), "wt" ); FILE *fout = fopen( rewritePath.c_str(), "wt" );
if ( !fout ) if ( !fout )
{ {
printf( "Failed to create rewrite file: %s\n", rewritePath.c_str() ); printf( "Failed to create rewrite file: %s\n", rewritePath.c_str() );
return 2; return 2;
} }
fprintf( fout, "%s\n", rewrite.c_str() ); fprintf( fout, "%s\n", rewrite.c_str() );
fclose( fout ); fclose( fout );
return 0; return 0;
} }
static std::string static std::string
removeSuffix( const std::string &path, removeSuffix( const std::string &path,
const std::string &extension ) const std::string &extension )
{ {
if ( extension.length() >= path.length() ) if ( extension.length() >= path.length() )
return std::string(""); return std::string("");
std::string suffix = path.substr( path.length() - extension.length() ); std::string suffix = path.substr( path.length() - extension.length() );
if ( suffix != extension ) if ( suffix != extension )
return std::string(""); return std::string("");
return path.substr( 0, path.length() - extension.length() ); return path.substr( 0, path.length() - extension.length() );
} }
int main( int argc, const char *argv[] ) int main( int argc, const char *argv[] )
{ {
if ( argc != 2 ) if ( argc != 2 )
{ {
printf( "Usage: %s input-json-file", argv[0] ); printf( "Usage: %s input-json-file", argv[0] );
return 3; return 3;
} }
std::string input = readInputTestFile( argv[1] ); std::string input = readInputTestFile( argv[1] );
if ( input.empty() ) if ( input.empty() )
{ {
printf( "Failed to read input or empty input: %s\n", argv[1] ); printf( "Failed to read input or empty input: %s\n", argv[1] );
return 3; return 3;
} }
std::string basePath = removeSuffix( argv[1], ".json" ); std::string basePath = removeSuffix( argv[1], ".json" );
if ( basePath.empty() ) if ( basePath.empty() )
{ {
printf( "Bad input path. Path does not end with '.expected':\n%s\n", argv[1] ); printf( "Bad input path. Path does not end with '.expected':\n%s\n", argv[1] );
return 3; return 3;
} }
std::string actualPath = basePath + ".actual"; std::string actualPath = basePath + ".actual";
std::string rewritePath = basePath + ".rewrite"; std::string rewritePath = basePath + ".rewrite";
std::string rewriteActualPath = basePath + ".actual-rewrite"; std::string rewriteActualPath = basePath + ".actual-rewrite";
Json::Value root; Json::Value root;
int exitCode = parseAndSaveValueTree( input, actualPath, "input", root ); int exitCode = parseAndSaveValueTree( input, actualPath, "input", root );
if ( exitCode == 0 ) if ( exitCode == 0 )
{ {
std::string rewrite; std::string rewrite;
exitCode = rewriteValueTree( rewritePath, root, rewrite ); exitCode = rewriteValueTree( rewritePath, root, rewrite );
if ( exitCode == 0 ) if ( exitCode == 0 )
{ {
Json::Value rewriteRoot; Json::Value rewriteRoot;
exitCode = parseAndSaveValueTree( rewrite, rewriteActualPath, "rewrite", rewriteRoot ); exitCode = parseAndSaveValueTree( rewrite, rewriteActualPath, "rewrite", rewriteRoot );
} }
} }
return exitCode; return exitCode;
} }

View File

@ -1,6 +1,6 @@
Import( 'env_testing buildJSONTests' ) Import( 'env_testing buildJSONTests' )
buildJSONTests( env_testing, Split( """ buildJSONTests( env_testing, Split( """
main.cpp main.cpp
""" ), """ ),
'jsontestrunner' ) 'jsontestrunner' )

View File

@ -1,10 +1,10 @@
# removes all files created during testing # removes all files created during testing
import glob import glob
import os import os
paths = [] paths = []
for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]: for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
paths += glob.glob( pattern ) paths += glob.glob( pattern )
for path in paths: for path in paths:
os.unlink( path ) os.unlink( path )

View File

@ -1,11 +1,11 @@
import glob import glob
import os.path import os.path
for path in glob.glob( '*.json' ): for path in glob.glob( '*.json' ):
text = file(path,'rt').read() text = file(path,'rt').read()
target = os.path.splitext(path)[0] + '.expected' target = os.path.splitext(path)[0] + '.expected'
if os.path.exists( target ): if os.path.exists( target ):
print 'skipping:', target print 'skipping:', target
else: else:
print 'creating:', target print 'creating:', target
file(target,'wt').write(text) file(target,'wt').write(text)

View File

@ -1,64 +1,64 @@
# Simple implementation of a json test runner to run the test against json-py. # Simple implementation of a json test runner to run the test against json-py.
import sys import sys
import os.path import os.path
import json import json
import types import types
if len(sys.argv) != 2: if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0] print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3) sys.exit(3)
input_path = sys.argv[1] input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0] base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual' actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite' rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite' rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ): def valueTreeToString( fout, value, path = '.' ):
ty = type(value) ty = type(value)
if ty is types.DictType: if ty is types.DictType:
fout.write( '%s={}\n' % path ) fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or '' suffix = path[-1] != '.' and '.' or ''
names = value.keys() names = value.keys()
names.sort() names.sort()
for name in names: for name in names:
valueTreeToString( fout, value[name], path + suffix + name ) valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType: elif ty is types.ListType:
fout.write( '%s=[]\n' % path ) fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ): for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index ) valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType: elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) ) fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType: elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) ) fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType: elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) ) fout.write( '%s=%.16g\n' % (path,value) )
elif value is True: elif value is True:
fout.write( '%s=true\n' % path ) fout.write( '%s=true\n' % path )
elif value is False: elif value is False:
fout.write( '%s=false\n' % path ) fout.write( '%s=false\n' % path )
elif value is None: elif value is None:
fout.write( '%s=null\n' % path ) fout.write( '%s=null\n' % path )
else: else:
assert False and "Unexpected value type" assert False and "Unexpected value type"
def parseAndSaveValueTree( input, actual_path ): def parseAndSaveValueTree( input, actual_path ):
root = json.read( input ) root = json.read( input )
fout = file( actual_path, 'wt' ) fout = file( actual_path, 'wt' )
valueTreeToString( fout, root ) valueTreeToString( fout, root )
fout.close() fout.close()
return root return root
def rewriteValueTree( value, rewrite_path ): def rewriteValueTree( value, rewrite_path ):
rewrite = json.write( value ) rewrite = json.write( value )
rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ? rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
file( rewrite_path, 'wt').write( rewrite + '\n' ) file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite return rewrite
input = file( input_path, 'rt' ).read() input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path ) root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( json.write( root ), rewrite_path ) rewrite = rewriteValueTree( json.write( root ), rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path ) rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 ) sys.exit( 0 )

View File

@ -1,91 +1,91 @@
import sys import sys
import os import os
import os.path import os.path
import glob import glob
def compareOutputs( expected, actual, message ): def compareOutputs( expected, actual, message ):
expected = expected.strip().replace('\r','').split('\n') expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n') actual = actual.strip().replace('\r','').split('\n')
diff_line = 0 diff_line = 0
max_line_to_compare = min( len(expected), len(actual) ) max_line_to_compare = min( len(expected), len(actual) )
for index in xrange(0,max_line_to_compare): for index in xrange(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip(): if expected[index].strip() != actual[index].strip():
diff_line = index + 1 diff_line = index + 1
break break
if diff_line == 0 and len(expected) != len(actual): if diff_line == 0 and len(expected) != len(actual):
diff_line = max_line_to_compare+1 diff_line = max_line_to_compare+1
if diff_line == 0: if diff_line == 0:
return None return None
def safeGetLine( lines, index ): def safeGetLine( lines, index ):
index += -1 index += -1
if index >= len(lines): if index >= len(lines):
return '' return ''
return lines[index].strip() return lines[index].strip()
return """ Difference in %s at line %d: return """ Difference in %s at line %d:
Expected: '%s' Expected: '%s'
Actual: '%s' Actual: '%s'
""" % (message, diff_line, """ % (message, diff_line,
safeGetLine(expected,diff_line), safeGetLine(expected,diff_line),
safeGetLine(actual,diff_line) ) safeGetLine(actual,diff_line) )
def safeReadFile( path ): def safeReadFile( path ):
try: try:
return file( path, 'rt' ).read() return file( path, 'rt' ).read()
except IOError, e: except IOError, e:
return '<File "%s" is missing: %s>' % (path,e) return '<File "%s" is missing: %s>' % (path,e)
def runAllTests( jsontest_executable_path, input_dir = None ): def runAllTests( jsontest_executable_path, input_dir = None ):
if not input_dir: if not input_dir:
input_dir = os.getcwd() input_dir = os.getcwd()
tests = glob.glob( os.path.join( input_dir, '*.json' ) ) tests = glob.glob( os.path.join( input_dir, '*.json' ) )
failed_tests = [] failed_tests = []
for input_path in tests: for input_path in tests:
print 'TESTING:', input_path, print 'TESTING:', input_path,
pipe = os.popen( "%s %s" % (jsontest_executable_path, input_path) ) pipe = os.popen( "%s %s" % (jsontest_executable_path, input_path) )
process_output = pipe.read() process_output = pipe.read()
status = pipe.close() status = pipe.close()
base_path = os.path.splitext(input_path)[0] base_path = os.path.splitext(input_path)[0]
actual_output = safeReadFile( base_path + '.actual' ) actual_output = safeReadFile( base_path + '.actual' )
actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' ) actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
file(base_path + '.process-output','wt').write( process_output ) file(base_path + '.process-output','wt').write( process_output )
if status: if status:
print 'parsing failed' print 'parsing failed'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) ) failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else: else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected' expected_output_path = os.path.splitext(input_path)[0] + '.expected'
expected_output = file( expected_output_path, 'rt' ).read() expected_output = file( expected_output_path, 'rt' ).read()
detail = ( compareOutputs( expected_output, actual_output, 'input' ) detail = ( compareOutputs( expected_output, actual_output, 'input' )
or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) ) or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
if detail: if detail:
print 'FAILED' print 'FAILED'
failed_tests.append( (input_path, detail) ) failed_tests.append( (input_path, detail) )
else: else:
print 'OK' print 'OK'
if failed_tests: if failed_tests:
print print
print 'Failure details:' print 'Failure details:'
for failed_test in failed_tests: for failed_test in failed_tests:
print '* Test', failed_test[0] print '* Test', failed_test[0]
print failed_test[1] print failed_test[1]
print print
print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests), print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
len(failed_tests) ) len(failed_tests) )
return 1 return 1
else: else:
print 'All %d tests passed.' % len(tests) print 'All %d tests passed.' % len(tests)
return 0 return 0
if __name__ == '__main__': if __name__ == '__main__':
if len(sys.argv) < 1 or len(sys.argv) > 2: if len(sys.argv) < 1 or len(sys.argv) > 2:
print "Usage: %s jsontest-executable-path [input-testcase-directory]" % sys.argv[0] print "Usage: %s jsontest-executable-path [input-testcase-directory]" % sys.argv[0]
sys.exit( 1 ) sys.exit( 1 )
jsontest_executable_path = os.path.normpath( os.path.abspath( sys.argv[1] ) ) jsontest_executable_path = os.path.normpath( os.path.abspath( sys.argv[1] ) )
if len(sys.argv) > 2: if len(sys.argv) > 2:
input_path = os.path.normpath( os.path.abspath( sys.argv[2] ) ) input_path = os.path.normpath( os.path.abspath( sys.argv[2] ) )
else: else:
input_path = None input_path = None
status = runAllTests( jsontest_executable_path, input_path ) status = runAllTests( jsontest_executable_path, input_path )
sys.exit( status ) sys.exit( status )

View File

@ -1 +1 @@
.=[] .=[]

View File

@ -1 +1 @@
[] []

View File

@ -1,2 +1,2 @@
.=[] .=[]
.[0]=1 .[0]=1

View File

@ -1 +1 @@
[1] [1]

View File

@ -1,6 +1,6 @@
.=[] .=[]
.[0]=1 .[0]=1
.[1]=2 .[1]=2
.[2]=3 .[2]=3
.[3]=4 .[3]=4
.[4]=5 .[4]=5

View File

@ -1 +1 @@
[ 1, 2 , 3,4,5] [ 1, 2 , 3,4,5]

View File

@ -1,5 +1,5 @@
.=[] .=[]
.[0]=1 .[0]=1
.[1]="abc" .[1]="abc"
.[2]=12.3 .[2]=12.3
.[3]=-4 .[3]=-4

View File

@ -1 +1 @@
[1, "abc" , 12.3, -4] [1, "abc" , 12.3, -4]

View File

@ -1,100 +1,100 @@
.=[] .=[]
.[0]=1 .[0]=1
.[1]=2 .[1]=2
.[2]=3 .[2]=3
.[3]=4 .[3]=4
.[4]=5 .[4]=5
.[5]=6 .[5]=6
.[6]=7 .[6]=7
.[7]=8 .[7]=8
.[8]=9 .[8]=9
.[9]=10 .[9]=10
.[10]=11 .[10]=11
.[11]=12 .[11]=12
.[12]=13 .[12]=13
.[13]=14 .[13]=14
.[14]=15 .[14]=15
.[15]=16 .[15]=16
.[16]=17 .[16]=17
.[17]=18 .[17]=18
.[18]=19 .[18]=19
.[19]=20 .[19]=20
.[20]=21 .[20]=21
.[21]=22 .[21]=22
.[22]=23 .[22]=23
.[23]=24 .[23]=24
.[24]=25 .[24]=25
.[25]=26 .[25]=26
.[26]=27 .[26]=27
.[27]=28 .[27]=28
.[28]=29 .[28]=29
.[29]=30 .[29]=30
.[30]=31 .[30]=31
.[31]=32 .[31]=32
.[32]=33 .[32]=33
.[33]=34 .[33]=34
.[34]=35 .[34]=35
.[35]=36 .[35]=36
.[36]=37 .[36]=37
.[37]=38 .[37]=38
.[38]=39 .[38]=39
.[39]=40 .[39]=40
.[40]=41 .[40]=41
.[41]=42 .[41]=42
.[42]=43 .[42]=43
.[43]=44 .[43]=44
.[44]=45 .[44]=45
.[45]=46 .[45]=46
.[46]=47 .[46]=47
.[47]=48 .[47]=48
.[48]=49 .[48]=49
.[49]=50 .[49]=50
.[50]=51 .[50]=51
.[51]=52 .[51]=52
.[52]=53 .[52]=53
.[53]=54 .[53]=54
.[54]=55 .[54]=55
.[55]=56 .[55]=56
.[56]=57 .[56]=57
.[57]=58 .[57]=58
.[58]=59 .[58]=59
.[59]=60 .[59]=60
.[60]=61 .[60]=61
.[61]=62 .[61]=62
.[62]=63 .[62]=63
.[63]=64 .[63]=64
.[64]=65 .[64]=65
.[65]=66 .[65]=66
.[66]=67 .[66]=67
.[67]=68 .[67]=68
.[68]=69 .[68]=69
.[69]=70 .[69]=70
.[70]=71 .[70]=71
.[71]=72 .[71]=72
.[72]=73 .[72]=73
.[73]=74 .[73]=74
.[74]=75 .[74]=75
.[75]=76 .[75]=76
.[76]=77 .[76]=77
.[77]=78 .[77]=78
.[78]=79 .[78]=79
.[79]=80 .[79]=80
.[80]=81 .[80]=81
.[81]=82 .[81]=82
.[82]=83 .[82]=83
.[83]=84 .[83]=84
.[84]=85 .[84]=85
.[85]=86 .[85]=86
.[86]=87 .[86]=87
.[87]=88 .[87]=88
.[88]=89 .[88]=89
.[89]=90 .[89]=90
.[90]=91 .[90]=91
.[91]=92 .[91]=92
.[92]=93 .[92]=93
.[93]=94 .[93]=94
.[94]=95 .[94]=95
.[95]=96 .[95]=96
.[96]=97 .[96]=97
.[97]=98 .[97]=98
.[98]=99 .[98]=99

View File

@ -1,5 +1,5 @@
.=[] .=[]
.[0]="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" .[0]="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
.[1]="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" .[1]="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
.[2]="ccccccccccccccccccccccc" .[2]="ccccccccccccccccccccccc"
.[3]="dddddddddddddddddddddddddddddddddddddddddddddddddddd" .[3]="dddddddddddddddddddddddddddddddddddddddddddddddddddd"

View File

@ -1,4 +1,4 @@
[ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", [ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"ccccccccccccccccccccccc", "ccccccccccccccccccccccc",
"dddddddddddddddddddddddddddddddddddddddddddddddddddd" ] "dddddddddddddddddddddddddddddddddddddddddddddddddddd" ]

View File

@ -1 +1 @@
.=123456789 .=123456789

View File

@ -1 +1 @@
0123456789 0123456789

View File

@ -1 +1 @@
.=-123456789 .=-123456789

View File

@ -1 +1 @@
-0123456789 -0123456789

View File

@ -1,3 +1,3 @@
.=1.2345678 .=1.2345678

View File

@ -1,3 +1,3 @@
1.2345678 1.2345678

View File

@ -1,2 +1,2 @@
.="abcdef" .="abcdef"

View File

@ -1,2 +1,2 @@
"abcdef" "abcdef"

View File

@ -1,2 +1,2 @@
.=null .=null

View File

@ -1,2 +1,2 @@
null null

View File

@ -1,2 +1,2 @@
.=true .=true

View File

@ -1,2 +1,2 @@
true true

View File

@ -1,2 +1,2 @@
.=false .=false

View File

@ -1,2 +1,2 @@
false false

View File

@ -1,2 +1,2 @@
.=null .=null

View File

@ -1,3 +1,3 @@
// C++ style comment // C++ style comment
null null

View File

@ -1,2 +1,2 @@
.=null .=null

View File

@ -1,4 +1,4 @@
/* C style comment /* C style comment
*/ */
null null

View File

@ -1,20 +1,20 @@
.={} .={}
.attribute=[] .attribute=[]
.attribute[0]="random" .attribute[0]="random"
.attribute[1]="short" .attribute[1]="short"
.attribute[2]="bold" .attribute[2]="bold"
.attribute[3]=12 .attribute[3]=12
.attribute[4]={} .attribute[4]={}
.attribute[4].height=7 .attribute[4].height=7
.attribute[4].width=64 .attribute[4].width=64
.count=1234 .count=1234
.name={} .name={}
.name.aka="T.E.S.T." .name.aka="T.E.S.T."
.name.id=123987 .name.id=123987
.test={} .test={}
.test.1={} .test.1={}
.test.1.2={} .test.1.2={}
.test.1.2.3={} .test.1.2.3={}
.test.1.2.3.coord=[] .test.1.2.3.coord=[]
.test.1.2.3.coord[0]=1 .test.1.2.3.coord[0]=1
.test.1.2.3.coord[1]=2 .test.1.2.3.coord[1]=2

View File

@ -1,17 +1,17 @@
{ {
"count" : 1234, "count" : 1234,
"name" : { "aka" : "T.E.S.T.", "id" : 123987 }, "name" : { "aka" : "T.E.S.T.", "id" : 123987 },
"attribute" : [ "attribute" : [
"random", "random",
"short", "short",
"bold", "bold",
12, 12,
{ "height" : 7, "width" : 64 } { "height" : 7, "width" : 64 }
], ],
"test": { "1" : "test": { "1" :
{ "2" : { "2" :
{ "3" : { "coord" : [ 1,2] } { "3" : { "coord" : [ 1,2] }
} }
} }
} }
} }

View File

@ -1 +1 @@
.=2147483647 .=2147483647

View File

@ -1,2 +1,2 @@
// Max signed integer // Max signed integer
2147483647 2147483647

View File

@ -1 +1 @@
.=-2147483648 .=-2147483648

View File

@ -1,2 +1,2 @@
// Min signed integer // Min signed integer
-2147483648 -2147483648

View File

@ -1 +1 @@
.=4294967295 .=4294967295

View File

@ -1,2 +1,2 @@
// Max unsigned integer // Max unsigned integer
4294967295 4294967295

View File

@ -1,2 +1,2 @@
.=0 .=0

View File

@ -1,3 +1,3 @@
// Min unsigned integer // Min unsigned integer
0 0

View File

@ -1,2 +1,2 @@
.=1 .=1

View File

@ -1,2 +1,2 @@
1 1

View File

@ -1 +1 @@
.={} .={}

View File

@ -1 +1 @@
{} {}

View File

@ -1,2 +1,2 @@
.={} .={}
.count=1234 .count=1234

View File

@ -1 +1 @@
{ "count" : 1234 } { "count" : 1234 }

View File

@ -1,4 +1,4 @@
.={} .={}
.attribute="random" .attribute="random"
.count=1234 .count=1234
.name="test" .name="test"

View File

@ -1,5 +1,5 @@
{ {
"count" : 1234, "count" : 1234,
"name" : "test", "name" : "test",
"attribute" : "random" "attribute" : "random"
} }

View File

@ -1,2 +1,2 @@
.={} .={}
.=1234 .=1234

View File

@ -1,3 +1,3 @@
{ {
"" : 1234 "" : 1234
} }

View File

@ -1,3 +1,3 @@
.={} .={}
.first=1 .first=1
.second=2 .second=2

View File

@ -1,14 +1,14 @@
/* A comment /* A comment
at the beginning of the file. at the beginning of the file.
*/ */
{ {
"first" : 1, // comment after 'first' on the same line "first" : 1, // comment after 'first' on the same line
/* Comment before 'second' /* Comment before 'second'
*/ */
"second" : 2 "second" : 2
} }
/* A comment at /* A comment at
the end of the file. the end of the file.
*/ */

View File

@ -1,2 +1,2 @@
.=8589934592 .=8589934592

View File

@ -1,3 +1,3 @@
// 2^33 => out of integer range, switch to double // 2^33 => out of integer range, switch to double
8589934592 8589934592

View File

@ -1,2 +1,2 @@
.=-4294967295 .=-4294967295

View File

@ -1,3 +1,3 @@
// -2^32 => out of signed integer range, switch to double // -2^32 => out of signed integer range, switch to double
-4294967295 -4294967295

View File

@ -1,2 +1,2 @@
.=-4294967295 .=-4294967295

View File

@ -1,3 +1,3 @@
// -2^32 => out of signed integer range, switch to double // -2^32 => out of signed integer range, switch to double
-4294967295 -4294967295

View File

@ -1,2 +1,2 @@
.=1.2345678 .=1.2345678

View File

@ -1,3 +1,3 @@
// 1.2345678 // 1.2345678
12345678e-7 12345678e-7

View File

@ -1,3 +1,3 @@
.=1234567.8 .=1234567.8

View File

@ -1,3 +1,3 @@
// 1234567.8 // 1234567.8
0.12345678e7 0.12345678e7

View File

@ -1,3 +1,3 @@
.=-1.2345678 .=-1.2345678

View File

@ -1,3 +1,3 @@
// -1.2345678 // -1.2345678
-12345678e-7 -12345678e-7

View File

@ -1,3 +1,3 @@
.=-1234567.8 .=-1234567.8

View File

@ -1,3 +1,3 @@
// -1234567.8 // -1234567.8
-0.12345678e7 -0.12345678e7