2014-11-20 06:30:47 +01:00
|
|
|
from __future__ import print_function
|
2015-01-16 20:44:27 +01:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
from io import open
|
|
|
|
from glob import glob
|
2007-06-14 23:01:26 +02:00
|
|
|
import sys
|
|
|
|
import os
|
2015-01-10 05:28:20 +01:00
|
|
|
import os.path
|
2009-11-19 21:16:59 +01:00
|
|
|
import optparse
|
2007-06-14 23:01:26 +02:00
|
|
|
|
2009-11-19 21:16:59 +01:00
|
|
|
# Command prefix prepended to each test invocation when --valgrind is given;
# note the trailing space so the executable path can be concatenated directly.
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '
|
2007-06-14 23:01:26 +02:00
|
|
|
|
2015-01-16 20:44:27 +01:00
|
|
|
def getStatusOutput(cmd):
    """Run *cmd* through a shell and return (status, output_as_unicode).

    Works under both Python 2 and Python 3.
    Note: os.popen().close() returns None (not 0) for a successful command.
    """
    pipe = os.popen(cmd)
    captured = pipe.read()
    # os.popen().read() yields bytes ('str') on Python 2 but unicode ('str')
    # on Python 3; normalize so callers always receive unicode text.
    if isinstance(captured, bytes):
        captured = captured.decode('utf-8')
    exit_status = pipe.close()
    return exit_status, captured
|
2007-06-14 23:01:26 +02:00
|
|
|
def compareOutputs( expected, actual, message ):
    """Compare two outputs line by line, ignoring surrounding whitespace
    and carriage returns.  Return None when they match, otherwise a
    human-readable description of the first differing line (1-based).
    """
    expected_lines = expected.strip().replace('\r','').split('\n')
    actual_lines = actual.strip().replace('\r','').split('\n')
    common_count = min( len(expected_lines), len(actual_lines) )
    diff_line = 0
    for lineno in range(common_count):
        if expected_lines[lineno].strip() != actual_lines[lineno].strip():
            diff_line = lineno + 1
            break
    else:
        # No textual difference in the shared prefix; a length mismatch
        # means the first extra/missing line is the difference.
        if len(expected_lines) != len(actual_lines):
            diff_line = common_count + 1
    if diff_line == 0:
        return None
    def safeGetLine( lines, one_based ):
        # Out-of-range lookups (the shorter side) read as empty.
        pos = one_based - 1
        return lines[pos].strip() if pos < len(lines) else ''
    return """ Difference in %s at line %d:
Expected: '%s'
Actual: '%s'
""" % (message, diff_line,
       safeGetLine(expected_lines, diff_line),
       safeGetLine(actual_lines, diff_line))
|
|
|
|
|
|
|
|
def safeReadFile( path ):
    """Return the UTF-8 decoded content of *path*.

    Never raises for a missing/unreadable file; instead returns a
    diagnostic placeholder string, so callers can embed the result
    directly in failure reports.
    """
    try:
        # Use a context manager so the handle is closed deterministically
        # (the original leaked the file object until garbage collection).
        with open( path, 'rt', encoding = 'utf-8' ) as f:
            return f.read()
    except IOError as e:
        return '<File "%s" is missing: %s>' % (path,e)
|
|
|
|
|
2009-11-19 21:16:59 +01:00
|
|
|
def runAllTests( jsontest_executable_path, input_dir = None,
                 use_valgrind=False, with_json_checker=False ):
    """Run every *.json test case through the jsontestrunner executable.

    jsontest_executable_path: path to the jsontestrunner binary.
    input_dir: directory holding the *.json test data (default: ./data).
    use_valgrind: prefix each invocation with valgrind to detect leaks.
    with_json_checker: also run the json.org JSONChecker suite expected
        in ../jsonchecker relative to input_dir.

    Returns 0 when every test passes, 1 otherwise.  Progress and failure
    details are printed to stdout.
    """
    if not input_dir:
        input_dir = os.path.join( os.getcwd(), 'data' )
    tests = glob( os.path.join( input_dir, '*.json' ) )
    if with_json_checker:
        test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
    else:
        test_jsonchecker = []
    failed_tests = []
    # Conditional expression instead of the fragile `and/or` idiom.
    valgrind_path = VALGRIND_CMD if use_valgrind else ''
    for input_path in tests + test_jsonchecker:
        # JSONChecker convention: files named fail*.json must be rejected.
        expect_failure = os.path.basename( input_path ).startswith( 'fail' )
        is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
        print('TESTING:', input_path, end=' ')
        options = '--json-checker' if is_json_checker_test else ''
        options += ' --json-writer StyledWriter'
        cmd = '%s%s %s "%s"' % (
            valgrind_path, jsontest_executable_path, options,
            input_path)
        status, process_output = getStatusOutput(cmd)
        if is_json_checker_test:
            if expect_failure:
                # A zero exit status means the runner accepted invalid JSON.
                if not status:
                    print('FAILED')
                    failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
                                          safeReadFile(input_path)) )
                else:
                    print('OK')
            else:
                if status:
                    print('FAILED')
                    failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
                else:
                    print('OK')
        else:
            base_path = os.path.splitext(input_path)[0]
            actual_output = safeReadFile( base_path + '.actual' )
            actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
            # `with` closes the handle promptly (the original leaked it).
            with open(base_path + '.process-output', 'wt', encoding = 'utf-8') as out_file:
                out_file.write( process_output )
            if status:
                print('parsing failed')
                failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
            else:
                expected_output_path = os.path.splitext(input_path)[0] + '.expected'
                with open( expected_output_path, 'rt', encoding = 'utf-8' ) as expected_file:
                    expected_output = expected_file.read()
                # Compare both the direct parse and the rewrite round-trip.
                detail = ( compareOutputs( expected_output, actual_output, 'input' )
                            or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
                if detail:
                    print('FAILED')
                    failed_tests.append( (input_path, detail) )
                else:
                    print('OK')

    if failed_tests:
        print()
        print('Failure details:')
        for failed_test in failed_tests:
            print('* Test', failed_test[0])
            print(failed_test[1])
            print()
        print('Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
                                                       len(failed_tests) ))
        return 1
    else:
        print('All %d tests passed.' % len(tests))
        return 0
|
|
|
|
|
2009-11-19 21:16:59 +01:00
|
|
|
def main():
    """Parse command-line options and run the test suite.

    Usage: runjsontests [options] <path to jsontestrunner.exe> [test case dir]
    Exits with the status returned by runAllTests (0 = all passed).
    """
    from optparse import OptionParser
    parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
    parser.add_option("--valgrind",
                      action="store_true", dest="valgrind", default=False,
                      help="run all the tests using valgrind to detect memory leaks")
    parser.add_option("-c", "--with-json-checker",
                      action="store_true", dest="with_json_checker", default=False,
                      help="run all the tests from the official JSONChecker test suite of json.org")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()

    if len(args) < 1 or len(args) > 2:
        # Fixed wording of the original message ("Must provides at least path ...").
        parser.error( 'Must provide at least the path to the jsontestrunner executable.' )
        # Unreachable: parser.error() exits with status 2; kept as a safety net.
        sys.exit( 1 )

    jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
    if len(args) > 1:
        input_path = os.path.normpath( os.path.abspath( args[1] ) )
    else:
        input_path = None
    status = runAllTests( jsontest_executable_path, input_path,
                          use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
    sys.exit( status )
|
|
|
|
|
|
|
|
# Script entry point: run the suite only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|