Updated jinja version
parent 2059972bf7
commit 304fd03e64
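This commit vendors an updated Jinja2 (2.8-dev) into modules/matlab/generator and replaces the old 2to3-era constructs with a small six-like compatibility layer, jinja2._compat, that works unchanged on Python 2 and 3. As a minimal sketch of how that shim is consumed by the rest of the package (the helper function and sample data are illustrative only, not part of the commit):

# Minimal sketch, not part of the diff: typical use of the jinja2._compat shim
# introduced below. The render_pairs helper and the sample dict are made up.
from jinja2._compat import iteritems, text_type

def render_pairs(d):
    # iteritems() and text_type behave identically on Python 2 and Python 3
    return ", ".join("%s=%s" % (k, text_type(v)) for k, v in sorted(iteritems(d)))

print(render_pairs({"a": 1, "b": 2}))  # -> a=1, b=2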
@@ -18,7 +18,7 @@ string(REPLACE "opencv_" "" OPENCV_MATLAB_MODULES "${OPENCV_MODULE_${the_module}
                 ${OPENCV_MODULE_${the_module}_OPT_DEPS}")
foreach(module ${OPENCV_MATLAB_MODULES})
  if (HAVE_opencv_${module})
    list(APPEND opencv_hdrs "${OPENCV_MODULE_opencv_${module}_LOCATION}/include/opencv2/${module}/${module}.hpp")
    list(APPEND opencv_hdrs "${OPENCV_MODULE_opencv_${module}_LOCATION}/include/opencv2/${module}.hpp")
  endif()
endforeach()
@@ -29,3 +29,5 @@ Patches and suggestions:
- Peter van Dijk (Habbie)
- Stefan Ebner
- Rene Leonhardt
- Thomas Waldmann
- Cory Benfield (Lukasa)
@@ -27,7 +27,7 @@
    :license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.7-dev'
__version__ = '2.8-dev'

# high level interface
from jinja2.environment import Environment, Template
modules/matlab/generator/jinja2/_compat.py (new file, 109 lines)
@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
"""
    jinja2._compat
    ~~~~~~~~~~~~~~

    Some py2/py3 compatibility support based on a stripped down
    version of six so we don't have to depend on a specific version
    of it.

    :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys

PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x


if not PY2:
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    import pickle
    from io import BytesIO, StringIO
    NativeStringIO = StringIO

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern

    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity
    get_next = lambda x: x.__next__

else:
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    NativeStringIO = BytesIO

    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    from itertools import imap, izip, ifilter
    intern = intern

    def implements_iterator(cls):
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    get_next = lambda x: x.next

    def encode_filename(filename):
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename


def with_metaclass(meta, *bases):
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instanciation that replaces
    # itself with the actual metaclass. Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})


try:
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    from urllib import quote as url_quote
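The with_metaclass helper above is the one non-obvious piece of the shim. A hedged usage sketch (the Meta and Base names are illustrative, not from the diff):

# Usage sketch only, not part of the commit: a Python 2/3-compatible metaclass declaration.
from jinja2._compat import with_metaclass

class Meta(type):
    def __new__(mcls, name, bases, d):
        d.setdefault('tagged', True)      # the metaclass injects an attribute
        return type.__new__(mcls, name, bases, d)

class Base(with_metaclass(Meta, object)):
    pass

print(Base.tagged)    # -> True
print(Base.__mro__)   # (Base, object): no temporary_class left in the MRO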
@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-
"""
    jinja2._markupsafe._bundle
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    This script pulls in markupsafe from a source folder and
    bundles it with Jinja2. It does not pull in the speedups
    module though.

    :copyright: Copyright 2010 by the Jinja team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys
import os
import re


def rewrite_imports(lines):
    for idx, line in enumerate(lines):
        new_line = re.sub(r'(import|from)\s+markupsafe\b',
                          r'\1 jinja2._markupsafe', line)
        if new_line != line:
            lines[idx] = new_line


def main():
    if len(sys.argv) != 2:
        print 'error: only argument is path to markupsafe'
        sys.exit(1)
    basedir = os.path.dirname(__file__)
    markupdir = sys.argv[1]
    for filename in os.listdir(markupdir):
        if filename.endswith('.py'):
            f = open(os.path.join(markupdir, filename))
            try:
                lines = list(f)
            finally:
                f.close()
            rewrite_imports(lines)
            f = open(os.path.join(basedir, filename), 'w')
            try:
                for line in lines:
                    f.write(line)
            finally:
                f.close()


if __name__ == '__main__':
    main()
@@ -18,22 +18,17 @@ from os import path, listdir
import sys
import marshal
import tempfile
import cPickle as pickle
import fnmatch
try:
    from hashlib import sha1
except ImportError:
    from sha import new as sha1
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2


# marshal works better on 3.x, one hack less required
if sys.version_info > (3, 0):
    from io import BytesIO
if not PY2:
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:
    from cStringIO import StringIO as BytesIO

    def marshal_dump(code, f):
        if isinstance(f, file):
@@ -282,15 +277,26 @@ class MemcachedBytecodeCache(BytecodeCache):

    This bytecode cache does not support clearing of used items in the cache.
    The clear method is a no-operation function.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None):
    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
            code = None
        if code is not None:
            bucket.bytecode_from_string(code)

@@ -298,4 +304,8 @@ class MemcachedBytecodeCache(BytecodeCache):
        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
        if self.timeout is not None:
            args += (self.timeout,)
        try:
            self.client.set(*args)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
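The bccache changes above make memcache failures non-fatal by default via the new ignore_memcache_errors flag. A hedged sketch of how the flag would be passed through an Environment (the memcache client, server address and template path are assumptions, not part of the diff):

# Illustrative wiring only; assumes the python-memcached package is available.
import memcache
from jinja2 import Environment, FileSystemLoader
from jinja2.bccache import MemcachedBytecodeCache

client = memcache.Client(['127.0.0.1:11211'])
env = Environment(
    loader=FileSystemLoader('templates'),
    bytecode_cache=MemcachedBytecodeCache(client, timeout=600,
                                          ignore_memcache_errors=True),
)
template = env.get_template('index.html')  # compiled bytecode is cached in memcache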
@ -8,14 +8,16 @@
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from cStringIO import StringIO
|
||||
from itertools import chain
|
||||
from copy import deepcopy
|
||||
from keyword import iskeyword as is_python_keyword
|
||||
from jinja2 import nodes
|
||||
from jinja2.nodes import EvalContext
|
||||
from jinja2.visitor import NodeVisitor
|
||||
from jinja2.exceptions import TemplateAssertionError
|
||||
from jinja2.utils import Markup, concat, escape, is_python_keyword, next
|
||||
from jinja2.utils import Markup, concat, escape
|
||||
from jinja2._compat import range_type, text_type, string_types, \
|
||||
iteritems, NativeStringIO, imap
|
||||
|
||||
|
||||
operators = {
|
||||
@ -29,14 +31,6 @@ operators = {
|
||||
'notin': 'not in'
|
||||
}
|
||||
|
||||
try:
|
||||
exec '(0 if 0 else 0)'
|
||||
except SyntaxError:
|
||||
have_condexpr = False
|
||||
else:
|
||||
have_condexpr = True
|
||||
|
||||
|
||||
# what method to iterate over items do we want to use for dict iteration
|
||||
# in generated code? on 2.x let's go with iteritems, on 3.x with items
|
||||
if hasattr(dict, 'iteritems'):
|
||||
@ -51,7 +45,11 @@ def unoptimize_before_dead_code():
|
||||
def f():
|
||||
if 0: dummy(x)
|
||||
return f
|
||||
unoptimize_before_dead_code = bool(unoptimize_before_dead_code().func_closure)
|
||||
|
||||
# The getattr is necessary for pypy which does not set this attribute if
|
||||
# no closure is on the function
|
||||
unoptimize_before_dead_code = bool(
|
||||
getattr(unoptimize_before_dead_code(), '__closure__', None))
|
||||
|
||||
|
||||
def generate(node, environment, name, filename, stream=None,
|
||||
@ -69,8 +67,8 @@ def has_safe_repr(value):
|
||||
"""Does the node have a safe representation?"""
|
||||
if value is None or value is NotImplemented or value is Ellipsis:
|
||||
return True
|
||||
if isinstance(value, (bool, int, long, float, complex, basestring,
|
||||
xrange, Markup)):
|
||||
if isinstance(value, (bool, int, float, complex, range_type,
|
||||
Markup) + string_types):
|
||||
return True
|
||||
if isinstance(value, (tuple, list, set, frozenset)):
|
||||
for item in value:
|
||||
@ -78,7 +76,7 @@ def has_safe_repr(value):
|
||||
return False
|
||||
return True
|
||||
elif isinstance(value, dict):
|
||||
for key, value in value.iteritems():
|
||||
for key, value in iteritems(value):
|
||||
if not has_safe_repr(key):
|
||||
return False
|
||||
if not has_safe_repr(value):
|
||||
@ -368,7 +366,7 @@ class CodeGenerator(NodeVisitor):
|
||||
def __init__(self, environment, name, filename, stream=None,
|
||||
defer_init=False):
|
||||
if stream is None:
|
||||
stream = StringIO()
|
||||
stream = NativeStringIO()
|
||||
self.environment = environment
|
||||
self.name = name
|
||||
self.filename = filename
|
||||
@ -542,7 +540,7 @@ class CodeGenerator(NodeVisitor):
|
||||
self.write(', ')
|
||||
self.visit(kwarg, frame)
|
||||
if extra_kwargs is not None:
|
||||
for key, value in extra_kwargs.iteritems():
|
||||
for key, value in iteritems(extra_kwargs):
|
||||
self.write(', %s=%s' % (key, value))
|
||||
if node.dyn_args:
|
||||
self.write(', *')
|
||||
@ -558,7 +556,7 @@ class CodeGenerator(NodeVisitor):
|
||||
self.visit(kwarg.value, frame)
|
||||
self.write(', ')
|
||||
if extra_kwargs is not None:
|
||||
for key, value in extra_kwargs.iteritems():
|
||||
for key, value in iteritems(extra_kwargs):
|
||||
self.write('%r: %s, ' % (key, value))
|
||||
if node.dyn_kwargs is not None:
|
||||
self.write('}, **')
|
||||
@ -625,7 +623,7 @@ class CodeGenerator(NodeVisitor):
|
||||
|
||||
def pop_scope(self, aliases, frame):
|
||||
"""Restore all aliases and delete unused variables."""
|
||||
for name, alias in aliases.iteritems():
|
||||
for name, alias in iteritems(aliases):
|
||||
self.writeline('l_%s = %s' % (name, alias))
|
||||
to_delete = set()
|
||||
for name in frame.identifiers.declared_locally:
|
||||
@ -663,16 +661,16 @@ class CodeGenerator(NodeVisitor):
|
||||
# it without aliasing all the variables.
|
||||
# this could be fixed in Python 3 where we have the nonlocal
|
||||
# keyword or if we switch to bytecode generation
|
||||
overriden_closure_vars = (
|
||||
overridden_closure_vars = (
|
||||
func_frame.identifiers.undeclared &
|
||||
func_frame.identifiers.declared &
|
||||
(func_frame.identifiers.declared_locally |
|
||||
func_frame.identifiers.declared_parameter)
|
||||
)
|
||||
if overriden_closure_vars:
|
||||
if overridden_closure_vars:
|
||||
self.fail('It\'s not possible to set and access variables '
|
||||
'derived from an outer scope! (affects: %s)' %
|
||||
', '.join(sorted(overriden_closure_vars)), node.lineno)
|
||||
', '.join(sorted(overridden_closure_vars)), node.lineno)
|
||||
|
||||
# remove variables from a closure from the frame's undeclared
|
||||
# identifiers.
|
||||
@ -827,7 +825,7 @@ class CodeGenerator(NodeVisitor):
|
||||
self.outdent(2 + (not self.has_known_extends))
|
||||
|
||||
# at this point we now have the blocks collected and can visit them too.
|
||||
for name, block in self.blocks.iteritems():
|
||||
for name, block in iteritems(self.blocks):
|
||||
block_frame = Frame(eval_ctx)
|
||||
block_frame.inspect(block.body)
|
||||
block_frame.block = name
|
||||
@ -894,12 +892,13 @@ class CodeGenerator(NodeVisitor):
|
||||
self.indent()
|
||||
self.writeline('raise TemplateRuntimeError(%r)' %
|
||||
'extended multiple times')
|
||||
self.outdent()
|
||||
|
||||
# if we have a known extends already we don't need that code here
|
||||
# as we know that the template execution will end here.
|
||||
if self.has_known_extends:
|
||||
raise CompilerExit()
|
||||
else:
|
||||
self.outdent()
|
||||
|
||||
self.writeline('parent_template = environment.get_template(', node)
|
||||
self.visit(node.template, frame)
|
||||
@ -930,7 +929,7 @@ class CodeGenerator(NodeVisitor):
|
||||
|
||||
func_name = 'get_or_select_template'
|
||||
if isinstance(node.template, nodes.Const):
|
||||
if isinstance(node.template.value, basestring):
|
||||
if isinstance(node.template.value, string_types):
|
||||
func_name = 'get_template'
|
||||
elif isinstance(node.template.value, (tuple, list)):
|
||||
func_name = 'select_template'
|
||||
@ -950,9 +949,16 @@ class CodeGenerator(NodeVisitor):
|
||||
self.indent()
|
||||
|
||||
if node.with_context:
|
||||
self.writeline('include_context = template.new_context('
|
||||
'context.parent, True, locals())')
|
||||
self.writeline('for name, context_blocks in context.'
|
||||
'blocks.%s():' % dict_item_iter)
|
||||
self.indent()
|
||||
self.writeline('include_context.blocks.setdefault('
|
||||
'name, [])[0:0] = context_blocks')
|
||||
self.outdent()
|
||||
self.writeline('for event in template.root_render_func('
|
||||
'template.new_context(context.parent, True, '
|
||||
'locals())):')
|
||||
'include_context):')
|
||||
else:
|
||||
self.writeline('for event in template.module._body_stream:')
|
||||
|
||||
@ -1032,7 +1038,7 @@ class CodeGenerator(NodeVisitor):
|
||||
discarded_names[0])
|
||||
else:
|
||||
self.writeline('context.exported_vars.difference_'
|
||||
'update((%s))' % ', '.join(map(repr, discarded_names)))
|
||||
'update((%s))' % ', '.join(imap(repr, discarded_names)))
|
||||
|
||||
def visit_For(self, node, frame):
|
||||
# when calculating the nodes for the inner frame we have to exclude
|
||||
@ -1060,7 +1066,7 @@ class CodeGenerator(NodeVisitor):
|
||||
|
||||
# otherwise we set up a buffer and add a function def
|
||||
else:
|
||||
self.writeline('def loop(reciter, loop_render_func):', node)
|
||||
self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
|
||||
self.indent()
|
||||
self.buffer(loop_frame)
|
||||
aliases = {}
|
||||
@ -1068,6 +1074,7 @@ class CodeGenerator(NodeVisitor):
|
||||
# make sure the loop variable is a special one and raise a template
|
||||
# assertion error if a loop tries to write to loop
|
||||
if extended_loop:
|
||||
self.writeline('l_loop = missing')
|
||||
loop_frame.identifiers.add_special('loop')
|
||||
for name in node.find_all(nodes.Name):
|
||||
if name.ctx == 'store' and name.name == 'loop':
|
||||
@ -1118,7 +1125,7 @@ class CodeGenerator(NodeVisitor):
|
||||
self.visit(node.iter, loop_frame)
|
||||
|
||||
if node.recursive:
|
||||
self.write(', recurse=loop_render_func):')
|
||||
self.write(', loop_render_func, depth):')
|
||||
else:
|
||||
self.write(extended_loop and '):' or ':')
|
||||
|
||||
@ -1216,9 +1223,9 @@ class CodeGenerator(NodeVisitor):
|
||||
return
|
||||
|
||||
if self.environment.finalize:
|
||||
finalize = lambda x: unicode(self.environment.finalize(x))
|
||||
finalize = lambda x: text_type(self.environment.finalize(x))
|
||||
else:
|
||||
finalize = unicode
|
||||
finalize = text_type
|
||||
|
||||
# if we are inside a frame that requires output checking, we do so
|
||||
outdent_later = False
|
||||
@ -1367,7 +1374,7 @@ class CodeGenerator(NodeVisitor):
|
||||
public_names[0])
|
||||
else:
|
||||
self.writeline('context.exported_vars.update((%s))' %
|
||||
', '.join(map(repr, public_names)))
|
||||
', '.join(imap(repr, public_names)))
|
||||
|
||||
# -- Expression Visitors
|
||||
|
||||
@ -1555,15 +1562,6 @@ class CodeGenerator(NodeVisitor):
|
||||
'expression on %s evaluated to false and '
|
||||
'no else section was defined.' % self.position(node)))
|
||||
|
||||
if not have_condexpr:
|
||||
self.write('((')
|
||||
self.visit(node.test, frame)
|
||||
self.write(') and (')
|
||||
self.visit(node.expr1, frame)
|
||||
self.write(',) or (')
|
||||
write_expr2()
|
||||
self.write(',))[0]')
|
||||
else:
|
||||
self.write('(')
|
||||
self.visit(node.expr1, frame)
|
||||
self.write(' if ')
|
||||
|
@ -8,6 +8,7 @@
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from jinja2._compat import range_type
|
||||
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
|
||||
|
||||
|
||||
@ -21,13 +22,15 @@ COMMENT_END_STRING = '#}'
|
||||
LINE_STATEMENT_PREFIX = None
|
||||
LINE_COMMENT_PREFIX = None
|
||||
TRIM_BLOCKS = False
|
||||
LSTRIP_BLOCKS = False
|
||||
NEWLINE_SEQUENCE = '\n'
|
||||
KEEP_TRAILING_NEWLINE = False
|
||||
|
||||
|
||||
# default filters, tests and namespace
|
||||
from jinja2.filters import FILTERS as DEFAULT_FILTERS
|
||||
DEFAULT_NAMESPACE = {
|
||||
'range': xrange,
|
||||
'range': range_type,
|
||||
'dict': lambda **kw: kw,
|
||||
'lipsum': generate_lorem_ipsum,
|
||||
'cycler': Cycler,
|
||||
|
@ -11,16 +11,26 @@
|
||||
import os
|
||||
import sys
|
||||
from jinja2 import nodes
|
||||
from jinja2.defaults import *
|
||||
from jinja2.defaults import BLOCK_START_STRING, \
|
||||
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
|
||||
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
|
||||
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
|
||||
DEFAULT_FILTERS, DEFAULT_NAMESPACE, \
|
||||
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
|
||||
from jinja2.lexer import get_lexer, TokenStream
|
||||
from jinja2.parser import Parser
|
||||
from jinja2.nodes import EvalContext
|
||||
from jinja2.optimizer import optimize
|
||||
from jinja2.compiler import generate
|
||||
from jinja2.runtime import Undefined, new_context
|
||||
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
|
||||
TemplatesNotFound
|
||||
TemplatesNotFound, TemplateRuntimeError
|
||||
from jinja2.utils import import_string, LRUCache, Markup, missing, \
|
||||
concat, consume, internalcode, _encode_filename
|
||||
concat, consume, internalcode
|
||||
from jinja2._compat import imap, ifilter, string_types, iteritems, \
|
||||
text_type, reraise, implements_iterator, implements_to_string, \
|
||||
get_next, encode_filename, PY2, PYPY
|
||||
from functools import reduce
|
||||
|
||||
|
||||
# for direct template usage we have up to ten living environments
|
||||
@ -71,7 +81,7 @@ def load_extensions(environment, extensions):
|
||||
"""
|
||||
result = {}
|
||||
for extension in extensions:
|
||||
if isinstance(extension, basestring):
|
||||
if isinstance(extension, string_types):
|
||||
extension = import_string(extension)
|
||||
result[extension.identifier] = extension(environment)
|
||||
return result
|
||||
@ -134,12 +144,23 @@ class Environment(object):
|
||||
If this is set to ``True`` the first newline after a block is
|
||||
removed (block, not variable tag!). Defaults to `False`.
|
||||
|
||||
`lstrip_blocks`
|
||||
If this is set to ``True`` leading spaces and tabs are stripped
|
||||
from the start of a line to a block. Defaults to `False`.
|
||||
|
||||
`newline_sequence`
|
||||
The sequence that starts a newline. Must be one of ``'\r'``,
|
||||
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
|
||||
useful default for Linux and OS X systems as well as web
|
||||
applications.
|
||||
|
||||
`keep_trailing_newline`
|
||||
Preserve the trailing newline when rendering templates.
|
||||
The default is ``False``, which causes a single newline,
|
||||
if present, to be stripped from the end of the template.
|
||||
|
||||
.. versionadded:: 2.7
|
||||
|
||||
`extensions`
|
||||
List of Jinja extensions to use. This can either be import paths
|
||||
as strings or extension classes. For more information have a
|
||||
@ -224,7 +245,9 @@ class Environment(object):
|
||||
line_statement_prefix=LINE_STATEMENT_PREFIX,
|
||||
line_comment_prefix=LINE_COMMENT_PREFIX,
|
||||
trim_blocks=TRIM_BLOCKS,
|
||||
lstrip_blocks=LSTRIP_BLOCKS,
|
||||
newline_sequence=NEWLINE_SEQUENCE,
|
||||
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
|
||||
extensions=(),
|
||||
optimized=True,
|
||||
undefined=Undefined,
|
||||
@ -255,7 +278,9 @@ class Environment(object):
|
||||
self.line_statement_prefix = line_statement_prefix
|
||||
self.line_comment_prefix = line_comment_prefix
|
||||
self.trim_blocks = trim_blocks
|
||||
self.lstrip_blocks = lstrip_blocks
|
||||
self.newline_sequence = newline_sequence
|
||||
self.keep_trailing_newline = keep_trailing_newline
|
||||
|
||||
# runtime information
|
||||
self.undefined = undefined
|
||||
@ -269,7 +294,6 @@ class Environment(object):
|
||||
|
||||
# set the loader provided
|
||||
self.loader = loader
|
||||
self.bytecode_cache = None
|
||||
self.cache = create_cache(cache_size)
|
||||
self.bytecode_cache = bytecode_cache
|
||||
self.auto_reload = auto_reload
|
||||
@ -291,7 +315,7 @@ class Environment(object):
|
||||
yet. This is used by :ref:`extensions <writing-extensions>` to register
|
||||
callbacks and configuration values without breaking inheritance.
|
||||
"""
|
||||
for key, value in attributes.iteritems():
|
||||
for key, value in iteritems(attributes):
|
||||
if not hasattr(self, key):
|
||||
setattr(self, key, value)
|
||||
|
||||
@ -299,7 +323,8 @@ class Environment(object):
|
||||
variable_start_string=missing, variable_end_string=missing,
|
||||
comment_start_string=missing, comment_end_string=missing,
|
||||
line_statement_prefix=missing, line_comment_prefix=missing,
|
||||
trim_blocks=missing, extensions=missing, optimized=missing,
|
||||
trim_blocks=missing, lstrip_blocks=missing,
|
||||
extensions=missing, optimized=missing,
|
||||
undefined=missing, finalize=missing, autoescape=missing,
|
||||
loader=missing, cache_size=missing, auto_reload=missing,
|
||||
bytecode_cache=missing):
|
||||
@ -322,7 +347,7 @@ class Environment(object):
|
||||
rv.overlayed = True
|
||||
rv.linked_to = self
|
||||
|
||||
for key, value in args.iteritems():
|
||||
for key, value in iteritems(args):
|
||||
if value is not missing:
|
||||
setattr(rv, key, value)
|
||||
|
||||
@ -332,7 +357,7 @@ class Environment(object):
|
||||
rv.cache = copy_cache(self.cache)
|
||||
|
||||
rv.extensions = {}
|
||||
for key, value in self.extensions.iteritems():
|
||||
for key, value in iteritems(self.extensions):
|
||||
rv.extensions[key] = value.bind(rv)
|
||||
if extensions is not missing:
|
||||
rv.extensions.update(load_extensions(rv, extensions))
|
||||
@ -351,7 +376,7 @@ class Environment(object):
|
||||
try:
|
||||
return obj[argument]
|
||||
except (TypeError, LookupError):
|
||||
if isinstance(argument, basestring):
|
||||
if isinstance(argument, string_types):
|
||||
try:
|
||||
attr = str(argument)
|
||||
except Exception:
|
||||
@ -376,6 +401,42 @@ class Environment(object):
|
||||
except (TypeError, LookupError, AttributeError):
|
||||
return self.undefined(obj=obj, name=attribute)
|
||||
|
||||
def call_filter(self, name, value, args=None, kwargs=None,
|
||||
context=None, eval_ctx=None):
|
||||
"""Invokes a filter on a value the same way the compiler does it.
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
func = self.filters.get(name)
|
||||
if func is None:
|
||||
raise TemplateRuntimeError('no filter named %r' % name)
|
||||
args = list(args or ())
|
||||
if getattr(func, 'contextfilter', False):
|
||||
if context is None:
|
||||
raise TemplateRuntimeError('Attempted to invoke context '
|
||||
'filter without context')
|
||||
args.insert(0, context)
|
||||
elif getattr(func, 'evalcontextfilter', False):
|
||||
if eval_ctx is None:
|
||||
if context is not None:
|
||||
eval_ctx = context.eval_ctx
|
||||
else:
|
||||
eval_ctx = EvalContext(self)
|
||||
args.insert(0, eval_ctx)
|
||||
elif getattr(func, 'environmentfilter', False):
|
||||
args.insert(0, self)
|
||||
return func(value, *args, **(kwargs or {}))
|
||||
|
||||
def call_test(self, name, value, args=None, kwargs=None):
|
||||
"""Invokes a test on a value the same way the compiler does it.
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
func = self.tests.get(name)
|
||||
if func is None:
|
||||
raise TemplateRuntimeError('no test named %r' % name)
|
||||
return func(value, *(args or ()), **(kwargs or {}))
|
||||
|
||||
@internalcode
|
||||
def parse(self, source, name=None, filename=None):
|
||||
"""Parse the sourcecode and return the abstract syntax tree. This
|
||||
@ -394,7 +455,7 @@ class Environment(object):
|
||||
|
||||
def _parse(self, source, name, filename):
|
||||
"""Internal parsing function used by `parse` and `compile`."""
|
||||
return Parser(self, source, name, _encode_filename(filename)).parse()
|
||||
return Parser(self, source, name, encode_filename(filename)).parse()
|
||||
|
||||
def lex(self, source, name=None, filename=None):
|
||||
"""Lex the given sourcecode and return a generator that yields
|
||||
@ -406,7 +467,7 @@ class Environment(object):
|
||||
of the extensions to be applied you have to filter source through
|
||||
the :meth:`preprocess` method.
|
||||
"""
|
||||
source = unicode(source)
|
||||
source = text_type(source)
|
||||
try:
|
||||
return self.lexer.tokeniter(source, name, filename)
|
||||
except TemplateSyntaxError:
|
||||
@ -419,7 +480,7 @@ class Environment(object):
|
||||
because there you usually only want the actual source tokenized.
|
||||
"""
|
||||
return reduce(lambda s, e: e.preprocess(s, name, filename),
|
||||
self.iter_extensions(), unicode(source))
|
||||
self.iter_extensions(), text_type(source))
|
||||
|
||||
def _tokenize(self, source, name, filename=None, state=None):
|
||||
"""Called by the parser to do the preprocessing and filtering
|
||||
@ -473,7 +534,7 @@ class Environment(object):
|
||||
"""
|
||||
source_hint = None
|
||||
try:
|
||||
if isinstance(source, basestring):
|
||||
if isinstance(source, string_types):
|
||||
source_hint = source
|
||||
source = self._parse(source, name, filename)
|
||||
if self.optimized:
|
||||
@ -485,7 +546,7 @@ class Environment(object):
|
||||
if filename is None:
|
||||
filename = '<template>'
|
||||
else:
|
||||
filename = _encode_filename(filename)
|
||||
filename = encode_filename(filename)
|
||||
return self._compile(source, filename)
|
||||
except TemplateSyntaxError:
|
||||
exc_info = sys.exc_info()
|
||||
@ -555,7 +616,9 @@ class Environment(object):
|
||||
to `False` and you will get an exception on syntax errors.
|
||||
|
||||
If `py_compile` is set to `True` .pyc files will be written to the
|
||||
target instead of standard .py files.
|
||||
target instead of standard .py files. This flag does not do anything
|
||||
on pypy and Python 3 where pyc files are not picked up by itself and
|
||||
don't give much benefit.
|
||||
|
||||
.. versionadded:: 2.4
|
||||
"""
|
||||
@ -565,6 +628,11 @@ class Environment(object):
|
||||
log_function = lambda x: None
|
||||
|
||||
if py_compile:
|
||||
if not PY2 or PYPY:
|
||||
from warnings import warn
|
||||
warn(Warning('py_compile has no effect on pypy or Python 3'))
|
||||
py_compile = False
|
||||
else:
|
||||
import imp, marshal
|
||||
py_header = imp.get_magic() + \
|
||||
u'\xff\xff\xff\xff'.encode('iso-8859-15')
|
||||
@ -576,7 +644,7 @@ class Environment(object):
|
||||
def write_file(filename, data, mode):
|
||||
if zip:
|
||||
info = ZipInfo(filename)
|
||||
info.external_attr = 0755 << 16L
|
||||
info.external_attr = 0o755 << 16
|
||||
zip_file.writestr(info, data)
|
||||
else:
|
||||
f = open(os.path.join(target, filename), mode)
|
||||
@ -600,7 +668,7 @@ class Environment(object):
|
||||
source, filename, _ = self.loader.get_source(self, name)
|
||||
try:
|
||||
code = self.compile(source, name, filename, True, True)
|
||||
except TemplateSyntaxError, e:
|
||||
except TemplateSyntaxError as e:
|
||||
if not ignore_errors:
|
||||
raise
|
||||
log_function('Could not compile "%s": %s' % (name, e))
|
||||
@ -609,7 +677,7 @@ class Environment(object):
|
||||
filename = ModuleLoader.get_module_filename(name)
|
||||
|
||||
if py_compile:
|
||||
c = self._compile(code, _encode_filename(filename))
|
||||
c = self._compile(code, encode_filename(filename))
|
||||
write_file(filename + 'c', py_header +
|
||||
marshal.dumps(c), 'wb')
|
||||
log_function('Byte-compiled "%s" as %s' %
|
||||
@ -647,7 +715,7 @@ class Environment(object):
|
||||
filter_func = lambda x: '.' in x and \
|
||||
x.rsplit('.', 1)[1] in extensions
|
||||
if filter_func is not None:
|
||||
x = filter(filter_func, x)
|
||||
x = ifilter(filter_func, x)
|
||||
return x
|
||||
|
||||
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
|
||||
@ -670,7 +738,7 @@ class Environment(object):
|
||||
if self.exception_handler is not None:
|
||||
self.exception_handler(traceback)
|
||||
exc_type, exc_value, tb = traceback.standard_exc_info
|
||||
raise exc_type, exc_value, tb
|
||||
reraise(exc_type, exc_value, tb)
|
||||
|
||||
def join_path(self, template, parent):
|
||||
"""Join a template with the parent. By default all the lookups are
|
||||
@ -757,7 +825,7 @@ class Environment(object):
|
||||
|
||||
.. versionadded:: 2.3
|
||||
"""
|
||||
if isinstance(template_name_or_list, basestring):
|
||||
if isinstance(template_name_or_list, string_types):
|
||||
return self.get_template(template_name_or_list, parent, globals)
|
||||
elif isinstance(template_name_or_list, Template):
|
||||
return template_name_or_list
|
||||
@ -819,7 +887,9 @@ class Template(object):
|
||||
line_statement_prefix=LINE_STATEMENT_PREFIX,
|
||||
line_comment_prefix=LINE_COMMENT_PREFIX,
|
||||
trim_blocks=TRIM_BLOCKS,
|
||||
lstrip_blocks=LSTRIP_BLOCKS,
|
||||
newline_sequence=NEWLINE_SEQUENCE,
|
||||
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
|
||||
extensions=(),
|
||||
optimized=True,
|
||||
undefined=Undefined,
|
||||
@ -829,8 +899,9 @@ class Template(object):
|
||||
block_start_string, block_end_string, variable_start_string,
|
||||
variable_end_string, comment_start_string, comment_end_string,
|
||||
line_statement_prefix, line_comment_prefix, trim_blocks,
|
||||
newline_sequence, frozenset(extensions), optimized, undefined,
|
||||
finalize, autoescape, None, 0, False, None)
|
||||
lstrip_blocks, newline_sequence, keep_trailing_newline,
|
||||
frozenset(extensions), optimized, undefined, finalize, autoescape,
|
||||
None, 0, False, None)
|
||||
return env.from_string(source, template_class=cls)
|
||||
|
||||
@classmethod
|
||||
@ -842,7 +913,7 @@ class Template(object):
|
||||
'environment': environment,
|
||||
'__file__': code.co_filename
|
||||
}
|
||||
exec code in namespace
|
||||
exec(code, namespace)
|
||||
rv = cls._from_namespace(environment, namespace, globals)
|
||||
rv._uptodate = uptodate
|
||||
return rv
|
||||
@ -976,7 +1047,7 @@ class Template(object):
|
||||
@property
|
||||
def debug_info(self):
|
||||
"""The debug info mapping."""
|
||||
return [tuple(map(int, x.split('='))) for x in
|
||||
return [tuple(imap(int, x.split('='))) for x in
|
||||
self._debug_info.split('&')]
|
||||
|
||||
def __repr__(self):
|
||||
@ -987,6 +1058,7 @@ class Template(object):
|
||||
return '<%s %s>' % (self.__class__.__name__, name)
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class TemplateModule(object):
|
||||
"""Represents an imported template. All the exported names of the
|
||||
template are available as attributes on this object. Additionally
|
||||
@ -1002,13 +1074,6 @@ class TemplateModule(object):
|
||||
return Markup(concat(self._body_stream))
|
||||
|
||||
def __str__(self):
|
||||
return unicode(self).encode('utf-8')
|
||||
|
||||
# unicode goes after __str__ because we configured 2to3 to rename
|
||||
# __unicode__ to __str__. because the 2to3 tree is not designed to
|
||||
# remove nodes from it, we leave the above __str__ around and let
|
||||
# it override at runtime.
|
||||
def __unicode__(self):
|
||||
return concat(self._body_stream)
|
||||
|
||||
def __repr__(self):
|
||||
@ -1038,6 +1103,7 @@ class TemplateExpression(object):
|
||||
return rv
|
||||
|
||||
|
||||
@implements_iterator
|
||||
class TemplateStream(object):
|
||||
"""A template stream works pretty much like an ordinary python generator
|
||||
but it can buffer multiple items to reduce the number of total iterations.
|
||||
@ -1063,8 +1129,8 @@ class TemplateStream(object):
|
||||
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
|
||||
"""
|
||||
close = False
|
||||
if isinstance(fp, basestring):
|
||||
fp = file(fp, 'w')
|
||||
if isinstance(fp, string_types):
|
||||
fp = open(fp, encoding is None and 'w' or 'wb')
|
||||
close = True
|
||||
try:
|
||||
if encoding is not None:
|
||||
@ -1082,7 +1148,7 @@ class TemplateStream(object):
|
||||
|
||||
def disable_buffering(self):
|
||||
"""Disable the output buffering."""
|
||||
self._next = self._gen.next
|
||||
self._next = get_next(self._gen)
|
||||
self.buffered = False
|
||||
|
||||
def enable_buffering(self, size=5):
|
||||
@ -1110,12 +1176,12 @@ class TemplateStream(object):
|
||||
c_size = 0
|
||||
|
||||
self.buffered = True
|
||||
self._next = generator(self._gen.next).next
|
||||
self._next = get_next(generator(get_next(self._gen)))
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
def __next__(self):
|
||||
return self._next()
|
||||
|
||||
|
||||
|
@ -8,14 +8,16 @@
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from jinja2._compat import imap, text_type, PY2, implements_to_string
|
||||
|
||||
|
||||
class TemplateError(Exception):
|
||||
"""Baseclass for all template errors."""
|
||||
|
||||
if PY2:
|
||||
def __init__(self, message=None):
|
||||
if message is not None:
|
||||
message = unicode(message).encode('utf-8')
|
||||
message = text_type(message).encode('utf-8')
|
||||
Exception.__init__(self, message)
|
||||
|
||||
@property
|
||||
@ -25,7 +27,21 @@ class TemplateError(Exception):
|
||||
if message is not None:
|
||||
return message.decode('utf-8', 'replace')
|
||||
|
||||
def __unicode__(self):
|
||||
return self.message or u''
|
||||
else:
|
||||
def __init__(self, message=None):
|
||||
Exception.__init__(self, message)
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
message = self.args[0]
|
||||
if message is not None:
|
||||
return message
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class TemplateNotFound(IOError, LookupError, TemplateError):
|
||||
"""Raised if a template does not exist."""
|
||||
|
||||
@ -42,13 +58,6 @@ class TemplateNotFound(IOError, LookupError, TemplateError):
|
||||
self.templates = [name]
|
||||
|
||||
def __str__(self):
|
||||
return self.message.encode('utf-8')
|
||||
|
||||
# unicode goes after __str__ because we configured 2to3 to rename
|
||||
# __unicode__ to __str__. because the 2to3 tree is not designed to
|
||||
# remove nodes from it, we leave the above __str__ around and let
|
||||
# it override at runtime.
|
||||
def __unicode__(self):
|
||||
return self.message
|
||||
|
||||
|
||||
@ -63,11 +72,12 @@ class TemplatesNotFound(TemplateNotFound):
|
||||
def __init__(self, names=(), message=None):
|
||||
if message is None:
|
||||
message = u'none of the templates given were found: ' + \
|
||||
u', '.join(map(unicode, names))
|
||||
u', '.join(imap(text_type, names))
|
||||
TemplateNotFound.__init__(self, names and names[-1] or None, message)
|
||||
self.templates = list(names)
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class TemplateSyntaxError(TemplateError):
|
||||
"""Raised to tell the user that there is a problem with the template."""
|
||||
|
||||
@ -83,13 +93,6 @@ class TemplateSyntaxError(TemplateError):
|
||||
self.translated = False
|
||||
|
||||
def __str__(self):
|
||||
return unicode(self).encode('utf-8')
|
||||
|
||||
# unicode goes after __str__ because we configured 2to3 to rename
|
||||
# __unicode__ to __str__. because the 2to3 tree is not designed to
|
||||
# remove nodes from it, we leave the above __str__ around and let
|
||||
# it override at runtime.
|
||||
def __unicode__(self):
|
||||
# for translated errors we only return the message
|
||||
if self.translated:
|
||||
return self.message
|
||||
|
@ -10,13 +10,15 @@
|
||||
"""
|
||||
import re
|
||||
import math
|
||||
|
||||
from random import choice
|
||||
from operator import itemgetter
|
||||
from itertools import imap, groupby
|
||||
from itertools import groupby
|
||||
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
|
||||
unicode_urlencode
|
||||
from jinja2.runtime import Undefined
|
||||
from jinja2.exceptions import FilterArgumentError
|
||||
from jinja2._compat import imap, string_types, text_type, iteritems
|
||||
|
||||
|
||||
_word_re = re.compile(r'\w+(?u)')
|
||||
@ -52,13 +54,17 @@ def environmentfilter(f):
|
||||
def make_attrgetter(environment, attribute):
|
||||
"""Returns a callable that looks up the given attribute from a
|
||||
passed object with the rules of the environment. Dots are allowed
|
||||
to access attributes of attributes.
|
||||
to access attributes of attributes. Integer parts in paths are
|
||||
looked up as integers.
|
||||
"""
|
||||
if not isinstance(attribute, basestring) or '.' not in attribute:
|
||||
if not isinstance(attribute, string_types) \
|
||||
or ('.' not in attribute and not attribute.isdigit()):
|
||||
return lambda x: environment.getitem(x, attribute)
|
||||
attribute = attribute.split('.')
|
||||
def attrgetter(item):
|
||||
for part in attribute:
|
||||
if part.isdigit():
|
||||
part = int(part)
|
||||
item = environment.getitem(item, part)
|
||||
return item
|
||||
return attrgetter
|
||||
@ -68,7 +74,7 @@ def do_forceescape(value):
|
||||
"""Enforce HTML escaping. This will probably double escape variables."""
|
||||
if hasattr(value, '__html__'):
|
||||
value = value.__html__()
|
||||
return escape(unicode(value))
|
||||
return escape(text_type(value))
|
||||
|
||||
|
||||
def do_urlencode(value):
|
||||
@ -79,8 +85,8 @@ def do_urlencode(value):
|
||||
"""
|
||||
itemiter = None
|
||||
if isinstance(value, dict):
|
||||
itemiter = value.iteritems()
|
||||
elif not isinstance(value, basestring):
|
||||
itemiter = iteritems(value)
|
||||
elif not isinstance(value, string_types):
|
||||
try:
|
||||
itemiter = iter(value)
|
||||
except TypeError:
|
||||
@ -110,7 +116,7 @@ def do_replace(eval_ctx, s, old, new, count=None):
|
||||
if count is None:
|
||||
count = -1
|
||||
if not eval_ctx.autoescape:
|
||||
return unicode(s).replace(unicode(old), unicode(new), count)
|
||||
return text_type(s).replace(text_type(old), text_type(new), count)
|
||||
if hasattr(old, '__html__') or hasattr(new, '__html__') and \
|
||||
not hasattr(s, '__html__'):
|
||||
s = escape(s)
|
||||
@ -155,7 +161,7 @@ def do_xmlattr(_eval_ctx, d, autospace=True):
|
||||
"""
|
||||
rv = u' '.join(
|
||||
u'%s="%s"' % (escape(key), escape(value))
|
||||
for key, value in d.iteritems()
|
||||
for key, value in iteritems(d)
|
||||
if value is not None and not isinstance(value, Undefined)
|
||||
)
|
||||
if autospace and rv:
|
||||
@ -177,7 +183,7 @@ def do_title(s):
|
||||
uppercase letters, all remaining characters are lowercase.
|
||||
"""
|
||||
rv = []
|
||||
for item in re.compile(r'([-\s]+)(?u)').split(s):
|
||||
for item in re.compile(r'([-\s]+)(?u)').split(soft_unicode(s)):
|
||||
if not item:
|
||||
continue
|
||||
rv.append(item[0].upper() + item[1:])
|
||||
@ -194,7 +200,7 @@ def do_dictsort(value, case_sensitive=False, by='key'):
|
||||
{% for item in mydict|dictsort %}
|
||||
sort the dict by key, case insensitive
|
||||
|
||||
{% for item in mydict|dicsort(true) %}
|
||||
{% for item in mydict|dictsort(true) %}
|
||||
sort the dict by key, case sensitive
|
||||
|
||||
{% for item in mydict|dictsort(false, 'value') %}
|
||||
@ -210,7 +216,7 @@ def do_dictsort(value, case_sensitive=False, by='key'):
|
||||
'"key" or "value"')
|
||||
def sort_func(item):
|
||||
value = item[pos]
|
||||
if isinstance(value, basestring) and not case_sensitive:
|
||||
if isinstance(value, string_types) and not case_sensitive:
|
||||
value = value.lower()
|
||||
return value
|
||||
|
||||
@ -247,7 +253,7 @@ def do_sort(environment, value, reverse=False, case_sensitive=False,
|
||||
"""
|
||||
if not case_sensitive:
|
||||
def sort_func(item):
|
||||
if isinstance(item, basestring):
|
||||
if isinstance(item, string_types):
|
||||
item = item.lower()
|
||||
return item
|
||||
else:
|
||||
@ -276,7 +282,7 @@ def do_default(value, default_value=u'', boolean=False):
|
||||
|
||||
{{ ''|default('the string was empty', true) }}
|
||||
"""
|
||||
if (boolean and not value) or isinstance(value, Undefined):
|
||||
if isinstance(value, Undefined) or (boolean and not value):
|
||||
return default_value
|
||||
return value
|
||||
|
||||
@ -309,7 +315,7 @@ def do_join(eval_ctx, value, d=u'', attribute=None):
|
||||
|
||||
# no automatic escaping? joining is a lot eaiser then
|
||||
if not eval_ctx.autoescape:
|
||||
return unicode(d).join(imap(unicode, value))
|
||||
return text_type(d).join(imap(text_type, value))
|
||||
|
||||
# if the delimiter doesn't have an html representation we check
|
||||
# if any of the items has. If yes we do a coercion to Markup
|
||||
@ -320,11 +326,11 @@ def do_join(eval_ctx, value, d=u'', attribute=None):
|
||||
if hasattr(item, '__html__'):
|
||||
do_escape = True
|
||||
else:
|
||||
value[idx] = unicode(item)
|
||||
value[idx] = text_type(item)
|
||||
if do_escape:
|
||||
d = escape(d)
|
||||
else:
|
||||
d = unicode(d)
|
||||
d = text_type(d)
|
||||
return d.join(value)
|
||||
|
||||
# no html involved, to normal joining
|
||||
@ -333,14 +339,14 @@ def do_join(eval_ctx, value, d=u'', attribute=None):
|
||||
|
||||
def do_center(value, width=80):
|
||||
"""Centers the value in a field of a given width."""
|
||||
return unicode(value).center(width)
|
||||
return text_type(value).center(width)
|
||||
|
||||
|
||||
@environmentfilter
|
||||
def do_first(environment, seq):
|
||||
"""Return the first item of a sequence."""
|
||||
try:
|
||||
return iter(seq).next()
|
||||
return next(iter(seq))
|
||||
except StopIteration:
|
||||
return environment.undefined('No first item, sequence was empty.')
|
||||
|
||||
@ -349,7 +355,7 @@ def do_first(environment, seq):
|
||||
def do_last(environment, seq):
|
||||
"""Return the last item of a sequence."""
|
||||
try:
|
||||
return iter(reversed(seq)).next()
|
||||
return next(iter(reversed(seq)))
|
||||
except StopIteration:
|
||||
return environment.undefined('No last item, sequence was empty.')
|
||||
|
||||
@ -443,16 +449,17 @@ def do_truncate(s, length=255, killwords=False, end='...'):
|
||||
"""Return a truncated copy of the string. The length is specified
|
||||
with the first parameter which defaults to ``255``. If the second
|
||||
parameter is ``true`` the filter will cut the text at length. Otherwise
|
||||
it will try to save the last word. If the text was in fact
|
||||
it will discard the last word. If the text was in fact
|
||||
truncated it will append an ellipsis sign (``"..."``). If you want a
|
||||
different ellipsis sign than ``"..."`` you can specify it using the
|
||||
third parameter.
|
||||
|
||||
.. sourcecode jinja::
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{{ mytext|truncate(300, false, '»') }}
|
||||
truncate mytext to 300 chars, don't split up words, use a
|
||||
right pointing double arrow as ellipsis sign.
|
||||
{{ "foo bar"|truncate(5) }}
|
||||
-> "foo ..."
|
||||
{{ "foo bar"|truncate(5, True) }}
|
||||
-> "foo b..."
|
||||
"""
|
||||
if len(s) <= length:
|
||||
return s
|
||||
@ -470,15 +477,23 @@ def do_truncate(s, length=255, killwords=False, end='...'):
|
||||
return u' '.join(result)
|
||||
|
||||
@environmentfilter
|
||||
def do_wordwrap(environment, s, width=79, break_long_words=True):
|
||||
def do_wordwrap(environment, s, width=79, break_long_words=True,
|
||||
wrapstring=None):
|
||||
"""
|
||||
Return a copy of the string passed to the filter wrapped after
|
||||
``79`` characters. You can override this default using the first
|
||||
parameter. If you set the second parameter to `false` Jinja will not
|
||||
split words apart if they are longer than `width`.
|
||||
split words apart if they are longer than `width`. By default, the newlines
|
||||
will be the default newlines for the environment, but this can be changed
|
||||
using the wrapstring keyword argument.
|
||||
|
||||
.. versionadded:: 2.7
|
||||
Added support for the `wrapstring` parameter.
|
||||
"""
|
||||
if not wrapstring:
|
||||
wrapstring = environment.newline_sequence
|
||||
import textwrap
|
||||
return environment.newline_sequence.join(textwrap.wrap(s, width=width, expand_tabs=False,
|
||||
return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
|
||||
replace_whitespace=False,
|
||||
break_long_words=break_long_words))
|
||||
|
||||
@ -539,7 +554,7 @@ def do_striptags(value):
|
||||
"""
|
||||
if hasattr(value, '__html__'):
|
||||
value = value.__html__()
|
||||
return Markup(unicode(value)).striptags()
|
||||
return Markup(text_type(value)).striptags()
|
||||
|
||||
|
||||
def do_slice(value, slices, fill_with=None):
|
||||
@ -567,7 +582,7 @@ def do_slice(value, slices, fill_with=None):
|
||||
items_per_slice = length // slices
|
||||
slices_with_extra = length % slices
|
||||
offset = 0
|
||||
for slice_number in xrange(slices):
|
||||
for slice_number in range(slices):
|
||||
start = offset + slice_number * items_per_slice
|
||||
if slice_number < slices_with_extra:
|
||||
offset += 1
|
||||
@ -692,7 +707,8 @@ class _GroupTuple(tuple):
|
||||
grouper = property(itemgetter(0))
|
||||
list = property(itemgetter(1))
|
||||
|
||||
def __new__(cls, (key, value)):
|
||||
def __new__(cls, xxx_todo_changeme):
|
||||
(key, value) = xxx_todo_changeme
|
||||
return tuple.__new__(cls, (key, list(value)))
|
||||
|
||||
|
||||
@ -733,14 +749,14 @@ def do_mark_safe(value):
|
||||
|
||||
def do_mark_unsafe(value):
|
||||
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
|
||||
return unicode(value)
|
||||
return text_type(value)
|
||||
|
||||
|
||||
def do_reverse(value):
|
||||
"""Reverse the object or return an iterator the iterates over it the other
|
||||
way round.
|
||||
"""
|
||||
if isinstance(value, basestring):
|
||||
if isinstance(value, string_types):
|
||||
return value[::-1]
|
||||
try:
|
||||
return reversed(value)
|
||||
@ -778,6 +794,145 @@ def do_attr(environment, obj, name):
|
||||
return environment.undefined(obj=obj, name=name)
|
||||
|
||||
|
||||
@contextfilter
|
||||
def do_map(*args, **kwargs):
|
||||
"""Applies a filter on a sequence of objects or looks up an attribute.
|
||||
This is useful when dealing with lists of objects but you are really
|
||||
only interested in a certain value of it.
|
||||
|
||||
The basic usage is mapping on an attribute. Imagine you have a list
|
||||
of users but you are only interested in a list of usernames:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
Users on this page: {{ users|map(attribute='username')|join(', ') }}
|
||||
|
||||
Alternatively you can let it invoke a filter by passing the name of the
|
||||
filter and the arguments afterwards. A good example would be applying a
|
||||
text conversion filter on a sequence:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
Users on this page: {{ titles|map('lower')|join(', ') }}
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
context = args[0]
|
||||
seq = args[1]
|
||||
|
||||
if len(args) == 2 and 'attribute' in kwargs:
|
||||
attribute = kwargs.pop('attribute')
|
||||
if kwargs:
|
||||
raise FilterArgumentError('Unexpected keyword argument %r' %
|
||||
next(iter(kwargs)))
|
||||
func = make_attrgetter(context.environment, attribute)
|
||||
else:
|
||||
try:
|
||||
name = args[2]
|
||||
args = args[3:]
|
||||
except LookupError:
|
||||
raise FilterArgumentError('map requires a filter argument')
|
||||
func = lambda item: context.environment.call_filter(
|
||||
name, item, args, kwargs, context=context)
|
||||
|
||||
if seq:
|
||||
for item in seq:
|
||||
yield func(item)
|
||||
|
||||
|
||||
@contextfilter
|
||||
def do_select(*args, **kwargs):
|
||||
"""Filters a sequence of objects by appying a test to the object and only
|
||||
selecting the ones with the test succeeding.
|
||||
|
||||
Example usage:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{{ numbers|select("odd") }}
|
||||
{{ numbers|select("odd") }}
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
return _select_or_reject(args, kwargs, lambda x: x, False)
|
||||
|
||||
|
||||
@contextfilter
|
||||
def do_reject(*args, **kwargs):
|
||||
"""Filters a sequence of objects by appying a test to the object and
|
||||
rejecting the ones with the test succeeding.
|
||||
|
||||
Example usage:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{{ numbers|reject("odd") }}
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
return _select_or_reject(args, kwargs, lambda x: not x, False)
|
||||
|
||||
|
||||
@contextfilter
|
||||
def do_selectattr(*args, **kwargs):
|
||||
"""Filters a sequence of objects by appying a test to an attribute of an
|
||||
object and only selecting the ones with the test succeeding.
|
||||
|
||||
Example usage:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{{ users|selectattr("is_active") }}
|
||||
{{ users|selectattr("email", "none") }}
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
return _select_or_reject(args, kwargs, lambda x: x, True)
|
||||
|
||||
|
||||
@contextfilter
|
||||
def do_rejectattr(*args, **kwargs):
|
||||
"""Filters a sequence of objects by appying a test to an attribute of an
|
||||
object or the attribute and rejecting the ones with the test succeeding.
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{{ users|rejectattr("is_active") }}
|
||||
{{ users|rejectattr("email", "none") }}
|
||||
|
||||
.. versionadded:: 2.7
|
||||
"""
|
||||
return _select_or_reject(args, kwargs, lambda x: not x, True)
|
||||
|
||||
|
||||
def _select_or_reject(args, kwargs, modfunc, lookup_attr):
|
||||
context = args[0]
|
||||
seq = args[1]
|
||||
if lookup_attr:
|
||||
try:
|
||||
attr = args[2]
|
||||
except LookupError:
|
||||
raise FilterArgumentError('Missing parameter for attribute name')
|
||||
transfunc = make_attrgetter(context.environment, attr)
|
||||
off = 1
|
||||
else:
|
||||
off = 0
|
||||
transfunc = lambda x: x
|
||||
|
||||
try:
|
||||
name = args[2 + off]
|
||||
args = args[3 + off:]
|
||||
func = lambda item: context.environment.call_test(
|
||||
name, item, args, kwargs)
|
||||
except LookupError:
|
||||
func = bool
|
||||
|
||||
if seq:
|
||||
for item in seq:
|
||||
if modfunc(func(transfunc(item))):
|
||||
yield item
|
||||
|
||||
|
||||
FILTERS = {
|
||||
'attr': do_attr,
|
||||
'replace': do_replace,
|
||||
@ -802,7 +957,10 @@ FILTERS = {
|
||||
'capitalize': do_capitalize,
|
||||
'first': do_first,
|
||||
'last': do_last,
|
||||
'map': do_map,
|
||||
'random': do_random,
|
||||
'reject': do_reject,
|
||||
'rejectattr': do_rejectattr,
|
||||
'filesizeformat': do_filesizeformat,
|
||||
'pprint': do_pprint,
|
||||
'truncate': do_truncate,
|
||||
@ -816,6 +974,8 @@ FILTERS = {
|
||||
'format': do_format,
|
||||
'trim': do_trim,
|
||||
'striptags': do_striptags,
|
||||
'select': do_select,
|
||||
'selectattr': do_selectattr,
|
||||
'slice': do_slice,
|
||||
'batch': do_batch,
|
||||
'sum': do_sum,
|
||||
|
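The filters.py hunks above introduce the map/select/reject/selectattr/rejectattr family. A small hedged rendering example (the template string and data are illustrative only):

# Illustrative only: exercising the new selectattr/map filters added above.
from jinja2 import Environment

env = Environment()
tpl = env.from_string(
    "{{ users|selectattr('active')|map(attribute='name')|join(', ') }}")

users = [{'name': 'alice', 'active': True},
         {'name': 'bob', 'active': False}]
print(tpl.render(users=users))   # -> alice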
@ -15,10 +15,13 @@
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
|
||||
from operator import itemgetter
|
||||
from collections import deque
|
||||
from jinja2.exceptions import TemplateSyntaxError
|
||||
from jinja2.utils import LRUCache, next
|
||||
from jinja2.utils import LRUCache
|
||||
from jinja2._compat import iteritems, implements_iterator, text_type, \
|
||||
intern
|
||||
|
||||
|
||||
# cache for the lexers. Exists in order to be able to have multiple
|
||||
@ -126,7 +129,7 @@ operators = {
|
||||
';': TOKEN_SEMICOLON
|
||||
}
|
||||
|
||||
reverse_operators = dict([(v, k) for k, v in operators.iteritems()])
|
||||
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
|
||||
assert len(operators) == len(reverse_operators), 'operators dropped'
|
||||
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
|
||||
sorted(operators, key=lambda x: -len(x))))
|
||||
@ -197,7 +200,7 @@ def compile_rules(environment):
|
||||
|
||||
if environment.line_statement_prefix is not None:
|
||||
rules.append((len(environment.line_statement_prefix), 'linestatement',
|
||||
r'^\s*' + e(environment.line_statement_prefix)))
|
||||
r'^[ \t\v]*' + e(environment.line_statement_prefix)))
|
||||
if environment.line_comment_prefix is not None:
|
||||
rules.append((len(environment.line_comment_prefix), 'linecomment',
|
||||
r'(?:^|(?<=\S))[^\S\r\n]*' +
|
||||
@ -262,6 +265,7 @@ class Token(tuple):
|
||||
)
|
||||
|
||||
|
||||
@implements_iterator
|
||||
class TokenStreamIterator(object):
|
||||
"""The iterator for tokenstreams. Iterate over the stream
|
||||
until the eof token is reached.
|
||||
@ -273,7 +277,7 @@ class TokenStreamIterator(object):
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
def __next__(self):
|
||||
token = self.stream.current
|
||||
if token.type is TOKEN_EOF:
|
||||
self.stream.close()
|
||||
@ -282,6 +286,7 @@ class TokenStreamIterator(object):
|
||||
return token
|
||||
|
||||
|
||||
@implements_iterator
|
||||
class TokenStream(object):
|
||||
"""A token stream is an iterable that yields :class:`Token`\s. The
|
||||
parser however does not iterate over it but calls :meth:`next` to go
|
||||
@ -289,7 +294,7 @@ class TokenStream(object):
|
||||
"""
|
||||
|
||||
def __init__(self, generator, name, filename):
|
||||
self._next = iter(generator).next
|
||||
self._iter = iter(generator)
|
||||
self._pushed = deque()
|
||||
self.name = name
|
||||
self.filename = filename
|
||||
@ -300,8 +305,9 @@ class TokenStream(object):
|
||||
def __iter__(self):
|
||||
return TokenStreamIterator(self)
|
||||
|
||||
def __nonzero__(self):
|
||||
def __bool__(self):
|
||||
return bool(self._pushed) or self.current.type is not TOKEN_EOF
|
||||
__nonzero__ = __bool__ # py2
|
||||
|
||||
eos = property(lambda x: not x, doc="Are we at the end of the stream?")
|
||||
|
||||
@ -319,7 +325,7 @@ class TokenStream(object):
|
||||
|
||||
def skip(self, n=1):
|
||||
"""Got n tokens ahead."""
|
||||
for x in xrange(n):
|
||||
for x in range(n):
|
||||
next(self)
|
||||
|
||||
def next_if(self, expr):
|
||||
@ -333,14 +339,14 @@ class TokenStream(object):
|
||||
"""Like :meth:`next_if` but only returns `True` or `False`."""
|
||||
return self.next_if(expr) is not None
|
||||
|
||||
def next(self):
|
||||
def __next__(self):
|
||||
"""Go one token ahead and return the old one"""
|
||||
rv = self.current
|
||||
if self._pushed:
|
||||
self.current = self._pushed.popleft()
|
||||
elif self.current.type is not TOKEN_EOF:
|
||||
try:
|
||||
self.current = self._next()
|
||||
self.current = next(self._iter)
|
||||
except StopIteration:
|
||||
self.close()
|
||||
return rv
|
||||
@ -348,7 +354,7 @@ class TokenStream(object):
|
||||
def close(self):
|
||||
"""Close the stream."""
|
||||
self.current = Token(self.current.lineno, TOKEN_EOF, '')
|
||||
self._next = None
|
||||
self._iter = None
|
||||
self.closed = True
|
||||
|
||||
def expect(self, expr):
|
||||
@ -383,7 +389,9 @@ def get_lexer(environment):
|
||||
environment.line_statement_prefix,
|
||||
environment.line_comment_prefix,
|
||||
environment.trim_blocks,
|
||||
environment.newline_sequence)
|
||||
environment.lstrip_blocks,
|
||||
environment.newline_sequence,
|
||||
environment.keep_trailing_newline)
|
||||
lexer = _lexer_cache.get(key)
|
||||
if lexer is None:
|
||||
lexer = Lexer(environment)
|
||||
@ -425,7 +433,44 @@ class Lexer(object):
|
||||
# block suffix if trimming is enabled
|
||||
block_suffix_re = environment.trim_blocks and '\\n?' or ''
|
||||
|
||||
# strip leading spaces if lstrip_blocks is enabled
|
||||
prefix_re = {}
|
||||
if environment.lstrip_blocks:
|
||||
# use '{%+' to manually disable lstrip_blocks behavior
|
||||
no_lstrip_re = e('+')
|
||||
# detect overlap between block and variable or comment strings
|
||||
block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
|
||||
# make sure we don't mistake a block for a variable or a comment
|
||||
m = block_diff.match(environment.comment_start_string)
|
||||
no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
|
||||
m = block_diff.match(environment.variable_start_string)
|
||||
no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
|
||||
|
||||
# detect overlap between comment and variable strings
|
||||
comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
|
||||
m = comment_diff.match(environment.variable_start_string)
|
||||
no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
|
||||
|
||||
lstrip_re = r'^[ \t]*'
|
||||
block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
|
||||
lstrip_re,
|
||||
e(environment.block_start_string),
|
||||
no_lstrip_re,
|
||||
e(environment.block_start_string),
|
||||
)
|
||||
comment_prefix_re = r'%s%s%s|%s\+?' % (
|
||||
lstrip_re,
|
||||
e(environment.comment_start_string),
|
||||
no_variable_re,
|
||||
e(environment.comment_start_string),
|
||||
)
|
||||
prefix_re['block'] = block_prefix_re
|
||||
prefix_re['comment'] = comment_prefix_re
|
||||
else:
|
||||
block_prefix_re = '%s' % e(environment.block_start_string)
|
||||
|
||||
self.newline_sequence = environment.newline_sequence
|
||||
self.keep_trailing_newline = environment.keep_trailing_newline
|
||||
|
||||
# global lexing rules
|
||||
self.rules = {
|
||||
@ -434,11 +479,11 @@ class Lexer(object):
|
||||
(c('(.*?)(?:%s)' % '|'.join(
|
||||
[r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
|
||||
e(environment.block_start_string),
|
||||
e(environment.block_start_string),
|
||||
block_prefix_re,
|
||||
e(environment.block_end_string),
|
||||
e(environment.block_end_string)
|
||||
)] + [
|
||||
r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
|
||||
r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
|
||||
for n, r in root_tag_rules
|
||||
])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
|
||||
# data
|
||||
@ -472,7 +517,7 @@ class Lexer(object):
|
||||
TOKEN_RAW_BEGIN: [
|
||||
(c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
|
||||
e(environment.block_start_string),
|
||||
e(environment.block_start_string),
|
||||
block_prefix_re,
|
||||
e(environment.block_end_string),
|
||||
e(environment.block_end_string),
|
||||
block_suffix_re
|
||||
@ -526,7 +571,7 @@ class Lexer(object):
|
||||
value = self._normalize_newlines(value[1:-1]) \
|
||||
.encode('ascii', 'backslashreplace') \
|
||||
.decode('unicode-escape')
|
||||
except Exception, e:
|
||||
except Exception as e:
|
||||
msg = str(e).split(':')[-1].strip()
|
||||
raise TemplateSyntaxError(msg, lineno, name, filename)
|
||||
# if we can express it as bytestring (ascii only)
|
||||
@ -549,7 +594,14 @@ class Lexer(object):
|
||||
"""This method tokenizes the text and returns the tokens in a
|
||||
generator. Use this method if you just want to tokenize a template.
|
||||
"""
|
||||
source = '\n'.join(unicode(source).splitlines())
|
||||
source = text_type(source)
|
||||
lines = source.splitlines()
|
||||
if self.keep_trailing_newline and source:
|
||||
for newline in ('\r\n', '\r', '\n'):
|
||||
if source.endswith(newline):
|
||||
lines.append('')
|
||||
break
|
||||
source = '\n'.join(lines)
|
||||
pos = 0
|
||||
lineno = 1
|
||||
stack = ['root']
|
||||
@ -590,7 +642,7 @@ class Lexer(object):
|
||||
# yield for the current token the first named
|
||||
# group that matched
|
||||
elif token == '#bygroup':
|
||||
for key, value in m.groupdict().iteritems():
|
||||
for key, value in iteritems(m.groupdict()):
|
||||
if value is not None:
|
||||
yield lineno, key, value
|
||||
lineno += value.count('\n')
|
||||
@ -647,7 +699,7 @@ class Lexer(object):
|
||||
stack.pop()
|
||||
# resolve the new state by group checking
|
||||
elif new_state == '#bygroup':
|
||||
for key, value in m.groupdict().iteritems():
|
||||
for key, value in iteritems(m.groupdict()):
|
||||
if value is not None:
|
||||
stack.append(key)
|
||||
break
|
||||
|
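
The lexer changes above mostly port the token stream to the Python 3 iterator protocol: __next__ and __bool__ are defined once, and the implements_iterator helper from the new _compat module adds the Python 2 aliases. A minimal sketch of that usage pattern (illustrative only; CountDown is an invented class, not part of this commit):

from jinja2._compat import implements_iterator

@implements_iterator
class CountDown(object):
    """Toy iterator written py3-style, like TokenStream/TokenStreamIterator."""

    def __init__(self, n):
        self.n = n

    def __iter__(self):
        return self

    def __next__(self):
        # implements_iterator aliases this to .next() on Python 2
        if self.n <= 0:
            raise StopIteration()
        self.n -= 1
        return self.n

print(list(CountDown(3)))   # -> [2, 1, 0]
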
@ -13,12 +13,10 @@ import sys
import weakref
from types import ModuleType
from os import path
try:
    from hashlib import sha1
except ImportError:
    from sha import new as sha1
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import LRUCache, open_if_exists, internalcode
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems


def split_template_path(template):
@ -153,7 +151,7 @@ class FileSystemLoader(BaseLoader):
    """

    def __init__(self, searchpath, encoding='utf-8'):
        if isinstance(searchpath, basestring):
        if isinstance(searchpath, string_types):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding
@ -274,7 +272,7 @@ class DictLoader(BaseLoader):
    def get_source(self, environment, template):
        if template in self.mapping:
            source = self.mapping[template]
            return source, None, lambda: source != self.mapping.get(template)
            return source, None, lambda: source == self.mapping.get(template)
        raise TemplateNotFound(template)

    def list_templates(self):
@ -306,7 +304,7 @@ class FunctionLoader(BaseLoader):
        rv = self.load_func(template)
        if rv is None:
            raise TemplateNotFound(template)
        elif isinstance(rv, basestring):
        elif isinstance(rv, string_types):
            return rv, None, None
        return rv

@ -359,7 +357,7 @@ class PrefixLoader(BaseLoader):

    def list_templates(self):
        result = []
        for prefix, loader in self.mapping.iteritems():
        for prefix, loader in iteritems(self.mapping):
            for template in loader.list_templates():
                result.append(prefix + self.delimiter + template)
        return result
@ -431,7 +429,7 @@ class ModuleLoader(BaseLoader):
        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, basestring):
        if isinstance(path, string_types):
            path = [path]
        else:
            path = list(path)
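
Most of the loader hunks are mechanical basestring-to-string_types swaps, but the DictLoader hunk also corrects the uptodate callable so that changing the mapping invalidates the cached template. A small sketch of the behaviour that enables (standard jinja2 public API assumed; the template text is invented):

from jinja2 import Environment, DictLoader

mapping = {'hello.txt': 'Hello {{ name }}!'}
env = Environment(loader=DictLoader(mapping))   # auto_reload is on by default

print(env.get_template('hello.txt').render(name='world'))   # Hello world!

# With the corrected uptodate lambda, editing the mapping marks the cached
# template as stale, so the next lookup recompiles it.
mapping['hello.txt'] = 'Goodbye {{ name }}!'
print(env.get_template('hello.txt').render(name='world'))   # Goodbye world!
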
@ -9,7 +9,8 @@
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
from itertools import imap
|
||||
from _compat import text_type, string_types, int_types, \
|
||||
unichr, PY2
|
||||
|
||||
|
||||
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
|
||||
@ -19,7 +20,7 @@ _striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
|
||||
_entity_re = re.compile(r'&([^;]+);')
|
||||
|
||||
|
||||
class Markup(unicode):
|
||||
class Markup(text_type):
|
||||
r"""Marks a string as being safe for inclusion in HTML/XML output without
|
||||
needing to be escaped. This implements the `__html__` interface a couple
|
||||
of frameworks and web applications use. :class:`Markup` is a direct
|
||||
@ -68,65 +69,65 @@ class Markup(unicode):
|
||||
if hasattr(base, '__html__'):
|
||||
base = base.__html__()
|
||||
if encoding is None:
|
||||
return unicode.__new__(cls, base)
|
||||
return unicode.__new__(cls, base, encoding, errors)
|
||||
return text_type.__new__(cls, base)
|
||||
return text_type.__new__(cls, base, encoding, errors)
|
||||
|
||||
def __html__(self):
|
||||
return self
|
||||
|
||||
def __add__(self, other):
|
||||
if hasattr(other, '__html__') or isinstance(other, basestring):
|
||||
return self.__class__(unicode(self) + unicode(escape(other)))
|
||||
if isinstance(other, string_types) or hasattr(other, '__html__'):
|
||||
return self.__class__(super(Markup, self).__add__(self.escape(other)))
|
||||
return NotImplemented
|
||||
|
||||
def __radd__(self, other):
|
||||
if hasattr(other, '__html__') or isinstance(other, basestring):
|
||||
return self.__class__(unicode(escape(other)) + unicode(self))
|
||||
if hasattr(other, '__html__') or isinstance(other, string_types):
|
||||
return self.escape(other).__add__(self)
|
||||
return NotImplemented
|
||||
|
||||
def __mul__(self, num):
|
||||
if isinstance(num, (int, long)):
|
||||
return self.__class__(unicode.__mul__(self, num))
|
||||
if isinstance(num, int_types):
|
||||
return self.__class__(text_type.__mul__(self, num))
|
||||
return NotImplemented
|
||||
__rmul__ = __mul__
|
||||
|
||||
def __mod__(self, arg):
|
||||
if isinstance(arg, tuple):
|
||||
arg = tuple(imap(_MarkupEscapeHelper, arg))
|
||||
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
|
||||
else:
|
||||
arg = _MarkupEscapeHelper(arg)
|
||||
return self.__class__(unicode.__mod__(self, arg))
|
||||
arg = _MarkupEscapeHelper(arg, self.escape)
|
||||
return self.__class__(text_type.__mod__(self, arg))
|
||||
|
||||
def __repr__(self):
|
||||
return '%s(%s)' % (
|
||||
self.__class__.__name__,
|
||||
unicode.__repr__(self)
|
||||
text_type.__repr__(self)
|
||||
)
|
||||
|
||||
def join(self, seq):
|
||||
return self.__class__(unicode.join(self, imap(escape, seq)))
|
||||
join.__doc__ = unicode.join.__doc__
|
||||
return self.__class__(text_type.join(self, map(self.escape, seq)))
|
||||
join.__doc__ = text_type.join.__doc__
|
||||
|
||||
def split(self, *args, **kwargs):
|
||||
return map(self.__class__, unicode.split(self, *args, **kwargs))
|
||||
split.__doc__ = unicode.split.__doc__
|
||||
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
|
||||
split.__doc__ = text_type.split.__doc__
|
||||
|
||||
def rsplit(self, *args, **kwargs):
|
||||
return map(self.__class__, unicode.rsplit(self, *args, **kwargs))
|
||||
rsplit.__doc__ = unicode.rsplit.__doc__
|
||||
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
|
||||
rsplit.__doc__ = text_type.rsplit.__doc__
|
||||
|
||||
def splitlines(self, *args, **kwargs):
|
||||
return map(self.__class__, unicode.splitlines(self, *args, **kwargs))
|
||||
splitlines.__doc__ = unicode.splitlines.__doc__
|
||||
return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
|
||||
splitlines.__doc__ = text_type.splitlines.__doc__
|
||||
|
||||
def unescape(self):
|
||||
r"""Unescape markup again into an unicode string. This also resolves
|
||||
r"""Unescape markup again into an text_type string. This also resolves
|
||||
known HTML4 and XHTML entities:
|
||||
|
||||
>>> Markup("Main » <em>About</em>").unescape()
|
||||
u'Main \xbb <em>About</em>'
|
||||
"""
|
||||
from jinja2._markupsafe._constants import HTML_ENTITIES
|
||||
from _constants import HTML_ENTITIES
|
||||
def handle_match(m):
|
||||
name = m.group(1)
|
||||
if name in HTML_ENTITIES:
|
||||
@ -139,10 +140,10 @@ class Markup(unicode):
|
||||
except ValueError:
|
||||
pass
|
||||
return u''
|
||||
return _entity_re.sub(handle_match, unicode(self))
|
||||
return _entity_re.sub(handle_match, text_type(self))
|
||||
|
||||
def striptags(self):
|
||||
r"""Unescape markup into an unicode string and strip all tags. This
|
||||
r"""Unescape markup into an text_type string and strip all tags. This
|
||||
also resolves known HTML4 and XHTML entities. Whitespace is
|
||||
normalized to one:
|
||||
|
||||
@ -164,10 +165,10 @@ class Markup(unicode):
|
||||
return rv
|
||||
|
||||
def make_wrapper(name):
|
||||
orig = getattr(unicode, name)
|
||||
orig = getattr(text_type, name)
|
||||
def func(self, *args, **kwargs):
|
||||
args = _escape_argspec(list(args), enumerate(args))
|
||||
_escape_argspec(kwargs, kwargs.iteritems())
|
||||
args = _escape_argspec(list(args), enumerate(args), self.escape)
|
||||
#_escape_argspec(kwargs, kwargs.iteritems(), None)
|
||||
return self.__class__(orig(self, *args, **kwargs))
|
||||
func.__name__ = orig.__name__
|
||||
func.__doc__ = orig.__doc__
|
||||
@ -180,25 +181,29 @@ class Markup(unicode):
|
||||
locals()[method] = make_wrapper(method)
|
||||
|
||||
# new in python 2.5
|
||||
if hasattr(unicode, 'partition'):
|
||||
partition = make_wrapper('partition'),
|
||||
rpartition = make_wrapper('rpartition')
|
||||
if hasattr(text_type, 'partition'):
|
||||
def partition(self, sep):
|
||||
return tuple(map(self.__class__,
|
||||
text_type.partition(self, self.escape(sep))))
|
||||
def rpartition(self, sep):
|
||||
return tuple(map(self.__class__,
|
||||
text_type.rpartition(self, self.escape(sep))))
|
||||
|
||||
# new in python 2.6
|
||||
if hasattr(unicode, 'format'):
|
||||
if hasattr(text_type, 'format'):
|
||||
format = make_wrapper('format')
|
||||
|
||||
# not in python 3
|
||||
if hasattr(unicode, '__getslice__'):
|
||||
if hasattr(text_type, '__getslice__'):
|
||||
__getslice__ = make_wrapper('__getslice__')
|
||||
|
||||
del method, make_wrapper
|
||||
|
||||
|
||||
def _escape_argspec(obj, iterable):
|
||||
def _escape_argspec(obj, iterable, escape):
|
||||
"""Helper for various string-wrapped functions."""
|
||||
for key, value in iterable:
|
||||
if hasattr(value, '__html__') or isinstance(value, basestring):
|
||||
if hasattr(value, '__html__') or isinstance(value, string_types):
|
||||
obj[key] = escape(value)
|
||||
return obj
|
||||
|
||||
@ -206,13 +211,13 @@ def _escape_argspec(obj, iterable):
|
||||
class _MarkupEscapeHelper(object):
|
||||
"""Helper for Markup.__mod__"""
|
||||
|
||||
def __init__(self, obj):
|
||||
def __init__(self, obj, escape):
|
||||
self.obj = obj
|
||||
self.escape = escape
|
||||
|
||||
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x])
|
||||
__str__ = lambda s: str(escape(s.obj))
|
||||
__unicode__ = lambda s: unicode(escape(s.obj))
|
||||
__repr__ = lambda s: str(escape(repr(s.obj)))
|
||||
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
|
||||
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
|
||||
__repr__ = lambda s: str(s.escape(repr(s.obj)))
|
||||
__int__ = lambda s: int(s.obj)
|
||||
__float__ = lambda s: float(s.obj)
|
||||
|
||||
@ -220,6 +225,10 @@ class _MarkupEscapeHelper(object):
|
||||
# we have to import it down here as the speedups and native
|
||||
# modules imports the markup type which is define above.
|
||||
try:
|
||||
from jinja2._markupsafe._speedups import escape, escape_silent, soft_unicode
|
||||
from _speedups import escape, escape_silent, soft_unicode
|
||||
except ImportError:
|
||||
from jinja2._markupsafe._native import escape, escape_silent, soft_unicode
|
||||
from _native import escape, escape_silent, soft_unicode
|
||||
|
||||
if not PY2:
|
||||
soft_str = soft_unicode
|
||||
__all__.append('soft_str')
|
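
The Markup changes above are a unicode-to-text_type port plus escape-aware __add__/__mod__/join/partition wrappers; the observable behaviour is meant to stay the same. A quick sketch of that behaviour using the public jinja2 names (illustrative only, not part of the commit):

from jinja2 import Markup, escape

user = '<script>alert(1)</script>'

# Operations on Markup escape the unsafe operand automatically.
print(Markup('<em>Hi</em> ') + user)
print(Markup('<b>%s</b>') % user)

# unescape() reverses escaping by resolving the HTML entities again.
print(escape(user).unescape() == user)   # True
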
24
modules/matlab/generator/jinja2/markupsafe/_compat.py
Normal file
@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
"""
    markupsafe._compat
    ~~~~~~~~~~~~~~~~~~

    Compatibility module for different Python versions.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import sys

PY2 = sys.version_info[0] == 2

if not PY2:
    text_type = str
    string_types = (str,)
    unichr = chr
    int_types = (int,)
else:
    text_type = unicode
    string_types = (str, unicode)
    unichr = unichr
    int_types = (int, long)
267
modules/matlab/generator/jinja2/markupsafe/_constants.py
Normal file
@ -0,0 +1,267 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
markupsafe._constants
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Highlevel implementation of the Markup string.
|
||||
|
||||
:copyright: (c) 2010 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
|
||||
HTML_ENTITIES = {
|
||||
'AElig': 198,
|
||||
'Aacute': 193,
|
||||
'Acirc': 194,
|
||||
'Agrave': 192,
|
||||
'Alpha': 913,
|
||||
'Aring': 197,
|
||||
'Atilde': 195,
|
||||
'Auml': 196,
|
||||
'Beta': 914,
|
||||
'Ccedil': 199,
|
||||
'Chi': 935,
|
||||
'Dagger': 8225,
|
||||
'Delta': 916,
|
||||
'ETH': 208,
|
||||
'Eacute': 201,
|
||||
'Ecirc': 202,
|
||||
'Egrave': 200,
|
||||
'Epsilon': 917,
|
||||
'Eta': 919,
|
||||
'Euml': 203,
|
||||
'Gamma': 915,
|
||||
'Iacute': 205,
|
||||
'Icirc': 206,
|
||||
'Igrave': 204,
|
||||
'Iota': 921,
|
||||
'Iuml': 207,
|
||||
'Kappa': 922,
|
||||
'Lambda': 923,
|
||||
'Mu': 924,
|
||||
'Ntilde': 209,
|
||||
'Nu': 925,
|
||||
'OElig': 338,
|
||||
'Oacute': 211,
|
||||
'Ocirc': 212,
|
||||
'Ograve': 210,
|
||||
'Omega': 937,
|
||||
'Omicron': 927,
|
||||
'Oslash': 216,
|
||||
'Otilde': 213,
|
||||
'Ouml': 214,
|
||||
'Phi': 934,
|
||||
'Pi': 928,
|
||||
'Prime': 8243,
|
||||
'Psi': 936,
|
||||
'Rho': 929,
|
||||
'Scaron': 352,
|
||||
'Sigma': 931,
|
||||
'THORN': 222,
|
||||
'Tau': 932,
|
||||
'Theta': 920,
|
||||
'Uacute': 218,
|
||||
'Ucirc': 219,
|
||||
'Ugrave': 217,
|
||||
'Upsilon': 933,
|
||||
'Uuml': 220,
|
||||
'Xi': 926,
|
||||
'Yacute': 221,
|
||||
'Yuml': 376,
|
||||
'Zeta': 918,
|
||||
'aacute': 225,
|
||||
'acirc': 226,
|
||||
'acute': 180,
|
||||
'aelig': 230,
|
||||
'agrave': 224,
|
||||
'alefsym': 8501,
|
||||
'alpha': 945,
|
||||
'amp': 38,
|
||||
'and': 8743,
|
||||
'ang': 8736,
|
||||
'apos': 39,
|
||||
'aring': 229,
|
||||
'asymp': 8776,
|
||||
'atilde': 227,
|
||||
'auml': 228,
|
||||
'bdquo': 8222,
|
||||
'beta': 946,
|
||||
'brvbar': 166,
|
||||
'bull': 8226,
|
||||
'cap': 8745,
|
||||
'ccedil': 231,
|
||||
'cedil': 184,
|
||||
'cent': 162,
|
||||
'chi': 967,
|
||||
'circ': 710,
|
||||
'clubs': 9827,
|
||||
'cong': 8773,
|
||||
'copy': 169,
|
||||
'crarr': 8629,
|
||||
'cup': 8746,
|
||||
'curren': 164,
|
||||
'dArr': 8659,
|
||||
'dagger': 8224,
|
||||
'darr': 8595,
|
||||
'deg': 176,
|
||||
'delta': 948,
|
||||
'diams': 9830,
|
||||
'divide': 247,
|
||||
'eacute': 233,
|
||||
'ecirc': 234,
|
||||
'egrave': 232,
|
||||
'empty': 8709,
|
||||
'emsp': 8195,
|
||||
'ensp': 8194,
|
||||
'epsilon': 949,
|
||||
'equiv': 8801,
|
||||
'eta': 951,
|
||||
'eth': 240,
|
||||
'euml': 235,
|
||||
'euro': 8364,
|
||||
'exist': 8707,
|
||||
'fnof': 402,
|
||||
'forall': 8704,
|
||||
'frac12': 189,
|
||||
'frac14': 188,
|
||||
'frac34': 190,
|
||||
'frasl': 8260,
|
||||
'gamma': 947,
|
||||
'ge': 8805,
|
||||
'gt': 62,
|
||||
'hArr': 8660,
|
||||
'harr': 8596,
|
||||
'hearts': 9829,
|
||||
'hellip': 8230,
|
||||
'iacute': 237,
|
||||
'icirc': 238,
|
||||
'iexcl': 161,
|
||||
'igrave': 236,
|
||||
'image': 8465,
|
||||
'infin': 8734,
|
||||
'int': 8747,
|
||||
'iota': 953,
|
||||
'iquest': 191,
|
||||
'isin': 8712,
|
||||
'iuml': 239,
|
||||
'kappa': 954,
|
||||
'lArr': 8656,
|
||||
'lambda': 955,
|
||||
'lang': 9001,
|
||||
'laquo': 171,
|
||||
'larr': 8592,
|
||||
'lceil': 8968,
|
||||
'ldquo': 8220,
|
||||
'le': 8804,
|
||||
'lfloor': 8970,
|
||||
'lowast': 8727,
|
||||
'loz': 9674,
|
||||
'lrm': 8206,
|
||||
'lsaquo': 8249,
|
||||
'lsquo': 8216,
|
||||
'lt': 60,
|
||||
'macr': 175,
|
||||
'mdash': 8212,
|
||||
'micro': 181,
|
||||
'middot': 183,
|
||||
'minus': 8722,
|
||||
'mu': 956,
|
||||
'nabla': 8711,
|
||||
'nbsp': 160,
|
||||
'ndash': 8211,
|
||||
'ne': 8800,
|
||||
'ni': 8715,
|
||||
'not': 172,
|
||||
'notin': 8713,
|
||||
'nsub': 8836,
|
||||
'ntilde': 241,
|
||||
'nu': 957,
|
||||
'oacute': 243,
|
||||
'ocirc': 244,
|
||||
'oelig': 339,
|
||||
'ograve': 242,
|
||||
'oline': 8254,
|
||||
'omega': 969,
|
||||
'omicron': 959,
|
||||
'oplus': 8853,
|
||||
'or': 8744,
|
||||
'ordf': 170,
|
||||
'ordm': 186,
|
||||
'oslash': 248,
|
||||
'otilde': 245,
|
||||
'otimes': 8855,
|
||||
'ouml': 246,
|
||||
'para': 182,
|
||||
'part': 8706,
|
||||
'permil': 8240,
|
||||
'perp': 8869,
|
||||
'phi': 966,
|
||||
'pi': 960,
|
||||
'piv': 982,
|
||||
'plusmn': 177,
|
||||
'pound': 163,
|
||||
'prime': 8242,
|
||||
'prod': 8719,
|
||||
'prop': 8733,
|
||||
'psi': 968,
|
||||
'quot': 34,
|
||||
'rArr': 8658,
|
||||
'radic': 8730,
|
||||
'rang': 9002,
|
||||
'raquo': 187,
|
||||
'rarr': 8594,
|
||||
'rceil': 8969,
|
||||
'rdquo': 8221,
|
||||
'real': 8476,
|
||||
'reg': 174,
|
||||
'rfloor': 8971,
|
||||
'rho': 961,
|
||||
'rlm': 8207,
|
||||
'rsaquo': 8250,
|
||||
'rsquo': 8217,
|
||||
'sbquo': 8218,
|
||||
'scaron': 353,
|
||||
'sdot': 8901,
|
||||
'sect': 167,
|
||||
'shy': 173,
|
||||
'sigma': 963,
|
||||
'sigmaf': 962,
|
||||
'sim': 8764,
|
||||
'spades': 9824,
|
||||
'sub': 8834,
|
||||
'sube': 8838,
|
||||
'sum': 8721,
|
||||
'sup': 8835,
|
||||
'sup1': 185,
|
||||
'sup2': 178,
|
||||
'sup3': 179,
|
||||
'supe': 8839,
|
||||
'szlig': 223,
|
||||
'tau': 964,
|
||||
'there4': 8756,
|
||||
'theta': 952,
|
||||
'thetasym': 977,
|
||||
'thinsp': 8201,
|
||||
'thorn': 254,
|
||||
'tilde': 732,
|
||||
'times': 215,
|
||||
'trade': 8482,
|
||||
'uArr': 8657,
|
||||
'uacute': 250,
|
||||
'uarr': 8593,
|
||||
'ucirc': 251,
|
||||
'ugrave': 249,
|
||||
'uml': 168,
|
||||
'upsih': 978,
|
||||
'upsilon': 965,
|
||||
'uuml': 252,
|
||||
'weierp': 8472,
|
||||
'xi': 958,
|
||||
'yacute': 253,
|
||||
'yen': 165,
|
||||
'yuml': 255,
|
||||
'zeta': 950,
|
||||
'zwj': 8205,
|
||||
'zwnj': 8204
|
||||
}
|
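
The table above is what Markup.unescape()-style code consults when it resolves named character references. A toy illustration of that lookup (the one-entry dict is a stand-in for the full table, and unichr is aliased the way the compat modules in this commit do it):

try:
    unichr                 # Python 2 builtin
except NameError:
    unichr = chr           # Python 3

HTML_ENTITIES = {'raquo': 187}            # stand-in for the full table above
print(unichr(HTML_ENTITIES['raquo']))     # -> »
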
@ -8,7 +8,7 @@
    :copyright: (c) 2010 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
from jinja2._markupsafe import Markup
from _compat import text_type


def escape(s):
@ -18,7 +18,7 @@ def escape(s):
    """
    if hasattr(s, '__html__'):
        return s.__html__()
    return Markup(unicode(s)
    return Markup(text_type(s)
        .replace('&', '&amp;')
        .replace('>', '&gt;')
        .replace('<', '&lt;')
@ -40,6 +40,6 @@ def soft_unicode(s):
    """Make a string unicode if it isn't already. That way a markup
    string is not converted back to unicode.
    """
    if not isinstance(s, unicode):
        s = unicode(s)
    if not isinstance(s, text_type):
        s = text_type(s)
    return s
@ -12,14 +12,16 @@
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import types
|
||||
import operator
|
||||
from itertools import chain, izip
|
||||
|
||||
from collections import deque
|
||||
from jinja2.utils import Markup, MethodType, FunctionType
|
||||
from jinja2.utils import Markup
|
||||
from jinja2._compat import izip, with_metaclass, text_type
|
||||
|
||||
|
||||
#: the types we support for context functions
|
||||
_context_function_types = (FunctionType, MethodType)
|
||||
_context_function_types = (types.FunctionType, types.MethodType)
|
||||
|
||||
|
||||
_binop_to_func = {
|
||||
@ -102,9 +104,9 @@ def get_eval_context(node, ctx):
|
||||
return ctx
|
||||
|
||||
|
||||
class Node(object):
|
||||
class Node(with_metaclass(NodeType, object)):
|
||||
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
|
||||
of different types. There are three major types:
|
||||
of different types. There are four major types:
|
||||
|
||||
- :class:`Stmt`: statements
|
||||
- :class:`Expr`: expressions
|
||||
@ -118,7 +120,6 @@ class Node(object):
|
||||
The `environment` attribute is set at the end of the parsing process for
|
||||
all nodes automatically.
|
||||
"""
|
||||
__metaclass__ = NodeType
|
||||
fields = ()
|
||||
attributes = ('lineno', 'environment')
|
||||
abstract = True
|
||||
@ -142,7 +143,7 @@ class Node(object):
|
||||
setattr(self, attr, attributes.pop(attr, None))
|
||||
if attributes:
|
||||
raise TypeError('unknown attribute %r' %
|
||||
iter(attributes).next())
|
||||
next(iter(attributes)))
|
||||
|
||||
def iter_fields(self, exclude=None, only=None):
|
||||
"""This method iterates over all fields that are defined and yields
|
||||
@ -440,7 +441,7 @@ class Const(Literal):
|
||||
constant value in the generated code, otherwise it will raise
|
||||
an `Impossible` exception.
|
||||
"""
|
||||
from compiler import has_safe_repr
|
||||
from .compiler import has_safe_repr
|
||||
if not has_safe_repr(value):
|
||||
raise Impossible()
|
||||
return cls(value, lineno=lineno, environment=environment)
|
||||
@ -687,7 +688,7 @@ class Concat(Expr):
|
||||
|
||||
def as_const(self, eval_ctx=None):
|
||||
eval_ctx = get_eval_context(self, eval_ctx)
|
||||
return ''.join(unicode(x.as_const(eval_ctx)) for x in self.nodes)
|
||||
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
|
||||
|
||||
|
||||
class Compare(Expr):
|
||||
|
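
In the nodes hunk, the py2-only `__metaclass__ = NodeType` assignment is replaced by the `with_metaclass(NodeType, object)` base-class call. A generic sketch of that pattern (the Registry and Widget names are invented for the example):

from jinja2._compat import with_metaclass

class Registry(type):
    """Toy metaclass that tags every class it creates."""
    def __new__(mcs, name, bases, d):
        d.setdefault('kind', name.lower())
        return type.__new__(mcs, name, bases, d)

# Works unchanged on Python 2 and 3, unlike __metaclass__ = Registry.
class Widget(with_metaclass(Registry, object)):
    pass

print(Widget.kind)               # -> 'widget'
print(type(Widget) is Registry)  # -> True
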
@ -10,8 +10,8 @@
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.utils import next
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import imap


#: statements that callinto
@ -53,7 +53,7 @@ class Parser(object):
    def _fail_ut_eof(self, name, end_token_stack, lineno):
        expected = []
        for exprs in end_token_stack:
            expected.extend(map(describe_token_expr, exprs))
            expected.extend(imap(describe_token_expr, exprs))
        if end_token_stack:
            currently_looking = ' or '.join(
                "'%s'" % describe_token_expr(expr)
@ -8,12 +8,14 @@
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
from itertools import chain, imap
|
||||
from itertools import chain
|
||||
from jinja2.nodes import EvalContext, _context_function_types
|
||||
from jinja2.utils import Markup, partial, soft_unicode, escape, missing, \
|
||||
concat, internalcode, next, object_type_repr
|
||||
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
|
||||
internalcode, object_type_repr
|
||||
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
|
||||
TemplateNotFound
|
||||
from jinja2._compat import imap, text_type, iteritems, \
|
||||
implements_iterator, implements_to_string, string_types, PY2
|
||||
|
||||
|
||||
# these variables are exported to the template runtime
|
||||
@ -23,9 +25,8 @@ __all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
|
||||
'TemplateNotFound']
|
||||
|
||||
#: the name of the function that is used to convert something into
|
||||
#: a string. 2to3 will adopt that automatically and the generated
|
||||
#: code can take advantage of it.
|
||||
to_string = unicode
|
||||
#: a string. We can just use the text type here.
|
||||
to_string = text_type
|
||||
|
||||
#: the identity function. Useful for certain things in the environment
|
||||
identity = lambda x: x
|
||||
@ -46,7 +47,7 @@ def markup_join(seq):
|
||||
|
||||
def unicode_join(seq):
|
||||
"""Simple args to unicode conversion and concatenation."""
|
||||
return concat(imap(unicode, seq))
|
||||
return concat(imap(text_type, seq))
|
||||
|
||||
|
||||
def new_context(environment, template_name, blocks, vars=None,
|
||||
@ -63,7 +64,7 @@ def new_context(environment, template_name, blocks, vars=None,
|
||||
# we don't want to modify the dict passed
|
||||
if shared:
|
||||
parent = dict(parent)
|
||||
for key, value in locals.iteritems():
|
||||
for key, value in iteritems(locals):
|
||||
if key[:2] == 'l_' and value is not missing:
|
||||
parent[key[2:]] = value
|
||||
return Context(environment, parent, template_name, blocks)
|
||||
@ -119,7 +120,7 @@ class Context(object):
|
||||
# create the initial mapping of blocks. Whenever template inheritance
|
||||
# takes place the runtime will update this mapping with the new blocks
|
||||
# from the template.
|
||||
self.blocks = dict((k, [v]) for k, v in blocks.iteritems())
|
||||
self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
|
||||
|
||||
def super(self, name, current):
|
||||
"""Render a parent block."""
|
||||
@ -171,6 +172,16 @@ class Context(object):
|
||||
"""
|
||||
if __debug__:
|
||||
__traceback_hide__ = True
|
||||
|
||||
# Allow callable classes to take a context
|
||||
fn = __obj.__call__
|
||||
for fn_type in ('contextfunction',
|
||||
'evalcontextfunction',
|
||||
'environmentfunction'):
|
||||
if hasattr(fn, fn_type):
|
||||
__obj = fn
|
||||
break
|
||||
|
||||
if isinstance(__obj, _context_function_types):
|
||||
if getattr(__obj, 'contextfunction', 0):
|
||||
args = (__self,) + args
|
||||
@ -191,7 +202,7 @@ class Context(object):
|
||||
self.parent, True, None, locals)
|
||||
context.vars.update(self.vars)
|
||||
context.eval_ctx = self.eval_ctx
|
||||
context.blocks.update((k, list(v)) for k, v in self.blocks.iteritems())
|
||||
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
|
||||
return context
|
||||
|
||||
def _all(meth):
|
||||
@ -205,7 +216,7 @@ class Context(object):
|
||||
items = _all('items')
|
||||
|
||||
# not available on python 3
|
||||
if hasattr(dict, 'iterkeys'):
|
||||
if PY2:
|
||||
iterkeys = _all('iterkeys')
|
||||
itervalues = _all('itervalues')
|
||||
iteritems = _all('iteritems')
|
||||
@ -269,11 +280,12 @@ class BlockReference(object):
|
||||
class LoopContext(object):
|
||||
"""A loop context for dynamic iteration."""
|
||||
|
||||
def __init__(self, iterable, recurse=None):
|
||||
def __init__(self, iterable, recurse=None, depth0=0):
|
||||
self._iterator = iter(iterable)
|
||||
self._recurse = recurse
|
||||
self._after = self._safe_next()
|
||||
self.index0 = -1
|
||||
self.depth0 = depth0
|
||||
|
||||
# try to get the length of the iterable early. This must be done
|
||||
# here because there are some broken iterators around where there
|
||||
@ -295,6 +307,7 @@ class LoopContext(object):
|
||||
index = property(lambda x: x.index0 + 1)
|
||||
revindex = property(lambda x: x.length - x.index0)
|
||||
revindex0 = property(lambda x: x.length - x.index)
|
||||
depth = property(lambda x: x.depth0 + 1)
|
||||
|
||||
def __len__(self):
|
||||
return self.length
|
||||
@ -313,7 +326,7 @@ class LoopContext(object):
|
||||
if self._recurse is None:
|
||||
raise TypeError('Tried to call non recursive loop. Maybe you '
|
||||
"forgot the 'recursive' modifier.")
|
||||
return self._recurse(iterable, self._recurse)
|
||||
return self._recurse(iterable, self._recurse, self.depth0 + 1)
|
||||
|
||||
# a nifty trick to enhance the error message if someone tried to call
|
||||
# the the loop without or with too many arguments.
|
||||
@ -340,6 +353,7 @@ class LoopContext(object):
|
||||
)
|
||||
|
||||
|
||||
@implements_iterator
|
||||
class LoopContextIterator(object):
|
||||
"""The iterator for a loop context."""
|
||||
__slots__ = ('context',)
|
||||
@ -350,7 +364,7 @@ class LoopContextIterator(object):
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
def __next__(self):
|
||||
ctx = self.context
|
||||
ctx.index0 += 1
|
||||
if ctx._after is _last_iteration:
|
||||
@ -424,6 +438,7 @@ class Macro(object):
|
||||
)
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class Undefined(object):
|
||||
"""The default undefined type. This undefined type can be printed and
|
||||
iterated over, but every other access will raise an :exc:`UndefinedError`:
|
||||
@ -455,7 +470,7 @@ class Undefined(object):
|
||||
if self._undefined_hint is None:
|
||||
if self._undefined_obj is missing:
|
||||
hint = '%r is undefined' % self._undefined_name
|
||||
elif not isinstance(self._undefined_name, basestring):
|
||||
elif not isinstance(self._undefined_name, string_types):
|
||||
hint = '%s has no element %r' % (
|
||||
object_type_repr(self._undefined_obj),
|
||||
self._undefined_name
|
||||
@ -483,13 +498,6 @@ class Undefined(object):
|
||||
_fail_with_undefined_error
|
||||
|
||||
def __str__(self):
|
||||
return unicode(self).encode('utf-8')
|
||||
|
||||
# unicode goes after __str__ because we configured 2to3 to rename
|
||||
# __unicode__ to __str__. because the 2to3 tree is not designed to
|
||||
# remove nodes from it, we leave the above __str__ around and let
|
||||
# it override at runtime.
|
||||
def __unicode__(self):
|
||||
return u''
|
||||
|
||||
def __len__(self):
|
||||
@ -506,6 +514,7 @@ class Undefined(object):
|
||||
return 'Undefined'
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class DebugUndefined(Undefined):
|
||||
"""An undefined that returns the debug info when printed.
|
||||
|
||||
@ -521,7 +530,7 @@ class DebugUndefined(Undefined):
|
||||
"""
|
||||
__slots__ = ()
|
||||
|
||||
def __unicode__(self):
|
||||
def __str__(self):
|
||||
if self._undefined_hint is None:
|
||||
if self._undefined_obj is missing:
|
||||
return u'{{ %s }}' % self._undefined_name
|
||||
@ -532,6 +541,7 @@ class DebugUndefined(Undefined):
|
||||
return u'{{ undefined value printed: %s }}' % self._undefined_hint
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class StrictUndefined(Undefined):
|
||||
"""An undefined that barks on print and iteration as well as boolean
|
||||
tests and all kinds of comparisons. In other words: you can do nothing
|
||||
@ -552,7 +562,7 @@ class StrictUndefined(Undefined):
|
||||
UndefinedError: 'foo' is undefined
|
||||
"""
|
||||
__slots__ = ()
|
||||
__iter__ = __unicode__ = __str__ = __len__ = __nonzero__ = __eq__ = \
|
||||
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
|
||||
__ne__ = __bool__ = Undefined._fail_with_undefined_error
|
||||
|
||||
|
||||
|
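
The runtime hunks also add depth tracking to recursive loops (depth0 is threaded through LoopContext and passed on by self._recurse). A sketch of what that exposes to templates (standard Jinja2 syntax; the tree data is invented):

from jinja2 import Template

tmpl = Template(
    '{% for node in tree recursive %}'
    '{{ loop.depth }}:{{ node.name }} '
    '{{ loop(node.children) }}'
    '{% endfor %}'
)
tree = [{'name': 'a', 'children': [{'name': 'b', 'children': []}]}]
print(tmpl.render(tree=tree))   # -> 1:a 2:b (plus whitespace)
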
@ -9,25 +9,18 @@
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
import sys
|
||||
import errno
|
||||
try:
|
||||
from urllib.parse import quote_from_bytes as url_quote
|
||||
except ImportError:
|
||||
from urllib import quote as url_quote
|
||||
try:
|
||||
from thread import allocate_lock
|
||||
except ImportError:
|
||||
from dummy_thread import allocate_lock
|
||||
from collections import deque
|
||||
from itertools import imap
|
||||
from threading import Lock
|
||||
from jinja2._compat import text_type, string_types, implements_iterator, \
|
||||
url_quote
|
||||
|
||||
|
||||
_word_split_re = re.compile(r'(\s+)')
|
||||
_punctuation_re = re.compile(
|
||||
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
|
||||
'|'.join(imap(re.escape, ('(', '<', '<'))),
|
||||
'|'.join(imap(re.escape, ('.', ',', ')', '>', '\n', '>')))
|
||||
'|'.join(map(re.escape, ('(', '<', '<'))),
|
||||
'|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '>')))
|
||||
)
|
||||
)
|
||||
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
|
||||
@ -42,77 +35,7 @@ missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
|
||||
# internal code
|
||||
internal_code = set()
|
||||
|
||||
|
||||
# concatenate a list of strings and convert them to unicode.
|
||||
# unfortunately there is a bug in python 2.4 and lower that causes
|
||||
# unicode.join trash the traceback.
|
||||
_concat = u''.join
|
||||
try:
|
||||
def _test_gen_bug():
|
||||
raise TypeError(_test_gen_bug)
|
||||
yield None
|
||||
_concat(_test_gen_bug())
|
||||
except TypeError, _error:
|
||||
if not _error.args or _error.args[0] is not _test_gen_bug:
|
||||
def concat(gen):
|
||||
try:
|
||||
return _concat(list(gen))
|
||||
except Exception:
|
||||
# this hack is needed so that the current frame
|
||||
# does not show up in the traceback.
|
||||
exc_type, exc_value, tb = sys.exc_info()
|
||||
raise exc_type, exc_value, tb.tb_next
|
||||
else:
|
||||
concat = _concat
|
||||
del _test_gen_bug, _error
|
||||
|
||||
|
||||
# for python 2.x we create ourselves a next() function that does the
|
||||
# basics without exception catching.
|
||||
try:
|
||||
next = next
|
||||
except NameError:
|
||||
def next(x):
|
||||
return x.next()
|
||||
|
||||
|
||||
# if this python version is unable to deal with unicode filenames
|
||||
# when passed to encode we let this function encode it properly.
|
||||
# This is used in a couple of places. As far as Jinja is concerned
|
||||
# filenames are unicode *or* bytestrings in 2.x and unicode only in
|
||||
# 3.x because compile cannot handle bytes
|
||||
if sys.version_info < (3, 0):
|
||||
def _encode_filename(filename):
|
||||
if isinstance(filename, unicode):
|
||||
return filename.encode('utf-8')
|
||||
return filename
|
||||
else:
|
||||
def _encode_filename(filename):
|
||||
assert filename is None or isinstance(filename, str), \
|
||||
'filenames must be strings'
|
||||
return filename
|
||||
|
||||
from keyword import iskeyword as is_python_keyword
|
||||
|
||||
|
||||
# common types. These do exist in the special types module too which however
|
||||
# does not exist in IronPython out of the box. Also that way we don't have
|
||||
# to deal with implementation specific stuff here
|
||||
class _C(object):
|
||||
def method(self): pass
|
||||
def _func():
|
||||
yield None
|
||||
FunctionType = type(_func)
|
||||
GeneratorType = type(_func())
|
||||
MethodType = type(_C.method)
|
||||
CodeType = type(_C.method.func_code)
|
||||
try:
|
||||
raise TypeError()
|
||||
except TypeError:
|
||||
_tb = sys.exc_info()[2]
|
||||
TracebackType = type(_tb)
|
||||
FrameType = type(_tb.tb_frame)
|
||||
del _C, _tb, _func
|
||||
concat = u''.join
|
||||
|
||||
|
||||
def contextfunction(f):
|
||||
@ -156,7 +79,7 @@ def environmentfunction(f):
|
||||
|
||||
def internalcode(f):
|
||||
"""Marks the function as internally used"""
|
||||
internal_code.add(f.func_code)
|
||||
internal_code.add(f.__code__)
|
||||
return f
|
||||
|
||||
|
||||
@ -226,7 +149,7 @@ def open_if_exists(filename, mode='rb'):
|
||||
"""
|
||||
try:
|
||||
return open(filename, mode)
|
||||
except IOError, e:
|
||||
except IOError as e:
|
||||
if e.errno not in (errno.ENOENT, errno.EISDIR):
|
||||
raise
|
||||
|
||||
@ -275,7 +198,7 @@ def urlize(text, trim_url_limit=None, nofollow=False):
|
||||
trim_url = lambda x, limit=trim_url_limit: limit is not None \
|
||||
and (x[:limit] + (len(x) >=limit and '...'
|
||||
or '')) or x
|
||||
words = _word_split_re.split(unicode(escape(text)))
|
||||
words = _word_split_re.split(text_type(escape(text)))
|
||||
nofollow_attr = nofollow and ' rel="nofollow"' or ''
|
||||
for i, word in enumerate(words):
|
||||
match = _punctuation_re.match(word)
|
||||
@ -284,6 +207,7 @@ def urlize(text, trim_url_limit=None, nofollow=False):
|
||||
if middle.startswith('www.') or (
|
||||
'@' not in middle and
|
||||
not middle.startswith('http://') and
|
||||
not middle.startswith('https://') and
|
||||
len(middle) > 0 and
|
||||
middle[0] in _letters + _digits and (
|
||||
middle.endswith('.org') or
|
||||
@ -311,7 +235,7 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
|
||||
words = LOREM_IPSUM_WORDS.split()
|
||||
result = []
|
||||
|
||||
for _ in xrange(n):
|
||||
for _ in range(n):
|
||||
next_capitalized = True
|
||||
last_comma = last_fullstop = 0
|
||||
word = None
|
||||
@ -319,7 +243,7 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
|
||||
p = []
|
||||
|
||||
# each paragraph contains out of 20 to 100 words.
|
||||
for idx, _ in enumerate(xrange(randrange(min, max))):
|
||||
for idx, _ in enumerate(range(randrange(min, max))):
|
||||
while True:
|
||||
word = choice(words)
|
||||
if word != last:
|
||||
@ -361,11 +285,11 @@ def unicode_urlencode(obj, charset='utf-8'):
|
||||
If non strings are provided they are converted to their unicode
|
||||
representation first.
|
||||
"""
|
||||
if not isinstance(obj, basestring):
|
||||
obj = unicode(obj)
|
||||
if isinstance(obj, unicode):
|
||||
if not isinstance(obj, string_types):
|
||||
obj = text_type(obj)
|
||||
if isinstance(obj, text_type):
|
||||
obj = obj.encode(charset)
|
||||
return unicode(url_quote(obj))
|
||||
return text_type(url_quote(obj))
|
||||
|
||||
|
||||
class LRUCache(object):
|
||||
@ -385,18 +309,10 @@ class LRUCache(object):
|
||||
# alias all queue methods for faster lookup
|
||||
self._popleft = self._queue.popleft
|
||||
self._pop = self._queue.pop
|
||||
if hasattr(self._queue, 'remove'):
|
||||
self._remove = self._queue.remove
|
||||
self._wlock = allocate_lock()
|
||||
self._wlock = Lock()
|
||||
self._append = self._queue.append
|
||||
|
||||
def _remove(self, obj):
|
||||
"""Python 2.4 compatibility."""
|
||||
for idx, item in enumerate(self._queue):
|
||||
if item == obj:
|
||||
del self._queue[idx]
|
||||
break
|
||||
|
||||
def __getstate__(self):
|
||||
return {
|
||||
'capacity': self.capacity,
|
||||
@ -429,11 +345,15 @@ class LRUCache(object):
|
||||
"""Set `default` if the key is not in the cache otherwise
|
||||
leave unchanged. Return the value of this key.
|
||||
"""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
self[key] = default
|
||||
return default
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def clear(self):
|
||||
"""Clear the cache."""
|
||||
@ -464,6 +384,8 @@ class LRUCache(object):
|
||||
|
||||
Raise a `KeyError` if it does not exist.
|
||||
"""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
rv = self._mapping[key]
|
||||
if self._queue[-1] != key:
|
||||
try:
|
||||
@ -475,6 +397,8 @@ class LRUCache(object):
|
||||
pass
|
||||
self._append(key)
|
||||
return rv
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
"""Sets the value for an item. Moves the item up so that it
|
||||
@ -483,11 +407,7 @@ class LRUCache(object):
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
if key in self._mapping:
|
||||
try:
|
||||
self._remove(key)
|
||||
except ValueError:
|
||||
# __getitem__ is not locked, it might happen
|
||||
pass
|
||||
elif len(self._mapping) == self.capacity:
|
||||
del self._mapping[self._popleft()]
|
||||
self._append(key)
|
||||
@ -557,6 +477,7 @@ except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
@implements_iterator
|
||||
class Cycler(object):
|
||||
"""A cycle helper for templates."""
|
||||
|
||||
@ -575,7 +496,7 @@ class Cycler(object):
|
||||
"""Returns the current item."""
|
||||
return self.items[self.pos]
|
||||
|
||||
def next(self):
|
||||
def __next__(self):
|
||||
"""Goes one item ahead and returns it."""
|
||||
rv = self.current
|
||||
self.pos = (self.pos + 1) % len(self.items)
|
||||
@ -596,25 +517,5 @@ class Joiner(object):
|
||||
return self.sep
|
||||
|
||||
|
||||
# try markupsafe first, if that fails go with Jinja2's bundled version
|
||||
# of markupsafe. Markupsafe was previously Jinja2's implementation of
|
||||
# the Markup object but was moved into a separate package in a patchlevel
|
||||
# release
|
||||
try:
|
||||
from markupsafe import Markup, escape, soft_unicode
|
||||
except ImportError:
|
||||
from jinja2._markupsafe import Markup, escape, soft_unicode
|
||||
|
||||
|
||||
# partials
|
||||
try:
|
||||
from functools import partial
|
||||
except ImportError:
|
||||
class partial(object):
|
||||
def __init__(self, _func, *args, **kwargs):
|
||||
self._func = _func
|
||||
self._args = args
|
||||
self._kwargs = kwargs
|
||||
def __call__(self, *args, **kwargs):
|
||||
kwargs.update(self._kwargs)
|
||||
return self._func(*(self._args + args), **kwargs)
|
||||
# Imported here because that's where it was in the past
|
||||
from markupsafe import Markup, escape, soft_unicode
|
||||
|
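
The utils changes drop the Python 2.4-era fallbacks (the concat probing, the hand-rolled partial, allocate_lock) in favour of threading.Lock and the _compat helpers; LRUCache keeps its behaviour. A short usage sketch of that cache (public jinja2.utils API, values invented):

from jinja2.utils import LRUCache

cache = LRUCache(2)          # bounded mapping of the kind used for the template cache
cache['a'] = 1
cache['b'] = 2
cache['a']                   # touching 'a' makes 'b' the least recently used
cache['c'] = 3               # inserting a third entry evicts 'b'
print('b' in cache)          # -> False
print(cache['a'], cache['c'])   # -> 1 3
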