Diffstat (limited to 'python/jinja2')
-rw-r--r--  python/jinja2/__init__.py        83
-rw-r--r--  python/jinja2/_compat.py         99
-rw-r--r--  python/jinja2/_identifier.py      2
-rw-r--r--  python/jinja2/asyncfilters.py   146
-rw-r--r--  python/jinja2/asyncsupport.py   256
-rw-r--r--  python/jinja2/bccache.py        362
-rw-r--r--  python/jinja2/compiler.py      1721
-rw-r--r--  python/jinja2/constants.py       32
-rw-r--r--  python/jinja2/debug.py          372
-rw-r--r--  python/jinja2/defaults.py        56
-rw-r--r--  python/jinja2/environment.py   1276
-rw-r--r--  python/jinja2/exceptions.py     146
-rw-r--r--  python/jinja2/ext.py            627
-rw-r--r--  python/jinja2/filters.py       1190
-rw-r--r--  python/jinja2/idtracking.py     286
-rw-r--r--  python/jinja2/lexer.py          739
-rw-r--r--  python/jinja2/loaders.py        481
-rw-r--r--  python/jinja2/meta.py           106
-rw-r--r--  python/jinja2/nativetypes.py    220
-rw-r--r--  python/jinja2/nodes.py          999
-rw-r--r--  python/jinja2/optimizer.py       49
-rw-r--r--  python/jinja2/parser.py         903
-rw-r--r--  python/jinja2/runtime.py        813
-rw-r--r--  python/jinja2/sandbox.py        486
-rw-r--r--  python/jinja2/tests.py          175
-rw-r--r--  python/jinja2/utils.py          647
-rw-r--r--  python/jinja2/visitor.py         87
27 files changed, 12359 insertions, 0 deletions
diff --git a/python/jinja2/__init__.py b/python/jinja2/__init__.py
new file mode 100644
index 0000000..15e13b6
--- /dev/null
+++ b/python/jinja2/__init__.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2
+ ~~~~~~
+
+ Jinja2 is a template engine written in pure Python. It provides a
+ Django inspired non-XML syntax but supports inline expressions and
+ an optional sandboxed environment.
+
+ Nutshell
+ --------
+
+    Here is a small example of a Jinja2 template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+ <ul>
+ {% for user in users %}
+ <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+ {% endfor %}
+ </ul>
+ {% endblock %}
+
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+__docformat__ = 'restructuredtext en'
+__version__ = '2.10.1'
+
+# high level interface
+from jinja2.environment import Environment, Template
+
+# loaders
+from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
+ DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
+ ModuleLoader
+
+# bytecode caches
+from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
+ MemcachedBytecodeCache
+
+# undefined types
+from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
+ make_logging_undefined
+
+# exceptions
+from jinja2.exceptions import TemplateError, UndefinedError, \
+ TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
+ TemplateAssertionError, TemplateRuntimeError
+
+# decorators and public utilities
+from jinja2.filters import environmentfilter, contextfilter, \
+ evalcontextfilter
+from jinja2.utils import Markup, escape, clear_caches, \
+ environmentfunction, evalcontextfunction, contextfunction, \
+ is_undefined, select_autoescape
+
+__all__ = [
+ 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
+ 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
+ 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
+ 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
+ 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
+ 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
+ 'TemplateRuntimeError',
+ 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
+ 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
+ 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
+ 'select_autoescape',
+]
+
+
+def _patch_async():
+ from jinja2.utils import have_async_gen
+ if have_async_gen:
+ from jinja2.asyncsupport import patch_all
+ patch_all()
+
+
+_patch_async()
+del _patch_async
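
For orientation, the high-level interface re-exported above is typically used
as in the following minimal sketch; `Environment`, `DictLoader`,
`get_template` and `render` are the real Jinja2 API, while the loader
contents, template text and variables are made up::

    from jinja2 import Environment, DictLoader

    # an in-memory environment; the template text below is illustrative only
    env = Environment(loader=DictLoader({
        'hello.html': u'Hello {{ user.username }}!',
    }))
    template = env.get_template('hello.html')
    print(template.render(user={'username': 'jane'}))   # Hello jane!
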
diff --git a/python/jinja2/_compat.py b/python/jinja2/_compat.py
new file mode 100644
index 0000000..61d8530
--- /dev/null
+++ b/python/jinja2/_compat.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2._compat
+ ~~~~~~~~~~~~~~
+
+ Some py2/py3 compatibility support based on a stripped down
+ version of six so we don't have to depend on a specific version
+ of it.
+
+ :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+PYPY = hasattr(sys, 'pypy_translation_info')
+_identity = lambda x: x
+
+
+if not PY2:
+ unichr = chr
+ range_type = range
+ text_type = str
+ string_types = (str,)
+ integer_types = (int,)
+
+ iterkeys = lambda d: iter(d.keys())
+ itervalues = lambda d: iter(d.values())
+ iteritems = lambda d: iter(d.items())
+
+ import pickle
+ from io import BytesIO, StringIO
+ NativeStringIO = StringIO
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+ ifilter = filter
+ imap = map
+ izip = zip
+ intern = sys.intern
+
+ implements_iterator = _identity
+ implements_to_string = _identity
+ encode_filename = _identity
+
+else:
+ unichr = unichr
+ text_type = unicode
+ range_type = xrange
+ string_types = (str, unicode)
+ integer_types = (int, long)
+
+ iterkeys = lambda d: d.iterkeys()
+ itervalues = lambda d: d.itervalues()
+ iteritems = lambda d: d.iteritems()
+
+ import cPickle as pickle
+ from cStringIO import StringIO as BytesIO, StringIO
+ NativeStringIO = BytesIO
+
+ exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+
+ from itertools import imap, izip, ifilter
+ intern = intern
+
+ def implements_iterator(cls):
+ cls.next = cls.__next__
+ del cls.__next__
+ return cls
+
+ def implements_to_string(cls):
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+ return cls
+
+ def encode_filename(filename):
+ if isinstance(filename, unicode):
+ return filename.encode('utf-8')
+ return filename
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a
+ # dummy metaclass for one level of class instantiation that replaces
+ # itself with the actual metaclass.
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+try:
+ from urllib.parse import quote_from_bytes as url_quote
+except ImportError:
+ from urllib import quote as url_quote
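
The `with_metaclass` helper above works by creating a dummy metaclass that
replaces itself with the real one on the first class construction. A small
sketch of the intended use, with hypothetical `Meta` and `Base` classes::

    from jinja2._compat import with_metaclass

    class Meta(type):
        def __new__(mcs, name, bases, d):
            d['created_by_meta'] = True
            return type.__new__(mcs, name, bases, d)

    class Base(object):
        pass

    # Widget is built by Meta and inherits directly from Base on both
    # Python 2 and Python 3; no temporary class remains in the MRO.
    class Widget(with_metaclass(Meta, Base)):
        pass

    assert Widget.created_by_meta and Widget.__mro__[1] is Base
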
diff --git a/python/jinja2/_identifier.py b/python/jinja2/_identifier.py
new file mode 100644
index 0000000..2eac35d
--- /dev/null
+++ b/python/jinja2/_identifier.py
@@ -0,0 +1,2 @@
+# generated by scripts/generate_identifier_pattern.py
+pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
diff --git a/python/jinja2/asyncfilters.py b/python/jinja2/asyncfilters.py
new file mode 100644
index 0000000..5c1f46d
--- /dev/null
+++ b/python/jinja2/asyncfilters.py
@@ -0,0 +1,146 @@
+from functools import wraps
+
+from jinja2.asyncsupport import auto_aiter
+from jinja2 import filters
+
+
+async def auto_to_seq(value):
+ seq = []
+ if hasattr(value, '__aiter__'):
+ async for item in value:
+ seq.append(item)
+ else:
+ for item in value:
+ seq.append(item)
+ return seq
+
+
+async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
+ seq, func = filters.prepare_select_or_reject(
+ args, kwargs, modfunc, lookup_attr)
+ if seq:
+ async for item in auto_aiter(seq):
+ if func(item):
+ yield item
+
+
+def dualfilter(normal_filter, async_filter):
+ wrap_evalctx = False
+ if getattr(normal_filter, 'environmentfilter', False):
+ is_async = lambda args: args[0].is_async
+ wrap_evalctx = False
+ else:
+ if not getattr(normal_filter, 'evalcontextfilter', False) and \
+ not getattr(normal_filter, 'contextfilter', False):
+ wrap_evalctx = True
+ is_async = lambda args: args[0].environment.is_async
+
+ @wraps(normal_filter)
+ def wrapper(*args, **kwargs):
+ b = is_async(args)
+ if wrap_evalctx:
+ args = args[1:]
+ if b:
+ return async_filter(*args, **kwargs)
+ return normal_filter(*args, **kwargs)
+
+ if wrap_evalctx:
+ wrapper.evalcontextfilter = True
+
+ wrapper.asyncfiltervariant = True
+
+ return wrapper
+
+
+def asyncfiltervariant(original):
+ def decorator(f):
+ return dualfilter(original, f)
+ return decorator
+
+
+@asyncfiltervariant(filters.do_first)
+async def do_first(environment, seq):
+ try:
+ return await auto_aiter(seq).__anext__()
+ except StopAsyncIteration:
+ return environment.undefined('No first item, sequence was empty.')
+
+
+@asyncfiltervariant(filters.do_groupby)
+async def do_groupby(environment, value, attribute):
+ expr = filters.make_attrgetter(environment, attribute)
+ return [filters._GroupTuple(key, await auto_to_seq(values))
+ for key, values in filters.groupby(sorted(
+ await auto_to_seq(value), key=expr), expr)]
+
+
+@asyncfiltervariant(filters.do_join)
+async def do_join(eval_ctx, value, d=u'', attribute=None):
+ return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
+
+
+@asyncfiltervariant(filters.do_list)
+async def do_list(value):
+ return await auto_to_seq(value)
+
+
+@asyncfiltervariant(filters.do_reject)
+async def do_reject(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@asyncfiltervariant(filters.do_rejectattr)
+async def do_rejectattr(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@asyncfiltervariant(filters.do_select)
+async def do_select(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@asyncfiltervariant(filters.do_selectattr)
+async def do_selectattr(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@asyncfiltervariant(filters.do_map)
+async def do_map(*args, **kwargs):
+ seq, func = filters.prepare_map(args, kwargs)
+ if seq:
+ async for item in auto_aiter(seq):
+ yield func(item)
+
+
+@asyncfiltervariant(filters.do_sum)
+async def do_sum(environment, iterable, attribute=None, start=0):
+ rv = start
+ if attribute is not None:
+ func = filters.make_attrgetter(environment, attribute)
+ else:
+ func = lambda x: x
+ async for item in auto_aiter(iterable):
+ rv += func(item)
+ return rv
+
+
+@asyncfiltervariant(filters.do_slice)
+async def do_slice(value, slices, fill_with=None):
+ return filters.do_slice(await auto_to_seq(value), slices, fill_with)
+
+
+ASYNC_FILTERS = {
+ 'first': do_first,
+ 'groupby': do_groupby,
+ 'join': do_join,
+ 'list': do_list,
+ # we intentionally do not support do_last because that would be
+ # ridiculous
+ 'reject': do_reject,
+ 'rejectattr': do_rejectattr,
+ 'map': do_map,
+ 'select': do_select,
+ 'selectattr': do_selectattr,
+ 'sum': do_sum,
+ 'slice': do_slice,
+}
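
These coroutine-based filter variants are only installed (via `patch_filters`
in asyncsupport) when async support is available, and they only run when the
environment was created with `enable_async=True`. A hedged usage sketch on
Python 3.6+, with a made-up template and data source::

    import asyncio
    from jinja2 import Environment

    env = Environment(enable_async=True)
    tmpl = env.from_string(u'{{ items | select("odd") | list }}')

    async def numbers():
        # async generators are accepted because the filters above
        # iterate through auto_aiter()
        for i in range(5):
            yield i

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(tmpl.render_async(items=numbers())))
    # [1, 3]
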
diff --git a/python/jinja2/asyncsupport.py b/python/jinja2/asyncsupport.py
new file mode 100644
index 0000000..b1e7b5c
--- /dev/null
+++ b/python/jinja2/asyncsupport.py
@@ -0,0 +1,256 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.asyncsupport
+ ~~~~~~~~~~~~~~~~~~~
+
+ Has all the code for async support which is implemented as a patch
+ for supported Python versions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+import asyncio
+import inspect
+from functools import update_wrapper
+
+from jinja2.utils import concat, internalcode, Markup
+from jinja2.environment import TemplateModule
+from jinja2.runtime import LoopContextBase, _last_iteration
+
+
+async def concat_async(async_gen):
+ rv = []
+ async def collect():
+ async for event in async_gen:
+ rv.append(event)
+ await collect()
+ return concat(rv)
+
+
+async def generate_async(self, *args, **kwargs):
+ vars = dict(*args, **kwargs)
+ try:
+ async for event in self.root_render_func(self.new_context(vars)):
+ yield event
+ except Exception:
+ exc_info = sys.exc_info()
+ else:
+ return
+ yield self.environment.handle_exception(exc_info, True)
+
+
+def wrap_generate_func(original_generate):
+ def _convert_generator(self, loop, args, kwargs):
+ async_gen = self.generate_async(*args, **kwargs)
+ try:
+ while 1:
+ yield loop.run_until_complete(async_gen.__anext__())
+ except StopAsyncIteration:
+ pass
+ def generate(self, *args, **kwargs):
+ if not self.environment.is_async:
+ return original_generate(self, *args, **kwargs)
+ return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
+ return update_wrapper(generate, original_generate)
+
+
+async def render_async(self, *args, **kwargs):
+ if not self.environment.is_async:
+ raise RuntimeError('The environment was not created with async mode '
+ 'enabled.')
+
+ vars = dict(*args, **kwargs)
+ ctx = self.new_context(vars)
+
+ try:
+ return await concat_async(self.root_render_func(ctx))
+ except Exception:
+ exc_info = sys.exc_info()
+ return self.environment.handle_exception(exc_info, True)
+
+
+def wrap_render_func(original_render):
+ def render(self, *args, **kwargs):
+ if not self.environment.is_async:
+ return original_render(self, *args, **kwargs)
+ loop = asyncio.get_event_loop()
+ return loop.run_until_complete(self.render_async(*args, **kwargs))
+ return update_wrapper(render, original_render)
+
+
+def wrap_block_reference_call(original_call):
+ @internalcode
+ async def async_call(self):
+ rv = await concat_async(self._stack[self._depth](self._context))
+ if self._context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+ @internalcode
+ def __call__(self):
+ if not self._context.environment.is_async:
+ return original_call(self)
+ return async_call(self)
+
+ return update_wrapper(__call__, original_call)
+
+
+def wrap_macro_invoke(original_invoke):
+ @internalcode
+ async def async_invoke(self, arguments, autoescape):
+ rv = await self._func(*arguments)
+ if autoescape:
+ rv = Markup(rv)
+ return rv
+
+ @internalcode
+ def _invoke(self, arguments, autoescape):
+ if not self._environment.is_async:
+ return original_invoke(self, arguments, autoescape)
+ return async_invoke(self, arguments, autoescape)
+ return update_wrapper(_invoke, original_invoke)
+
+
+@internalcode
+async def get_default_module_async(self):
+ if self._module is not None:
+ return self._module
+ self._module = rv = await self.make_module_async()
+ return rv
+
+
+def wrap_default_module(original_default_module):
+ @internalcode
+ def _get_default_module(self):
+ if self.environment.is_async:
+ raise RuntimeError('Template module attribute is unavailable '
+ 'in async mode')
+ return original_default_module(self)
+ return _get_default_module
+
+
+async def make_module_async(self, vars=None, shared=False, locals=None):
+ context = self.new_context(vars, shared, locals)
+ body_stream = []
+ async for item in self.root_render_func(context):
+ body_stream.append(item)
+ return TemplateModule(self, context, body_stream)
+
+
+def patch_template():
+ from jinja2 import Template
+ Template.generate = wrap_generate_func(Template.generate)
+ Template.generate_async = update_wrapper(
+ generate_async, Template.generate_async)
+ Template.render_async = update_wrapper(
+ render_async, Template.render_async)
+ Template.render = wrap_render_func(Template.render)
+ Template._get_default_module = wrap_default_module(
+ Template._get_default_module)
+ Template._get_default_module_async = get_default_module_async
+ Template.make_module_async = update_wrapper(
+ make_module_async, Template.make_module_async)
+
+
+def patch_runtime():
+ from jinja2.runtime import BlockReference, Macro
+ BlockReference.__call__ = wrap_block_reference_call(
+ BlockReference.__call__)
+ Macro._invoke = wrap_macro_invoke(Macro._invoke)
+
+
+def patch_filters():
+ from jinja2.filters import FILTERS
+ from jinja2.asyncfilters import ASYNC_FILTERS
+ FILTERS.update(ASYNC_FILTERS)
+
+
+def patch_all():
+ patch_template()
+ patch_runtime()
+ patch_filters()
+
+
+async def auto_await(value):
+ if inspect.isawaitable(value):
+ return await value
+ return value
+
+
+async def auto_aiter(iterable):
+ if hasattr(iterable, '__aiter__'):
+ async for item in iterable:
+ yield item
+ return
+ for item in iterable:
+ yield item
+
+
+class AsyncLoopContext(LoopContextBase):
+
+ def __init__(self, async_iterator, undefined, after, length, recurse=None,
+ depth0=0):
+ LoopContextBase.__init__(self, undefined, recurse, depth0)
+ self._async_iterator = async_iterator
+ self._after = after
+ self._length = length
+
+ @property
+ def length(self):
+ if self._length is None:
+ raise TypeError('Loop length for some iterators cannot be '
+ 'lazily calculated in async mode')
+ return self._length
+
+ def __aiter__(self):
+ return AsyncLoopContextIterator(self)
+
+
+class AsyncLoopContextIterator(object):
+ __slots__ = ('context',)
+
+ def __init__(self, context):
+ self.context = context
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ ctx = self.context
+ ctx.index0 += 1
+ if ctx._after is _last_iteration:
+ raise StopAsyncIteration()
+ ctx._before = ctx._current
+ ctx._current = ctx._after
+ try:
+ ctx._after = await ctx._async_iterator.__anext__()
+ except StopAsyncIteration:
+ ctx._after = _last_iteration
+ return ctx._current, ctx
+
+
+async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
+ # Length is more complicated and less efficient in async mode. The
+ # reason for this is that we cannot know if length will be used
+ # upfront but because length is a property we cannot lazily execute it
+ # later. This means that we need to buffer it up and measure :(
+ #
+ # We however only do this for actual iterators, not for async
+ # iterators as blocking here does not seem like the best idea in the
+ # world.
+ try:
+ length = len(iterable)
+ except (TypeError, AttributeError):
+ if not hasattr(iterable, '__aiter__'):
+ iterable = tuple(iterable)
+ length = len(iterable)
+ else:
+ length = None
+ async_iterator = auto_aiter(iterable)
+ try:
+ after = await async_iterator.__anext__()
+ except StopAsyncIteration:
+ after = _last_iteration
+ return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
+ depth0)
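
`auto_await` and `auto_aiter` are the two small bridges that let the same
generated template code consume plain values/iterables and
awaitables/async iterables alike. A self-contained sketch for Python 3.6+
(only these two helpers are Jinja2 API; everything else is made up)::

    import asyncio
    from jinja2.asyncsupport import auto_aiter, auto_await

    async def demo():
        async def agen():
            yield 'a'
            yield 'b'
        collected = []
        # a plain list and an async generator are drained the same way
        for source in ([1, 2], agen()):
            async for item in auto_aiter(source):
                collected.append(item)
        # plain values pass through, awaitables are awaited
        collected.append(await auto_await(3))
        collected.append(await auto_await(asyncio.sleep(0, result=4)))
        return collected

    print(asyncio.get_event_loop().run_until_complete(demo()))
    # [1, 2, 'a', 'b', 3, 4]
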
diff --git a/python/jinja2/bccache.py b/python/jinja2/bccache.py
new file mode 100644
index 0000000..080e527
--- /dev/null
+++ b/python/jinja2/bccache.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.bccache
+ ~~~~~~~~~~~~~~
+
+ This module implements the bytecode cache system Jinja is optionally
+ using. This is useful if you have very complex template situations and
+    the compilation of all those templates slows down your application too
+    much.
+
+ Situations where this is useful are often forking web applications that
+ are initialized on the first request.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+from os import path, listdir
+import os
+import sys
+import stat
+import errno
+import marshal
+import tempfile
+import fnmatch
+from hashlib import sha1
+from jinja2.utils import open_if_exists
+from jinja2._compat import BytesIO, pickle, PY2, text_type
+
+
+# marshal works better on 3.x, one hack less required
+if not PY2:
+ marshal_dump = marshal.dump
+ marshal_load = marshal.load
+else:
+
+ def marshal_dump(code, f):
+ if isinstance(f, file):
+ marshal.dump(code, f)
+ else:
+ f.write(marshal.dumps(code))
+
+ def marshal_load(f):
+ if isinstance(f, file):
+ return marshal.load(f)
+ return marshal.loads(f.read())
+
+
+bc_version = 3
+
+# magic version used to only change with new jinja versions. With 2.6
+# we change this to also take Python version changes into account. The
+# reason for this is that Python tends to segfault if fed earlier bytecode
+# versions because someone thought it would be a good idea to reuse opcodes
+# or make Python incompatible with earlier versions.
+bc_magic = 'j2'.encode('ascii') + \
+ pickle.dumps(bc_version, 2) + \
+ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
+
+
+class Bucket(object):
+ """Buckets are used to store the bytecode for one template. It's created
+ and initialized by the bytecode cache and passed to the loading functions.
+
+ The buckets get an internal checksum from the cache assigned and use this
+ to automatically reject outdated cache material. Individual bytecode
+ cache subclasses don't have to care about cache invalidation.
+ """
+
+ def __init__(self, environment, key, checksum):
+ self.environment = environment
+ self.key = key
+ self.checksum = checksum
+ self.reset()
+
+ def reset(self):
+ """Resets the bucket (unloads the bytecode)."""
+ self.code = None
+
+ def load_bytecode(self, f):
+ """Loads bytecode from a file or file like object."""
+ # make sure the magic header is correct
+ magic = f.read(len(bc_magic))
+ if magic != bc_magic:
+ self.reset()
+ return
+ # the source code of the file changed, we need to reload
+ checksum = pickle.load(f)
+ if self.checksum != checksum:
+ self.reset()
+ return
+ # if marshal_load fails then we need to reload
+ try:
+ self.code = marshal_load(f)
+ except (EOFError, ValueError, TypeError):
+ self.reset()
+ return
+
+ def write_bytecode(self, f):
+ """Dump the bytecode into the file or file like object passed."""
+ if self.code is None:
+ raise TypeError('can\'t write empty bucket')
+ f.write(bc_magic)
+ pickle.dump(self.checksum, f, 2)
+ marshal_dump(self.code, f)
+
+ def bytecode_from_string(self, string):
+ """Load bytecode from a string."""
+ self.load_bytecode(BytesIO(string))
+
+ def bytecode_to_string(self):
+ """Return the bytecode as string."""
+ out = BytesIO()
+ self.write_bytecode(out)
+ return out.getvalue()
+
+
+class BytecodeCache(object):
+ """To implement your own bytecode cache you have to subclass this class
+ and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
+ these methods are passed a :class:`~jinja2.bccache.Bucket`.
+
+ A very basic bytecode cache that saves the bytecode on the file system::
+
+ from os import path
+
+ class MyCache(BytecodeCache):
+
+ def __init__(self, directory):
+ self.directory = directory
+
+ def load_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ if path.exists(filename):
+ with open(filename, 'rb') as f:
+ bucket.load_bytecode(f)
+
+ def dump_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ with open(filename, 'wb') as f:
+ bucket.write_bytecode(f)
+
+ A more advanced version of a filesystem based bytecode cache is part of
+ Jinja2.
+ """
+
+ def load_bytecode(self, bucket):
+ """Subclasses have to override this method to load bytecode into a
+        bucket. If no code can be found in the cache for the bucket,
+        the method must not do anything.
+ """
+ raise NotImplementedError()
+
+ def dump_bytecode(self, bucket):
+ """Subclasses have to override this method to write the bytecode
+        from a bucket back to the cache. If it is unable to do so it must
+        not fail silently but raise an exception.
+ """
+ raise NotImplementedError()
+
+ def clear(self):
+ """Clears the cache. This method is not used by Jinja2 but should be
+ implemented to allow applications to clear the bytecode cache used
+ by a particular environment.
+ """
+
+ def get_cache_key(self, name, filename=None):
+ """Returns the unique hash key for this template name."""
+ hash = sha1(name.encode('utf-8'))
+ if filename is not None:
+ filename = '|' + filename
+ if isinstance(filename, text_type):
+ filename = filename.encode('utf-8')
+ hash.update(filename)
+ return hash.hexdigest()
+
+ def get_source_checksum(self, source):
+ """Returns a checksum for the source."""
+ return sha1(source.encode('utf-8')).hexdigest()
+
+ def get_bucket(self, environment, name, filename, source):
+ """Return a cache bucket for the given template. All arguments are
+ mandatory but filename may be `None`.
+ """
+ key = self.get_cache_key(name, filename)
+ checksum = self.get_source_checksum(source)
+ bucket = Bucket(environment, key, checksum)
+ self.load_bytecode(bucket)
+ return bucket
+
+ def set_bucket(self, bucket):
+ """Put the bucket into the cache."""
+ self.dump_bytecode(bucket)
+
+
+class FileSystemBytecodeCache(BytecodeCache):
+ """A bytecode cache that stores bytecode on the filesystem. It accepts
+ two arguments: The directory where the cache items are stored and a
+ pattern string that is used to build the filename.
+
+ If no directory is specified a default cache directory is selected. On
+ Windows the user's temp directory is used, on UNIX systems a directory
+ is created for the user in the system temp directory.
+
+ The pattern can be used to have multiple separate caches operate on the
+ same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
+ is replaced with the cache key.
+
+ >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
+
+ This bytecode cache supports clearing of the cache using the clear method.
+ """
+
+ def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
+ if directory is None:
+ directory = self._get_default_cache_dir()
+ self.directory = directory
+ self.pattern = pattern
+
+ def _get_default_cache_dir(self):
+ def _unsafe_dir():
+ raise RuntimeError('Cannot determine safe temp directory. You '
+ 'need to explicitly provide one.')
+
+ tmpdir = tempfile.gettempdir()
+
+        # On Windows the temporary directory is user-specific unless
+        # explicitly forced otherwise. We can just use that.
+ if os.name == 'nt':
+ return tmpdir
+ if not hasattr(os, 'getuid'):
+ _unsafe_dir()
+
+ dirname = '_jinja2-cache-%d' % os.getuid()
+ actual_dir = os.path.join(tmpdir, dirname)
+
+ try:
+ os.mkdir(actual_dir, stat.S_IRWXU)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ try:
+ os.chmod(actual_dir, stat.S_IRWXU)
+ actual_dir_stat = os.lstat(actual_dir)
+ if actual_dir_stat.st_uid != os.getuid() \
+ or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ _unsafe_dir()
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ actual_dir_stat = os.lstat(actual_dir)
+ if actual_dir_stat.st_uid != os.getuid() \
+ or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ _unsafe_dir()
+
+ return actual_dir
+
+ def _get_cache_filename(self, bucket):
+ return path.join(self.directory, self.pattern % bucket.key)
+
+ def load_bytecode(self, bucket):
+ f = open_if_exists(self._get_cache_filename(bucket), 'rb')
+ if f is not None:
+ try:
+ bucket.load_bytecode(f)
+ finally:
+ f.close()
+
+ def dump_bytecode(self, bucket):
+ f = open(self._get_cache_filename(bucket), 'wb')
+ try:
+ bucket.write_bytecode(f)
+ finally:
+ f.close()
+
+ def clear(self):
+ # imported lazily here because google app-engine doesn't support
+ # write access on the file system and the function does not exist
+ # normally.
+ from os import remove
+ files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
+ for filename in files:
+ try:
+ remove(path.join(self.directory, filename))
+ except OSError:
+ pass
+
+
+class MemcachedBytecodeCache(BytecodeCache):
+ """This class implements a bytecode cache that uses a memcache cache for
+ storing the information. It does not enforce a specific memcache library
+ (tummy's memcache or cmemcache) but will accept any class that provides
+ the minimal interface required.
+
+ Libraries compatible with this class:
+
+ - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
+ - `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
+ - `cmemcache <http://gijsbert.org/cmemcache/>`_
+
+ (Unfortunately the django cache interface is not compatible because it
+ does not support storing binary data, only unicode. You can however pass
+ the underlying cache client to the bytecode cache which is available
+ as `django.core.cache.cache._client`.)
+
+ The minimal interface for the client passed to the constructor is this:
+
+ .. class:: MinimalClientInterface
+
+ .. method:: set(key, value[, timeout])
+
+ Stores the bytecode in the cache. `value` is a string and
+ `timeout` the timeout of the key. If timeout is not provided
+            a default timeout or no timeout should be assumed; if it's
+            provided, it's an integer with the number of seconds the cache
+ item should exist.
+
+ .. method:: get(key)
+
+ Returns the value for the cache key. If the item does not
+ exist in the cache the return value must be `None`.
+
+ The other arguments to the constructor are the prefix for all keys that
+ is added before the actual cache key and the timeout for the bytecode in
+ the cache system. We recommend a high (or no) timeout.
+
+ This bytecode cache does not support clearing of used items in the cache.
+ The clear method is a no-operation function.
+
+ .. versionadded:: 2.7
+ Added support for ignoring memcache errors through the
+ `ignore_memcache_errors` parameter.
+ """
+
+ def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
+ ignore_memcache_errors=True):
+ self.client = client
+ self.prefix = prefix
+ self.timeout = timeout
+ self.ignore_memcache_errors = ignore_memcache_errors
+
+ def load_bytecode(self, bucket):
+ try:
+ code = self.client.get(self.prefix + bucket.key)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
+ code = None
+ if code is not None:
+ bucket.bytecode_from_string(code)
+
+ def dump_bytecode(self, bucket):
+ args = (self.prefix + bucket.key, bucket.bytecode_to_string())
+ if self.timeout is not None:
+ args += (self.timeout,)
+ try:
+ self.client.set(*args)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
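
Both cache classes are normally wired into an environment through its
`bytecode_cache` argument; the environment then calls `get_bucket` and
`set_bucket` around template compilation. A minimal sketch, assuming the
loader directory and the cache directory already exist (names are made up)::

    from jinja2 import Environment, FileSystemLoader, FileSystemBytecodeCache

    env = Environment(
        loader=FileSystemLoader('templates'),
        bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'),
    )
    # the first call compiles page.html and writes __jinja2_<key>.cache;
    # later calls (even from a fresh process) load the marshalled code
    # object instead of recompiling
    env.get_template('page.html')
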
diff --git a/python/jinja2/compiler.py b/python/jinja2/compiler.py
new file mode 100644
index 0000000..d534a82
--- /dev/null
+++ b/python/jinja2/compiler.py
@@ -0,0 +1,1721 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.compiler
+ ~~~~~~~~~~~~~~~
+
+ Compiles nodes into python code.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from itertools import chain
+from copy import deepcopy
+from keyword import iskeyword as is_python_keyword
+from functools import update_wrapper
+from jinja2 import nodes
+from jinja2.nodes import EvalContext
+from jinja2.visitor import NodeVisitor
+from jinja2.optimizer import Optimizer
+from jinja2.exceptions import TemplateAssertionError
+from jinja2.utils import Markup, concat, escape
+from jinja2._compat import range_type, text_type, string_types, \
+ iteritems, NativeStringIO, imap, izip
+from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
+ VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
+
+
+operators = {
+ 'eq': '==',
+ 'ne': '!=',
+ 'gt': '>',
+ 'gteq': '>=',
+ 'lt': '<',
+ 'lteq': '<=',
+ 'in': 'in',
+ 'notin': 'not in'
+}
+
+# which method do we want to use for dict iteration in generated code?
+# on 2.x let's go with iteritems, on 3.x with items
+if hasattr(dict, 'iteritems'):
+ dict_item_iter = 'iteritems'
+else:
+ dict_item_iter = 'items'
+
+code_features = ['division']
+
+# does this python version support generator stops? (PEP 0479)
+try:
+ exec('from __future__ import generator_stop')
+ code_features.append('generator_stop')
+except SyntaxError:
+ pass
+
+# does this python version support yield from?
+try:
+ exec('def f(): yield from x()')
+except SyntaxError:
+ supports_yield_from = False
+else:
+ supports_yield_from = True
+
+
+def optimizeconst(f):
+ def new_func(self, node, frame, **kwargs):
+ # Only optimize if the frame is not volatile
+ if self.optimized and not frame.eval_ctx.volatile:
+ new_node = self.optimizer.visit(node, frame.eval_ctx)
+ if new_node != node:
+ return self.visit(new_node, frame)
+ return f(self, node, frame, **kwargs)
+ return update_wrapper(new_func, f)
+
+
+def generate(node, environment, name, filename, stream=None,
+ defer_init=False, optimized=True):
+ """Generate the python source for a node tree."""
+ if not isinstance(node, nodes.Template):
+ raise TypeError('Can\'t compile non template nodes')
+ generator = environment.code_generator_class(environment, name, filename,
+ stream, defer_init,
+ optimized)
+ generator.visit(node)
+ if stream is None:
+ return generator.stream.getvalue()
+
+
+def has_safe_repr(value):
+ """Does the node have a safe representation?"""
+ if value is None or value is NotImplemented or value is Ellipsis:
+ return True
+ if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
+ return True
+ if type(value) in (tuple, list, set, frozenset):
+ for item in value:
+ if not has_safe_repr(item):
+ return False
+ return True
+ elif type(value) is dict:
+ for key, value in iteritems(value):
+ if not has_safe_repr(key):
+ return False
+ if not has_safe_repr(value):
+ return False
+ return True
+ return False
+
+
+def find_undeclared(nodes, names):
+ """Check if the names passed are accessed undeclared. The return value
+ is a set of all the undeclared names from the sequence of names found.
+ """
+ visitor = UndeclaredNameVisitor(names)
+ try:
+ for node in nodes:
+ visitor.visit(node)
+ except VisitorExit:
+ pass
+ return visitor.undeclared
+
+
+class MacroRef(object):
+
+ def __init__(self, node):
+ self.node = node
+ self.accesses_caller = False
+ self.accesses_kwargs = False
+ self.accesses_varargs = False
+
+
+class Frame(object):
+ """Holds compile time information for us."""
+
+ def __init__(self, eval_ctx, parent=None, level=None):
+ self.eval_ctx = eval_ctx
+ self.symbols = Symbols(parent and parent.symbols or None,
+ level=level)
+
+ # a toplevel frame is the root + soft frames such as if conditions.
+ self.toplevel = False
+
+ # the root frame is basically just the outermost frame, so no if
+ # conditions. This information is used to optimize inheritance
+ # situations.
+ self.rootlevel = False
+
+ # in some dynamic inheritance situations the compiler needs to add
+ # write tests around output statements.
+ self.require_output_check = parent and parent.require_output_check
+
+ # inside some tags we are using a buffer rather than yield statements.
+ # this for example affects {% filter %} or {% macro %}. If a frame
+ # is buffered this variable points to the name of the list used as
+ # buffer.
+ self.buffer = None
+
+ # the name of the block we're in, otherwise None.
+ self.block = parent and parent.block or None
+
+ # the parent of this frame
+ self.parent = parent
+
+ if parent is not None:
+ self.buffer = parent.buffer
+
+ def copy(self):
+ """Create a copy of the current one."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.symbols = self.symbols.copy()
+ return rv
+
+ def inner(self, isolated=False):
+ """Return an inner frame."""
+ if isolated:
+ return Frame(self.eval_ctx, level=self.symbols.level + 1)
+ return Frame(self.eval_ctx, self)
+
+ def soft(self):
+ """Return a soft frame. A soft frame may not be modified as
+ standalone thing as it shares the resources with the frame it
+ was created of, but it's not a rootlevel frame any longer.
+
+ This is only used to implement if-statements.
+ """
+ rv = self.copy()
+ rv.rootlevel = False
+ return rv
+
+ __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+ """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+ """A visitor that collects filter and test calls."""
+
+ def __init__(self):
+ self.filters = set()
+ self.tests = set()
+
+ def visit_Filter(self, node):
+ self.generic_visit(node)
+ self.filters.add(node.name)
+
+ def visit_Test(self, node):
+ self.generic_visit(node)
+ self.tests.add(node.name)
+
+ def visit_Block(self, node):
+ """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+ """A visitor that checks if a name is accessed without being
+ declared. This is different from the frame visitor as it will
+ not stop at closure frames.
+ """
+
+ def __init__(self, names):
+ self.names = set(names)
+ self.undeclared = set()
+
+ def visit_Name(self, node):
+ if node.ctx == 'load' and node.name in self.names:
+ self.undeclared.add(node.name)
+ if self.undeclared == self.names:
+ raise VisitorExit()
+ else:
+ self.names.discard(node.name)
+
+ def visit_Block(self, node):
+ """Stop visiting a blocks."""
+
+
+class CompilerExit(Exception):
+ """Raised if the compiler encountered a situation where it just
+ doesn't make sense to further process the code. Any block that
+ raises such an exception is not further processed.
+ """
+
+
+class CodeGenerator(NodeVisitor):
+
+ def __init__(self, environment, name, filename, stream=None,
+ defer_init=False, optimized=True):
+ if stream is None:
+ stream = NativeStringIO()
+ self.environment = environment
+ self.name = name
+ self.filename = filename
+ self.stream = stream
+ self.created_block_context = False
+ self.defer_init = defer_init
+ self.optimized = optimized
+ if optimized:
+ self.optimizer = Optimizer(environment)
+
+ # aliases for imports
+ self.import_aliases = {}
+
+ # a registry for all blocks. Because blocks are moved out
+ # into the global python scope they are registered here
+ self.blocks = {}
+
+ # the number of extends statements so far
+ self.extends_so_far = 0
+
+ # some templates have a rootlevel extends. In this case we
+ # can safely assume that we're a child template and do some
+ # more optimizations.
+ self.has_known_extends = False
+
+ # the current line number
+ self.code_lineno = 1
+
+ # registry of all filters and tests (global, not block local)
+ self.tests = {}
+ self.filters = {}
+
+ # the debug information
+ self.debug_info = []
+ self._write_debug_info = None
+
+ # the number of new lines before the next write()
+ self._new_lines = 0
+
+ # the line number of the last written statement
+ self._last_line = 0
+
+ # true if nothing was written so far.
+ self._first_write = True
+
+ # used by the `temporary_identifier` method to get new
+ # unique, temporary identifier
+ self._last_identifier = 0
+
+ # the current indentation
+ self._indentation = 0
+
+ # Tracks toplevel assignments
+ self._assign_stack = []
+
+ # Tracks parameter definition blocks
+ self._param_def_block = []
+
+ # Tracks the current context.
+ self._context_reference_stack = ['context']
+
+ # -- Various compilation helpers
+
+ def fail(self, msg, lineno):
+ """Fail with a :exc:`TemplateAssertionError`."""
+ raise TemplateAssertionError(msg, lineno, self.name, self.filename)
+
+ def temporary_identifier(self):
+ """Get a new unique identifier."""
+ self._last_identifier += 1
+ return 't_%d' % self._last_identifier
+
+ def buffer(self, frame):
+ """Enable buffering for the frame from that point onwards."""
+ frame.buffer = self.temporary_identifier()
+ self.writeline('%s = []' % frame.buffer)
+
+ def return_buffer_contents(self, frame, force_unescaped=False):
+ """Return the buffer contents of the frame."""
+ if not force_unescaped:
+ if frame.eval_ctx.volatile:
+ self.writeline('if context.eval_ctx.autoescape:')
+ self.indent()
+ self.writeline('return Markup(concat(%s))' % frame.buffer)
+ self.outdent()
+ self.writeline('else:')
+ self.indent()
+ self.writeline('return concat(%s)' % frame.buffer)
+ self.outdent()
+ return
+ elif frame.eval_ctx.autoescape:
+ self.writeline('return Markup(concat(%s))' % frame.buffer)
+ return
+ self.writeline('return concat(%s)' % frame.buffer)
+
+ def indent(self):
+ """Indent by one."""
+ self._indentation += 1
+
+ def outdent(self, step=1):
+ """Outdent by step."""
+ self._indentation -= step
+
+ def start_write(self, frame, node=None):
+ """Yield or write into the frame buffer."""
+ if frame.buffer is None:
+ self.writeline('yield ', node)
+ else:
+ self.writeline('%s.append(' % frame.buffer, node)
+
+ def end_write(self, frame):
+ """End the writing process started by `start_write`."""
+ if frame.buffer is not None:
+ self.write(')')
+
+ def simple_write(self, s, frame, node=None):
+ """Simple shortcut for start_write + write + end_write."""
+ self.start_write(frame, node)
+ self.write(s)
+ self.end_write(frame)
+
+ def blockvisit(self, nodes, frame):
+ """Visit a list of nodes as block in a frame. If the current frame
+ is no buffer a dummy ``if 0: yield None`` is written automatically.
+ """
+ try:
+ self.writeline('pass')
+ for node in nodes:
+ self.visit(node, frame)
+ except CompilerExit:
+ pass
+
+ def write(self, x):
+ """Write a string into the output stream."""
+ if self._new_lines:
+ if not self._first_write:
+ self.stream.write('\n' * self._new_lines)
+ self.code_lineno += self._new_lines
+ if self._write_debug_info is not None:
+ self.debug_info.append((self._write_debug_info,
+ self.code_lineno))
+ self._write_debug_info = None
+ self._first_write = False
+ self.stream.write(' ' * self._indentation)
+ self._new_lines = 0
+ self.stream.write(x)
+
+ def writeline(self, x, node=None, extra=0):
+ """Combination of newline and write."""
+ self.newline(node, extra)
+ self.write(x)
+
+ def newline(self, node=None, extra=0):
+ """Add one or more newlines before the next write."""
+ self._new_lines = max(self._new_lines, 1 + extra)
+ if node is not None and node.lineno != self._last_line:
+ self._write_debug_info = node.lineno
+ self._last_line = node.lineno
+
+ def signature(self, node, frame, extra_kwargs=None):
+ """Writes a function call to the stream for the current node.
+ A leading comma is added automatically. The extra keyword
+ arguments may not include python keywords otherwise a syntax
+ error could occour. The extra keyword arguments should be given
+ as python dict.
+ """
+ # if any of the given keyword arguments is a python keyword
+ # we have to make sure that no invalid call is created.
+ kwarg_workaround = False
+ for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
+ if is_python_keyword(kwarg):
+ kwarg_workaround = True
+ break
+
+ for arg in node.args:
+ self.write(', ')
+ self.visit(arg, frame)
+
+ if not kwarg_workaround:
+ for kwarg in node.kwargs:
+ self.write(', ')
+ self.visit(kwarg, frame)
+ if extra_kwargs is not None:
+ for key, value in iteritems(extra_kwargs):
+ self.write(', %s=%s' % (key, value))
+ if node.dyn_args:
+ self.write(', *')
+ self.visit(node.dyn_args, frame)
+
+ if kwarg_workaround:
+ if node.dyn_kwargs is not None:
+ self.write(', **dict({')
+ else:
+ self.write(', **{')
+ for kwarg in node.kwargs:
+ self.write('%r: ' % kwarg.key)
+ self.visit(kwarg.value, frame)
+ self.write(', ')
+ if extra_kwargs is not None:
+ for key, value in iteritems(extra_kwargs):
+ self.write('%r: %s, ' % (key, value))
+ if node.dyn_kwargs is not None:
+ self.write('}, **')
+ self.visit(node.dyn_kwargs, frame)
+ self.write(')')
+ else:
+ self.write('}')
+
+ elif node.dyn_kwargs is not None:
+ self.write(', **')
+ self.visit(node.dyn_kwargs, frame)
+
+ def pull_dependencies(self, nodes):
+ """Pull all the dependencies."""
+ visitor = DependencyFinderVisitor()
+ for node in nodes:
+ visitor.visit(node)
+ for dependency in 'filters', 'tests':
+ mapping = getattr(self, dependency)
+ for name in getattr(visitor, dependency):
+ if name not in mapping:
+ mapping[name] = self.temporary_identifier()
+ self.writeline('%s = environment.%s[%r]' %
+ (mapping[name], dependency, name))
+
+ def enter_frame(self, frame):
+ undefs = []
+ for target, (action, param) in iteritems(frame.symbols.loads):
+ if action == VAR_LOAD_PARAMETER:
+ pass
+ elif action == VAR_LOAD_RESOLVE:
+ self.writeline('%s = %s(%r)' %
+ (target, self.get_resolve_func(), param))
+ elif action == VAR_LOAD_ALIAS:
+ self.writeline('%s = %s' % (target, param))
+ elif action == VAR_LOAD_UNDEFINED:
+ undefs.append(target)
+ else:
+ raise NotImplementedError('unknown load instruction')
+ if undefs:
+ self.writeline('%s = missing' % ' = '.join(undefs))
+
+ def leave_frame(self, frame, with_python_scope=False):
+ if not with_python_scope:
+ undefs = []
+ for target, _ in iteritems(frame.symbols.loads):
+ undefs.append(target)
+ if undefs:
+ self.writeline('%s = missing' % ' = '.join(undefs))
+
+ def func(self, name):
+ if self.environment.is_async:
+ return 'async def %s' % name
+ return 'def %s' % name
+
+ def macro_body(self, node, frame):
+ """Dump the function def of a macro or call block."""
+ frame = frame.inner()
+ frame.symbols.analyze_node(node)
+ macro_ref = MacroRef(node)
+
+ explicit_caller = None
+ skip_special_params = set()
+ args = []
+ for idx, arg in enumerate(node.args):
+ if arg.name == 'caller':
+ explicit_caller = idx
+ if arg.name in ('kwargs', 'varargs'):
+ skip_special_params.add(arg.name)
+ args.append(frame.symbols.ref(arg.name))
+
+ undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
+
+ if 'caller' in undeclared:
+ # In older Jinja2 versions there was a bug that allowed caller
+ # to retain the special behavior even if it was mentioned in
+ # the argument list. However thankfully this was only really
+ # working if it was the last argument. So we are explicitly
+ # checking this now and error out if it is anywhere else in
+ # the argument list.
+ if explicit_caller is not None:
+ try:
+ node.defaults[explicit_caller - len(node.args)]
+ except IndexError:
+ self.fail('When defining macros or call blocks the '
+ 'special "caller" argument must be omitted '
+ 'or be given a default.', node.lineno)
+ else:
+ args.append(frame.symbols.declare_parameter('caller'))
+ macro_ref.accesses_caller = True
+ if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
+ args.append(frame.symbols.declare_parameter('kwargs'))
+ macro_ref.accesses_kwargs = True
+ if 'varargs' in undeclared and not 'varargs' in skip_special_params:
+ args.append(frame.symbols.declare_parameter('varargs'))
+ macro_ref.accesses_varargs = True
+
+ # macros are delayed, they never require output checks
+ frame.require_output_check = False
+ frame.symbols.analyze_node(node)
+ self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
+ self.indent()
+
+ self.buffer(frame)
+ self.enter_frame(frame)
+
+ self.push_parameter_definitions(frame)
+ for idx, arg in enumerate(node.args):
+ ref = frame.symbols.ref(arg.name)
+ self.writeline('if %s is missing:' % ref)
+ self.indent()
+ try:
+ default = node.defaults[idx - len(node.args)]
+ except IndexError:
+ self.writeline('%s = undefined(%r, name=%r)' % (
+ ref,
+ 'parameter %r was not provided' % arg.name,
+ arg.name))
+ else:
+ self.writeline('%s = ' % ref)
+ self.visit(default, frame)
+ self.mark_parameter_stored(ref)
+ self.outdent()
+ self.pop_parameter_definitions()
+
+ self.blockvisit(node.body, frame)
+ self.return_buffer_contents(frame, force_unescaped=True)
+ self.leave_frame(frame, with_python_scope=True)
+ self.outdent()
+
+ return frame, macro_ref
+
+ def macro_def(self, macro_ref, frame):
+ """Dump the macro definition for the def created by macro_body."""
+ arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
+ name = getattr(macro_ref.node, 'name', None)
+ if len(macro_ref.node.args) == 1:
+ arg_tuple += ','
+ self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
+ 'context.eval_ctx.autoescape)' %
+ (name, arg_tuple, macro_ref.accesses_kwargs,
+ macro_ref.accesses_varargs, macro_ref.accesses_caller))
+
+ def position(self, node):
+ """Return a human readable position for the node."""
+ rv = 'line %d' % node.lineno
+ if self.name is not None:
+ rv += ' in ' + repr(self.name)
+ return rv
+
+ def dump_local_context(self, frame):
+ return '{%s}' % ', '.join(
+ '%r: %s' % (name, target) for name, target
+ in iteritems(frame.symbols.dump_stores()))
+
+ def write_commons(self):
+ """Writes a common preamble that is used by root and block functions.
+ Primarily this sets up common local helpers and enforces a generator
+ through a dead branch.
+ """
+ self.writeline('resolve = context.resolve_or_missing')
+ self.writeline('undefined = environment.undefined')
+ self.writeline('if 0: yield None')
+
+ def push_parameter_definitions(self, frame):
+ """Pushes all parameter targets from the given frame into a local
+ stack that permits tracking of yet to be assigned parameters. In
+ particular this enables the optimization from `visit_Name` to skip
+ undefined expressions for parameters in macros as macros can reference
+ otherwise unbound parameters.
+ """
+ self._param_def_block.append(frame.symbols.dump_param_targets())
+
+ def pop_parameter_definitions(self):
+ """Pops the current parameter definitions set."""
+ self._param_def_block.pop()
+
+ def mark_parameter_stored(self, target):
+ """Marks a parameter in the current parameter definitions as stored.
+ This will skip the enforced undefined checks.
+ """
+ if self._param_def_block:
+ self._param_def_block[-1].discard(target)
+
+ def push_context_reference(self, target):
+ self._context_reference_stack.append(target)
+
+ def pop_context_reference(self):
+ self._context_reference_stack.pop()
+
+ def get_context_ref(self):
+ return self._context_reference_stack[-1]
+
+ def get_resolve_func(self):
+ target = self._context_reference_stack[-1]
+ if target == 'context':
+ return 'resolve'
+ return '%s.resolve' % target
+
+ def derive_context(self, frame):
+ return '%s.derived(%s)' % (
+ self.get_context_ref(),
+ self.dump_local_context(frame),
+ )
+
+ def parameter_is_undeclared(self, target):
+ """Checks if a given target is an undeclared parameter."""
+ if not self._param_def_block:
+ return False
+ return target in self._param_def_block[-1]
+
+ def push_assign_tracking(self):
+ """Pushes a new layer for assignment tracking."""
+ self._assign_stack.append(set())
+
+ def pop_assign_tracking(self, frame):
+ """Pops the topmost level for assignment tracking and updates the
+ context variables if necessary.
+ """
+ vars = self._assign_stack.pop()
+ if not frame.toplevel or not vars:
+ return
+ public_names = [x for x in vars if x[:1] != '_']
+ if len(vars) == 1:
+ name = next(iter(vars))
+ ref = frame.symbols.ref(name)
+ self.writeline('context.vars[%r] = %s' % (name, ref))
+ else:
+ self.writeline('context.vars.update({')
+ for idx, name in enumerate(vars):
+ if idx:
+ self.write(', ')
+ ref = frame.symbols.ref(name)
+ self.write('%r: %s' % (name, ref))
+ self.write('})')
+ if public_names:
+ if len(public_names) == 1:
+ self.writeline('context.exported_vars.add(%r)' %
+ public_names[0])
+ else:
+ self.writeline('context.exported_vars.update((%s))' %
+ ', '.join(imap(repr, public_names)))
+
+ # -- Statement Visitors
+
+ def visit_Template(self, node, frame=None):
+ assert frame is None, 'no root frame allowed'
+ eval_ctx = EvalContext(self.environment, self.name)
+
+ from jinja2.runtime import __all__ as exported
+ self.writeline('from __future__ import %s' % ', '.join(code_features))
+ self.writeline('from jinja2.runtime import ' + ', '.join(exported))
+
+ if self.environment.is_async:
+ self.writeline('from jinja2.asyncsupport import auto_await, '
+ 'auto_aiter, make_async_loop_context')
+
+ # if we want a deferred initialization we cannot move the
+ # environment into a local name
+ envenv = not self.defer_init and ', environment=environment' or ''
+
+ # do we have an extends tag at all? If not, we can save some
+ # overhead by just not processing any inheritance code.
+ have_extends = node.find(nodes.Extends) is not None
+
+ # find all blocks
+ for block in node.find_all(nodes.Block):
+ if block.name in self.blocks:
+ self.fail('block %r defined twice' % block.name, block.lineno)
+ self.blocks[block.name] = block
+
+ # find all imports and import them
+ for import_ in node.find_all(nodes.ImportedName):
+ if import_.importname not in self.import_aliases:
+ imp = import_.importname
+ self.import_aliases[imp] = alias = self.temporary_identifier()
+ if '.' in imp:
+ module, obj = imp.rsplit('.', 1)
+ self.writeline('from %s import %s as %s' %
+ (module, obj, alias))
+ else:
+ self.writeline('import %s as %s' % (imp, alias))
+
+ # add the load name
+ self.writeline('name = %r' % self.name)
+
+ # generate the root render function.
+ self.writeline('%s(context, missing=missing%s):' %
+ (self.func('root'), envenv), extra=1)
+ self.indent()
+ self.write_commons()
+
+ # process the root
+ frame = Frame(eval_ctx)
+ if 'self' in find_undeclared(node.body, ('self',)):
+ ref = frame.symbols.declare_parameter('self')
+ self.writeline('%s = TemplateReference(context)' % ref)
+ frame.symbols.analyze_node(node)
+ frame.toplevel = frame.rootlevel = True
+ frame.require_output_check = have_extends and not self.has_known_extends
+ if have_extends:
+ self.writeline('parent_template = None')
+ self.enter_frame(frame)
+ self.pull_dependencies(node.body)
+ self.blockvisit(node.body, frame)
+ self.leave_frame(frame, with_python_scope=True)
+ self.outdent()
+
+ # make sure that the parent root is called.
+ if have_extends:
+ if not self.has_known_extends:
+ self.indent()
+ self.writeline('if parent_template is not None:')
+ self.indent()
+ if supports_yield_from and not self.environment.is_async:
+ self.writeline('yield from parent_template.'
+ 'root_render_func(context)')
+ else:
+ self.writeline('%sfor event in parent_template.'
+ 'root_render_func(context):' %
+ (self.environment.is_async and 'async ' or ''))
+ self.indent()
+ self.writeline('yield event')
+ self.outdent()
+ self.outdent(1 + (not self.has_known_extends))
+
+ # at this point we now have the blocks collected and can visit them too.
+ for name, block in iteritems(self.blocks):
+ self.writeline('%s(context, missing=missing%s):' %
+ (self.func('block_' + name), envenv),
+ block, 1)
+ self.indent()
+ self.write_commons()
+ # It's important that we do not make this frame a child of the
+ # toplevel template. This would cause a variety of
+ # interesting issues with identifier tracking.
+ block_frame = Frame(eval_ctx)
+ undeclared = find_undeclared(block.body, ('self', 'super'))
+ if 'self' in undeclared:
+ ref = block_frame.symbols.declare_parameter('self')
+ self.writeline('%s = TemplateReference(context)' % ref)
+ if 'super' in undeclared:
+ ref = block_frame.symbols.declare_parameter('super')
+ self.writeline('%s = context.super(%r, '
+ 'block_%s)' % (ref, name, name))
+ block_frame.symbols.analyze_node(block)
+ block_frame.block = name
+ self.enter_frame(block_frame)
+ self.pull_dependencies(block.body)
+ self.blockvisit(block.body, block_frame)
+ self.leave_frame(block_frame, with_python_scope=True)
+ self.outdent()
+
+ self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
+ for x in self.blocks),
+ extra=1)
+
+ # add a function that returns the debug info
+ self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
+ in self.debug_info))
+
+ def visit_Block(self, node, frame):
+ """Call a block and register it for the template."""
+ level = 0
+ if frame.toplevel:
+ # if we know that we are a child template, there is no need to
+ # check if we are one
+ if self.has_known_extends:
+ return
+ if self.extends_so_far > 0:
+ self.writeline('if parent_template is None:')
+ self.indent()
+ level += 1
+
+ if node.scoped:
+ context = self.derive_context(frame)
+ else:
+ context = self.get_context_ref()
+
+ if supports_yield_from and not self.environment.is_async and \
+ frame.buffer is None:
+ self.writeline('yield from context.blocks[%r][0](%s)' % (
+ node.name, context), node)
+ else:
+ loop = self.environment.is_async and 'async for' or 'for'
+ self.writeline('%s event in context.blocks[%r][0](%s):' % (
+ loop, node.name, context), node)
+ self.indent()
+ self.simple_write('event', frame)
+ self.outdent()
+
+ self.outdent(level)
+
+ def visit_Extends(self, node, frame):
+ """Calls the extender."""
+ if not frame.toplevel:
+ self.fail('cannot use extend from a non top-level scope',
+ node.lineno)
+
+ # if the number of extends statements in general is zero so
+ # far, we don't have to add a check if something extended
+ # the template before this one.
+ if self.extends_so_far > 0:
+
+ # if we have a known extends we just add a template runtime
+ # error into the generated code. We could catch that at compile
+            # time too, but I prefer not to confuse users by throwing the
+ # same error at different times just "because we can".
+ if not self.has_known_extends:
+ self.writeline('if parent_template is not None:')
+ self.indent()
+ self.writeline('raise TemplateRuntimeError(%r)' %
+ 'extended multiple times')
+
+ # if we have a known extends already we don't need that code here
+ # as we know that the template execution will end here.
+ if self.has_known_extends:
+ raise CompilerExit()
+ else:
+ self.outdent()
+
+ self.writeline('parent_template = environment.get_template(', node)
+ self.visit(node.template, frame)
+ self.write(', %r)' % self.name)
+ self.writeline('for name, parent_block in parent_template.'
+ 'blocks.%s():' % dict_item_iter)
+ self.indent()
+ self.writeline('context.blocks.setdefault(name, []).'
+ 'append(parent_block)')
+ self.outdent()
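+ # (editor's sketch, not part of the original source) the generated code
+ # at this point roughly reads:
+ #   parent_template = environment.get_template(<template expr>, name)
+ #   for name, parent_block in parent_template.blocks.items():
+ #       context.blocks.setdefault(name, []).append(parent_block)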
+
+ # if this extends statement was in the root level we can take
+ # advantage of that information and simplify the generated code
+ # in the top level from this point onwards
+ if frame.rootlevel:
+ self.has_known_extends = True
+
+ # and now we have one more
+ self.extends_so_far += 1
+
+ def visit_Include(self, node, frame):
+ """Handles includes."""
+ if node.ignore_missing:
+ self.writeline('try:')
+ self.indent()
+
+ func_name = 'get_or_select_template'
+ if isinstance(node.template, nodes.Const):
+ if isinstance(node.template.value, string_types):
+ func_name = 'get_template'
+ elif isinstance(node.template.value, (tuple, list)):
+ func_name = 'select_template'
+ elif isinstance(node.template, (nodes.Tuple, nodes.List)):
+ func_name = 'select_template'
+
+ self.writeline('template = environment.%s(' % func_name, node)
+ self.visit(node.template, frame)
+ self.write(', %r)' % self.name)
+ if node.ignore_missing:
+ self.outdent()
+ self.writeline('except TemplateNotFound:')
+ self.indent()
+ self.writeline('pass')
+ self.outdent()
+ self.writeline('else:')
+ self.indent()
+
+ skip_event_yield = False
+ if node.with_context:
+ loop = self.environment.is_async and 'async for' or 'for'
+ self.writeline('%s event in template.root_render_func('
+ 'template.new_context(context.get_all(), True, '
+ '%s)):' % (loop, self.dump_local_context(frame)))
+ elif self.environment.is_async:
+ self.writeline('for event in (await '
+ 'template._get_default_module_async())'
+ '._body_stream:')
+ else:
+ if supports_yield_from:
+ self.writeline('yield from template._get_default_module()'
+ '._body_stream')
+ skip_event_yield = True
+ else:
+ self.writeline('for event in template._get_default_module()'
+ '._body_stream:')
+
+ if not skip_event_yield:
+ self.indent()
+ self.simple_write('event', frame)
+ self.outdent()
+
+ if node.ignore_missing:
+ self.outdent()
+
+ def visit_Import(self, node, frame):
+ """Visit regular imports."""
+ self.writeline('%s = ' % frame.symbols.ref(node.target), node)
+ if frame.toplevel:
+ self.write('context.vars[%r] = ' % node.target)
+ if self.environment.is_async:
+ self.write('await ')
+ self.write('environment.get_template(')
+ self.visit(node.template, frame)
+ self.write(', %r).' % self.name)
+ if node.with_context:
+ self.write('make_module%s(context.get_all(), True, %s)'
+ % (self.environment.is_async and '_async' or '',
+ self.dump_local_context(frame)))
+ elif self.environment.is_async:
+ self.write('_get_default_module_async()')
+ else:
+ self.write('_get_default_module()')
+ if frame.toplevel and not node.target.startswith('_'):
+ self.writeline('context.exported_vars.discard(%r)' % node.target)
+
+ def visit_FromImport(self, node, frame):
+ """Visit named imports."""
+ self.newline(node)
+ self.write('included_template = %senvironment.get_template('
+ % (self.environment.is_async and 'await ' or ''))
+ self.visit(node.template, frame)
+ self.write(', %r).' % self.name)
+ if node.with_context:
+ self.write('make_module%s(context.get_all(), True, %s)'
+ % (self.environment.is_async and '_async' or '',
+ self.dump_local_context(frame)))
+ elif self.environment.is_async:
+ self.write('_get_default_module_async()')
+ else:
+ self.write('_get_default_module()')
+
+ var_names = []
+ discarded_names = []
+ for name in node.names:
+ if isinstance(name, tuple):
+ name, alias = name
+ else:
+ alias = name
+ self.writeline('%s = getattr(included_template, '
+ '%r, missing)' % (frame.symbols.ref(alias), name))
+ self.writeline('if %s is missing:' % frame.symbols.ref(alias))
+ self.indent()
+ self.writeline('%s = undefined(%r %% '
+ 'included_template.__name__, '
+ 'name=%r)' %
+ (frame.symbols.ref(alias),
+ 'the template %%r (imported on %s) does '
+ 'not export the requested name %s' % (
+ self.position(node),
+ repr(name)
+ ), name))
+ self.outdent()
+ if frame.toplevel:
+ var_names.append(alias)
+ if not alias.startswith('_'):
+ discarded_names.append(alias)
+
+ if var_names:
+ if len(var_names) == 1:
+ name = var_names[0]
+ self.writeline('context.vars[%r] = %s' %
+ (name, frame.symbols.ref(name)))
+ else:
+ self.writeline('context.vars.update({%s})' % ', '.join(
+ '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
+ ))
+ if discarded_names:
+ if len(discarded_names) == 1:
+ self.writeline('context.exported_vars.discard(%r)' %
+ discarded_names[0])
+ else:
+ self.writeline('context.exported_vars.difference_'
+ 'update((%s))' % ', '.join(imap(repr, discarded_names)))
+
+ def visit_For(self, node, frame):
+ loop_frame = frame.inner()
+ test_frame = frame.inner()
+ else_frame = frame.inner()
+
+ # try to figure out if we have an extended loop. An extended loop
+ # is necessary if the loop is in recursive mode or if the special
+ # loop variable is accessed in the body.
+ extended_loop = node.recursive or 'loop' in \
+ find_undeclared(node.iter_child_nodes(
+ only=('body',)), ('loop',))
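+ # e.g. (editor's note, not part of the original source) a loop body that
+ # references {{ loop.index }}, or a loop marked "recursive", needs the
+ # extended LoopContext; a plain body just iterates the sequence directly.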
+
+ loop_ref = None
+ if extended_loop:
+ loop_ref = loop_frame.symbols.declare_parameter('loop')
+
+ loop_frame.symbols.analyze_node(node, for_branch='body')
+ if node.else_:
+ else_frame.symbols.analyze_node(node, for_branch='else')
+
+ if node.test:
+ loop_filter_func = self.temporary_identifier()
+ test_frame.symbols.analyze_node(node, for_branch='test')
+ self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
+ self.indent()
+ self.enter_frame(test_frame)
+ self.writeline(self.environment.is_async and 'async for ' or 'for ')
+ self.visit(node.target, loop_frame)
+ self.write(' in ')
+ self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
+ self.write(':')
+ self.indent()
+ self.writeline('if ', node.test)
+ self.visit(node.test, test_frame)
+ self.write(':')
+ self.indent()
+ self.writeline('yield ')
+ self.visit(node.target, loop_frame)
+ self.outdent(3)
+ self.leave_frame(test_frame, with_python_scope=True)
+
+ # if we don't have a recursive loop we have to find the shadowed
+ # variables at that point. Because loops can be nested but the loop
+ # variable is a special one we have to enforce aliasing for it.
+ if node.recursive:
+ self.writeline('%s(reciter, loop_render_func, depth=0):' %
+ self.func('loop'), node)
+ self.indent()
+ self.buffer(loop_frame)
+
+ # Use the same buffer for the else frame
+ else_frame.buffer = loop_frame.buffer
+
+ # make sure the loop variable is a special one and raise a template
+ # assertion error if a loop tries to write to loop
+ if extended_loop:
+ self.writeline('%s = missing' % loop_ref)
+
+ for name in node.find_all(nodes.Name):
+ if name.ctx == 'store' and name.name == 'loop':
+ self.fail('Can\'t assign to special loop variable '
+ 'in for-loop target', name.lineno)
+
+ if node.else_:
+ iteration_indicator = self.temporary_identifier()
+ self.writeline('%s = 1' % iteration_indicator)
+
+ self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
+ self.visit(node.target, loop_frame)
+ if extended_loop:
+ if self.environment.is_async:
+ self.write(', %s in await make_async_loop_context(' % loop_ref)
+ else:
+ self.write(', %s in LoopContext(' % loop_ref)
+ else:
+ self.write(' in ')
+
+ if node.test:
+ self.write('%s(' % loop_filter_func)
+ if node.recursive:
+ self.write('reciter')
+ else:
+ if self.environment.is_async and not extended_loop:
+ self.write('auto_aiter(')
+ self.visit(node.iter, frame)
+ if self.environment.is_async and not extended_loop:
+ self.write(')')
+ if node.test:
+ self.write(')')
+
+ if node.recursive:
+ self.write(', undefined, loop_render_func, depth):')
+ else:
+ self.write(extended_loop and ', undefined):' or ':')
+
+ self.indent()
+ self.enter_frame(loop_frame)
+
+ self.blockvisit(node.body, loop_frame)
+ if node.else_:
+ self.writeline('%s = 0' % iteration_indicator)
+ self.outdent()
+ self.leave_frame(loop_frame, with_python_scope=node.recursive
+ and not node.else_)
+
+ if node.else_:
+ self.writeline('if %s:' % iteration_indicator)
+ self.indent()
+ self.enter_frame(else_frame)
+ self.blockvisit(node.else_, else_frame)
+ self.leave_frame(else_frame)
+ self.outdent()
+
+ # if the node was recursive we have to return the buffer contents
+ # and start the iteration code
+ if node.recursive:
+ self.return_buffer_contents(loop_frame)
+ self.outdent()
+ self.start_write(frame, node)
+ if self.environment.is_async:
+ self.write('await ')
+ self.write('loop(')
+ if self.environment.is_async:
+ self.write('auto_aiter(')
+ self.visit(node.iter, frame)
+ if self.environment.is_async:
+ self.write(')')
+ self.write(', loop)')
+ self.end_write(frame)
+
+ def visit_If(self, node, frame):
+ if_frame = frame.soft()
+ self.writeline('if ', node)
+ self.visit(node.test, if_frame)
+ self.write(':')
+ self.indent()
+ self.blockvisit(node.body, if_frame)
+ self.outdent()
+ for elif_ in node.elif_:
+ self.writeline('elif ', elif_)
+ self.visit(elif_.test, if_frame)
+ self.write(':')
+ self.indent()
+ self.blockvisit(elif_.body, if_frame)
+ self.outdent()
+ if node.else_:
+ self.writeline('else:')
+ self.indent()
+ self.blockvisit(node.else_, if_frame)
+ self.outdent()
+
+ def visit_Macro(self, node, frame):
+ macro_frame, macro_ref = self.macro_body(node, frame)
+ self.newline()
+ if frame.toplevel:
+ if not node.name.startswith('_'):
+ self.write('context.exported_vars.add(%r)' % node.name)
+ ref = frame.symbols.ref(node.name)
+ self.writeline('context.vars[%r] = ' % node.name)
+ self.write('%s = ' % frame.symbols.ref(node.name))
+ self.macro_def(macro_ref, macro_frame)
+
+ def visit_CallBlock(self, node, frame):
+ call_frame, macro_ref = self.macro_body(node, frame)
+ self.writeline('caller = ')
+ self.macro_def(macro_ref, call_frame)
+ self.start_write(frame, node)
+ self.visit_Call(node.call, frame, forward_caller=True)
+ self.end_write(frame)
+
+ def visit_FilterBlock(self, node, frame):
+ filter_frame = frame.inner()
+ filter_frame.symbols.analyze_node(node)
+ self.enter_frame(filter_frame)
+ self.buffer(filter_frame)
+ self.blockvisit(node.body, filter_frame)
+ self.start_write(frame, node)
+ self.visit_Filter(node.filter, filter_frame)
+ self.end_write(frame)
+ self.leave_frame(filter_frame)
+
+ def visit_With(self, node, frame):
+ with_frame = frame.inner()
+ with_frame.symbols.analyze_node(node)
+ self.enter_frame(with_frame)
+ for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
+ self.newline()
+ self.visit(target, with_frame)
+ self.write(' = ')
+ self.visit(expr, frame)
+ self.blockvisit(node.body, with_frame)
+ self.leave_frame(with_frame)
+
+ def visit_ExprStmt(self, node, frame):
+ self.newline(node)
+ self.visit(node.node, frame)
+
+ def visit_Output(self, node, frame):
+ # if we have a known extends statement, we don't output anything
+ # if we are in a require_output_check section
+ if self.has_known_extends and frame.require_output_check:
+ return
+
+ allow_constant_finalize = True
+ if self.environment.finalize:
+ func = self.environment.finalize
+ if getattr(func, 'contextfunction', False) or \
+ getattr(func, 'evalcontextfunction', False):
+ allow_constant_finalize = False
+ elif getattr(func, 'environmentfunction', False):
+ finalize = lambda x: text_type(
+ self.environment.finalize(self.environment, x))
+ else:
+ finalize = lambda x: text_type(self.environment.finalize(x))
+ else:
+ finalize = text_type
+
+ # if we are inside a frame that requires output checking, we do so
+ outdent_later = False
+ if frame.require_output_check:
+ self.writeline('if parent_template is None:')
+ self.indent()
+ outdent_later = True
+
+ # try to evaluate as many chunks as possible into a static
+ # string at compile time.
+ body = []
+ for child in node.nodes:
+ try:
+ if not allow_constant_finalize:
+ raise nodes.Impossible()
+ const = child.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ body.append(child)
+ continue
+ # the frame can't be volatile here, because otherwise the
+ # as_const() function would raise an Impossible exception
+ # at that point.
+ try:
+ if frame.eval_ctx.autoescape:
+ if hasattr(const, '__html__'):
+ const = const.__html__()
+ else:
+ const = escape(const)
+ const = finalize(const)
+ except Exception:
+ # if something goes wrong here we evaluate the node
+ # at runtime for easier debugging
+ body.append(child)
+ continue
+ if body and isinstance(body[-1], list):
+ body[-1].append(const)
+ else:
+ body.append([const])
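+
+ # (editor's note, not part of the original source) for example a template
+ # like "Hello {{ 42 }}!" folds into the single compile-time constant
+ # "Hello 42!" instead of three separate output nodes.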
+
+ # if we have less than 3 nodes or a buffer we yield or extend/append
+ if len(body) < 3 or frame.buffer is not None:
+ if frame.buffer is not None:
+ # for one item we append, for more we extend
+ if len(body) == 1:
+ self.writeline('%s.append(' % frame.buffer)
+ else:
+ self.writeline('%s.extend((' % frame.buffer)
+ self.indent()
+ for item in body:
+ if isinstance(item, list):
+ val = repr(concat(item))
+ if frame.buffer is None:
+ self.writeline('yield ' + val)
+ else:
+ self.writeline(val + ',')
+ else:
+ if frame.buffer is None:
+ self.writeline('yield ', item)
+ else:
+ self.newline(item)
+ close = 1
+ if frame.eval_ctx.volatile:
+ self.write('(escape if context.eval_ctx.autoescape'
+ ' else to_string)(')
+ elif frame.eval_ctx.autoescape:
+ self.write('escape(')
+ else:
+ self.write('to_string(')
+ if self.environment.finalize is not None:
+ self.write('environment.finalize(')
+ if getattr(self.environment.finalize,
+ "contextfunction", False):
+ self.write('context, ')
+ close += 1
+ self.visit(item, frame)
+ self.write(')' * close)
+ if frame.buffer is not None:
+ self.write(',')
+ if frame.buffer is not None:
+ # close the open parentheses
+ self.outdent()
+ self.writeline(len(body) == 1 and ')' or '))')
+
+ # otherwise we create a format string as this is faster in that case
+ else:
+ format = []
+ arguments = []
+ for item in body:
+ if isinstance(item, list):
+ format.append(concat(item).replace('%', '%%'))
+ else:
+ format.append('%s')
+ arguments.append(item)
+ self.writeline('yield ')
+ self.write(repr(concat(format)) + ' % (')
+ self.indent()
+ for argument in arguments:
+ self.newline(argument)
+ close = 0
+ if frame.eval_ctx.volatile:
+ self.write('(escape if context.eval_ctx.autoescape else'
+ ' to_string)(')
+ close += 1
+ elif frame.eval_ctx.autoescape:
+ self.write('escape(')
+ close += 1
+ if self.environment.finalize is not None:
+ self.write('environment.finalize(')
+ if getattr(self.environment.finalize,
+ 'contextfunction', False):
+ self.write('context, ')
+ elif getattr(self.environment.finalize,
+ 'evalcontextfunction', False):
+ self.write('context.eval_ctx, ')
+ elif getattr(self.environment.finalize,
+ 'environmentfunction', False):
+ self.write('environment, ')
+ close += 1
+ self.visit(argument, frame)
+ self.write(')' * close + ', ')
+ self.outdent()
+ self.writeline(')')
+
+ if outdent_later:
+ self.outdent()
+
+ def visit_Assign(self, node, frame):
+ self.push_assign_tracking()
+ self.newline(node)
+ self.visit(node.target, frame)
+ self.write(' = ')
+ self.visit(node.node, frame)
+ self.pop_assign_tracking(frame)
+
+ def visit_AssignBlock(self, node, frame):
+ self.push_assign_tracking()
+ block_frame = frame.inner()
+ # This is a special case. Since a set block always captures we
+ # will disable output checks. This way one can use set blocks
+ # toplevel even in extended templates.
+ block_frame.require_output_check = False
+ block_frame.symbols.analyze_node(node)
+ self.enter_frame(block_frame)
+ self.buffer(block_frame)
+ self.blockvisit(node.body, block_frame)
+ self.newline(node)
+ self.visit(node.target, frame)
+ self.write(' = (Markup if context.eval_ctx.autoescape '
+ 'else identity)(')
+ if node.filter is not None:
+ self.visit_Filter(node.filter, block_frame)
+ else:
+ self.write('concat(%s)' % block_frame.buffer)
+ self.write(')')
+ self.pop_assign_tracking(frame)
+ self.leave_frame(block_frame)
+
+ # -- Expression Visitors
+
+ def visit_Name(self, node, frame):
+ if node.ctx == 'store' and frame.toplevel:
+ if self._assign_stack:
+ self._assign_stack[-1].add(node.name)
+ ref = frame.symbols.ref(node.name)
+
+ # If we are looking up a variable we might have to deal with the
+ # case where it's undefined. We can skip that case if the load
+ # instruction indicates a parameter which are always defined.
+ if node.ctx == 'load':
+ load = frame.symbols.find_load(ref)
+ if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
+ not self.parameter_is_undeclared(ref)):
+ self.write('(undefined(name=%r) if %s is missing else %s)' %
+ (node.name, ref, ref))
+ return
+
+ self.write(ref)
+
+ def visit_NSRef(self, node, frame):
+ # NSRefs can only be used to store values; since they use the normal
+ # `foo.bar` notation they will be parsed as a normal attribute access
+ # when used anywhere but in a `set` context
+ ref = frame.symbols.ref(node.name)
+ self.writeline('if not isinstance(%s, Namespace):' % ref)
+ self.indent()
+ self.writeline('raise TemplateRuntimeError(%r)' %
+ 'cannot assign attribute on non-namespace object')
+ self.outdent()
+ self.writeline('%s[%r]' % (ref, node.attr))
+
+ def visit_Const(self, node, frame):
+ val = node.as_const(frame.eval_ctx)
+ if isinstance(val, float):
+ self.write(str(val))
+ else:
+ self.write(repr(val))
+
+ def visit_TemplateData(self, node, frame):
+ try:
+ self.write(repr(node.as_const(frame.eval_ctx)))
+ except nodes.Impossible:
+ self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
+ % node.data)
+
+ def visit_Tuple(self, node, frame):
+ self.write('(')
+ idx = -1
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(', ')
+ self.visit(item, frame)
+ self.write(idx == 0 and ',)' or ')')
+
+ def visit_List(self, node, frame):
+ self.write('[')
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(', ')
+ self.visit(item, frame)
+ self.write(']')
+
+ def visit_Dict(self, node, frame):
+ self.write('{')
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(', ')
+ self.visit(item.key, frame)
+ self.write(': ')
+ self.visit(item.value, frame)
+ self.write('}')
+
+ def binop(operator, interceptable=True):
+ @optimizeconst
+ def visitor(self, node, frame):
+ if self.environment.sandboxed and \
+ operator in self.environment.intercepted_binops:
+ self.write('environment.call_binop(context, %r, ' % operator)
+ self.visit(node.left, frame)
+ self.write(', ')
+ self.visit(node.right, frame)
+ else:
+ self.write('(')
+ self.visit(node.left, frame)
+ self.write(' %s ' % operator)
+ self.visit(node.right, frame)
+ self.write(')')
+ return visitor
+
+ def uaop(operator, interceptable=True):
+ @optimizeconst
+ def visitor(self, node, frame):
+ if self.environment.sandboxed and \
+ operator in self.environment.intercepted_unops:
+ self.write('environment.call_unop(context, %r, ' % operator)
+ self.visit(node.node, frame)
+ else:
+ self.write('(' + operator)
+ self.visit(node.node, frame)
+ self.write(')')
+ return visitor
+
+ visit_Add = binop('+')
+ visit_Sub = binop('-')
+ visit_Mul = binop('*')
+ visit_Div = binop('/')
+ visit_FloorDiv = binop('//')
+ visit_Pow = binop('**')
+ visit_Mod = binop('%')
+ visit_And = binop('and', interceptable=False)
+ visit_Or = binop('or', interceptable=False)
+ visit_Pos = uaop('+')
+ visit_Neg = uaop('-')
+ visit_Not = uaop('not ', interceptable=False)
+ del binop, uaop
+
+ @optimizeconst
+ def visit_Concat(self, node, frame):
+ if frame.eval_ctx.volatile:
+ func_name = '(context.eval_ctx.volatile and' \
+ ' markup_join or unicode_join)'
+ elif frame.eval_ctx.autoescape:
+ func_name = 'markup_join'
+ else:
+ func_name = 'unicode_join'
+ self.write('%s((' % func_name)
+ for arg in node.nodes:
+ self.visit(arg, frame)
+ self.write(', ')
+ self.write('))')
+
+ @optimizeconst
+ def visit_Compare(self, node, frame):
+ self.visit(node.expr, frame)
+ for op in node.ops:
+ self.visit(op, frame)
+
+ def visit_Operand(self, node, frame):
+ self.write(' %s ' % operators[node.op])
+ self.visit(node.expr, frame)
+
+ @optimizeconst
+ def visit_Getattr(self, node, frame):
+ self.write('environment.getattr(')
+ self.visit(node.node, frame)
+ self.write(', %r)' % node.attr)
+
+ @optimizeconst
+ def visit_Getitem(self, node, frame):
+ # slices bypass the environment getitem method.
+ if isinstance(node.arg, nodes.Slice):
+ self.visit(node.node, frame)
+ self.write('[')
+ self.visit(node.arg, frame)
+ self.write(']')
+ else:
+ self.write('environment.getitem(')
+ self.visit(node.node, frame)
+ self.write(', ')
+ self.visit(node.arg, frame)
+ self.write(')')
+
+ def visit_Slice(self, node, frame):
+ if node.start is not None:
+ self.visit(node.start, frame)
+ self.write(':')
+ if node.stop is not None:
+ self.visit(node.stop, frame)
+ if node.step is not None:
+ self.write(':')
+ self.visit(node.step, frame)
+
+ @optimizeconst
+ def visit_Filter(self, node, frame):
+ if self.environment.is_async:
+ self.write('await auto_await(')
+ self.write(self.filters[node.name] + '(')
+ func = self.environment.filters.get(node.name)
+ if func is None:
+ self.fail('no filter named %r' % node.name, node.lineno)
+ if getattr(func, 'contextfilter', False):
+ self.write('context, ')
+ elif getattr(func, 'evalcontextfilter', False):
+ self.write('context.eval_ctx, ')
+ elif getattr(func, 'environmentfilter', False):
+ self.write('environment, ')
+
+ # if the filter node is None we are inside a filter block
+ # and want to write to the current buffer
+ if node.node is not None:
+ self.visit(node.node, frame)
+ elif frame.eval_ctx.volatile:
+ self.write('(context.eval_ctx.autoescape and'
+ ' Markup(concat(%s)) or concat(%s))' %
+ (frame.buffer, frame.buffer))
+ elif frame.eval_ctx.autoescape:
+ self.write('Markup(concat(%s))' % frame.buffer)
+ else:
+ self.write('concat(%s)' % frame.buffer)
+ self.signature(node, frame)
+ self.write(')')
+ if self.environment.is_async:
+ self.write(')')
+
+ @optimizeconst
+ def visit_Test(self, node, frame):
+ self.write(self.tests[node.name] + '(')
+ if node.name not in self.environment.tests:
+ self.fail('no test named %r' % node.name, node.lineno)
+ self.visit(node.node, frame)
+ self.signature(node, frame)
+ self.write(')')
+
+ @optimizeconst
+ def visit_CondExpr(self, node, frame):
+ def write_expr2():
+ if node.expr2 is not None:
+ return self.visit(node.expr2, frame)
+ self.write('undefined(%r)' % ('the inline if-'
+ 'expression on %s evaluated to false and '
+ 'no else section was defined.' % self.position(node)))
+
+ self.write('(')
+ self.visit(node.expr1, frame)
+ self.write(' if ')
+ self.visit(node.test, frame)
+ self.write(' else ')
+ write_expr2()
+ self.write(')')
+
+ @optimizeconst
+ def visit_Call(self, node, frame, forward_caller=False):
+ if self.environment.is_async:
+ self.write('await auto_await(')
+ if self.environment.sandboxed:
+ self.write('environment.call(context, ')
+ else:
+ self.write('context.call(')
+ self.visit(node.node, frame)
+ extra_kwargs = forward_caller and {'caller': 'caller'} or None
+ self.signature(node, frame, extra_kwargs)
+ self.write(')')
+ if self.environment.is_async:
+ self.write(')')
+
+ def visit_Keyword(self, node, frame):
+ self.write(node.key + '=')
+ self.visit(node.value, frame)
+
+ # -- Unused nodes for extensions
+
+ def visit_MarkSafe(self, node, frame):
+ self.write('Markup(')
+ self.visit(node.expr, frame)
+ self.write(')')
+
+ def visit_MarkSafeIfAutoescape(self, node, frame):
+ self.write('(context.eval_ctx.autoescape and Markup or identity)(')
+ self.visit(node.expr, frame)
+ self.write(')')
+
+ def visit_EnvironmentAttribute(self, node, frame):
+ self.write('environment.' + node.name)
+
+ def visit_ExtensionAttribute(self, node, frame):
+ self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
+
+ def visit_ImportedName(self, node, frame):
+ self.write(self.import_aliases[node.importname])
+
+ def visit_InternalName(self, node, frame):
+ self.write(node.name)
+
+ def visit_ContextReference(self, node, frame):
+ self.write('context')
+
+ def visit_Continue(self, node, frame):
+ self.writeline('continue', node)
+
+ def visit_Break(self, node, frame):
+ self.writeline('break', node)
+
+ def visit_Scope(self, node, frame):
+ scope_frame = frame.inner()
+ scope_frame.symbols.analyze_node(node)
+ self.enter_frame(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.leave_frame(scope_frame)
+
+ def visit_OverlayScope(self, node, frame):
+ ctx = self.temporary_identifier()
+ self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
+ self.writeline('%s.vars = ' % ctx)
+ self.visit(node.context, frame)
+ self.push_context_reference(ctx)
+
+ scope_frame = frame.inner(isolated=True)
+ scope_frame.symbols.analyze_node(node)
+ self.enter_frame(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.leave_frame(scope_frame)
+ self.pop_context_reference()
+
+ def visit_EvalContextModifier(self, node, frame):
+ for keyword in node.options:
+ self.writeline('context.eval_ctx.%s = ' % keyword.key)
+ self.visit(keyword.value, frame)
+ try:
+ val = keyword.value.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ frame.eval_ctx.volatile = True
+ else:
+ setattr(frame.eval_ctx, keyword.key, val)
+
+ def visit_ScopedEvalContextModifier(self, node, frame):
+ old_ctx_name = self.temporary_identifier()
+ saved_ctx = frame.eval_ctx.save()
+ self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
+ self.visit_EvalContextModifier(node, frame)
+ for child in node.body:
+ self.visit(child, frame)
+ frame.eval_ctx.revert(saved_ctx)
+ self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
diff --git a/python/jinja2/constants.py b/python/jinja2/constants.py
new file mode 100644
index 0000000..11efd1e
--- /dev/null
+++ b/python/jinja2/constants.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja.constants
+ ~~~~~~~~~~~~~~~
+
+ Various constants.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = u'''\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate'''
diff --git a/python/jinja2/debug.py b/python/jinja2/debug.py
new file mode 100644
index 0000000..b61139f
--- /dev/null
+++ b/python/jinja2/debug.py
@@ -0,0 +1,372 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.debug
+ ~~~~~~~~~~~~
+
+ Implements the debug interface for Jinja. This module does some pretty
+ ugly stuff with the Python traceback system in order to achieve tracebacks
+ with correct line numbers, locals and contents.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+import traceback
+from types import TracebackType, CodeType
+from jinja2.utils import missing, internal_code
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2._compat import iteritems, reraise, PY2
+
+# on pypy we can take advantage of transparent proxies
+try:
+ from __pypy__ import tproxy
+except ImportError:
+ tproxy = None
+
+
+# what does the raise helper look like?
+try:
+ exec("raise TypeError, 'foo'")
+except SyntaxError:
+ raise_helper = 'raise __jinja_exception__[1]'
+except TypeError:
+ raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
+
+
+class TracebackFrameProxy(object):
+ """Proxies a traceback frame."""
+
+ def __init__(self, tb):
+ self.tb = tb
+ self._tb_next = None
+
+ @property
+ def tb_next(self):
+ return self._tb_next
+
+ def set_next(self, next):
+ if tb_set_next is not None:
+ try:
+ tb_set_next(self.tb, next and next.tb or None)
+ except Exception:
+ # this function can fail due to all the hackery it does
+ # on various python implementations. We just catch the
+ # errors and ignore them if necessary.
+ pass
+ self._tb_next = next
+
+ @property
+ def is_jinja_frame(self):
+ return '__jinja_template__' in self.tb.tb_frame.f_globals
+
+ def __getattr__(self, name):
+ return getattr(self.tb, name)
+
+
+def make_frame_proxy(frame):
+ proxy = TracebackFrameProxy(frame)
+ if tproxy is None:
+ return proxy
+ def operation_handler(operation, *args, **kwargs):
+ if operation in ('__getattribute__', '__getattr__'):
+ return getattr(proxy, args[0])
+ elif operation == '__setattr__':
+ proxy.__setattr__(*args, **kwargs)
+ else:
+ return getattr(proxy, operation)(*args, **kwargs)
+ return tproxy(TracebackType, operation_handler)
+
+
+class ProcessedTraceback(object):
+ """Holds a Jinja preprocessed traceback for printing or reraising."""
+
+ def __init__(self, exc_type, exc_value, frames):
+ assert frames, 'no frames for this traceback?'
+ self.exc_type = exc_type
+ self.exc_value = exc_value
+ self.frames = frames
+
+ # link the frames (which are proxies) together into a new chain
+ prev_tb = None
+ for tb in self.frames:
+ if prev_tb is not None:
+ prev_tb.set_next(tb)
+ prev_tb = tb
+ prev_tb.set_next(None)
+
+ def render_as_text(self, limit=None):
+ """Return a string with the traceback."""
+ lines = traceback.format_exception(self.exc_type, self.exc_value,
+ self.frames[0], limit=limit)
+ return ''.join(lines).rstrip()
+
+ def render_as_html(self, full=False):
+ """Return a unicode string with the traceback as rendered HTML."""
+ from jinja2.debugrenderer import render_traceback
+ return u'%s\n\n<!--\n%s\n-->' % (
+ render_traceback(self, full=full),
+ self.render_as_text().decode('utf-8', 'replace')
+ )
+
+ @property
+ def is_template_syntax_error(self):
+ """`True` if this is a template syntax error."""
+ return isinstance(self.exc_value, TemplateSyntaxError)
+
+ @property
+ def exc_info(self):
+ """Exception info tuple with a proxy around the frame objects."""
+ return self.exc_type, self.exc_value, self.frames[0]
+
+ @property
+ def standard_exc_info(self):
+ """Standard python exc_info for re-raising"""
+ tb = self.frames[0]
+ # the frame will be an actual traceback (or transparent proxy) if
+ # we are on pypy or a python implementation with support for tproxy
+ if type(tb) is not TracebackType:
+ tb = tb.tb
+ return self.exc_type, self.exc_value, tb
+
+
+def make_traceback(exc_info, source_hint=None):
+ """Creates a processed traceback object from the exc_info."""
+ exc_type, exc_value, tb = exc_info
+ if isinstance(exc_value, TemplateSyntaxError):
+ exc_info = translate_syntax_error(exc_value, source_hint)
+ initial_skip = 0
+ else:
+ initial_skip = 1
+ return translate_exception(exc_info, initial_skip)
+
+
+def translate_syntax_error(error, source=None):
+ """Rewrites a syntax error to please traceback systems."""
+ error.source = source
+ error.translated = True
+ exc_info = (error.__class__, error, None)
+ filename = error.filename
+ if filename is None:
+ filename = '<unknown>'
+ return fake_exc_info(exc_info, filename, error.lineno)
+
+
+def translate_exception(exc_info, initial_skip=0):
+ """If passed an exc_info it will automatically rewrite the exceptions
+ all the way down to the correct line numbers and frames.
+ """
+ tb = exc_info[2]
+ frames = []
+
+ # skip some internal frames if wanted
+ for x in range(initial_skip):
+ if tb is not None:
+ tb = tb.tb_next
+ initial_tb = tb
+
+ while tb is not None:
+ # skip frames decorated with @internalcode. These are internal
+ # calls we can't avoid and that are useless in template debugging
+ # output.
+ if tb.tb_frame.f_code in internal_code:
+ tb = tb.tb_next
+ continue
+
+ # save a reference to the next frame if we override the current
+ # one with a faked one.
+ next = tb.tb_next
+
+ # fake template exceptions
+ template = tb.tb_frame.f_globals.get('__jinja_template__')
+ if template is not None:
+ lineno = template.get_corresponding_lineno(tb.tb_lineno)
+ tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
+ lineno)[2]
+
+ frames.append(make_frame_proxy(tb))
+ tb = next
+
+ # if we don't have any exceptions in the frames left, we have to
+ # reraise it unchanged.
+ # XXX: can we backup here? when could this happen?
+ if not frames:
+ reraise(exc_info[0], exc_info[1], exc_info[2])
+
+ return ProcessedTraceback(exc_info[0], exc_info[1], frames)
+
+
+def get_jinja_locals(real_locals):
+ ctx = real_locals.get('context')
+ if ctx:
+ locals = ctx.get_all().copy()
+ else:
+ locals = {}
+
+ local_overrides = {}
+
+ for name, value in iteritems(real_locals):
+ if not name.startswith('l_') or value is missing:
+ continue
+ try:
+ _, depth, name = name.split('_', 2)
+ depth = int(depth)
+ except ValueError:
+ continue
+ cur_depth = local_overrides.get(name, (-1,))[0]
+ if cur_depth < depth:
+ local_overrides[name] = (depth, value)
+
+ for name, (_, value) in iteritems(local_overrides):
+ if value is missing:
+ locals.pop(name, None)
+ else:
+ locals[name] = value
+
+ return locals
+
+
+def fake_exc_info(exc_info, filename, lineno):
+ """Helper for `translate_exception`."""
+ exc_type, exc_value, tb = exc_info
+
+ # figure the real context out
+ if tb is not None:
+ locals = get_jinja_locals(tb.tb_frame.f_locals)
+
+ # if there is a local called __jinja_exception__, we get
+ # rid of it to not break the debug functionality.
+ locals.pop('__jinja_exception__', None)
+ else:
+ locals = {}
+
+ # assemble the fake globals we need
+ globals = {
+ '__name__': filename,
+ '__file__': filename,
+ '__jinja_exception__': exc_info[:2],
+
+ # we don't want to keep the reference to the template around
+ # to not cause circular dependencies, but we mark it as Jinja
+ # frame for the ProcessedTraceback
+ '__jinja_template__': None
+ }
+
+ # and fake the exception
+ code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
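+ # (editor's note, not part of the original source) the newline padding
+ # makes the re-raised exception appear to originate at `lineno` of
+ # `filename`, which is how template line numbers show up in tracebacks.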
+
+ # if it's possible, change the name of the code. This won't work
+ # in some Python environments such as Google App Engine
+ try:
+ if tb is None:
+ location = 'template'
+ else:
+ function = tb.tb_frame.f_code.co_name
+ if function == 'root':
+ location = 'top-level template code'
+ elif function.startswith('block_'):
+ location = 'block "%s"' % function[6:]
+ else:
+ location = 'template'
+
+ if PY2:
+ code = CodeType(0, code.co_nlocals, code.co_stacksize,
+ code.co_flags, code.co_code, code.co_consts,
+ code.co_names, code.co_varnames, filename,
+ location, code.co_firstlineno,
+ code.co_lnotab, (), ())
+ else:
+ code = CodeType(0, code.co_kwonlyargcount,
+ code.co_nlocals, code.co_stacksize,
+ code.co_flags, code.co_code, code.co_consts,
+ code.co_names, code.co_varnames, filename,
+ location, code.co_firstlineno,
+ code.co_lnotab, (), ())
+ except Exception:
+ pass
+
+ # execute the code and catch the new traceback
+ try:
+ exec(code, globals, locals)
+ except:
+ exc_info = sys.exc_info()
+ new_tb = exc_info[2].tb_next
+
+ # return without this frame
+ return exc_info[:2] + (new_tb,)
+
+
+def _init_ugly_crap():
+ """This function implements a few ugly things so that we can patch the
+ traceback objects. The function returned allows resetting `tb_next` on
+ any python traceback object. Do not attempt to use this on non-CPython
+ interpreters.
+ """
+ import ctypes
+ from types import TracebackType
+
+ if PY2:
+ # figure out size of _Py_ssize_t for Python 2:
+ if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
+ _Py_ssize_t = ctypes.c_int64
+ else:
+ _Py_ssize_t = ctypes.c_int
+ else:
+ # platform ssize_t on Python 3
+ _Py_ssize_t = ctypes.c_ssize_t
+
+ # regular python
+ class _PyObject(ctypes.Structure):
+ pass
+ _PyObject._fields_ = [
+ ('ob_refcnt', _Py_ssize_t),
+ ('ob_type', ctypes.POINTER(_PyObject))
+ ]
+
+ # python with trace
+ if hasattr(sys, 'getobjects'):
+ class _PyObject(ctypes.Structure):
+ pass
+ _PyObject._fields_ = [
+ ('_ob_next', ctypes.POINTER(_PyObject)),
+ ('_ob_prev', ctypes.POINTER(_PyObject)),
+ ('ob_refcnt', _Py_ssize_t),
+ ('ob_type', ctypes.POINTER(_PyObject))
+ ]
+
+ class _Traceback(_PyObject):
+ pass
+ _Traceback._fields_ = [
+ ('tb_next', ctypes.POINTER(_Traceback)),
+ ('tb_frame', ctypes.POINTER(_PyObject)),
+ ('tb_lasti', ctypes.c_int),
+ ('tb_lineno', ctypes.c_int)
+ ]
+
+ def tb_set_next(tb, next):
+ """Set the tb_next attribute of a traceback object."""
+ if not (isinstance(tb, TracebackType) and
+ (next is None or isinstance(next, TracebackType))):
+ raise TypeError('tb_set_next arguments must be traceback objects')
+ obj = _Traceback.from_address(id(tb))
+ if tb.tb_next is not None:
+ old = _Traceback.from_address(id(tb.tb_next))
+ old.ob_refcnt -= 1
+ if next is None:
+ obj.tb_next = ctypes.POINTER(_Traceback)()
+ else:
+ next = _Traceback.from_address(id(next))
+ next.ob_refcnt += 1
+ obj.tb_next = ctypes.pointer(next)
+
+ return tb_set_next
+
+
+# try to get a tb_set_next implementation if we don't have transparent
+# proxies.
+tb_set_next = None
+if tproxy is None:
+ try:
+ tb_set_next = _init_ugly_crap()
+ except:
+ pass
+ del _init_ugly_crap
diff --git a/python/jinja2/defaults.py b/python/jinja2/defaults.py
new file mode 100644
index 0000000..7c93dec
--- /dev/null
+++ b/python/jinja2/defaults.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.defaults
+ ~~~~~~~~~~~~~~~
+
+ Jinja default filters and tags.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import range_type
+from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
+
+
+# defaults for the parser / lexer
+BLOCK_START_STRING = '{%'
+BLOCK_END_STRING = '%}'
+VARIABLE_START_STRING = '{{'
+VARIABLE_END_STRING = '}}'
+COMMENT_START_STRING = '{#'
+COMMENT_END_STRING = '#}'
+LINE_STATEMENT_PREFIX = None
+LINE_COMMENT_PREFIX = None
+TRIM_BLOCKS = False
+LSTRIP_BLOCKS = False
+NEWLINE_SEQUENCE = '\n'
+KEEP_TRAILING_NEWLINE = False
+
+
+# default filters, tests and namespace
+from jinja2.filters import FILTERS as DEFAULT_FILTERS
+from jinja2.tests import TESTS as DEFAULT_TESTS
+DEFAULT_NAMESPACE = {
+ 'range': range_type,
+ 'dict': dict,
+ 'lipsum': generate_lorem_ipsum,
+ 'cycler': Cycler,
+ 'joiner': Joiner,
+ 'namespace': Namespace
+}
+
+
+# default policies
+DEFAULT_POLICIES = {
+ 'compiler.ascii_str': True,
+ 'urlize.rel': 'noopener',
+ 'urlize.target': None,
+ 'truncate.leeway': 5,
+ 'json.dumps_function': None,
+ 'json.dumps_kwargs': {'sort_keys': True},
+ 'ext.i18n.trimmed': False,
+}
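+
+# Illustrative usage (editor's sketch, not part of the original source):
+#
+#   env = Environment()
+#   env.policies['json.dumps_kwargs'] = {'sort_keys': True, 'indent': 2}
+#   env.policies['truncate.leeway'] = 0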
+
+
+# export all constants
+__all__ = tuple(x for x in locals().keys() if x.isupper())
diff --git a/python/jinja2/environment.py b/python/jinja2/environment.py
new file mode 100644
index 0000000..549d9af
--- /dev/null
+++ b/python/jinja2/environment.py
@@ -0,0 +1,1276 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.environment
+ ~~~~~~~~~~~~~~~~~~
+
+ Provides a class that holds runtime and parsing time options.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import weakref
+from functools import reduce, partial
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+ DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
+ DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.lexer import get_lexer, TokenStream
+from jinja2.parser import Parser
+from jinja2.nodes import EvalContext
+from jinja2.compiler import generate, CodeGenerator
+from jinja2.runtime import Undefined, new_context, Context
+from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
+ TemplatesNotFound, TemplateRuntimeError
+from jinja2.utils import import_string, LRUCache, Markup, missing, \
+ concat, consume, internalcode, have_async_gen
+from jinja2._compat import imap, ifilter, string_types, iteritems, \
+ text_type, reraise, implements_iterator, implements_to_string, \
+ encode_filename, PY2, PYPY
+
+
+# for direct template usage we have up to ten living environments
+_spontaneous_environments = LRUCache(10)
+
+# the function to create jinja traceback objects. This is dynamically
+# imported on the first exception in the exception handler.
+_make_traceback = None
+
+
+def get_spontaneous_environment(*args):
+ """Return a new spontaneous environment. A spontaneous environment is an
+ unnamed and inaccessible (in theory) environment that is used for
+ templates generated from a string and not from the file system.
+ """
+ try:
+ env = _spontaneous_environments.get(args)
+ except TypeError:
+ return Environment(*args)
+ if env is not None:
+ return env
+ _spontaneous_environments[args] = env = Environment(*args)
+ env.shared = True
+ return env
+
+
+def create_cache(size):
+ """Return the cache class for the given size."""
+ if size == 0:
+ return None
+ if size < 0:
+ return {}
+ return LRUCache(size)
+
+
+def copy_cache(cache):
+ """Create an empty copy of the given cache."""
+ if cache is None:
+ return None
+ elif type(cache) is dict:
+ return {}
+ return LRUCache(cache.capacity)
+
+
+def load_extensions(environment, extensions):
+ """Load the extensions from the list and bind it to the environment.
+ Returns a dict of instantiated environments.
+ """
+ result = {}
+ for extension in extensions:
+ if isinstance(extension, string_types):
+ extension = import_string(extension)
+ result[extension.identifier] = extension(environment)
+ return result
+
+
+def fail_for_missing_callable(string, name):
+ msg = string % name
+ if isinstance(name, Undefined):
+ try:
+ name._fail_with_undefined_error()
+ except Exception as e:
+ msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
+ raise TemplateRuntimeError(msg)
+
+
+def _environment_sanity_check(environment):
+ """Perform a sanity check on the environment."""
+ assert issubclass(environment.undefined, Undefined), 'undefined must ' \
+ 'be a subclass of undefined because filters depend on it.'
+ assert environment.block_start_string != \
+ environment.variable_start_string != \
+ environment.comment_start_string, 'block, variable and comment ' \
+ 'start strings must be different'
+ assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
+ 'newline_sequence set to unknown line ending string.'
+ return environment
+
+
+class Environment(object):
+ r"""The core component of Jinja is the `Environment`. It contains
+ important shared variables like configuration, filters, tests,
+ globals and others. Instances of this class may be modified if
+ they are not shared and if no template was loaded so far.
+ Modifications on environments after the first template was loaded
+ will lead to surprising effects and undefined behavior.
+
+ Here are the possible initialization parameters:
+
+ `block_start_string`
+ The string marking the beginning of a block. Defaults to ``'{%'``.
+
+ `block_end_string`
+ The string marking the end of a block. Defaults to ``'%}'``.
+
+ `variable_start_string`
+ The string marking the beginning of a print statement.
+ Defaults to ``'{{'``.
+
+ `variable_end_string`
+ The string marking the end of a print statement. Defaults to
+ ``'}}'``.
+
+ `comment_start_string`
+ The string marking the beginning of a comment. Defaults to ``'{#'``.
+
+ `comment_end_string`
+ The string marking the end of a comment. Defaults to ``'#}'``.
+
+ `line_statement_prefix`
+ If given and a string, this will be used as prefix for line based
+ statements. See also :ref:`line-statements`.
+
+ `line_comment_prefix`
+ If given and a string, this will be used as prefix for line based
+ comments. See also :ref:`line-statements`.
+
+ .. versionadded:: 2.2
+
+ `trim_blocks`
+ If this is set to ``True`` the first newline after a block is
+ removed (block, not variable tag!). Defaults to `False`.
+
+ `lstrip_blocks`
+ If this is set to ``True`` leading spaces and tabs are stripped
+ from the start of a line to a block. Defaults to `False`.
+
+ `newline_sequence`
+ The sequence that starts a newline. Must be one of ``'\r'``,
+ ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
+ useful default for Linux and OS X systems as well as web
+ applications.
+
+ `keep_trailing_newline`
+ Preserve the trailing newline when rendering templates.
+ The default is ``False``, which causes a single newline,
+ if present, to be stripped from the end of the template.
+
+ .. versionadded:: 2.7
+
+ `extensions`
+ List of Jinja extensions to use. This can either be import paths
+ as strings or extension classes. For more information have a
+ look at :ref:`the extensions documentation <jinja-extensions>`.
+
+ `optimized`
+ should the optimizer be enabled? Default is ``True``.
+
+ `undefined`
+ :class:`Undefined` or a subclass of it that is used to represent
+ undefined values in the template.
+
+ `finalize`
+ A callable that can be used to process the result of a variable
+ expression before it is output. For example one can convert
+ ``None`` implicitly into an empty string here.
+
+ `autoescape`
+ If set to ``True`` the XML/HTML autoescaping feature is enabled by
+ default. For more details about autoescaping see
+ :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
+ be a callable that is passed the template name and has to
+ return ``True`` or ``False`` depending on whether autoescape should
+ be enabled by default.
+
+ .. versionchanged:: 2.4
+ `autoescape` can now be a function
+
+ `loader`
+ The template loader for this environment.
+
+ `cache_size`
+ The size of the cache. By default this is ``400``, which means
+ that if more than 400 templates are loaded the loader will clean
+ out the least recently used template. If the cache size is set to
+ ``0`` templates are recompiled all the time, if the cache size is
+ ``-1`` the cache will not be cleaned.
+
+ .. versionchanged:: 2.8
+ The cache size was increased to 400 from a low 50.
+
+ `auto_reload`
+ Some loaders load templates from locations where the template
+ sources may change (i.e. file system or database). If
+ ``auto_reload`` is set to ``True`` (default) every time a template is
+ requested the loader checks if the source changed and if yes, it
+ will reload the template. For higher performance it's possible to
+ disable that.
+
+ `bytecode_cache`
+ If set to a bytecode cache object, this object will provide a
+ cache for the internal Jinja bytecode so that templates don't
+ have to be parsed if they were not changed.
+
+ See :ref:`bytecode-cache` for more information.
+
+ `enable_async`
+ If set to true this enables async template execution which allows
+ you to take advantage of newer Python features. This requires
+ Python 3.6 or later.
+ """
+
+ #: if this environment is sandboxed. Modifying this variable won't make
+ #: the environment sandboxed though. For a real sandboxed environment
+ #: have a look at jinja2.sandbox. This flag alone controls the code
+ #: generation by the compiler.
+ sandboxed = False
+
+ #: True if the environment is just an overlay
+ overlayed = False
+
+ #: the environment this environment is linked to if it is an overlay
+ linked_to = None
+
+ #: shared environments have this set to `True`. A shared environment
+ #: must not be modified
+ shared = False
+
+ #: these are currently EXPERIMENTAL undocumented features.
+ exception_handler = None
+ exception_formatter = None
+
+ #: the class that is used for code generation. See
+ #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+ code_generator_class = CodeGenerator
+
+ #: the context class that is used for templates. See
+ #: :class:`~jinja2.runtime.Context` for more information.
+ context_class = Context
+
+ def __init__(self,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ loader=None,
+ cache_size=400,
+ auto_reload=True,
+ bytecode_cache=None,
+ enable_async=False):
+ # !!Important notice!!
+ # The constructor accepts quite a few arguments that should be
+ # passed by keyword rather than position. However it's important to
+ # not change the order of arguments because it's used at least
+ # internally in those cases:
+ # - spontaneous environments (i18n extension and Template)
+ # - unittests
+ # If parameter changes are required only add parameters at the end
+ # and don't change the existing arguments (or their defaults!).
+
+ # lexer / parser information
+ self.block_start_string = block_start_string
+ self.block_end_string = block_end_string
+ self.variable_start_string = variable_start_string
+ self.variable_end_string = variable_end_string
+ self.comment_start_string = comment_start_string
+ self.comment_end_string = comment_end_string
+ self.line_statement_prefix = line_statement_prefix
+ self.line_comment_prefix = line_comment_prefix
+ self.trim_blocks = trim_blocks
+ self.lstrip_blocks = lstrip_blocks
+ self.newline_sequence = newline_sequence
+ self.keep_trailing_newline = keep_trailing_newline
+
+ # runtime information
+ self.undefined = undefined
+ self.optimized = optimized
+ self.finalize = finalize
+ self.autoescape = autoescape
+
+ # defaults
+ self.filters = DEFAULT_FILTERS.copy()
+ self.tests = DEFAULT_TESTS.copy()
+ self.globals = DEFAULT_NAMESPACE.copy()
+
+ # set the loader provided
+ self.loader = loader
+ self.cache = create_cache(cache_size)
+ self.bytecode_cache = bytecode_cache
+ self.auto_reload = auto_reload
+
+ # configurable policies
+ self.policies = DEFAULT_POLICIES.copy()
+
+ # load extensions
+ self.extensions = load_extensions(self, extensions)
+
+ self.enable_async = enable_async
+ self.is_async = self.enable_async and have_async_gen
+
+ _environment_sanity_check(self)
+
+ def add_extension(self, extension):
+ """Adds an extension after the environment was created.
+
+ .. versionadded:: 2.5
+ """
+ self.extensions.update(load_extensions(self, [extension]))
+
+ def extend(self, **attributes):
+ """Add the items to the instance of the environment if they do not exist
+ yet. This is used by :ref:`extensions <writing-extensions>` to register
+ callbacks and configuration values without breaking inheritance.
+ """
+ for key, value in iteritems(attributes):
+ if not hasattr(self, key):
+ setattr(self, key, value)
+
+ def overlay(self, block_start_string=missing, block_end_string=missing,
+ variable_start_string=missing, variable_end_string=missing,
+ comment_start_string=missing, comment_end_string=missing,
+ line_statement_prefix=missing, line_comment_prefix=missing,
+ trim_blocks=missing, lstrip_blocks=missing,
+ extensions=missing, optimized=missing,
+ undefined=missing, finalize=missing, autoescape=missing,
+ loader=missing, cache_size=missing, auto_reload=missing,
+ bytecode_cache=missing):
+ """Create a new overlay environment that shares all the data with the
+ current environment except for cache and the overridden attributes.
+ Extensions cannot be removed for an overlayed environment. An overlayed
+ environment automatically gets all the extensions of the environment it
+ is linked to plus optional extra extensions.
+
+ Creating overlays should happen after the initial environment was set
+ up completely. Not all attributes are truly linked; some are just
+ copied over, so modifications on the original environment may not
+ shine through.
+ """
+ args = dict(locals())
+ del args['self'], args['cache_size'], args['extensions']
+
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.overlayed = True
+ rv.linked_to = self
+
+ for key, value in iteritems(args):
+ if value is not missing:
+ setattr(rv, key, value)
+
+ if cache_size is not missing:
+ rv.cache = create_cache(cache_size)
+ else:
+ rv.cache = copy_cache(self.cache)
+
+ rv.extensions = {}
+ for key, value in iteritems(self.extensions):
+ rv.extensions[key] = value.bind(rv)
+ if extensions is not missing:
+ rv.extensions.update(load_extensions(rv, extensions))
+
+ return _environment_sanity_check(rv)
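+
+ # Example (editor's sketch, not part of the original source): an overlay
+ # that only changes autoescaping while sharing everything else with `env`:
+ #
+ #   html_env = env.overlay(autoescape=True)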
+
+ lexer = property(get_lexer, doc="The lexer for this environment.")
+
+ def iter_extensions(self):
+ """Iterates over the extensions by priority."""
+ return iter(sorted(self.extensions.values(),
+ key=lambda x: x.priority))
+
+ def getitem(self, obj, argument):
+ """Get an item or attribute of an object but prefer the item."""
+ try:
+ return obj[argument]
+ except (AttributeError, TypeError, LookupError):
+ if isinstance(argument, string_types):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ return getattr(obj, attr)
+ except AttributeError:
+ pass
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj, attribute):
+ """Get an item or attribute of an object but prefer the attribute.
+ Unlike :meth:`getitem` the attribute *must* be a bytestring.
+ """
+ try:
+ return getattr(obj, attribute)
+ except AttributeError:
+ pass
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError, AttributeError):
+ return self.undefined(obj=obj, name=attribute)
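+
+ # (editor's note, not part of the original source) the compiler emits
+ # environment.getattr(obj, 'name') for {{ obj.name }} (attribute first,
+ # then item) and environment.getitem(obj, name) for {{ obj['name'] }}
+ # (item first, then attribute).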
+
+ def call_filter(self, name, value, args=None, kwargs=None,
+ context=None, eval_ctx=None):
+ """Invokes a filter on a value the same way the compiler does it.
+
+ Note that on Python 3 this might return a coroutine in case the
+ filter is running from an environment in async mode and the filter
+ supports async execution. It's your responsibility to await this
+ if needed.
+
+ .. versionadded:: 2.7
+ """
+ func = self.filters.get(name)
+ if func is None:
+ fail_for_missing_callable('no filter named %r', name)
+ args = [value] + list(args or ())
+ if getattr(func, 'contextfilter', False):
+ if context is None:
+ raise TemplateRuntimeError('Attempted to invoke context '
+ 'filter without context')
+ args.insert(0, context)
+ elif getattr(func, 'evalcontextfilter', False):
+ if eval_ctx is None:
+ if context is not None:
+ eval_ctx = context.eval_ctx
+ else:
+ eval_ctx = EvalContext(self)
+ args.insert(0, eval_ctx)
+ elif getattr(func, 'environmentfilter', False):
+ args.insert(0, self)
+ return func(*args, **(kwargs or {}))
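+
+ # Illustrative usage (editor's sketch, not part of the original source):
+ #
+ #   env = Environment()
+ #   env.call_filter('upper', 'hello')           # -> 'HELLO'
+ #   env.call_filter('join', [1, 2, 3], ['-'])   # -> '1-2-3'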
+
+ def call_test(self, name, value, args=None, kwargs=None):
+ """Invokes a test on a value the same way the compiler does it.
+
+ .. versionadded:: 2.7
+ """
+ func = self.tests.get(name)
+ if func is None:
+ fail_for_missing_callable('no test named %r', name)
+ return func(value, *(args or ()), **(kwargs or {}))
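+
+ # Illustrative usage (editor's sketch, not part of the original source):
+ #
+ #   env.call_test('odd', 3)                 # -> True
+ #   env.call_test('divisibleby', 10, [5])   # -> True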
+
+ @internalcode
+ def parse(self, source, name=None, filename=None):
+ """Parse the sourcecode and return the abstract syntax tree. This
+ tree of nodes is used by the compiler to convert the template into
+ executable source- or bytecode. This is useful for debugging or to
+ extract information from templates.
+
+ If you are :ref:`developing Jinja2 extensions <writing-extensions>`
+ this gives you a good overview of the node tree generated.
+ """
+ try:
+ return self._parse(source, name, filename)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source)
+
+ def _parse(self, source, name, filename):
+ """Internal parsing function used by `parse` and `compile`."""
+ return Parser(self, source, name, encode_filename(filename)).parse()
+
+ def lex(self, source, name=None, filename=None):
+ """Lex the given sourcecode and return a generator that yields
+ tokens as tuples in the form ``(lineno, token_type, value)``.
+ This can be useful for :ref:`extension development <writing-extensions>`
+ and debugging templates.
+
+ This does not perform preprocessing. If you want the preprocessing
+ of the extensions to be applied you have to filter source through
+ the :meth:`preprocess` method.
+ """
+ source = text_type(source)
+ try:
+ return self.lexer.tokeniter(source, name, filename)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source)
+
+ def preprocess(self, source, name=None, filename=None):
+ """Preprocesses the source with all extensions. This is automatically
+ called for all parsing and compiling methods but *not* for :meth:`lex`
+ because there you usually only want the actual source tokenized.
+ """
+ return reduce(lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(), text_type(source))
+
+ def _tokenize(self, source, name, filename=None, state=None):
+ """Called by the parser to do the preprocessing and filtering
+ for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
+ """
+ source = self.preprocess(source, name, filename)
+ stream = self.lexer.tokenize(source, name, filename, state)
+ for ext in self.iter_extensions():
+ stream = ext.filter_stream(stream)
+ if not isinstance(stream, TokenStream):
+ stream = TokenStream(stream, name, filename)
+ return stream
+
+ def _generate(self, source, name, filename, defer_init=False):
+ """Internal hook that can be overridden to hook a different generate
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return generate(source, self, name, filename, defer_init=defer_init,
+ optimized=self.optimized)
+
+ def _compile(self, source, filename):
+ """Internal hook that can be overridden to hook a different compile
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return compile(source, filename, 'exec')
+
+ @internalcode
+ def compile(self, source, name=None, filename=None, raw=False,
+ defer_init=False):
+ """Compile a node or template source code. The `name` parameter is
+ the load name of the template after it was joined using
+ :meth:`join_path` if necessary, not the filename on the file system.
+ The `filename` parameter is the estimated filename of the template on
+ the file system. If the template came from a database or memory this
+ can be omitted.
+
+ The return value of this method is a python code object. If the `raw`
+ parameter is `True` the return value will be a string with python
+ code equivalent to the bytecode returned otherwise. This method is
+ mainly used internally.
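+
+ A minimal sketch (when no `filename` is given the code object uses
+ ``'<template>'`` as its filename)::
+
+ >>> code = Environment().compile('Hello {{ name }}!')
+ >>> code.co_filename
+ '<template>'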
+
+ `defer_init` is used internally to aid the module code generator. It
+ allows the generated code to be imported without the global
+ environment variable being set.
+
+ .. versionadded:: 2.4
+ `defer_init` parameter added.
+ """
+ source_hint = None
+ try:
+ if isinstance(source, string_types):
+ source_hint = source
+ source = self._parse(source, name, filename)
+ source = self._generate(source, name, filename,
+ defer_init=defer_init)
+ if raw:
+ return source
+ if filename is None:
+ filename = '<template>'
+ else:
+ filename = encode_filename(filename)
+ return self._compile(source, filename)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source_hint)
+
+ def compile_expression(self, source, undefined_to_none=True):
+ """A handy helper method that returns a callable that accepts keyword
+ arguments that appear as variables in the expression. If called it
+ returns the result of the expression.
+
+ This is useful if applications want to use the same rules as Jinja
+ in template "configuration files" or similar situations.
+
+ Example usage:
+
+ >>> env = Environment()
+ >>> expr = env.compile_expression('foo == 42')
+ >>> expr(foo=23)
+ False
+ >>> expr(foo=42)
+ True
+
+ Per default the return value is converted to `None` if the
+ expression returns an undefined value. This can be changed
+ by setting `undefined_to_none` to `False`.
+
+ >>> env.compile_expression('var')() is None
+ True
+ >>> env.compile_expression('var', undefined_to_none=False)()
+ Undefined
+
+ .. versionadded:: 2.1
+ """
+ parser = Parser(self, source, state='variable')
+ exc_info = None
+ try:
+ expr = parser.parse_expression()
+ if not parser.stream.eos:
+ raise TemplateSyntaxError('chunk after expression',
+ parser.stream.current.lineno,
+ None, None)
+ expr.set_environment(self)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ if exc_info is not None:
+ self.handle_exception(exc_info, source_hint=source)
+ body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
+ template = self.from_string(nodes.Template(body, lineno=1))
+ return TemplateExpression(template, undefined_to_none)
+
+ def compile_templates(self, target, extensions=None, filter_func=None,
+ zip='deflated', log_function=None,
+ ignore_errors=True, py_compile=False):
+ """Finds all the templates the loader can find, compiles them
+ and stores them in `target`. If `zip` is `None`, instead of in a
+ zipfile, the templates will be stored in a directory.
+ By default a deflate zip algorithm is used. To switch to
+ the stored algorithm, `zip` can be set to ``'stored'``.
+
+ `extensions` and `filter_func` are passed to :meth:`list_templates`.
+ Each template returned will be compiled to the target folder or
+ zipfile.
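+
+ Example usage (a sketch; writes a deflated zip archive that
+ :class:`~jinja2.loaders.ModuleLoader` can later load)::
+
+ env.compile_templates('compiled_templates.zip')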
+
+ By default template compilation errors are ignored. In case a
+ log function is provided, errors are logged. If you want template
+ syntax errors to abort the compilation you can set `ignore_errors`
+ to `False` and you will get an exception on syntax errors.
+
+ If `py_compile` is set to `True`, .pyc files will be written to the
+ target instead of standard .py files. This flag does nothing on
+ PyPy and Python 3, where .pyc files are not picked up automatically
+ and provide little benefit.
+
+ .. versionadded:: 2.4
+ """
+ from jinja2.loaders import ModuleLoader
+
+ if log_function is None:
+ log_function = lambda x: None
+
+ if py_compile:
+ if not PY2 or PYPY:
+ from warnings import warn
+ warn(Warning('py_compile has no effect on pypy or Python 3'))
+ py_compile = False
+ else:
+ import imp
+ import marshal
+ py_header = imp.get_magic() + \
+ u'\xff\xff\xff\xff'.encode('iso-8859-15')
+
+ # Python 3.3 added a source filesize to the header
+ if sys.version_info >= (3, 3):
+ py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
+
+ def write_file(filename, data, mode):
+ if zip:
+ info = ZipInfo(filename)
+ info.external_attr = 0o755 << 16
+ zip_file.writestr(info, data)
+ else:
+ f = open(os.path.join(target, filename), mode)
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+ if zip is not None:
+ from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
+ zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
+ stored=ZIP_STORED)[zip])
+ log_function('Compiling into Zip archive "%s"' % target)
+ else:
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ log_function('Compiling into folder "%s"' % target)
+
+ try:
+ for name in self.list_templates(extensions, filter_func):
+ source, filename, _ = self.loader.get_source(self, name)
+ try:
+ code = self.compile(source, name, filename, True, True)
+ except TemplateSyntaxError as e:
+ if not ignore_errors:
+ raise
+ log_function('Could not compile "%s": %s' % (name, e))
+ continue
+
+ filename = ModuleLoader.get_module_filename(name)
+
+ if py_compile:
+ c = self._compile(code, encode_filename(filename))
+ write_file(filename + 'c', py_header +
+ marshal.dumps(c), 'wb')
+ log_function('Byte-compiled "%s" as %s' %
+ (name, filename + 'c'))
+ else:
+ write_file(filename, code, 'w')
+ log_function('Compiled "%s" as %s' % (name, filename))
+ finally:
+ if zip:
+ zip_file.close()
+
+ log_function('Finished compiling templates')
+
+ def list_templates(self, extensions=None, filter_func=None):
+ """Returns a list of templates for this environment. This requires
+ that the loader supports the
+ :meth:`~BaseLoader.list_templates` method.
+
+ If there are other files in the template folder besides the
+ actual templates, the returned list can be filtered. There are two
+ ways: either `extensions` is set to a list of file extensions for
+ templates, or a `filter_func` can be provided which is a callable that
+ is passed a template name and should return `True` if it should end up
+ in the result list.
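+
+ Two small sketches of the filtering options described above::
+
+ env.list_templates(extensions=['html', 'txt'])
+ env.list_templates(filter_func=lambda name: name.startswith('mail/'))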
+
+ If the loader does not support that, a :exc:`TypeError` is raised.
+
+ .. versionadded:: 2.4
+ """
+ x = self.loader.list_templates()
+ if extensions is not None:
+ if filter_func is not None:
+ raise TypeError('either extensions or filter_func '
+ 'can be passed, but not both')
+ filter_func = lambda x: '.' in x and \
+ x.rsplit('.', 1)[1] in extensions
+ if filter_func is not None:
+ x = list(ifilter(filter_func, x))
+ return x
+
+ def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
+ """Exception handling helper. This is used internally to either raise
+ rewritten exceptions or return a rendered traceback for the template.
+ """
+ global _make_traceback
+ if exc_info is None:
+ exc_info = sys.exc_info()
+
+ # the debugging module is imported when it's used for the first time.
+ # we're doing a lot of stuff there and for applications that do not
+ # get any exceptions in template rendering there is no need to load
+ # all of that.
+ if _make_traceback is None:
+ from jinja2.debug import make_traceback as _make_traceback
+ traceback = _make_traceback(exc_info, source_hint)
+ if rendered and self.exception_formatter is not None:
+ return self.exception_formatter(traceback)
+ if self.exception_handler is not None:
+ self.exception_handler(traceback)
+ exc_type, exc_value, tb = traceback.standard_exc_info
+ reraise(exc_type, exc_value, tb)
+
+ def join_path(self, template, parent):
+ """Join a template with the parent. By default all the lookups are
+ relative to the loader root so this method returns the `template`
+ parameter unchanged, but if the paths should be relative to the
+ parent template, this function can be used to calculate the real
+ template name.
+
+ Subclasses may override this method and implement template path
+ joining here.
+ """
+ return template
+
+ @internalcode
+ def _load_template(self, name, globals):
+ if self.loader is None:
+ raise TypeError('no loader for this environment specified')
+ cache_key = (weakref.ref(self.loader), name)
+ if self.cache is not None:
+ template = self.cache.get(cache_key)
+ if template is not None and (not self.auto_reload or
+ template.is_up_to_date):
+ return template
+ template = self.loader.load(self, name, globals)
+ if self.cache is not None:
+ self.cache[cache_key] = template
+ return template
+
+ @internalcode
+ def get_template(self, name, parent=None, globals=None):
+ """Load a template from the loader. If a loader is configured this
+ method asks the loader for the template and returns a :class:`Template`.
+ If the `parent` parameter is not `None`, :meth:`join_path` is called
+ to get the real template name before loading.
+
+ The `globals` parameter can be used to provide template wide globals.
+ These variables are available in the context at render time.
+
+ If the template does not exist a :exc:`TemplateNotFound` exception is
+ raised.
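+
+ Example usage (a sketch; assumes a configured loader and that the
+ named templates exist)::
+
+ template = env.get_template('index.html')
+ template = env.get_template('sidebar.html', parent='index.html')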
+
+ .. versionchanged:: 2.4
+ If `name` is a :class:`Template` object it is returned from the
+ function unchanged.
+ """
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ return self._load_template(name, self.make_globals(globals))
+
+ @internalcode
+ def select_template(self, names, parent=None, globals=None):
+ """Works like :meth:`get_template` but tries a number of templates
+ before it fails. If it cannot find any of the templates, it will
+ raise a :exc:`TemplatesNotFound` exception.
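+
+ Example usage (a sketch; the first name that resolves to an existing
+ template wins)::
+
+ template = env.select_template(['theme/index.html', 'index.html'])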
+
+ .. versionadded:: 2.3
+
+ .. versionchanged:: 2.4
+ If `names` contains a :class:`Template` object it is returned
+ from the function unchanged.
+ """
+ if not names:
+ raise TemplatesNotFound(message=u'Tried to select from an empty list '
+ u'of templates.')
+ globals = self.make_globals(globals)
+ for name in names:
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ try:
+ return self._load_template(name, globals)
+ except TemplateNotFound:
+ pass
+ raise TemplatesNotFound(names)
+
+ @internalcode
+ def get_or_select_template(self, template_name_or_list,
+ parent=None, globals=None):
+ """Does a typecheck and dispatches to :meth:`select_template`
+ if an iterable of template names is given, otherwise to
+ :meth:`get_template`.
+
+ .. versionadded:: 2.3
+ """
+ if isinstance(template_name_or_list, string_types):
+ return self.get_template(template_name_or_list, parent, globals)
+ elif isinstance(template_name_or_list, Template):
+ return template_name_or_list
+ return self.select_template(template_name_or_list, parent, globals)
+
+ def from_string(self, source, globals=None, template_class=None):
+ """Load a template from a string. This parses the source given and
+ returns a :class:`Template` object.
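+
+ Example usage (a small sketch)::
+
+ >>> env = Environment()
+ >>> template = env.from_string('Hello {{ name }}!')
+ >>> template.render(name='World') == u'Hello World!'
+ True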
+ """
+ globals = self.make_globals(globals)
+ cls = template_class or self.template_class
+ return cls.from_code(self, self.compile(source), globals, None)
+
+ def make_globals(self, d):
+ """Return a dict for the globals."""
+ if not d:
+ return self.globals
+ return dict(self.globals, **d)
+
+
+class Template(object):
+ """The central template object. This class represents a compiled template
+ and is used to evaluate it.
+
+ Normally the template object is generated from an :class:`Environment`,
+ but this class also has a constructor that makes it possible to create
+ a template instance directly. It takes the same arguments as the
+ environment constructor, but it's not possible to specify a loader.
+
+ Every template object has a few methods and members that are guaranteed
+ to exist. However it's important that a template object should be
+ considered immutable. Modifications on the object are not supported.
+
+ Template objects created from the constructor rather than an environment
+ do have an `environment` attribute that points to a temporary environment
+ that is probably shared with other templates created with the constructor
+ and compatible settings.
+
+ >>> template = Template('Hello {{ name }}!')
+ >>> template.render(name='John Doe') == u'Hello John Doe!'
+ True
+ >>> stream = template.stream(name='John Doe')
+ >>> next(stream) == u'Hello John Doe!'
+ True
+ >>> next(stream)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ """
+
+ def __new__(cls, source,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ enable_async=False):
+ env = get_spontaneous_environment(
+ block_start_string, block_end_string, variable_start_string,
+ variable_end_string, comment_start_string, comment_end_string,
+ line_statement_prefix, line_comment_prefix, trim_blocks,
+ lstrip_blocks, newline_sequence, keep_trailing_newline,
+ frozenset(extensions), optimized, undefined, finalize, autoescape,
+ None, 0, False, None, enable_async)
+ return env.from_string(source, template_class=cls)
+
+ @classmethod
+ def from_code(cls, environment, code, globals, uptodate=None):
+ """Creates a template object from compiled code and the globals. This
+ is used by the loaders and environment to create a template object.
+ """
+ namespace = {
+ 'environment': environment,
+ '__file__': code.co_filename
+ }
+ exec(code, namespace)
+ rv = cls._from_namespace(environment, namespace, globals)
+ rv._uptodate = uptodate
+ return rv
+
+ @classmethod
+ def from_module_dict(cls, environment, module_dict, globals):
+ """Creates a template object from a module. This is used by the
+ module loader to create a template object.
+
+ .. versionadded:: 2.4
+ """
+ return cls._from_namespace(environment, module_dict, globals)
+
+ @classmethod
+ def _from_namespace(cls, environment, namespace, globals):
+ t = object.__new__(cls)
+ t.environment = environment
+ t.globals = globals
+ t.name = namespace['name']
+ t.filename = namespace['__file__']
+ t.blocks = namespace['blocks']
+
+ # render function and module
+ t.root_render_func = namespace['root']
+ t._module = None
+
+ # debug and loader helpers
+ t._debug_info = namespace['debug_info']
+ t._uptodate = None
+
+ # store the reference
+ namespace['environment'] = environment
+ namespace['__jinja_template__'] = t
+
+ return t
+
+ def render(self, *args, **kwargs):
+ """This method accepts the same arguments as the `dict` constructor:
+ A dict, a dict subclass or some keyword arguments. If no arguments
+ are given the context will be empty. These two calls do the same::
+
+ template.render(knights='that say nih')
+ template.render({'knights': 'that say nih'})
+
+ This will return the rendered template as unicode string.
+ """
+ vars = dict(*args, **kwargs)
+ try:
+ return concat(self.root_render_func(self.new_context(vars)))
+ except Exception:
+ exc_info = sys.exc_info()
+ return self.environment.handle_exception(exc_info, True)
+
+ def render_async(self, *args, **kwargs):
+ """This works similar to :meth:`render` but returns a coroutine
+ that when awaited returns the entire rendered template string. This
+ requires the async feature to be enabled.
+
+ Example usage::
+
+ await template.render_async(knights='that say nih; asynchronously')
+ """
+ # see asyncsupport for the actual implementation
+ raise NotImplementedError('This feature is not available for this '
+ 'version of Python')
+
+ def stream(self, *args, **kwargs):
+ """Works exactly like :meth:`generate` but returns a
+ :class:`TemplateStream`.
+ """
+ return TemplateStream(self.generate(*args, **kwargs))
+
+ def generate(self, *args, **kwargs):
+ """For very large templates it can be useful to not render the whole
+ template at once but evaluate each statement after another and yield
+ piece for piece. This method basically does exactly that and returns
+ a generator that yields one item after another as unicode strings.
+
+ It accepts the same arguments as :meth:`render`.
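+
+ Example usage (a sketch; ``write`` stands in for whatever consumes
+ the output, e.g. a WSGI write callable)::
+
+ for event in template.generate(users=users):
+     write(event)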
+ """
+ vars = dict(*args, **kwargs)
+ try:
+ for event in self.root_render_func(self.new_context(vars)):
+ yield event
+ except Exception:
+ exc_info = sys.exc_info()
+ else:
+ return
+ yield self.environment.handle_exception(exc_info, True)
+
+ def generate_async(self, *args, **kwargs):
+ """An async version of :meth:`generate`. Works very similarly but
+ returns an async iterator instead.
+ """
+ # see asyncsupport for the actual implementation
+ raise NotImplementedError('This feature is not available for this '
+ 'version of Python')
+
+ def new_context(self, vars=None, shared=False, locals=None):
+ """Create a new :class:`Context` for this template. The vars
+ provided will be passed to the template. Per default the globals
+ are added to the context. If shared is set to `True` the data
+ is passed as is to the context without adding the globals.
+
+ `locals` can be a dict of local variables for internal usage.
+ """
+ return new_context(self.environment, self.name, self.blocks,
+ vars, shared, self.globals, locals)
+
+ def make_module(self, vars=None, shared=False, locals=None):
+ """This method works like the :attr:`module` attribute when called
+ without arguments but it will evaluate the template on every call
+ rather than caching it. It's also possible to provide
+ a dict which is then used as context. The arguments are the same
+ as for the :meth:`new_context` method.
+ """
+ return TemplateModule(self, self.new_context(vars, shared, locals))
+
+ def make_module_async(self, vars=None, shared=False, locals=None):
+ """As template module creation can invoke template code for
+ asynchronous executions, this method must be used instead of the
+ normal :meth:`make_module` one. Likewise the module attribute
+ becomes unavailable in async mode.
+ """
+ # see asyncsupport for the actual implementation
+ raise NotImplementedError('This feature is not available for this '
+ 'version of Python')
+
+ @internalcode
+ def _get_default_module(self):
+ if self._module is not None:
+ return self._module
+ self._module = rv = self.make_module()
+ return rv
+
+ @property
+ def module(self):
+ """The template as module. This is used for imports in the
+ template runtime but is also useful if one wants to access
+ exported template variables from the Python layer:
+
+ >>> t = Template('{% macro foo() %}42{% endmacro %}23')
+ >>> str(t.module)
+ '23'
+ >>> t.module.foo() == u'42'
+ True
+
+ This attribute is not available if async mode is enabled.
+ """
+ return self._get_default_module()
+
+ def get_corresponding_lineno(self, lineno):
+ """Return the source line number of a line number in the
+ generated bytecode as they are not in sync.
+ """
+ for template_line, code_line in reversed(self.debug_info):
+ if code_line <= lineno:
+ return template_line
+ return 1
+
+ @property
+ def is_up_to_date(self):
+ """If this variable is `False` there is a newer version available."""
+ if self._uptodate is None:
+ return True
+ return self._uptodate()
+
+ @property
+ def debug_info(self):
+ """The debug info mapping."""
+ return [tuple(imap(int, x.split('='))) for x in
+ self._debug_info.split('&')]
+
+ def __repr__(self):
+ if self.name is None:
+ name = 'memory:%x' % id(self)
+ else:
+ name = repr(self.name)
+ return '<%s %s>' % (self.__class__.__name__, name)
+
+
+@implements_to_string
+class TemplateModule(object):
+ """Represents an imported template. All the exported names of the
+ template are available as attributes on this object. Additionally
+ converting it into a unicode or byte string renders the contents.
+ """
+
+ def __init__(self, template, context, body_stream=None):
+ if body_stream is None:
+ if context.environment.is_async:
+ raise RuntimeError('Async mode requires a body stream '
+ 'to be passed to a template module. Use '
+ 'the async methods of the API you are '
+ 'using.')
+ body_stream = list(template.root_render_func(context))
+ self._body_stream = body_stream
+ self.__dict__.update(context.get_exported())
+ self.__name__ = template.name
+
+ def __html__(self):
+ return Markup(concat(self._body_stream))
+
+ def __str__(self):
+ return concat(self._body_stream)
+
+ def __repr__(self):
+ if self.__name__ is None:
+ name = 'memory:%x' % id(self)
+ else:
+ name = repr(self.__name__)
+ return '<%s %s>' % (self.__class__.__name__, name)
+
+
+class TemplateExpression(object):
+ """The :meth:`jinja2.Environment.compile_expression` method returns an
+ instance of this object. It wraps a template built from a single
+ expression and provides expression-like access to it.
+ """
+
+ def __init__(self, template, undefined_to_none):
+ self._template = template
+ self._undefined_to_none = undefined_to_none
+
+ def __call__(self, *args, **kwargs):
+ context = self._template.new_context(dict(*args, **kwargs))
+ consume(self._template.root_render_func(context))
+ rv = context.vars['result']
+ if self._undefined_to_none and isinstance(rv, Undefined):
+ rv = None
+ return rv
+
+
+@implements_iterator
+class TemplateStream(object):
+ """A template stream works pretty much like an ordinary python generator
+ but it can buffer multiple items to reduce the number of total iterations.
+ Per default the output is unbuffered which means that for every unbuffered
+ instruction in the template one unicode string is yielded.
+
+ If buffering is enabled with a buffer size of 5, five items are combined
+ into a new unicode string. This is mainly useful if you are streaming
+ big templates to a client via WSGI which flushes after each iteration.
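+
+ Example usage (a sketch; ``template`` is assumed to be a loaded
+ :class:`Template`)::
+
+ stream = template.stream(users=users)
+ stream.enable_buffering(size=10)
+ stream.dump('output.html')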
+ """
+
+ def __init__(self, gen):
+ self._gen = gen
+ self.disable_buffering()
+
+ def dump(self, fp, encoding=None, errors='strict'):
+ """Dump the complete stream into a file or file-like object.
+ Per default unicode strings are written, if you want to encode
+ before writing specify an `encoding`.
+
+ Example usage::
+
+ Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
+ """
+ close = False
+ if isinstance(fp, string_types):
+ if encoding is None:
+ encoding = 'utf-8'
+ fp = open(fp, 'wb')
+ close = True
+ try:
+ if encoding is not None:
+ iterable = (x.encode(encoding, errors) for x in self)
+ else:
+ iterable = self
+ if hasattr(fp, 'writelines'):
+ fp.writelines(iterable)
+ else:
+ for item in iterable:
+ fp.write(item)
+ finally:
+ if close:
+ fp.close()
+
+ def disable_buffering(self):
+ """Disable the output buffering."""
+ self._next = partial(next, self._gen)
+ self.buffered = False
+
+ def _buffered_generator(self, size):
+ buf = []
+ c_size = 0
+ push = buf.append
+
+ while 1:
+ try:
+ while c_size < size:
+ c = next(self._gen)
+ push(c)
+ if c:
+ c_size += 1
+ except StopIteration:
+ if not c_size:
+ return
+ yield concat(buf)
+ del buf[:]
+ c_size = 0
+
+ def enable_buffering(self, size=5):
+ """Enable buffering. Buffer `size` items before yielding them."""
+ if size <= 1:
+ raise ValueError('buffer size too small')
+
+ self.buffered = True
+ self._next = partial(next, self._buffered_generator(size))
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+
+# hook in default template class. if anyone reads this comment: ignore that
+# it's possible to use custom templates ;-)
+Environment.template_class = Template
diff --git a/python/jinja2/exceptions.py b/python/jinja2/exceptions.py
new file mode 100644
index 0000000..c018a33
--- /dev/null
+++ b/python/jinja2/exceptions.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.exceptions
+ ~~~~~~~~~~~~~~~~~
+
+ Jinja exceptions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import imap, text_type, PY2, implements_to_string
+
+
+class TemplateError(Exception):
+ """Baseclass for all template errors."""
+
+ if PY2:
+ def __init__(self, message=None):
+ if message is not None:
+ message = text_type(message).encode('utf-8')
+ Exception.__init__(self, message)
+
+ @property
+ def message(self):
+ if self.args:
+ message = self.args[0]
+ if message is not None:
+ return message.decode('utf-8', 'replace')
+
+ def __unicode__(self):
+ return self.message or u''
+ else:
+ def __init__(self, message=None):
+ Exception.__init__(self, message)
+
+ @property
+ def message(self):
+ if self.args:
+ message = self.args[0]
+ if message is not None:
+ return message
+
+
+@implements_to_string
+class TemplateNotFound(IOError, LookupError, TemplateError):
+ """Raised if a template does not exist."""
+
+ # looks weird, but removes the warning descriptor that just
+ # bogusly warns us about message being deprecated
+ message = None
+
+ def __init__(self, name, message=None):
+ IOError.__init__(self)
+ if message is None:
+ message = name
+ self.message = message
+ self.name = name
+ self.templates = [name]
+
+ def __str__(self):
+ return self.message
+
+
+class TemplatesNotFound(TemplateNotFound):
+ """Like :class:`TemplateNotFound` but raised if multiple templates
+ are selected. This is a subclass of :class:`TemplateNotFound`
+ exception, so just catching the base exception will catch both.
+
+ .. versionadded:: 2.2
+ """
+
+ def __init__(self, names=(), message=None):
+ if message is None:
+ message = u'none of the templates given were found: ' + \
+ u', '.join(imap(text_type, names))
+ TemplateNotFound.__init__(self, names and names[-1] or None, message)
+ self.templates = list(names)
+
+
+@implements_to_string
+class TemplateSyntaxError(TemplateError):
+ """Raised to tell the user that there is a problem with the template."""
+
+ def __init__(self, message, lineno, name=None, filename=None):
+ TemplateError.__init__(self, message)
+ self.lineno = lineno
+ self.name = name
+ self.filename = filename
+ self.source = None
+
+ # this is set to True if the debug.translate_syntax_error
+ # function translated the syntax error into a new traceback
+ self.translated = False
+
+ def __str__(self):
+ # for translated errors we only return the message
+ if self.translated:
+ return self.message
+
+ # otherwise attach some stuff
+ location = 'line %d' % self.lineno
+ name = self.filename or self.name
+ if name:
+ location = 'File "%s", %s' % (name, location)
+ lines = [self.message, ' ' + location]
+
+ # if the source is set, add the line to the output
+ if self.source is not None:
+ try:
+ line = self.source.splitlines()[self.lineno - 1]
+ except IndexError:
+ line = None
+ if line:
+ lines.append(' ' + line.strip())
+
+ return u'\n'.join(lines)
+
+
+class TemplateAssertionError(TemplateSyntaxError):
+ """Like a template syntax error, but covers cases where something in the
+ template caused an error at compile time that wasn't necessarily caused
+ by a syntax error. However it's a direct subclass of
+ :exc:`TemplateSyntaxError` and has the same attributes.
+ """
+
+
+class TemplateRuntimeError(TemplateError):
+ """A generic runtime error in the template engine. Under some situations
+ Jinja may raise this exception.
+ """
+
+
+class UndefinedError(TemplateRuntimeError):
+ """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+ """Raised if a template tries to do something insecure if the
+ sandbox is enabled.
+ """
+
+
+class FilterArgumentError(TemplateRuntimeError):
+ """This error is raised if a filter was called with inappropriate
+ arguments.
+ """
diff --git a/python/jinja2/ext.py b/python/jinja2/ext.py
new file mode 100644
index 0000000..0734a84
--- /dev/null
+++ b/python/jinja2/ext.py
@@ -0,0 +1,627 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.ext
+ ~~~~~~~~~~
+
+ Jinja extensions allow adding custom tags similar to the way Django custom
+ tags work. By default two example extensions exist: an i18n and a cache
+ extension.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+import re
+
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+ KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.environment import Environment
+from jinja2.runtime import concat
+from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
+from jinja2.utils import contextfunction, import_string, Markup
+from jinja2._compat import with_metaclass, string_types, iteritems
+
+
+# the only real useful gettext functions for a Jinja template. Note
+# that ugettext must be assigned to gettext as Jinja doesn't support
+# non unicode strings.
+GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+
+
+class ExtensionRegistry(type):
+ """Gives the extension an unique identifier."""
+
+ def __new__(cls, name, bases, d):
+ rv = type.__new__(cls, name, bases, d)
+ rv.identifier = rv.__module__ + '.' + rv.__name__
+ return rv
+
+
+class Extension(with_metaclass(ExtensionRegistry, object)):
+ """Extensions can be used to add extra functionality to the Jinja template
+ system at the parser level. Custom extensions are bound to an environment
+ but may not store environment specific data on `self`. The reason for
+ this is that an extension can be bound to another environment (for
+ overlays) by creating a copy and reassigning the `environment` attribute.
+
+ As extensions are created by the environment they cannot accept any
+ arguments for configuration. One may want to work around that by using
+ a factory function, but that is not possible as extensions are identified
+ by their import name. The correct way to configure the extension is
+ storing the configuration values on the environment. Because the
+ environment then ends up acting as central configuration storage, the
+ attribute names may clash, which is why extensions have to ensure that
+ the names they choose for configuration are not too generic. ``prefix``
+ for example is a terrible name, ``fragment_cache_prefix`` on the other
+ hand is a good name as it includes the name of the extension (fragment
+ cache).
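+
+ A small sketch of that configuration pattern (the extension and the
+ ``fragment_cache_prefix`` value are purely illustrative)::
+
+ class FragmentCacheExtension(Extension):
+     tags = set(['cache'])
+
+     def __init__(self, environment):
+         Extension.__init__(self, environment)
+         environment.extend(fragment_cache_prefix='')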
+ """
+
+ #: if this extension parses tags, this is the set of tags it is listening for.
+ tags = set()
+
+ #: the priority of that extension. This is especially useful for
+ #: extensions that preprocess values. A lower value means higher
+ #: priority.
+ #:
+ #: .. versionadded:: 2.4
+ priority = 100
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def bind(self, environment):
+ """Create a copy of this extension bound to another environment."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.environment = environment
+ return rv
+
+ def preprocess(self, source, name, filename=None):
+ """This method is called before the actual lexing and can be used to
+ preprocess the source. The `filename` is optional. The return value
+ must be the preprocessed source.
+ """
+ return source
+
+ def filter_stream(self, stream):
+ """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+ to filter tokens returned. This method has to return an iterable of
+ :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
+ :class:`~jinja2.lexer.TokenStream`.
+
+ In the `ext` folder of the Jinja2 source distribution there is a file
+ called `inlinegettext.py` which implements a filter that utilizes this
+ method.
+ """
+ return stream
+
+ def parse(self, parser):
+ """If any of the :attr:`tags` matched this method is called with the
+ parser as first argument. The token the parser stream is pointing at
+ is the name token that matched. This method has to return one or a
+ list of multiple nodes.
+ """
+ raise NotImplementedError()
+
+ def attr(self, name, lineno=None):
+ """Return an attribute node for the current extension. This is useful
+ to pass constants on extensions to generated template code.
+
+ ::
+
+ self.attr('_my_attribute', lineno=lineno)
+ """
+ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+ def call_method(self, name, args=None, kwargs=None, dyn_args=None,
+ dyn_kwargs=None, lineno=None):
+ """Call a method of the extension. This is a shortcut for
+ :meth:`attr` + :class:`jinja2.nodes.Call`.
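+
+ A small sketch (``_my_method`` is a hypothetical method on the
+ extension)::
+
+ self.call_method('_my_method', [nodes.Const(42)], lineno=lineno)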
+ """
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = []
+ return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
+ dyn_args, dyn_kwargs, lineno=lineno)
+
+
+@contextfunction
+def _gettext_alias(__context, *args, **kwargs):
+ return __context.call(__context.resolve('gettext'), *args, **kwargs)
+
+
+def _make_new_gettext(func):
+ @contextfunction
+ def gettext(__context, __string, **variables):
+ rv = __context.call(func, __string)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv % variables
+ return gettext
+
+
+def _make_new_ngettext(func):
+ @contextfunction
+ def ngettext(__context, __singular, __plural, __num, **variables):
+ variables.setdefault('num', __num)
+ rv = __context.call(func, __singular, __plural, __num)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv % variables
+ return ngettext
+
+
+class InternationalizationExtension(Extension):
+ """This extension adds gettext support to Jinja2."""
+ tags = set(['trans'])
+
+ # TODO: the i18n extension is currently reevaluating values in a few
+ # situations. Take this example:
+ # {% trans count=something() %}{{ count }} foo{% pluralize
+ # %}{{ count }} fooss{% endtrans %}
+ # something is called twice here. One time for the gettext value and
+ # the other time for the n-parameter of the ngettext function.
+
+ def __init__(self, environment):
+ Extension.__init__(self, environment)
+ environment.globals['_'] = _gettext_alias
+ environment.extend(
+ install_gettext_translations=self._install,
+ install_null_translations=self._install_null,
+ install_gettext_callables=self._install_callables,
+ uninstall_gettext_translations=self._uninstall,
+ extract_translations=self._extract,
+ newstyle_gettext=False
+ )
+
+ def _install(self, translations, newstyle=None):
+ gettext = getattr(translations, 'ugettext', None)
+ if gettext is None:
+ gettext = translations.gettext
+ ngettext = getattr(translations, 'ungettext', None)
+ if ngettext is None:
+ ngettext = translations.ngettext
+ self._install_callables(gettext, ngettext, newstyle)
+
+ def _install_null(self, newstyle=None):
+ self._install_callables(
+ lambda x: x,
+ lambda s, p, n: (n != 1 and (p,) or (s,))[0],
+ newstyle
+ )
+
+ def _install_callables(self, gettext, ngettext, newstyle=None):
+ if newstyle is not None:
+ self.environment.newstyle_gettext = newstyle
+ if self.environment.newstyle_gettext:
+ gettext = _make_new_gettext(gettext)
+ ngettext = _make_new_ngettext(ngettext)
+ self.environment.globals.update(
+ gettext=gettext,
+ ngettext=ngettext
+ )
+
+ def _uninstall(self, translations):
+ for key in 'gettext', 'ngettext':
+ self.environment.globals.pop(key, None)
+
+ def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
+ if isinstance(source, string_types):
+ source = self.environment.parse(source)
+ return extract_from_ast(source, gettext_functions)
+
+ def parse(self, parser):
+ """Parse a translatable tag."""
+ lineno = next(parser.stream).lineno
+ num_called_num = False
+
+ # find all the variables referenced. Additionally a variable can be
+ # defined in the body of the trans block too, but this is checked at
+ # a later stage.
+ plural_expr = None
+ plural_expr_assignment = None
+ variables = {}
+ trimmed = None
+ while parser.stream.current.type != 'block_end':
+ if variables:
+ parser.stream.expect('comma')
+
+ # skip colon for python compatibility
+ if parser.stream.skip_if('colon'):
+ break
+
+ name = parser.stream.expect('name')
+ if name.value in variables:
+ parser.fail('translatable variable %r defined twice.' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
+
+ # expressions
+ if parser.stream.current.type == 'assign':
+ next(parser.stream)
+ variables[name.value] = var = parser.parse_expression()
+ elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
+ trimmed = name.value == 'trimmed'
+ continue
+ else:
+ variables[name.value] = var = nodes.Name(name.value, 'load')
+
+ if plural_expr is None:
+ if isinstance(var, nodes.Call):
+ plural_expr = nodes.Name('_trans', 'load')
+ variables[name.value] = plural_expr
+ plural_expr_assignment = nodes.Assign(
+ nodes.Name('_trans', 'store'), var)
+ else:
+ plural_expr = var
+ num_called_num = name.value == 'num'
+
+ parser.stream.expect('block_end')
+
+ plural = None
+ have_plural = False
+ referenced = set()
+
+ # now parse until endtrans or pluralize
+ singular_names, singular = self._parse_block(parser, True)
+ if singular_names:
+ referenced.update(singular_names)
+ if plural_expr is None:
+ plural_expr = nodes.Name(singular_names[0], 'load')
+ num_called_num = singular_names[0] == 'num'
+
+ # if we have a pluralize block, we parse that too
+ if parser.stream.current.test('name:pluralize'):
+ have_plural = True
+ next(parser.stream)
+ if parser.stream.current.type != 'block_end':
+ name = parser.stream.expect('name')
+ if name.value not in variables:
+ parser.fail('unknown variable %r for pluralization' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
+ plural_expr = variables[name.value]
+ num_called_num = name.value == 'num'
+ parser.stream.expect('block_end')
+ plural_names, plural = self._parse_block(parser, False)
+ next(parser.stream)
+ referenced.update(plural_names)
+ else:
+ next(parser.stream)
+
+ # register free names as simple name expressions
+ for var in referenced:
+ if var not in variables:
+ variables[var] = nodes.Name(var, 'load')
+
+ if not have_plural:
+ plural_expr = None
+ elif plural_expr is None:
+ parser.fail('pluralize without variables', lineno)
+
+ if trimmed is None:
+ trimmed = self.environment.policies['ext.i18n.trimmed']
+ if trimmed:
+ singular = self._trim_whitespace(singular)
+ if plural:
+ plural = self._trim_whitespace(plural)
+
+ node = self._make_node(singular, plural, variables, plural_expr,
+ bool(referenced),
+ num_called_num and have_plural)
+ node.set_lineno(lineno)
+ if plural_expr_assignment is not None:
+ return [plural_expr_assignment, node]
+ else:
+ return node
+
+ def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
+ return _ws_re.sub(' ', string.strip())
+
+ def _parse_block(self, parser, allow_pluralize):
+ """Parse until the next block tag with a given name."""
+ referenced = []
+ buf = []
+ while 1:
+ if parser.stream.current.type == 'data':
+ buf.append(parser.stream.current.value.replace('%', '%%'))
+ next(parser.stream)
+ elif parser.stream.current.type == 'variable_begin':
+ next(parser.stream)
+ name = parser.stream.expect('name').value
+ referenced.append(name)
+ buf.append('%%(%s)s' % name)
+ parser.stream.expect('variable_end')
+ elif parser.stream.current.type == 'block_begin':
+ next(parser.stream)
+ if parser.stream.current.test('name:endtrans'):
+ break
+ elif parser.stream.current.test('name:pluralize'):
+ if allow_pluralize:
+ break
+ parser.fail('a translatable section can have only one '
+ 'pluralize section')
+ parser.fail('control structures in translatable sections are '
+ 'not allowed')
+ elif parser.stream.eos:
+ parser.fail('unclosed translation block')
+ else:
+ assert False, 'internal parser error'
+
+ return referenced, concat(buf)
+
+ def _make_node(self, singular, plural, variables, plural_expr,
+ vars_referenced, num_called_num):
+ """Generates a useful node from the data provided."""
+ # the %% escaping is only needed for old style gettext invocations
+ # that actually reference variables; if none are referenced, undo it.
+ if not vars_referenced and not self.environment.newstyle_gettext:
+ singular = singular.replace('%%', '%')
+ if plural:
+ plural = plural.replace('%%', '%')
+
+ # singular only:
+ if plural_expr is None:
+ gettext = nodes.Name('gettext', 'load')
+ node = nodes.Call(gettext, [nodes.Const(singular)],
+ [], None, None)
+
+ # singular and plural
+ else:
+ ngettext = nodes.Name('ngettext', 'load')
+ node = nodes.Call(ngettext, [
+ nodes.Const(singular),
+ nodes.Const(plural),
+ plural_expr
+ ], [], None, None)
+
+ # in case newstyle gettext is used, the method is powerful
+ # enough to handle the variable expansion and autoescape
+ # handling itself
+ if self.environment.newstyle_gettext:
+ for key, value in iteritems(variables):
+ # the function adds that later anyways in case num was
+ # called num, so just skip it.
+ if num_called_num and key == 'num':
+ continue
+ node.kwargs.append(nodes.Keyword(key, value))
+
+ # otherwise do that here
+ else:
+ # mark the return value as safe if we are in an
+ # environment with autoescaping turned on
+ node = nodes.MarkSafeIfAutoescape(node)
+ if variables:
+ node = nodes.Mod(node, nodes.Dict([
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]))
+ return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+ """Adds a `do` tag to Jinja2 that works like the print statement just
+ that it doesn't print the return value.
+ """
+ tags = set(['do'])
+
+ def parse(self, parser):
+ node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
+ node.node = parser.parse_tuple()
+ return node
+
+
+class LoopControlExtension(Extension):
+ """Adds break and continue to the template engine."""
+ tags = set(['break', 'continue'])
+
+ def parse(self, parser):
+ token = next(parser.stream)
+ if token.value == 'break':
+ return nodes.Break(lineno=token.lineno)
+ return nodes.Continue(lineno=token.lineno)
+
+
+class WithExtension(Extension):
+ pass
+
+
+class AutoEscapeExtension(Extension):
+ pass
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
+ babel_style=True):
+ """Extract localizable strings from the given template node. Per
+ default this function returns matches in babel style that means non string
+ parameters as well as keyword arguments are returned as `None`. This
+ allows Babel to figure out what you really meant if you are using
+ gettext functions that allow keyword arguments for placeholder expansion.
+ If you don't want that behavior set the `babel_style` parameter to `False`
+ which causes only strings to be returned and parameters are always stored
+ in tuples. As a consequence invalid gettext calls (calls without a single
+ string parameter or string parameters after non-string parameters) are
+ skipped.
+
+ This example explains the behavior:
+
+ >>> from jinja2 import Environment
+ >>> env = Environment()
+ >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+ >>> list(extract_from_ast(node))
+ [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+ >>> list(extract_from_ast(node, babel_style=False))
+ [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+ For every string found this function yields a ``(lineno, function,
+ message)`` tuple, where:
+
+ * ``lineno`` is the number of the line on which the string was found,
+ * ``function`` is the name of the ``gettext`` function used (if the
+ string was extracted from embedded Python code), and
+ * ``message`` is the string itself (a ``unicode`` object, or a tuple
+ of ``unicode`` objects for functions with multiple string arguments).
+
+ This extraction function operates on the AST and is therefore unable
+ to extract any comments. For comment support you have to use the babel
+ extraction interface or extract comments yourself.
+ """
+ for node in node.find_all(nodes.Call):
+ if not isinstance(node.node, nodes.Name) or \
+ node.node.name not in gettext_functions:
+ continue
+
+ strings = []
+ for arg in node.args:
+ if isinstance(arg, nodes.Const) and \
+ isinstance(arg.value, string_types):
+ strings.append(arg.value)
+ else:
+ strings.append(None)
+
+ for arg in node.kwargs:
+ strings.append(None)
+ if node.dyn_args is not None:
+ strings.append(None)
+ if node.dyn_kwargs is not None:
+ strings.append(None)
+
+ if not babel_style:
+ strings = tuple(x for x in strings if x is not None)
+ if not strings:
+ continue
+ else:
+ if len(strings) == 1:
+ strings = strings[0]
+ else:
+ strings = tuple(strings)
+ yield node.lineno, node.node.name, strings
+
+
+class _CommentFinder(object):
+ """Helper class to find comments in a token stream. Can only
+ find comments for gettext calls forwards. Once the comment
+ from line 4 is found, a comment for line 1 will not return a
+ usable value.
+ """
+
+ def __init__(self, tokens, comment_tags):
+ self.tokens = tokens
+ self.comment_tags = comment_tags
+ self.offset = 0
+ self.last_lineno = 0
+
+ def find_backwards(self, offset):
+ try:
+ for _, token_type, token_value in \
+ reversed(self.tokens[self.offset:offset]):
+ if token_type in ('comment', 'linecomment'):
+ try:
+ prefix, comment = token_value.split(None, 1)
+ except ValueError:
+ continue
+ if prefix in self.comment_tags:
+ return [comment.rstrip()]
+ return []
+ finally:
+ self.offset = offset
+
+ def find_comments(self, lineno):
+ if not self.comment_tags or self.last_lineno > lineno:
+ return []
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
+ if token_lineno > lineno:
+ return self.find_backwards(self.offset + idx)
+ return self.find_backwards(len(self.tokens))
+
+
+def babel_extract(fileobj, keywords, comment_tags, options):
+ """Babel extraction method for Jinja templates.
+
+ .. versionchanged:: 2.3
+ Basic support for translation comments was added. If `comment_tags`
+ is now set to a list of keywords for extraction, the extractor will
+ try to find the best preceding comment that begins with one of the
+ keywords. For best results, make sure to not have more than one
+ gettext call in one line of code and the matching comment in the
+ same line or the line before.
+
+ .. versionchanged:: 2.5.1
+ The `newstyle_gettext` flag can be set to `True` to enable newstyle
+ gettext calls.
+
+ .. versionchanged:: 2.7
+ A `silent` option can now be provided. If set to `False` template
+ syntax errors are propagated instead of being ignored.
+
+ :param fileobj: the file-like object the messages should be extracted from
+ :param keywords: a list of keywords (i.e. function names) that should be
+ recognized as translation functions
+ :param comment_tags: a list of translator tags to search for and include
+ in the results.
+ :param options: a dictionary of additional options (optional)
+ :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+ (comments will be empty currently)
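+
+ A small usage sketch (normally Babel invokes this through its
+ extraction mapping rather than calling it directly; the path and the
+ ``NOTE:`` comment tag are illustrative)::
+
+ with open('templates/index.html', 'rb') as f:
+     for lineno, func, message, comments in babel_extract(
+             f, GETTEXT_FUNCTIONS, ['NOTE:'], {}):
+         print(lineno, func, message)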
+ """
+ extensions = set()
+ for extension in options.get('extensions', '').split(','):
+ extension = extension.strip()
+ if not extension:
+ continue
+ extensions.add(import_string(extension))
+ if InternationalizationExtension not in extensions:
+ extensions.add(InternationalizationExtension)
+
+ def getbool(options, key, default=False):
+ return options.get(key, str(default)).lower() in \
+ ('1', 'on', 'yes', 'true')
+
+ silent = getbool(options, 'silent', True)
+ environment = Environment(
+ options.get('block_start_string', BLOCK_START_STRING),
+ options.get('block_end_string', BLOCK_END_STRING),
+ options.get('variable_start_string', VARIABLE_START_STRING),
+ options.get('variable_end_string', VARIABLE_END_STRING),
+ options.get('comment_start_string', COMMENT_START_STRING),
+ options.get('comment_end_string', COMMENT_END_STRING),
+ options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
+ options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
+ getbool(options, 'trim_blocks', TRIM_BLOCKS),
+ getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
+ NEWLINE_SEQUENCE,
+ getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
+ frozenset(extensions),
+ cache_size=0,
+ auto_reload=False
+ )
+
+ if getbool(options, 'trimmed'):
+ environment.policies['ext.i18n.trimmed'] = True
+ if getbool(options, 'newstyle_gettext'):
+ environment.newstyle_gettext = True
+
+ source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+ try:
+ node = environment.parse(source)
+ tokens = list(environment.lex(environment.preprocess(source)))
+ except TemplateSyntaxError as e:
+ if not silent:
+ raise
+ # skip templates with syntax errors
+ return
+
+ finder = _CommentFinder(tokens, comment_tags)
+ for lineno, func, message in extract_from_ast(node, keywords):
+ yield lineno, func, message, finder.find_comments(lineno)
+
+
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
+with_ = WithExtension
+autoescape = AutoEscapeExtension
diff --git a/python/jinja2/filters.py b/python/jinja2/filters.py
new file mode 100644
index 0000000..267dddd
--- /dev/null
+++ b/python/jinja2/filters.py
@@ -0,0 +1,1190 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.filters
+ ~~~~~~~~~~~~~~
+
+ Bundled jinja filters.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+import math
+import random
+import warnings
+
+from itertools import groupby, chain
+from collections import namedtuple
+from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
+ unicode_urlencode, htmlsafe_json_dumps
+from jinja2.runtime import Undefined
+from jinja2.exceptions import FilterArgumentError
+from jinja2._compat import imap, string_types, text_type, iteritems, PY2
+
+
+_word_re = re.compile(r'\w+', re.UNICODE)
+_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
+
+
+def contextfilter(f):
+ """Decorator for marking context dependent filters. The current
+ :class:`Context` will be passed as first argument.
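+
+ A small sketch (``resolve_name`` is just an illustrative filter)::
+
+ @contextfilter
+ def resolve_name(context, name):
+     return context.resolve(name)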
+ """
+ f.contextfilter = True
+ return f
+
+
+def evalcontextfilter(f):
+ """Decorator for marking eval-context dependent filters. An eval
+ context object is passed as first argument. For more information
+ about the eval context, see :ref:`eval-context`.
+
+ .. versionadded:: 2.4
+ """
+ f.evalcontextfilter = True
+ return f
+
+
+def environmentfilter(f):
+ """Decorator for marking environment dependent filters. The current
+ :class:`Environment` is passed to the filter as first argument.
+ """
+ f.environmentfilter = True
+ return f
+
+
+def ignore_case(value):
+ """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
+ to lowercase and returns other types as-is."""
+ return value.lower() if isinstance(value, string_types) else value
+
+
+def make_attrgetter(environment, attribute, postprocess=None):
+ """Returns a callable that looks up the given attribute from a
+ passed object with the rules of the environment. Dots are allowed
+ to access attributes of attributes. Integer parts in paths are
+ looked up as integers.
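+
+ A small sketch of the dotted-path behaviour (``env`` stands in for an
+ :class:`~jinja2.Environment`)::
+
+ get_name = make_attrgetter(env, 'user.name')
+ # equivalent to env.getitem(env.getitem(item, 'user'), 'name')
+ name = get_name(item)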
+ """
+ if attribute is None:
+ attribute = []
+ elif isinstance(attribute, string_types):
+ attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
+ else:
+ attribute = [attribute]
+
+ def attrgetter(item):
+ for part in attribute:
+ item = environment.getitem(item, part)
+
+ if postprocess is not None:
+ item = postprocess(item)
+
+ return item
+
+ return attrgetter
+
+
+def do_forceescape(value):
+ """Enforce HTML escaping. This will probably double escape variables."""
+ if hasattr(value, '__html__'):
+ value = value.__html__()
+ return escape(text_type(value))
+
+
+def do_urlencode(value):
+ """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
+ dictionaries and regular strings as well as pairwise iterables.
+
+ .. versionadded:: 2.7
+ """
+ itemiter = None
+ if isinstance(value, dict):
+ itemiter = iteritems(value)
+ elif not isinstance(value, string_types):
+ try:
+ itemiter = iter(value)
+ except TypeError:
+ pass
+ if itemiter is None:
+ return unicode_urlencode(value)
+ return u'&'.join(unicode_urlencode(k) + '=' +
+ unicode_urlencode(v, for_qs=True)
+ for k, v in itemiter)
+
+
+@evalcontextfilter
+def do_replace(eval_ctx, s, old, new, count=None):
+ """Return a copy of the value with all occurrences of a substring
+ replaced with a new one. The first argument is the substring
+ that should be replaced, the second is the replacement string.
+ If the optional third argument ``count`` is given, only the first
+ ``count`` occurrences are replaced:
+
+ .. sourcecode:: jinja
+
+ {{ "Hello World"|replace("Hello", "Goodbye") }}
+ -> Goodbye World
+
+ {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
+ -> d'oh, d'oh, aaargh
+ """
+ if count is None:
+ count = -1
+ if not eval_ctx.autoescape:
+ return text_type(s).replace(text_type(old), text_type(new), count)
+ if hasattr(old, '__html__') or hasattr(new, '__html__') and \
+ not hasattr(s, '__html__'):
+ s = escape(s)
+ else:
+ s = soft_unicode(s)
+ return s.replace(soft_unicode(old), soft_unicode(new), count)
+
+
+def do_upper(s):
+ """Convert a value to uppercase."""
+ return soft_unicode(s).upper()
+
+
+def do_lower(s):
+ """Convert a value to lowercase."""
+ return soft_unicode(s).lower()
+
+
+@evalcontextfilter
+def do_xmlattr(_eval_ctx, d, autospace=True):
+ """Create an SGML/XML attribute string based on the items in a dict.
+ All values that are neither `none` nor `undefined` are automatically
+ escaped:
+
+ .. sourcecode:: html+jinja
+
+ <ul{{ {'class': 'my_list', 'missing': none,
+ 'id': 'list-%d'|format(variable)}|xmlattr }}>
+ ...
+ </ul>
+
+ Results in something like this:
+
+ .. sourcecode:: html
+
+ <ul class="my_list" id="list-42">
+ ...
+ </ul>
+
+ As you can see, it automatically prepends a space in front of the item
+ if the filter returned something, unless the second parameter
+ (`autospace`) is false.
+ """
+ rv = u' '.join(
+ u'%s="%s"' % (escape(key), escape(value))
+ for key, value in iteritems(d)
+ if value is not None and not isinstance(value, Undefined)
+ )
+ if autospace and rv:
+ rv = u' ' + rv
+ if _eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+def do_capitalize(s):
+ """Capitalize a value. The first character will be uppercase, all others
+ lowercase.
+ """
+ return soft_unicode(s).capitalize()
+
+
+def do_title(s):
+ """Return a titlecased version of the value. I.e. words will start with
+ uppercase letters, all remaining characters are lowercase.
+ """
+ return ''.join(
+ [item[0].upper() + item[1:].lower()
+ for item in _word_beginning_split_re.split(soft_unicode(s))
+ if item])
+
+
+def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
+ """Sort a dict and yield (key, value) pairs. Because python dicts are
+ unsorted you may want to use this function to order them by either
+ key or value:
+
+ .. sourcecode:: jinja
+
+ {% for item in mydict|dictsort %}
+ sort the dict by key, case insensitive
+
+ {% for item in mydict|dictsort(reverse=true) %}
+ sort the dict by key, case insensitive, reverse order
+
+ {% for item in mydict|dictsort(true) %}
+ sort the dict by key, case sensitive
+
+ {% for item in mydict|dictsort(false, 'value') %}
+ sort the dict by value, case insensitive
+ """
+ if by == 'key':
+ pos = 0
+ elif by == 'value':
+ pos = 1
+ else:
+ raise FilterArgumentError(
+ 'You can only sort by either "key" or "value"'
+ )
+
+ def sort_func(item):
+ value = item[pos]
+
+ if not case_sensitive:
+ value = ignore_case(value)
+
+ return value
+
+ return sorted(value.items(), key=sort_func, reverse=reverse)
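+
+# Illustrative sketch (not part of the upstream module): the filter returns a
+# plain list of (key, value) tuples, sorted case-insensitively by default.
+#
+#   do_dictsort({'B': 1, 'a': 2})              # -> [('a', 2), ('B', 1)]
+#   do_dictsort({'B': 1, 'a': 2}, by='value')  # -> [('B', 1), ('a', 2)]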
+
+
+@environmentfilter
+def do_sort(
+ environment, value, reverse=False, case_sensitive=False, attribute=None
+):
+ """Sort an iterable. Per default it sorts ascending; if you pass it
+ true as the first argument it will reverse the sorting.
+
+ If the iterable is made of strings, the third parameter can be used to
+ control the case sensitivity of the comparison, which is disabled by
+ default.
+
+ .. sourcecode:: jinja
+
+ {% for item in iterable|sort %}
+ ...
+ {% endfor %}
+
+ It is also possible to sort by an attribute (for example to sort
+ by the date of an object) by specifying the `attribute` parameter:
+
+ .. sourcecode:: jinja
+
+ {% for item in iterable|sort(attribute='date') %}
+ ...
+ {% endfor %}
+
+ .. versionchanged:: 2.6
+ The `attribute` parameter was added.
+ """
+ key_func = make_attrgetter(
+ environment, attribute,
+ postprocess=ignore_case if not case_sensitive else None
+ )
+ return sorted(value, key=key_func, reverse=reverse)
+
+
+@environmentfilter
+def do_unique(environment, value, case_sensitive=False, attribute=None):
+ """Returns a list of unique items from the given iterable.
+
+ .. sourcecode:: jinja
+
+ {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
+ -> ['foo', 'bar', 'foobar']
+
+ The unique items are yielded in the same order as their first occurrence in
+ the iterable passed to the filter.
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Filter objects with unique values for this attribute.
+ """
+ getter = make_attrgetter(
+ environment, attribute,
+ postprocess=ignore_case if not case_sensitive else None
+ )
+ seen = set()
+
+ for item in value:
+ key = getter(item)
+
+ if key not in seen:
+ seen.add(key)
+ yield item
+
+
+def _min_or_max(environment, value, func, case_sensitive, attribute):
+ it = iter(value)
+
+ try:
+ first = next(it)
+ except StopIteration:
+ return environment.undefined('No aggregated item, sequence was empty.')
+
+ key_func = make_attrgetter(
+ environment, attribute,
+ ignore_case if not case_sensitive else None
+ )
+ return func(chain([first], it), key=key_func)
+
+
+@environmentfilter
+def do_min(environment, value, case_sensitive=False, attribute=None):
+ """Return the smallest item from the sequence.
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|min }}
+ -> 1
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Get the object with the min value of this attribute.
+ """
+ return _min_or_max(environment, value, min, case_sensitive, attribute)
+
+
+@environmentfilter
+def do_max(environment, value, case_sensitive=False, attribute=None):
+ """Return the largest item from the sequence.
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|max }}
+ -> 3
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Get the object with the max value of this attribute.
+ """
+ return _min_or_max(environment, value, max, case_sensitive, attribute)
+
+
+def do_default(value, default_value=u'', boolean=False):
+ """If the value is undefined it will return the passed default value,
+ otherwise the value of the variable:
+
+ .. sourcecode:: jinja
+
+ {{ my_variable|default('my_variable is not defined') }}
+
+ This will output the value of ``my_variable`` if the variable was
+ defined, otherwise ``'my_variable is not defined'``. If you want
+ to use default with variables that evaluate to false you have to
+ set the second parameter to `true`:
+
+ .. sourcecode:: jinja
+
+ {{ ''|default('the string was empty', true) }}
+ """
+ if isinstance(value, Undefined) or (boolean and not value):
+ return default_value
+ return value
+
+
+@evalcontextfilter
+def do_join(eval_ctx, value, d=u'', attribute=None):
+ """Return a string which is the concatenation of the strings in the
+ sequence. The separator between elements is an empty string per
+ default, you can define it with the optional parameter:
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|join('|') }}
+ -> 1|2|3
+
+ {{ [1, 2, 3]|join }}
+ -> 123
+
+ It is also possible to join certain attributes of an object:
+
+ .. sourcecode:: jinja
+
+ {{ users|join(', ', attribute='username') }}
+
+ .. versionadded:: 2.6
+ The `attribute` parameter was added.
+ """
+ if attribute is not None:
+ value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
+
+ # no automatic escaping? joining is a lot easier then
+ if not eval_ctx.autoescape:
+ return text_type(d).join(imap(text_type, value))
+
+ # if the delimiter doesn't have an html representation we check
+ # if any of the items has. If yes we do a coercion to Markup
+ if not hasattr(d, '__html__'):
+ value = list(value)
+ do_escape = False
+ for idx, item in enumerate(value):
+ if hasattr(item, '__html__'):
+ do_escape = True
+ else:
+ value[idx] = text_type(item)
+ if do_escape:
+ d = escape(d)
+ else:
+ d = text_type(d)
+ return d.join(value)
+
+ # no html involved, do normal joining
+ return soft_unicode(d).join(imap(soft_unicode, value))
+
+
+def do_center(value, width=80):
+ """Centers the value in a field of a given width."""
+ return text_type(value).center(width)
+
+
+@environmentfilter
+def do_first(environment, seq):
+ """Return the first item of a sequence."""
+ try:
+ return next(iter(seq))
+ except StopIteration:
+ return environment.undefined('No first item, sequence was empty.')
+
+
+@environmentfilter
+def do_last(environment, seq):
+ """Return the last item of a sequence."""
+ try:
+ return next(iter(reversed(seq)))
+ except StopIteration:
+ return environment.undefined('No last item, sequence was empty.')
+
+
+@contextfilter
+def do_random(context, seq):
+ """Return a random item from the sequence."""
+ try:
+ return random.choice(seq)
+ except IndexError:
+ return context.environment.undefined('No random item, sequence was empty.')
+
+
+def do_filesizeformat(value, binary=False):
+ """Format the value like a 'human-readable' file size (i.e. 13 kB,
+ 4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
+ Giga, etc.); if the second parameter is set to `True` the binary
+ prefixes are used (Mebi, Gibi).
+ """
+ bytes = float(value)
+ base = binary and 1024 or 1000
+ prefixes = [
+ (binary and 'KiB' or 'kB'),
+ (binary and 'MiB' or 'MB'),
+ (binary and 'GiB' or 'GB'),
+ (binary and 'TiB' or 'TB'),
+ (binary and 'PiB' or 'PB'),
+ (binary and 'EiB' or 'EB'),
+ (binary and 'ZiB' or 'ZB'),
+ (binary and 'YiB' or 'YB')
+ ]
+ if bytes == 1:
+ return '1 Byte'
+ elif bytes < base:
+ return '%d Bytes' % bytes
+ else:
+ for i, prefix in enumerate(prefixes):
+ unit = base ** (i + 2)
+ if bytes < unit:
+ return '%.1f %s' % ((base * bytes / unit), prefix)
+ return '%.1f %s' % ((base * bytes / unit), prefix)
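+
+# Illustrative sketch (not part of the upstream module): decimal vs. binary
+# prefixes, derived from the loop above.
+#
+#   do_filesizeformat(1000000)               # -> '1.0 MB'
+#   do_filesizeformat(1048576, binary=True)  # -> '1.0 MiB'
+#   do_filesizeformat(300)                   # -> '300 Bytes'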
+
+
+def do_pprint(value, verbose=False):
+ """Pretty print a variable. Useful for debugging.
+
+ From Jinja 1.2 onwards you can pass it a parameter. If this parameter
+ is truthy the output will be more verbose (this requires `pretty`).
+ """
+ return pformat(value, verbose=verbose)
+
+
+@evalcontextfilter
+def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
+ target=None, rel=None):
+ """Converts URLs in plain text into clickable links.
+
+ If you pass the filter an additional integer it will shorten the urls
+ to that number. Also a third argument exists that makes the urls
+ "nofollow":
+
+ .. sourcecode:: jinja
+
+ {{ mytext|urlize(40, true) }}
+ links are shortened to 40 chars and defined with rel="nofollow"
+
+ If *target* is specified, the ``target`` attribute will be added to the
+ ``<a>`` tag:
+
+ .. sourcecode:: jinja
+
+ {{ mytext|urlize(40, target='_blank') }}
+
+ .. versionchanged:: 2.8
+ The *target* parameter was added.
+ """
+ policies = eval_ctx.environment.policies
+ rel = set((rel or '').split() or [])
+ if nofollow:
+ rel.add('nofollow')
+ rel.update((policies['urlize.rel'] or '').split())
+ if target is None:
+ target = policies['urlize.target']
+ rel = ' '.join(sorted(rel)) or None
+ rv = urlize(value, trim_url_limit, rel=rel, target=target)
+ if eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+def do_indent(
+ s, width=4, first=False, blank=False, indentfirst=None
+):
+ """Return a copy of the string with each line indented by 4 spaces. The
+ first line and blank lines are not indented by default.
+
+ :param width: Number of spaces to indent by.
+ :param first: Don't skip indenting the first line.
+ :param blank: Don't skip indenting empty lines.
+
+ .. versionchanged:: 2.10
+ Blank lines are not indented by default.
+
+ Rename the ``indentfirst`` argument to ``first``.
+ """
+ if indentfirst is not None:
+ warnings.warn(DeprecationWarning(
+ 'The "indentfirst" argument is renamed to "first".'
+ ), stacklevel=2)
+ first = indentfirst
+
+ s += u'\n' # this quirk is necessary for splitlines method
+ indention = u' ' * width
+
+ if blank:
+ rv = (u'\n' + indention).join(s.splitlines())
+ else:
+ lines = s.splitlines()
+ rv = lines.pop(0)
+
+ if lines:
+ rv += u'\n' + u'\n'.join(
+ indention + line if line else line for line in lines
+ )
+
+ if first:
+ rv = indention + rv
+
+ return rv
+
+
+@environmentfilter
+def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
+ """Return a truncated copy of the string. The length is specified
+ with the first parameter which defaults to ``255``. If the second
+ parameter is ``true`` the filter will cut the text at length. Otherwise
+ it will discard the last word. If the text was in fact
+ truncated it will append an ellipsis sign (``"..."``). If you want a
+ different ellipsis sign than ``"..."`` you can specify it using the
+ third parameter. Strings that only exceed the length by the tolerance
+ margin given in the fourth parameter will not be truncated.
+
+ .. sourcecode:: jinja
+
+ {{ "foo bar baz qux"|truncate(9) }}
+ -> "foo..."
+ {{ "foo bar baz qux"|truncate(9, True) }}
+ -> "foo ba..."
+ {{ "foo bar baz qux"|truncate(11) }}
+ -> "foo bar baz qux"
+ {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
+ -> "foo bar..."
+
+ The default leeway on newer Jinja2 versions is 5 and was 0 before but
+ can be reconfigured globally.
+ """
+ if leeway is None:
+ leeway = env.policies['truncate.leeway']
+ assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
+ assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
+ if len(s) <= length + leeway:
+ return s
+ if killwords:
+ return s[:length - len(end)] + end
+ result = s[:length - len(end)].rsplit(' ', 1)[0]
+ return result + end
+
+
+@environmentfilter
+def do_wordwrap(environment, s, width=79, break_long_words=True,
+ wrapstring=None):
+ """
+ Return a copy of the string passed to the filter wrapped after
+ ``79`` characters. You can override this default using the first
+ parameter. If you set the second parameter to `false` Jinja will not
+ split words apart if they are longer than `width`. By default, the newlines
+ will be the default newlines for the environment, but this can be changed
+ using the wrapstring keyword argument.
+
+ .. versionadded:: 2.7
+ Added support for the `wrapstring` parameter.
+ """
+ if not wrapstring:
+ wrapstring = environment.newline_sequence
+ import textwrap
+ return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words))
+
+
+def do_wordcount(s):
+ """Count the words in that string."""
+ return len(_word_re.findall(s))
+
+
+def do_int(value, default=0, base=10):
+ """Convert the value into an integer. If the
+ conversion doesn't work it will return ``0``. You can
+ override this default using the first parameter. You
+ can also override the default base (10) in the second
+ parameter, which handles input with prefixes such as
+ 0b, 0o and 0x for bases 2, 8 and 16 respectively.
+ The base is ignored for decimal numbers and non-string values.
+ """
+ try:
+ if isinstance(value, string_types):
+ return int(value, base)
+ return int(value)
+ except (TypeError, ValueError):
+ # this quirk is necessary so that "42.23"|int gives 42.
+ try:
+ return int(float(value))
+ except (TypeError, ValueError):
+ return default
+
+
+def do_float(value, default=0.0):
+ """Convert the value into a floating point number. If the
+ conversion doesn't work it will return ``0.0``. You can
+ override this default using the first parameter.
+ """
+ try:
+ return float(value)
+ except (TypeError, ValueError):
+ return default
+
+
+def do_format(value, *args, **kwargs):
+ """
+ Apply python string formatting on an object:
+
+ .. sourcecode:: jinja
+
+ {{ "%s - %s"|format("Hello?", "Foo!") }}
+ -> Hello? - Foo!
+ """
+ if args and kwargs:
+ raise FilterArgumentError('can\'t handle positional and keyword '
+ 'arguments at the same time')
+ return soft_unicode(value) % (kwargs or args)
+
+
+def do_trim(value):
+ """Strip leading and trailing whitespace."""
+ return soft_unicode(value).strip()
+
+
+def do_striptags(value):
+ """Strip SGML/XML tags and replace adjacent whitespace by one space.
+ """
+ if hasattr(value, '__html__'):
+ value = value.__html__()
+ return Markup(text_type(value)).striptags()
+
+
+def do_slice(value, slices, fill_with=None):
+ """Slice an iterator and return a list of lists containing
+ those items. Useful if you want to create a div containing
+ three ul tags that represent columns:
+
+ .. sourcecode:: html+jinja
+
+ <div class="columnwrapper">
+ {%- for column in items|slice(3) %}
+ <ul class="column-{{ loop.index }}">
+ {%- for item in column %}
+ <li>{{ item }}</li>
+ {%- endfor %}
+ </ul>
+ {%- endfor %}
+ </div>
+
+ If you pass it a second argument it's used to fill missing
+ values on the last iteration.
+ """
+ seq = list(value)
+ length = len(seq)
+ items_per_slice = length // slices
+ slices_with_extra = length % slices
+ offset = 0
+ for slice_number in range(slices):
+ start = offset + slice_number * items_per_slice
+ if slice_number < slices_with_extra:
+ offset += 1
+ end = offset + (slice_number + 1) * items_per_slice
+ tmp = seq[start:end]
+ if fill_with is not None and slice_number >= slices_with_extra:
+ tmp.append(fill_with)
+ yield tmp
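+
+# Illustrative sketch (not part of the upstream module): slicing distributes
+# the extra items over the first slices, and the result is a generator.
+#
+#   list(do_slice(range(10), 3))   # -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
+#   list(do_slice(range(5), 3, 0)) # -> [[0, 1], [2, 3], [4, 0]]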
+
+
+def do_batch(value, linecount, fill_with=None):
+ """
+ A filter that batches items. It works pretty much like `slice`
+ just the other way round. It returns a list of lists with the
+ given number of items. If you provide a second parameter this
+ is used to fill up missing items. See this example:
+
+ .. sourcecode:: html+jinja
+
+ <table>
+ {%- for row in items|batch(3, '&nbsp;') %}
+ <tr>
+ {%- for column in row %}
+ <td>{{ column }}</td>
+ {%- endfor %}
+ </tr>
+ {%- endfor %}
+ </table>
+ """
+ tmp = []
+ for item in value:
+ if len(tmp) == linecount:
+ yield tmp
+ tmp = []
+ tmp.append(item)
+ if tmp:
+ if fill_with is not None and len(tmp) < linecount:
+ tmp += [fill_with] * (linecount - len(tmp))
+ yield tmp
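+
+# Illustrative sketch (not part of the upstream module): batch is the
+# counterpart of slice and also yields lazily.
+#
+#   list(do_batch([1, 2, 3, 4, 5], 2))     # -> [[1, 2], [3, 4], [5]]
+#   list(do_batch([1, 2, 3, 4, 5], 2, 0))  # -> [[1, 2], [3, 4], [5, 0]]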
+
+
+def do_round(value, precision=0, method='common'):
+ """Round the number to a given precision. The first
+ parameter specifies the precision (default is ``0``), the
+ second the rounding method:
+
+ - ``'common'`` rounds either up or down
+ - ``'ceil'`` always rounds up
+ - ``'floor'`` always rounds down
+
+ If you don't specify a method ``'common'`` is used.
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round }}
+ -> 43.0
+ {{ 42.55|round(1, 'floor') }}
+ -> 42.5
+
+ Note that even if rounded to 0 precision, a float is returned. If
+ you need a real integer, pipe it through `int`:
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round|int }}
+ -> 43
+ """
+ if method not in ('common', 'ceil', 'floor'):
+ raise FilterArgumentError('method must be common, ceil or floor')
+ if method == 'common':
+ return round(value, precision)
+ func = getattr(math, method)
+ return func(value * (10 ** precision)) / (10 ** precision)
+
+
+# Use a regular tuple repr here. This is what we did in the past and we
+# really want to hide this custom type as much as possible. In particular
+# we do not want to accidentally expose an auto generated repr in case
+# people start to print this out in comments or something similar for
+# debugging.
+_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
+_GroupTuple.__repr__ = tuple.__repr__
+_GroupTuple.__str__ = tuple.__str__
+
+@environmentfilter
+def do_groupby(environment, value, attribute):
+ """Group a sequence of objects by a common attribute.
+
+ If you for example have a list of dicts or objects that represent persons
+ with `gender`, `first_name` and `last_name` attributes and you want to
+ group all users by genders you can do something like the following
+ snippet:
+
+ .. sourcecode:: html+jinja
+
+ <ul>
+ {% for group in persons|groupby('gender') %}
+ <li>{{ group.grouper }}<ul>
+ {% for person in group.list %}
+ <li>{{ person.first_name }} {{ person.last_name }}</li>
+ {% endfor %}</ul></li>
+ {% endfor %}
+ </ul>
+
+ Additionally it's possible to use tuple unpacking for the grouper and
+ list:
+
+ .. sourcecode:: html+jinja
+
+ <ul>
+ {% for grouper, list in persons|groupby('gender') %}
+ ...
+ {% endfor %}
+ </ul>
+
+ As you can see the item we're grouping by is stored in the `grouper`
+ attribute and the `list` contains all the objects that have this grouper
+ in common.
+
+ .. versionchanged:: 2.6
+ It's now possible to use dotted notation to group by the child
+ attribute of another attribute.
+ """
+ expr = make_attrgetter(environment, attribute)
+ return [_GroupTuple(key, list(values)) for key, values
+ in groupby(sorted(value, key=expr), expr)]
+
+
+@environmentfilter
+def do_sum(environment, iterable, attribute=None, start=0):
+ """Returns the sum of a sequence of numbers plus the value of parameter
+ 'start' (which defaults to 0). When the sequence is empty it returns
+ start.
+
+ It is also possible to sum up only certain attributes:
+
+ .. sourcecode:: jinja
+
+ Total: {{ items|sum(attribute='price') }}
+
+ .. versionchanged:: 2.6
+ The `attribute` parameter was added to allow summing up over
+ attributes. Also the `start` parameter was moved to the right.
+ """
+ if attribute is not None:
+ iterable = imap(make_attrgetter(environment, attribute), iterable)
+ return sum(iterable, start)
+
+
+def do_list(value):
+ """Convert the value into a list. If it was a string the returned list
+ will be a list of characters.
+ """
+ return list(value)
+
+
+def do_mark_safe(value):
+ """Mark the value as safe which means that in an environment with automatic
+ escaping enabled this variable will not be escaped.
+ """
+ return Markup(value)
+
+
+def do_mark_unsafe(value):
+ """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
+ return text_type(value)
+
+
+def do_reverse(value):
+ """Reverse the object or return an iterator that iterates over it the other
+ way round.
+ """
+ if isinstance(value, string_types):
+ return value[::-1]
+ try:
+ return reversed(value)
+ except TypeError:
+ try:
+ rv = list(value)
+ rv.reverse()
+ return rv
+ except TypeError:
+ raise FilterArgumentError('argument must be iterable')
+
+
+@environmentfilter
+def do_attr(environment, obj, name):
+ """Get an attribute of an object. ``foo|attr("bar")`` works like
+ ``foo.bar``, except that an attribute is always returned and items are not
+ looked up.
+
+ See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
+ """
+ try:
+ name = str(name)
+ except UnicodeError:
+ pass
+ else:
+ try:
+ value = getattr(obj, name)
+ except AttributeError:
+ pass
+ else:
+ if environment.sandboxed and not \
+ environment.is_safe_attribute(obj, name, value):
+ return environment.unsafe_undefined(obj, name)
+ return value
+ return environment.undefined(obj=obj, name=name)
+
+
+@contextfilter
+def do_map(*args, **kwargs):
+ """Applies a filter on a sequence of objects or looks up an attribute.
+ This is useful when dealing with lists of objects but you are really
+ only interested in a certain value of it.
+
+ The basic usage is mapping on an attribute. Imagine you have a list
+ of users but you are only interested in a list of usernames:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ users|map(attribute='username')|join(', ') }}
+
+ Alternatively you can let it invoke a filter by passing the name of the
+ filter and the arguments afterwards. A good example would be applying a
+ text conversion filter on a sequence:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ titles|map('lower')|join(', ') }}
+
+ .. versionadded:: 2.7
+ """
+ seq, func = prepare_map(args, kwargs)
+ if seq:
+ for item in seq:
+ yield func(item)
+
+
+@contextfilter
+def do_select(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to each object,
+ and only selecting the objects with the test succeeding.
+
+ If no test is specified, each object will be evaluated as a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|select("odd") }}
+ {{ numbers|select("divisibleby", 3) }}
+ {{ numbers|select("lessthan", 42) }}
+ {{ strings|select("equalto", "mystring") }}
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@contextfilter
+def do_reject(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to each object,
+ and rejecting the objects with the test succeeding.
+
+ If no test is specified, each object will be evaluated as a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|reject("odd") }}
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@contextfilter
+def do_selectattr(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to the specified
+ attribute of each object, and only selecting the objects with the
+ test succeeding.
+
+ If no test is specified, the attribute's value will be evaluated as
+ a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ users|selectattr("is_active") }}
+ {{ users|selectattr("email", "none") }}
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@contextfilter
+def do_rejectattr(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to the specified
+ attribute of each object, and rejecting the objects with the test
+ succeeding.
+
+ If no test is specified, the attribute's value will be evaluated as
+ a boolean.
+
+ .. sourcecode:: jinja
+
+ {{ users|rejectattr("is_active") }}
+ {{ users|rejectattr("email", "none") }}
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@evalcontextfilter
+def do_tojson(eval_ctx, value, indent=None):
+ """Dumps a structure to JSON so that it's safe to use in ``<script>``
+ tags. It accepts the same arguments and returns a JSON string. Note that
+ this is available in templates through the ``|tojson`` filter which will
+ also mark the result as safe. Due to how this function escapes certain
+ characters this is safe even if used outside of ``<script>`` tags.
+
+ The following characters are escaped in strings:
+
+ - ``<``
+ - ``>``
+ - ``&``
+ - ``'``
+
+ This makes it safe to embed such strings in any place in HTML with the
+ notable exception of double quoted attributes. In that case single
+ quote your attributes or HTML escape it in addition.
+
+ The indent parameter can be used to enable pretty printing. Set it to
+ the number of spaces that the structures should be indented with.
+
+ Note that this filter is for use in HTML contexts only.
+
+ .. versionadded:: 2.9
+ """
+ policies = eval_ctx.environment.policies
+ dumper = policies['json.dumps_function']
+ options = policies['json.dumps_kwargs']
+ if indent is not None:
+ options = dict(options)
+ options['indent'] = indent
+ return htmlsafe_json_dumps(value, dumper=dumper, **options)
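+
+# Illustrative sketch (not part of the upstream module): the heavy lifting is
+# done by jinja2.utils.htmlsafe_json_dumps, which replaces '<', '>', '&' and
+# "'" in the serialized JSON with their \uXXXX escapes so the result can be
+# embedded in HTML.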
+
+
+def prepare_map(args, kwargs):
+ context = args[0]
+ seq = args[1]
+
+ if len(args) == 2 and 'attribute' in kwargs:
+ attribute = kwargs.pop('attribute')
+ if kwargs:
+ raise FilterArgumentError('Unexpected keyword argument %r' %
+ next(iter(kwargs)))
+ func = make_attrgetter(context.environment, attribute)
+ else:
+ try:
+ name = args[2]
+ args = args[3:]
+ except LookupError:
+ raise FilterArgumentError('map requires a filter argument')
+ func = lambda item: context.environment.call_filter(
+ name, item, args, kwargs, context=context)
+
+ return seq, func
+
+
+def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
+ context = args[0]
+ seq = args[1]
+ if lookup_attr:
+ try:
+ attr = args[2]
+ except LookupError:
+ raise FilterArgumentError('Missing parameter for attribute name')
+ transfunc = make_attrgetter(context.environment, attr)
+ off = 1
+ else:
+ off = 0
+ transfunc = lambda x: x
+
+ try:
+ name = args[2 + off]
+ args = args[3 + off:]
+ func = lambda item: context.environment.call_test(
+ name, item, args, kwargs)
+ except LookupError:
+ func = bool
+
+ return seq, lambda item: modfunc(func(transfunc(item)))
+
+
+def select_or_reject(args, kwargs, modfunc, lookup_attr):
+ seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
+ if seq:
+ for item in seq:
+ if func(item):
+ yield item
+
+
+FILTERS = {
+ 'abs': abs,
+ 'attr': do_attr,
+ 'batch': do_batch,
+ 'capitalize': do_capitalize,
+ 'center': do_center,
+ 'count': len,
+ 'd': do_default,
+ 'default': do_default,
+ 'dictsort': do_dictsort,
+ 'e': escape,
+ 'escape': escape,
+ 'filesizeformat': do_filesizeformat,
+ 'first': do_first,
+ 'float': do_float,
+ 'forceescape': do_forceescape,
+ 'format': do_format,
+ 'groupby': do_groupby,
+ 'indent': do_indent,
+ 'int': do_int,
+ 'join': do_join,
+ 'last': do_last,
+ 'length': len,
+ 'list': do_list,
+ 'lower': do_lower,
+ 'map': do_map,
+ 'min': do_min,
+ 'max': do_max,
+ 'pprint': do_pprint,
+ 'random': do_random,
+ 'reject': do_reject,
+ 'rejectattr': do_rejectattr,
+ 'replace': do_replace,
+ 'reverse': do_reverse,
+ 'round': do_round,
+ 'safe': do_mark_safe,
+ 'select': do_select,
+ 'selectattr': do_selectattr,
+ 'slice': do_slice,
+ 'sort': do_sort,
+ 'string': soft_unicode,
+ 'striptags': do_striptags,
+ 'sum': do_sum,
+ 'title': do_title,
+ 'trim': do_trim,
+ 'truncate': do_truncate,
+ 'unique': do_unique,
+ 'upper': do_upper,
+ 'urlencode': do_urlencode,
+ 'urlize': do_urlize,
+ 'wordcount': do_wordcount,
+ 'wordwrap': do_wordwrap,
+ 'xmlattr': do_xmlattr,
+ 'tojson': do_tojson,
+}
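+
+# Illustrative sketch (not part of the upstream module): this mapping is what
+# new environments start from (it is re-exported as DEFAULT_FILTERS), and
+# custom filters are added per environment, e.g.:
+#
+#   from jinja2 import Environment
+#   env = Environment()
+#   env.filters['shout'] = lambda s: s.upper() + '!'
+#   env.from_string('{{ "hi"|shout }}').render()   # -> 'HI!'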
diff --git a/python/jinja2/idtracking.py b/python/jinja2/idtracking.py
new file mode 100644
index 0000000..491bfe0
--- /dev/null
+++ b/python/jinja2/idtracking.py
@@ -0,0 +1,286 @@
+from jinja2.visitor import NodeVisitor
+from jinja2._compat import iteritems
+
+
+VAR_LOAD_PARAMETER = 'param'
+VAR_LOAD_RESOLVE = 'resolve'
+VAR_LOAD_ALIAS = 'alias'
+VAR_LOAD_UNDEFINED = 'undefined'
+
+
+def find_symbols(nodes, parent_symbols=None):
+ sym = Symbols(parent=parent_symbols)
+ visitor = FrameSymbolVisitor(sym)
+ for node in nodes:
+ visitor.visit(node)
+ return sym
+
+
+def symbols_for_node(node, parent_symbols=None):
+ sym = Symbols(parent=parent_symbols)
+ sym.analyze_node(node)
+ return sym
+
+
+class Symbols(object):
+
+ def __init__(self, parent=None, level=None):
+ if level is None:
+ if parent is None:
+ level = 0
+ else:
+ level = parent.level + 1
+ self.level = level
+ self.parent = parent
+ self.refs = {}
+ self.loads = {}
+ self.stores = set()
+
+ def analyze_node(self, node, **kwargs):
+ visitor = RootVisitor(self)
+ visitor.visit(node, **kwargs)
+
+ def _define_ref(self, name, load=None):
+ ident = 'l_%d_%s' % (self.level, name)
+ self.refs[name] = ident
+ if load is not None:
+ self.loads[ident] = load
+ return ident
+
+ def find_load(self, target):
+ if target in self.loads:
+ return self.loads[target]
+ if self.parent is not None:
+ return self.parent.find_load(target)
+
+ def find_ref(self, name):
+ if name in self.refs:
+ return self.refs[name]
+ if self.parent is not None:
+ return self.parent.find_ref(name)
+
+ def ref(self, name):
+ rv = self.find_ref(name)
+ if rv is None:
+ raise AssertionError('Tried to resolve a name to a reference that '
+ 'was unknown to the frame (%r)' % name)
+ return rv
+
+ def copy(self):
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.refs = self.refs.copy()
+ rv.loads = self.loads.copy()
+ rv.stores = self.stores.copy()
+ return rv
+
+ def store(self, name):
+ self.stores.add(name)
+
+ # If we have not seen the name referenced yet, we need to figure
+ # out what to set it to.
+ if name not in self.refs:
+ # If there is a parent scope we check if the name has a
+ # reference there. If it does it means we might have to alias
+ # to a variable there.
+ if self.parent is not None:
+ outer_ref = self.parent.find_ref(name)
+ if outer_ref is not None:
+ self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
+ return
+
+ # Otherwise we can just set it to undefined.
+ self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
+
+ def declare_parameter(self, name):
+ self.stores.add(name)
+ return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
+
+ def load(self, name):
+ target = self.find_ref(name)
+ if target is None:
+ self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
+
+ def branch_update(self, branch_symbols):
+ stores = {}
+ for branch in branch_symbols:
+ for target in branch.stores:
+ if target in self.stores:
+ continue
+ stores[target] = stores.get(target, 0) + 1
+
+ for sym in branch_symbols:
+ self.refs.update(sym.refs)
+ self.loads.update(sym.loads)
+ self.stores.update(sym.stores)
+
+ for name, branch_count in iteritems(stores):
+ if branch_count == len(branch_symbols):
+ continue
+ target = self.find_ref(name)
+ assert target is not None, 'should not happen'
+
+ if self.parent is not None:
+ outer_target = self.parent.find_ref(name)
+ if outer_target is not None:
+ self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
+ continue
+ self.loads[target] = (VAR_LOAD_RESOLVE, name)
+
+ def dump_stores(self):
+ rv = {}
+ node = self
+ while node is not None:
+ for name in node.stores:
+ if name not in rv:
+ rv[name] = self.find_ref(name)
+ node = node.parent
+ return rv
+
+ def dump_param_targets(self):
+ rv = set()
+ node = self
+ while node is not None:
+ for target, (instr, _) in iteritems(self.loads):
+ if instr == VAR_LOAD_PARAMETER:
+ rv.add(target)
+ node = node.parent
+ return rv
+
+
+class RootVisitor(NodeVisitor):
+
+ def __init__(self, symbols):
+ self.sym_visitor = FrameSymbolVisitor(symbols)
+
+ def _simple_visit(self, node, **kwargs):
+ for child in node.iter_child_nodes():
+ self.sym_visitor.visit(child)
+
+ visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
+ visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
+ _simple_visit
+
+ def visit_AssignBlock(self, node, **kwargs):
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def visit_CallBlock(self, node, **kwargs):
+ for child in node.iter_child_nodes(exclude=('call',)):
+ self.sym_visitor.visit(child)
+
+ def visit_OverlayScope(self, node, **kwargs):
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def visit_For(self, node, for_branch='body', **kwargs):
+ if for_branch == 'body':
+ self.sym_visitor.visit(node.target, store_as_param=True)
+ branch = node.body
+ elif for_branch == 'else':
+ branch = node.else_
+ elif for_branch == 'test':
+ self.sym_visitor.visit(node.target, store_as_param=True)
+ if node.test is not None:
+ self.sym_visitor.visit(node.test)
+ return
+ else:
+ raise RuntimeError('Unknown for branch')
+ for item in branch or ():
+ self.sym_visitor.visit(item)
+
+ def visit_With(self, node, **kwargs):
+ for target in node.targets:
+ self.sym_visitor.visit(target)
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def generic_visit(self, node, *args, **kwargs):
+ raise NotImplementedError('Cannot find symbols for %r' %
+ node.__class__.__name__)
+
+
+class FrameSymbolVisitor(NodeVisitor):
+ """A visitor for `Frame.inspect`."""
+
+ def __init__(self, symbols):
+ self.symbols = symbols
+
+ def visit_Name(self, node, store_as_param=False, **kwargs):
+ """All assignments to names go through this function."""
+ if store_as_param or node.ctx == 'param':
+ self.symbols.declare_parameter(node.name)
+ elif node.ctx == 'store':
+ self.symbols.store(node.name)
+ elif node.ctx == 'load':
+ self.symbols.load(node.name)
+
+ def visit_NSRef(self, node, **kwargs):
+ self.symbols.load(node.name)
+
+ def visit_If(self, node, **kwargs):
+ self.visit(node.test, **kwargs)
+
+ original_symbols = self.symbols
+
+ def inner_visit(nodes):
+ self.symbols = rv = original_symbols.copy()
+ for subnode in nodes:
+ self.visit(subnode, **kwargs)
+ self.symbols = original_symbols
+ return rv
+
+ body_symbols = inner_visit(node.body)
+ elif_symbols = inner_visit(node.elif_)
+ else_symbols = inner_visit(node.else_ or ())
+
+ self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
+
+ def visit_Macro(self, node, **kwargs):
+ self.symbols.store(node.name)
+
+ def visit_Import(self, node, **kwargs):
+ self.generic_visit(node, **kwargs)
+ self.symbols.store(node.target)
+
+ def visit_FromImport(self, node, **kwargs):
+ self.generic_visit(node, **kwargs)
+ for name in node.names:
+ if isinstance(name, tuple):
+ self.symbols.store(name[1])
+ else:
+ self.symbols.store(name)
+
+ def visit_Assign(self, node, **kwargs):
+ """Visit assignments in the correct order."""
+ self.visit(node.node, **kwargs)
+ self.visit(node.target, **kwargs)
+
+ def visit_For(self, node, **kwargs):
+ """Visiting stops at for blocks. However the block sequence
+ is visited as part of the outer scope.
+ """
+ self.visit(node.iter, **kwargs)
+
+ def visit_CallBlock(self, node, **kwargs):
+ self.visit(node.call, **kwargs)
+
+ def visit_FilterBlock(self, node, **kwargs):
+ self.visit(node.filter, **kwargs)
+
+ def visit_With(self, node, **kwargs):
+ for target in node.values:
+ self.visit(target)
+
+ def visit_AssignBlock(self, node, **kwargs):
+ """Stop visiting at block assigns."""
+ self.visit(node.target, **kwargs)
+
+ def visit_Scope(self, node, **kwargs):
+ """Stop visiting at scopes."""
+
+ def visit_Block(self, node, **kwargs):
+ """Stop visiting at blocks."""
+
+ def visit_OverlayScope(self, node, **kwargs):
+ """Do not visit into overlay scopes."""
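+
+# Illustrative sketch (not part of the upstream module): the compiler feeds
+# parsed nodes through these visitors to decide how each name is loaded.
+#
+#   from jinja2 import Environment
+#   env = Environment()
+#   sym = symbols_for_node(env.parse('{% set x = 1 %}{{ x }}{{ y }}'))
+#   # 'x' ends up in sym.stores, and sym.loads marks 'y' as VAR_LOAD_RESOLVE.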
diff --git a/python/jinja2/lexer.py b/python/jinja2/lexer.py
new file mode 100644
index 0000000..6fd135d
--- /dev/null
+++ b/python/jinja2/lexer.py
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.lexer
+ ~~~~~~~~~~~~
+
+ This module implements a Jinja / Python combination lexer. The
+ `Lexer` class provided by this module is used to do some preprocessing
+ for Jinja.
+
+ On the one hand it filters out invalid operators like the bitshift
+ operators we don't allow in templates. On the other hand it separates
+ template code and python code in expressions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+from collections import deque
+from operator import itemgetter
+
+from jinja2._compat import implements_iterator, intern, iteritems, text_type
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2.utils import LRUCache
+
+# cache for the lexers. Exists in order to be able to have multiple
+# environments with the same lexer
+_lexer_cache = LRUCache(50)
+
+# static regular expressions
+whitespace_re = re.compile(r'\s+', re.U)
+string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+ r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
+integer_re = re.compile(r'\d+')
+
+try:
+ # check if this Python supports Unicode identifiers
+ compile('föö', '<unknown>', 'eval')
+except SyntaxError:
+ # no Unicode support, use ASCII identifiers
+ name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
+ check_ident = False
+else:
+ # Unicode support, build a pattern to match valid characters, and set flag
+ # to use str.isidentifier to validate during lexing
+ from jinja2 import _identifier
+ name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
+ check_ident = True
+ # remove the pattern from memory after building the regex
+ import sys
+ del sys.modules['jinja2._identifier']
+ import jinja2
+ del jinja2._identifier
+ del _identifier
+
+float_re = re.compile(r'(?<!\.)\d+\.\d+')
+newline_re = re.compile(r'(\r\n|\r|\n)')
+
+# intern the tokens and keep references to them
+TOKEN_ADD = intern('add')
+TOKEN_ASSIGN = intern('assign')
+TOKEN_COLON = intern('colon')
+TOKEN_COMMA = intern('comma')
+TOKEN_DIV = intern('div')
+TOKEN_DOT = intern('dot')
+TOKEN_EQ = intern('eq')
+TOKEN_FLOORDIV = intern('floordiv')
+TOKEN_GT = intern('gt')
+TOKEN_GTEQ = intern('gteq')
+TOKEN_LBRACE = intern('lbrace')
+TOKEN_LBRACKET = intern('lbracket')
+TOKEN_LPAREN = intern('lparen')
+TOKEN_LT = intern('lt')
+TOKEN_LTEQ = intern('lteq')
+TOKEN_MOD = intern('mod')
+TOKEN_MUL = intern('mul')
+TOKEN_NE = intern('ne')
+TOKEN_PIPE = intern('pipe')
+TOKEN_POW = intern('pow')
+TOKEN_RBRACE = intern('rbrace')
+TOKEN_RBRACKET = intern('rbracket')
+TOKEN_RPAREN = intern('rparen')
+TOKEN_SEMICOLON = intern('semicolon')
+TOKEN_SUB = intern('sub')
+TOKEN_TILDE = intern('tilde')
+TOKEN_WHITESPACE = intern('whitespace')
+TOKEN_FLOAT = intern('float')
+TOKEN_INTEGER = intern('integer')
+TOKEN_NAME = intern('name')
+TOKEN_STRING = intern('string')
+TOKEN_OPERATOR = intern('operator')
+TOKEN_BLOCK_BEGIN = intern('block_begin')
+TOKEN_BLOCK_END = intern('block_end')
+TOKEN_VARIABLE_BEGIN = intern('variable_begin')
+TOKEN_VARIABLE_END = intern('variable_end')
+TOKEN_RAW_BEGIN = intern('raw_begin')
+TOKEN_RAW_END = intern('raw_end')
+TOKEN_COMMENT_BEGIN = intern('comment_begin')
+TOKEN_COMMENT_END = intern('comment_end')
+TOKEN_COMMENT = intern('comment')
+TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
+TOKEN_LINESTATEMENT_END = intern('linestatement_end')
+TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
+TOKEN_LINECOMMENT_END = intern('linecomment_end')
+TOKEN_LINECOMMENT = intern('linecomment')
+TOKEN_DATA = intern('data')
+TOKEN_INITIAL = intern('initial')
+TOKEN_EOF = intern('eof')
+
+# bind operators to token types
+operators = {
+ '+': TOKEN_ADD,
+ '-': TOKEN_SUB,
+ '/': TOKEN_DIV,
+ '//': TOKEN_FLOORDIV,
+ '*': TOKEN_MUL,
+ '%': TOKEN_MOD,
+ '**': TOKEN_POW,
+ '~': TOKEN_TILDE,
+ '[': TOKEN_LBRACKET,
+ ']': TOKEN_RBRACKET,
+ '(': TOKEN_LPAREN,
+ ')': TOKEN_RPAREN,
+ '{': TOKEN_LBRACE,
+ '}': TOKEN_RBRACE,
+ '==': TOKEN_EQ,
+ '!=': TOKEN_NE,
+ '>': TOKEN_GT,
+ '>=': TOKEN_GTEQ,
+ '<': TOKEN_LT,
+ '<=': TOKEN_LTEQ,
+ '=': TOKEN_ASSIGN,
+ '.': TOKEN_DOT,
+ ':': TOKEN_COLON,
+ '|': TOKEN_PIPE,
+ ',': TOKEN_COMMA,
+ ';': TOKEN_SEMICOLON
+}
+
+reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
+assert len(operators) == len(reverse_operators), 'operators dropped'
+operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
+ sorted(operators, key=lambda x: -len(x))))
+
+ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
+ TOKEN_COMMENT_END, TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT])
+ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
+ TOKEN_COMMENT, TOKEN_LINECOMMENT])
+
+
+def _describe_token_type(token_type):
+ if token_type in reverse_operators:
+ return reverse_operators[token_type]
+ return {
+ TOKEN_COMMENT_BEGIN: 'begin of comment',
+ TOKEN_COMMENT_END: 'end of comment',
+ TOKEN_COMMENT: 'comment',
+ TOKEN_LINECOMMENT: 'comment',
+ TOKEN_BLOCK_BEGIN: 'begin of statement block',
+ TOKEN_BLOCK_END: 'end of statement block',
+ TOKEN_VARIABLE_BEGIN: 'begin of print statement',
+ TOKEN_VARIABLE_END: 'end of print statement',
+ TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
+ TOKEN_LINESTATEMENT_END: 'end of line statement',
+ TOKEN_DATA: 'template data / text',
+ TOKEN_EOF: 'end of template'
+ }.get(token_type, token_type)
+
+
+def describe_token(token):
+ """Returns a description of the token."""
+ if token.type == 'name':
+ return token.value
+ return _describe_token_type(token.type)
+
+
+def describe_token_expr(expr):
+ """Like `describe_token` but for token expressions."""
+ if ':' in expr:
+ type, value = expr.split(':', 1)
+ if type == 'name':
+ return value
+ else:
+ type = expr
+ return _describe_token_type(type)
+
+
+def count_newlines(value):
+ """Count the number of newline characters in the string. This is
+ useful for extensions that filter a stream.
+ """
+ return len(newline_re.findall(value))
+
+
+def compile_rules(environment):
+ """Compiles all the rules from the environment into a list of rules."""
+ e = re.escape
+ rules = [
+ (len(environment.comment_start_string), 'comment',
+ e(environment.comment_start_string)),
+ (len(environment.block_start_string), 'block',
+ e(environment.block_start_string)),
+ (len(environment.variable_start_string), 'variable',
+ e(environment.variable_start_string))
+ ]
+
+ if environment.line_statement_prefix is not None:
+ rules.append((len(environment.line_statement_prefix), 'linestatement',
+ r'^[ \t\v]*' + e(environment.line_statement_prefix)))
+ if environment.line_comment_prefix is not None:
+ rules.append((len(environment.line_comment_prefix), 'linecomment',
+ r'(?:^|(?<=\S))[^\S\r\n]*' +
+ e(environment.line_comment_prefix)))
+
+ return [x[1:] for x in sorted(rules, reverse=True)]
+
+
+class Failure(object):
+ """Class that raises a `TemplateSyntaxError` if called.
+ Used by the `Lexer` to specify known errors.
+ """
+
+ def __init__(self, message, cls=TemplateSyntaxError):
+ self.message = message
+ self.error_class = cls
+
+ def __call__(self, lineno, filename):
+ raise self.error_class(self.message, lineno, filename)
+
+
+class Token(tuple):
+ """Token class."""
+ __slots__ = ()
+ lineno, type, value = (property(itemgetter(x)) for x in range(3))
+
+ def __new__(cls, lineno, type, value):
+ return tuple.__new__(cls, (lineno, intern(str(type)), value))
+
+ def __str__(self):
+ if self.type in reverse_operators:
+ return reverse_operators[self.type]
+ elif self.type == 'name':
+ return self.value
+ return self.type
+
+ def test(self, expr):
+ """Test a token against a token expression. This can either be a
+ token type or ``'token_type:token_value'``. This can only test
+ against string values and types.
+ """
+ # here we do a regular string equality check as test_any is usually
+ # passed an iterable of non-interned strings.
+ if self.type == expr:
+ return True
+ elif ':' in expr:
+ return expr.split(':', 1) == [self.type, self.value]
+ return False
+
+ def test_any(self, *iterable):
+ """Test against multiple token expressions."""
+ for expr in iterable:
+ if self.test(expr):
+ return True
+ return False
+
+ def __repr__(self):
+ return 'Token(%r, %r, %r)' % (
+ self.lineno,
+ self.type,
+ self.value
+ )
+
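+# Illustrative sketch (not part of the upstream module): token expressions as
+# used by Token.test and TokenStream.expect.
+#
+#   tok = Token(1, 'name', 'endfor')
+#   tok.test('name')          # True  -- match by type only
+#   tok.test('name:endfor')   # True  -- match by type and value
+#   tok.test('name:endif')    # False
+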
+
+@implements_iterator
+class TokenStreamIterator(object):
+ """The iterator for tokenstreams. Iterate over the stream
+ until the eof token is reached.
+ """
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.stream.current
+ if token.type is TOKEN_EOF:
+ self.stream.close()
+ raise StopIteration()
+ next(self.stream)
+ return token
+
+
+@implements_iterator
+class TokenStream(object):
+ """A token stream is an iterable that yields :class:`Token`\\s. The
+ parser however does not iterate over it but calls :meth:`next` to go
+ one token ahead. The current active token is stored as :attr:`current`.
+ """
+
+ def __init__(self, generator, name, filename):
+ self._iter = iter(generator)
+ self._pushed = deque()
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.current = Token(1, TOKEN_INITIAL, '')
+ next(self)
+
+ def __iter__(self):
+ return TokenStreamIterator(self)
+
+ def __bool__(self):
+ return bool(self._pushed) or self.current.type is not TOKEN_EOF
+ __nonzero__ = __bool__ # py2
+
+ eos = property(lambda x: not x, doc="Are we at the end of the stream?")
+
+ def push(self, token):
+ """Push a token back to the stream."""
+ self._pushed.append(token)
+
+ def look(self):
+ """Look at the next token."""
+ old_token = next(self)
+ result = self.current
+ self.push(result)
+ self.current = old_token
+ return result
+
+ def skip(self, n=1):
+ """Go n tokens ahead."""
+ for x in range(n):
+ next(self)
+
+ def next_if(self, expr):
+ """Perform the token test and return the token if it matched.
+ Otherwise the return value is `None`.
+ """
+ if self.current.test(expr):
+ return next(self)
+
+ def skip_if(self, expr):
+ """Like :meth:`next_if` but only returns `True` or `False`."""
+ return self.next_if(expr) is not None
+
+ def __next__(self):
+ """Go one token ahead and return the old one.
+
+ Use the built-in :func:`next` instead of calling this directly.
+ """
+ rv = self.current
+ if self._pushed:
+ self.current = self._pushed.popleft()
+ elif self.current.type is not TOKEN_EOF:
+ try:
+ self.current = next(self._iter)
+ except StopIteration:
+ self.close()
+ return rv
+
+ def close(self):
+ """Close the stream."""
+ self.current = Token(self.current.lineno, TOKEN_EOF, '')
+ self._iter = None
+ self.closed = True
+
+ def expect(self, expr):
+ """Expect a given token type and return it. This accepts the same
+ argument as :meth:`jinja2.lexer.Token.test`.
+ """
+ if not self.current.test(expr):
+ expr = describe_token_expr(expr)
+ if self.current.type is TOKEN_EOF:
+ raise TemplateSyntaxError('unexpected end of template, '
+ 'expected %r.' % expr,
+ self.current.lineno,
+ self.name, self.filename)
+ raise TemplateSyntaxError("expected token %r, got %r" %
+ (expr, describe_token(self.current)),
+ self.current.lineno,
+ self.name, self.filename)
+ try:
+ return self.current
+ finally:
+ next(self)
+
+
+def get_lexer(environment):
+ """Return a lexer which is probably cached."""
+ key = (environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline)
+ lexer = _lexer_cache.get(key)
+ if lexer is None:
+ lexer = Lexer(environment)
+ _lexer_cache[key] = lexer
+ return lexer
+
+
+class Lexer(object):
+ """Class that implements a lexer for a given environment. Automatically
+ created by the environment class; usually you don't have to do that yourself.
+
+ Note that the lexer is not automatically bound to an environment.
+ Multiple environments can share the same lexer.
+ """
+
+ def __init__(self, environment):
+ # shortcuts
+ c = lambda x: re.compile(x, re.M | re.S)
+ e = re.escape
+
+ # lexing rules for tags
+ tag_rules = [
+ (whitespace_re, TOKEN_WHITESPACE, None),
+ (float_re, TOKEN_FLOAT, None),
+ (integer_re, TOKEN_INTEGER, None),
+ (name_re, TOKEN_NAME, None),
+ (string_re, TOKEN_STRING, None),
+ (operator_re, TOKEN_OPERATOR, None)
+ ]
+
+ # assemble the root lexing rule. because "|" is ungreedy
+ # we have to sort by length so that the lexer continues working
+ # as expected when we have parsing rules like <% for block and
+ # <%= for variables. (if someone wants asp like syntax)
+ # variables are just part of the rules if variable processing
+ # is required.
+ root_tag_rules = compile_rules(environment)
+
+ # block suffix if trimming is enabled
+ block_suffix_re = environment.trim_blocks and '\\n?' or ''
+
+ # strip leading spaces if lstrip_blocks is enabled
+ prefix_re = {}
+ if environment.lstrip_blocks:
+ # use '{%+' to manually disable lstrip_blocks behavior
+ no_lstrip_re = e('+')
+ # detect overlap between block and variable or comment strings
+ block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
+ # make sure we don't mistake a block for a variable or a comment
+ m = block_diff.match(environment.comment_start_string)
+ no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+ m = block_diff.match(environment.variable_start_string)
+ no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+
+ # detect overlap between comment and variable strings
+ comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
+ m = comment_diff.match(environment.variable_start_string)
+ no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
+
+ lstrip_re = r'^[ \t]*'
+ block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
+ lstrip_re,
+ e(environment.block_start_string),
+ no_lstrip_re,
+ e(environment.block_start_string),
+ )
+ comment_prefix_re = r'%s%s%s|%s\+?' % (
+ lstrip_re,
+ e(environment.comment_start_string),
+ no_variable_re,
+ e(environment.comment_start_string),
+ )
+ prefix_re['block'] = block_prefix_re
+ prefix_re['comment'] = comment_prefix_re
+ else:
+ block_prefix_re = '%s' % e(environment.block_start_string)
+
+ self.newline_sequence = environment.newline_sequence
+ self.keep_trailing_newline = environment.keep_trailing_newline
+
+ # global lexing rules
+ self.rules = {
+ 'root': [
+ # directives
+ (c('(.*?)(?:%s)' % '|'.join(
+ [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
+ e(environment.block_start_string),
+ block_prefix_re,
+ e(environment.block_end_string),
+ e(environment.block_end_string)
+ )] + [
+ r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
+ for n, r in root_tag_rules
+ ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
+ # data
+ (c('.+'), TOKEN_DATA, None)
+ ],
+ # comments
+ TOKEN_COMMENT_BEGIN: [
+ (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
+ e(environment.comment_end_string),
+ e(environment.comment_end_string),
+ block_suffix_re
+ )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
+ (c('(.)'), (Failure('Missing end of comment tag'),), None)
+ ],
+ # blocks
+ TOKEN_BLOCK_BEGIN: [
+ (c(r'(?:\-%s\s*|%s)%s' % (
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re
+ )), TOKEN_BLOCK_END, '#pop'),
+ ] + tag_rules,
+ # variables
+ TOKEN_VARIABLE_BEGIN: [
+ (c(r'\-%s\s*|%s' % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string)
+ )), TOKEN_VARIABLE_END, '#pop')
+ ] + tag_rules,
+ # raw block
+ TOKEN_RAW_BEGIN: [
+ (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
+ e(environment.block_start_string),
+ block_prefix_re,
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re
+ )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
+ (c('(.)'), (Failure('Missing end of raw directive'),), None)
+ ],
+ # line statements
+ TOKEN_LINESTATEMENT_BEGIN: [
+ (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
+ ] + tag_rules,
+ # line comments
+ TOKEN_LINECOMMENT_BEGIN: [
+ (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
+ TOKEN_LINECOMMENT_END), '#pop')
+ ]
+ }
+
+ def _normalize_newlines(self, value):
+ """Called for strings and template data to normalize it to unicode."""
+ return newline_re.sub(self.newline_sequence, value)
+
+ def tokenize(self, source, name=None, filename=None, state=None):
+ """Calls tokeniter and wraps the result in a token stream.
+ """
+ stream = self.tokeniter(source, name, filename, state)
+ return TokenStream(self.wrap(stream, name, filename), name, filename)
+
+ def wrap(self, stream, name=None, filename=None):
+ """This is called with the stream as returned by `tokenize` and wraps
+ every token in a :class:`Token` and converts the value.
+ """
+ for lineno, token, value in stream:
+ if token in ignored_tokens:
+ continue
+ elif token == 'linestatement_begin':
+ token = 'block_begin'
+ elif token == 'linestatement_end':
+ token = 'block_end'
+ # we are not interested in those tokens in the parser
+ elif token in ('raw_begin', 'raw_end'):
+ continue
+ elif token == 'data':
+ value = self._normalize_newlines(value)
+ elif token == 'keyword':
+ token = value
+ elif token == 'name':
+ value = str(value)
+ if check_ident and not value.isidentifier():
+ raise TemplateSyntaxError(
+ 'Invalid character in identifier',
+ lineno, name, filename)
+ elif token == 'string':
+ # try to unescape string
+ try:
+ value = self._normalize_newlines(value[1:-1]) \
+ .encode('ascii', 'backslashreplace') \
+ .decode('unicode-escape')
+ except Exception as e:
+ msg = str(e).split(':')[-1].strip()
+ raise TemplateSyntaxError(msg, lineno, name, filename)
+ elif token == 'integer':
+ value = int(value)
+ elif token == 'float':
+ value = float(value)
+ elif token == 'operator':
+ token = operators[value]
+ yield Token(lineno, token, value)
+
+ def tokeniter(self, source, name, filename=None, state=None):
+ """This method tokenizes the text and returns the tokens in a
+ generator. Use this method if you just want to tokenize a template.
+ """
+ source = text_type(source)
+ lines = source.splitlines()
+ if self.keep_trailing_newline and source:
+ for newline in ('\r\n', '\r', '\n'):
+ if source.endswith(newline):
+ lines.append('')
+ break
+ source = '\n'.join(lines)
+ pos = 0
+ lineno = 1
+ stack = ['root']
+ if state is not None and state != 'root':
+ assert state in ('variable', 'block'), 'invalid state'
+ stack.append(state + '_begin')
+ else:
+ state = 'root'
+ statetokens = self.rules[stack[-1]]
+ source_length = len(source)
+
+ balancing_stack = []
+
+ while 1:
+ # tokenizer loop
+ for regex, tokens, new_state in statetokens:
+ m = regex.match(source, pos)
+ # if no match we try again with the next rule
+ if m is None:
+ continue
+
+ # we only match blocks and variables if braces / parentheses
+ # are balanced. continue parsing with the lower rule which
+ # is the operator rule. do this only if the end tags look
+ # like operators
+ if balancing_stack and \
+ tokens in ('variable_end', 'block_end',
+ 'linestatement_end'):
+ continue
+
+ # tuples support more options
+ if isinstance(tokens, tuple):
+ for idx, token in enumerate(tokens):
+ # failure group
+ if token.__class__ is Failure:
+ raise token(lineno, filename)
+ # bygroup is a bit more complex, in that case we
+ # yield for the current token the first named
+ # group that matched
+ elif token == '#bygroup':
+ for key, value in iteritems(m.groupdict()):
+ if value is not None:
+ yield lineno, key, value
+ lineno += value.count('\n')
+ break
+ else:
+ raise RuntimeError('%r wanted to resolve '
+ 'the token dynamically'
+ ' but no group matched'
+ % regex)
+ # normal group
+ else:
+ data = m.group(idx + 1)
+ if data or token not in ignore_if_empty:
+ yield lineno, token, data
+ lineno += data.count('\n')
+
+ # strings as tokens are just yielded as-is.
+ else:
+ data = m.group()
+ # update brace/parentheses balance
+ if tokens == 'operator':
+ if data == '{':
+ balancing_stack.append('}')
+ elif data == '(':
+ balancing_stack.append(')')
+ elif data == '[':
+ balancing_stack.append(']')
+ elif data in ('}', ')', ']'):
+ if not balancing_stack:
+ raise TemplateSyntaxError('unexpected \'%s\'' %
+ data, lineno, name,
+ filename)
+ expected_op = balancing_stack.pop()
+ if expected_op != data:
+ raise TemplateSyntaxError('unexpected \'%s\', '
+ 'expected \'%s\'' %
+ (data, expected_op),
+ lineno, name,
+ filename)
+ # yield items
+ if data or tokens not in ignore_if_empty:
+ yield lineno, tokens, data
+ lineno += data.count('\n')
+
+ # fetch new position into new variable so that we can check
+ # if there is an internal parsing error which would result
+ # in an infinite loop
+ pos2 = m.end()
+
+ # handle state changes
+ if new_state is not None:
+ # remove the uppermost state
+ if new_state == '#pop':
+ stack.pop()
+ # resolve the new state by group checking
+ elif new_state == '#bygroup':
+ for key, value in iteritems(m.groupdict()):
+ if value is not None:
+ stack.append(key)
+ break
+ else:
+ raise RuntimeError('%r wanted to resolve the '
+ 'new state dynamically but'
+ ' no group matched' %
+ regex)
+ # direct state name given
+ else:
+ stack.append(new_state)
+ statetokens = self.rules[stack[-1]]
+ # we are still at the same position and no stack change.
+ # this means a loop without break condition, avoid that and
+ # raise error
+ elif pos2 == pos:
+ raise RuntimeError('%r yielded empty string without '
+ 'stack change' % regex)
+ # publish the new position and start again
+ pos = pos2
+ break
+ # if loop terminated without break we haven't found a single match
+ # either we are at the end of the file or we have a problem
+ else:
+ # end of text
+ if pos >= source_length:
+ return
+ # something went wrong
+ raise TemplateSyntaxError('unexpected char %r at %d' %
+ (source[pos], pos), lineno,
+ name, filename)
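+
+# Illustrative sketch (not part of the upstream module): environments expose
+# this machinery through Environment.lex and the cached lexer, e.g.:
+#
+#   from jinja2 import Environment
+#   env = Environment()
+#   for lineno, token_type, value in env.lex('Hello {{ name }}!'):
+#       print(lineno, token_type, value)
+#   # yields data, variable_begin, whitespace, name, whitespace, variable_end
+#   # and data tuples; Lexer.wrap() later drops the whitespace tokens.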
diff --git a/python/jinja2/loaders.py b/python/jinja2/loaders.py
new file mode 100644
index 0000000..4c79793
--- /dev/null
+++ b/python/jinja2/loaders.py
@@ -0,0 +1,481 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.loaders
+ ~~~~~~~~~~~~~~
+
+ Jinja loader classes.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import weakref
+from types import ModuleType
+from os import path
+from hashlib import sha1
+from jinja2.exceptions import TemplateNotFound
+from jinja2.utils import open_if_exists, internalcode
+from jinja2._compat import string_types, iteritems
+
+
+def split_template_path(template):
+ """Split a path into segments and perform a sanity check. If it detects
+ '..' in the path it will raise a `TemplateNotFound` error.
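+
+    For example (illustrative)::
+
+        split_template_path('users/index.html')   # ['users', 'index.html']
+        split_template_path('../secret.html')     # raises TemplateNotFound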
+ """
+ pieces = []
+ for piece in template.split('/'):
+ if path.sep in piece \
+ or (path.altsep and path.altsep in piece) or \
+ piece == path.pardir:
+ raise TemplateNotFound(template)
+ elif piece and piece != '.':
+ pieces.append(piece)
+ return pieces
+
+
+class BaseLoader(object):
+ """Baseclass for all loaders. Subclass this and override `get_source` to
+ implement a custom loading mechanism. The environment provides a
+ `get_template` method that calls the loader's `load` method to get the
+ :class:`Template` object.
+
+ A very basic example for a loader that looks up templates on the file
+ system could look like this::
+
+ from jinja2 import BaseLoader, TemplateNotFound
+ from os.path import join, exists, getmtime
+
+ class MyLoader(BaseLoader):
+
+ def __init__(self, path):
+ self.path = path
+
+ def get_source(self, environment, template):
+ path = join(self.path, template)
+ if not exists(path):
+ raise TemplateNotFound(template)
+ mtime = getmtime(path)
+                with open(path, 'rb') as f:
+ source = f.read().decode('utf-8')
+ return source, path, lambda: mtime == getmtime(path)
+ """
+
+ #: if set to `False` it indicates that the loader cannot provide access
+ #: to the source of templates.
+ #:
+ #: .. versionadded:: 2.4
+ has_source_access = True
+
+ def get_source(self, environment, template):
+ """Get the template source, filename and reload helper for a template.
+ It's passed the environment and template name and has to return a
+ tuple in the form ``(source, filename, uptodate)`` or raise a
+ `TemplateNotFound` error if it can't locate the template.
+
+ The source part of the returned tuple must be the source of the
+        template as a unicode string or an ASCII bytestring. The filename should
+ be the name of the file on the filesystem if it was loaded from there,
+ otherwise `None`. The filename is used by python for the tracebacks
+ if no loader extension is used.
+
+ The last item in the tuple is the `uptodate` function. If auto
+ reloading is enabled it's always called to check if the template
+ changed. No arguments are passed so the function must store the
+ old state somewhere (for example in a closure). If it returns `False`
+ the template will be reloaded.
+ """
+ if not self.has_source_access:
+ raise RuntimeError('%s cannot provide access to the source' %
+ self.__class__.__name__)
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ """Iterates over all templates. If the loader does not support that
+ it should raise a :exc:`TypeError` which is the default behavior.
+ """
+ raise TypeError('this loader cannot iterate over all templates')
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ """Loads a template. This method looks up the template in the cache
+ or loads one by calling :meth:`get_source`. Subclasses should not
+ override this method as loaders working on collections of other
+ loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
+ will not call this method but `get_source` directly.
+ """
+ code = None
+ if globals is None:
+ globals = {}
+
+ # first we try to get the source for this template together
+ # with the filename and the uptodate function.
+ source, filename, uptodate = self.get_source(environment, name)
+
+ # try to load the code from the bytecode cache if there is a
+ # bytecode cache configured.
+ bcc = environment.bytecode_cache
+ if bcc is not None:
+ bucket = bcc.get_bucket(environment, name, filename, source)
+ code = bucket.code
+
+ # if we don't have code so far (not cached, no longer up to
+ # date) etc. we compile the template
+ if code is None:
+ code = environment.compile(source, name, filename)
+
+ # if the bytecode cache is available and the bucket doesn't
+ # have a code so far, we give the bucket the new code and put
+ # it back to the bytecode cache.
+ if bcc is not None and bucket.code is None:
+ bucket.code = code
+ bcc.set_bucket(bucket)
+
+ return environment.template_class.from_code(environment, code,
+ globals, uptodate)
+
+
+class FileSystemLoader(BaseLoader):
+ """Loads templates from the file system. This loader can find templates
+ in folders on the file system and is the preferred way to load them.
+
+ The loader takes the path to the templates as string, or if multiple
+ locations are wanted a list of them which is then looked up in the
+ given order::
+
+ >>> loader = FileSystemLoader('/path/to/templates')
+ >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
+
+    By default the template encoding is ``'utf-8'``, which can be changed
+ by setting the `encoding` parameter to something else.
+
+ To follow symbolic links, set the *followlinks* parameter to ``True``::
+
+ >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
+
+ .. versionchanged:: 2.8+
+ The *followlinks* parameter was added.
+ """
+
+ def __init__(self, searchpath, encoding='utf-8', followlinks=False):
+ if isinstance(searchpath, string_types):
+ searchpath = [searchpath]
+ self.searchpath = list(searchpath)
+ self.encoding = encoding
+ self.followlinks = followlinks
+
+ def get_source(self, environment, template):
+ pieces = split_template_path(template)
+ for searchpath in self.searchpath:
+ filename = path.join(searchpath, *pieces)
+ f = open_if_exists(filename)
+ if f is None:
+ continue
+ try:
+ contents = f.read().decode(self.encoding)
+ finally:
+ f.close()
+
+ mtime = path.getmtime(filename)
+
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+ return contents, filename, uptodate
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ found = set()
+ for searchpath in self.searchpath:
+ walk_dir = os.walk(searchpath, followlinks=self.followlinks)
+ for dirpath, dirnames, filenames in walk_dir:
+ for filename in filenames:
+ template = os.path.join(dirpath, filename) \
+ [len(searchpath):].strip(os.path.sep) \
+ .replace(os.path.sep, '/')
+ if template[:2] == './':
+ template = template[2:]
+ if template not in found:
+ found.add(template)
+ return sorted(found)
+
+
+class PackageLoader(BaseLoader):
+ """Load templates from python eggs or packages. It is constructed with
+ the name of the python package and the path to the templates in that
+ package::
+
+ loader = PackageLoader('mypackage', 'views')
+
+ If the package path is not given, ``'templates'`` is assumed.
+
+    By default the template encoding is ``'utf-8'``, which can be changed
+ by setting the `encoding` parameter to something else. Due to the nature
+ of eggs it's only possible to reload templates if the package was loaded
+ from the file system and not a zip file.
+ """
+
+ def __init__(self, package_name, package_path='templates',
+ encoding='utf-8'):
+ from pkg_resources import DefaultProvider, ResourceManager, \
+ get_provider
+ provider = get_provider(package_name)
+ self.encoding = encoding
+ self.manager = ResourceManager()
+ self.filesystem_bound = isinstance(provider, DefaultProvider)
+ self.provider = provider
+ self.package_path = package_path
+
+ def get_source(self, environment, template):
+ pieces = split_template_path(template)
+ p = '/'.join((self.package_path,) + tuple(pieces))
+ if not self.provider.has_resource(p):
+ raise TemplateNotFound(template)
+
+ filename = uptodate = None
+ if self.filesystem_bound:
+ filename = self.provider.get_resource_filename(self.manager, p)
+ mtime = path.getmtime(filename)
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+
+ source = self.provider.get_resource_string(self.manager, p)
+ return source.decode(self.encoding), filename, uptodate
+
+ def list_templates(self):
+ path = self.package_path
+ if path[:2] == './':
+ path = path[2:]
+ elif path == '.':
+ path = ''
+ offset = len(path)
+ results = []
+ def _walk(path):
+ for filename in self.provider.resource_listdir(path):
+ fullname = path + '/' + filename
+ if self.provider.resource_isdir(fullname):
+ _walk(fullname)
+ else:
+ results.append(fullname[offset:].lstrip('/'))
+ _walk(path)
+ results.sort()
+ return results
+
+
+class DictLoader(BaseLoader):
+ """Loads a template from a python dict. It's passed a dict of unicode
+ strings bound to template names. This loader is useful for unittesting:
+
+ >>> loader = DictLoader({'index.html': 'source here'})
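+
+    A loaded template can then be rendered as usual (illustrative sketch)::
+
+        env = Environment(loader=loader)
+        env.get_template('index.html').render()   # -> 'source here'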
+
+    Because auto reloading is rarely useful this is disabled by default.
+ """
+
+ def __init__(self, mapping):
+ self.mapping = mapping
+
+ def get_source(self, environment, template):
+ if template in self.mapping:
+ source = self.mapping[template]
+ return source, None, lambda: source == self.mapping.get(template)
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ return sorted(self.mapping)
+
+
+class FunctionLoader(BaseLoader):
+ """A loader that is passed a function which does the loading. The
+ function receives the name of the template and has to return either
+    a unicode string with the template source, a tuple in the form ``(source,
+ filename, uptodatefunc)`` or `None` if the template does not exist.
+
+ >>> def load_template(name):
+ ... if name == 'index.html':
+ ... return '...'
+ ...
+ >>> loader = FunctionLoader(load_template)
+
+ The `uptodatefunc` is a function that is called if autoreload is enabled
+ and has to return `True` if the template is still up to date. For more
+ details have a look at :meth:`BaseLoader.get_source` which has the same
+ return value.
+ """
+
+ def __init__(self, load_func):
+ self.load_func = load_func
+
+ def get_source(self, environment, template):
+ rv = self.load_func(template)
+ if rv is None:
+ raise TemplateNotFound(template)
+ elif isinstance(rv, string_types):
+ return rv, None, None
+ return rv
+
+
+class PrefixLoader(BaseLoader):
+ """A loader that is passed a dict of loaders where each loader is bound
+    to a prefix. By default the prefix is delimited from the template name
+    by a slash, which can be changed by setting the `delimiter` argument to
+ something else::
+
+ loader = PrefixLoader({
+ 'app1': PackageLoader('mypackage.app1'),
+ 'app2': PackageLoader('mypackage.app2')
+ })
+
+    Loading ``'app1/index.html'`` loads the file from the app1 package,
+    while loading ``'app2/index.html'`` loads it from the second one.
+ """
+
+ def __init__(self, mapping, delimiter='/'):
+ self.mapping = mapping
+ self.delimiter = delimiter
+
+ def get_loader(self, template):
+ try:
+ prefix, name = template.split(self.delimiter, 1)
+ loader = self.mapping[prefix]
+ except (ValueError, KeyError):
+ raise TemplateNotFound(template)
+ return loader, name
+
+ def get_source(self, environment, template):
+ loader, name = self.get_loader(template)
+ try:
+ return loader.get_source(environment, name)
+ except TemplateNotFound:
+ # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ loader, local_name = self.get_loader(name)
+ try:
+ return loader.load(environment, local_name, globals)
+ except TemplateNotFound:
+ # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(name)
+
+ def list_templates(self):
+ result = []
+ for prefix, loader in iteritems(self.mapping):
+ for template in loader.list_templates():
+ result.append(prefix + self.delimiter + template)
+ return result
+
+
+class ChoiceLoader(BaseLoader):
+ """This loader works like the `PrefixLoader` just that no prefix is
+ specified. If a template could not be found by one loader the next one
+ is tried.
+
+ >>> loader = ChoiceLoader([
+ ... FileSystemLoader('/path/to/user/templates'),
+ ... FileSystemLoader('/path/to/system/templates')
+ ... ])
+
+ This is useful if you want to allow users to override builtin templates
+ from a different location.
+ """
+
+ def __init__(self, loaders):
+ self.loaders = loaders
+
+ def get_source(self, environment, template):
+ for loader in self.loaders:
+ try:
+ return loader.get_source(environment, template)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ for loader in self.loaders:
+ try:
+ return loader.load(environment, name, globals)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(name)
+
+ def list_templates(self):
+ found = set()
+ for loader in self.loaders:
+ found.update(loader.list_templates())
+ return sorted(found)
+
+
+class _TemplateModule(ModuleType):
+ """Like a normal module but with support for weak references"""
+
+
+class ModuleLoader(BaseLoader):
+ """This loader loads templates from precompiled templates.
+
+ Example usage:
+
+ >>> loader = ChoiceLoader([
+ ... ModuleLoader('/path/to/compiled/templates'),
+ ... FileSystemLoader('/path/to/templates')
+ ... ])
+
+ Templates can be precompiled with :meth:`Environment.compile_templates`.
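+
+    For example (illustrative; ``env`` is an :class:`Environment` and
+    ``zip=None`` writes plain ``.py`` modules instead of a zip archive)::
+
+        env.compile_templates('/path/to/compiled/templates', zip=None)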
+ """
+
+ has_source_access = False
+
+ def __init__(self, path):
+ package_name = '_jinja2_module_templates_%x' % id(self)
+
+ # create a fake module that looks for the templates in the
+ # path given.
+ mod = _TemplateModule(package_name)
+ if isinstance(path, string_types):
+ path = [path]
+ else:
+ path = list(path)
+ mod.__path__ = path
+
+ sys.modules[package_name] = weakref.proxy(mod,
+ lambda x: sys.modules.pop(package_name, None))
+
+ # the only strong reference, the sys.modules entry is weak
+ # so that the garbage collector can remove it once the
+ # loader that created it goes out of business.
+ self.module = mod
+ self.package_name = package_name
+
+ @staticmethod
+ def get_template_key(name):
+ return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
+
+ @staticmethod
+ def get_module_filename(name):
+ return ModuleLoader.get_template_key(name) + '.py'
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ key = self.get_template_key(name)
+ module = '%s.%s' % (self.package_name, key)
+ mod = getattr(self.module, module, None)
+ if mod is None:
+ try:
+ mod = __import__(module, None, None, ['root'])
+ except ImportError:
+ raise TemplateNotFound(name)
+
+ # remove the entry from sys.modules, we only want the attribute
+ # on the module object we have stored on the loader.
+ sys.modules.pop(module, None)
+
+ return environment.template_class.from_module_dict(
+ environment, mod.__dict__, globals)
diff --git a/python/jinja2/meta.py b/python/jinja2/meta.py
new file mode 100644
index 0000000..7421914
--- /dev/null
+++ b/python/jinja2/meta.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.meta
+ ~~~~~~~~~~~
+
+    This module implements various functions that expose information about
+ templates that might be interesting for various kinds of applications.
+
+ :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.compiler import CodeGenerator
+from jinja2._compat import string_types, iteritems
+
+
+class TrackingCodeGenerator(CodeGenerator):
+ """We abuse the code generator for introspection."""
+
+ def __init__(self, environment):
+ CodeGenerator.__init__(self, environment, '<introspection>',
+ '<introspection>')
+ self.undeclared_identifiers = set()
+
+ def write(self, x):
+ """Don't write."""
+
+ def enter_frame(self, frame):
+ """Remember all undeclared identifiers."""
+ CodeGenerator.enter_frame(self, frame)
+ for _, (action, param) in iteritems(frame.symbols.loads):
+ if action == 'resolve':
+ self.undeclared_identifiers.add(param)
+
+
+def find_undeclared_variables(ast):
+ """Returns a set of all variables in the AST that will be looked up from
+ the context at runtime. Because at compile time it's not known which
+ variables will be used depending on the path the execution takes at
+ runtime, all variables are returned.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
+ >>> meta.find_undeclared_variables(ast) == set(['bar'])
+ True
+
+ .. admonition:: Implementation
+
+ Internally the code generator is used for finding undeclared variables.
+ This is good to know because the code generator might raise a
+ :exc:`TemplateAssertionError` during compilation and as a matter of
+ fact this function can currently raise that exception as well.
+ """
+ codegen = TrackingCodeGenerator(ast.environment)
+ codegen.visit(ast)
+ return codegen.undeclared_identifiers
+
+
+def find_referenced_templates(ast):
+ """Finds all the referenced templates from the AST. This will return an
+ iterator over all the hardcoded template extensions, inclusions and
+ imports. If dynamic inheritance or inclusion is used, `None` will be
+ yielded.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
+ >>> list(meta.find_referenced_templates(ast))
+ ['layout.html', None]
+
+    This function is useful for dependency tracking; for example, if you want
+    to rebuild parts of the website after a layout template has changed.
+ """
+ for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
+ nodes.Include)):
+ if not isinstance(node.template, nodes.Const):
+ # a tuple with some non consts in there
+ if isinstance(node.template, (nodes.Tuple, nodes.List)):
+ for template_name in node.template.items:
+ # something const, only yield the strings and ignore
+ # non-string consts that really just make no sense
+ if isinstance(template_name, nodes.Const):
+ if isinstance(template_name.value, string_types):
+ yield template_name.value
+ # something dynamic in there
+ else:
+ yield None
+ # something dynamic we don't know about here
+ else:
+ yield None
+ continue
+ # constant is a basestring, direct template name
+ if isinstance(node.template.value, string_types):
+ yield node.template.value
+ # a tuple or list (latter *should* not happen) made of consts,
+ # yield the consts that are strings. We could warn here for
+ # non string values
+ elif isinstance(node, nodes.Include) and \
+ isinstance(node.template.value, (tuple, list)):
+ for template_name in node.template.value:
+ if isinstance(template_name, string_types):
+ yield template_name
+ # something else we don't care about, we could warn here
+ else:
+ yield None
diff --git a/python/jinja2/nativetypes.py b/python/jinja2/nativetypes.py
new file mode 100644
index 0000000..fe17e41
--- /dev/null
+++ b/python/jinja2/nativetypes.py
@@ -0,0 +1,220 @@
+import sys
+from ast import literal_eval
+from itertools import islice, chain
+from jinja2 import nodes
+from jinja2._compat import text_type
+from jinja2.compiler import CodeGenerator, has_safe_repr
+from jinja2.environment import Environment, Template
+from jinja2.utils import concat, escape
+
+
+def native_concat(nodes):
+ """Return a native Python type from the list of compiled nodes. If the
+ result is a single node, its value is returned. Otherwise, the nodes are
+ concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+ string is returned.
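+
+    For example (illustrative)::
+
+        native_concat(iter(['1', '2']))          # -> 12 (an int)
+        native_concat(iter(['[1, ', '2]']))      # -> [1, 2] (a list)
+        native_concat(iter(['no', ' literal']))  # -> 'no literal'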
+ """
+ head = list(islice(nodes, 2))
+
+ if not head:
+ return None
+
+ if len(head) == 1:
+ out = head[0]
+ else:
+ out = u''.join([text_type(v) for v in chain(head, nodes)])
+
+ try:
+ return literal_eval(out)
+ except (ValueError, SyntaxError, MemoryError):
+ return out
+
+
+class NativeCodeGenerator(CodeGenerator):
+ """A code generator which avoids injecting ``to_string()`` calls around the
+ internal code Jinja uses to render templates.
+ """
+
+ def visit_Output(self, node, frame):
+ """Same as :meth:`CodeGenerator.visit_Output`, but do not call
+ ``to_string`` on output nodes in generated code.
+ """
+ if self.has_known_extends and frame.require_output_check:
+ return
+
+ finalize = self.environment.finalize
+ finalize_context = getattr(finalize, 'contextfunction', False)
+ finalize_eval = getattr(finalize, 'evalcontextfunction', False)
+ finalize_env = getattr(finalize, 'environmentfunction', False)
+
+ if finalize is not None:
+ if finalize_context or finalize_eval:
+ const_finalize = None
+ elif finalize_env:
+ def const_finalize(x):
+ return finalize(self.environment, x)
+ else:
+ const_finalize = finalize
+ else:
+ def const_finalize(x):
+ return x
+
+ # If we are inside a frame that requires output checking, we do so.
+ outdent_later = False
+
+ if frame.require_output_check:
+ self.writeline('if parent_template is None:')
+ self.indent()
+ outdent_later = True
+
+ # Try to evaluate as many chunks as possible into a static string at
+ # compile time.
+ body = []
+
+ for child in node.nodes:
+ try:
+ if const_finalize is None:
+ raise nodes.Impossible()
+
+ const = child.as_const(frame.eval_ctx)
+ if not has_safe_repr(const):
+ raise nodes.Impossible()
+ except nodes.Impossible:
+ body.append(child)
+ continue
+
+ # the frame can't be volatile here, because otherwise the as_const
+ # function would raise an Impossible exception at that point
+ try:
+ if frame.eval_ctx.autoescape:
+ if hasattr(const, '__html__'):
+ const = const.__html__()
+ else:
+ const = escape(const)
+
+ const = const_finalize(const)
+ except Exception:
+ # if something goes wrong here we evaluate the node at runtime
+ # for easier debugging
+ body.append(child)
+ continue
+
+ if body and isinstance(body[-1], list):
+ body[-1].append(const)
+ else:
+ body.append([const])
+
+ # if we have less than 3 nodes or a buffer we yield or extend/append
+ if len(body) < 3 or frame.buffer is not None:
+ if frame.buffer is not None:
+ # for one item we append, for more we extend
+ if len(body) == 1:
+ self.writeline('%s.append(' % frame.buffer)
+ else:
+ self.writeline('%s.extend((' % frame.buffer)
+
+ self.indent()
+
+ for item in body:
+ if isinstance(item, list):
+ val = repr(native_concat(item))
+
+ if frame.buffer is None:
+ self.writeline('yield ' + val)
+ else:
+ self.writeline(val + ',')
+ else:
+ if frame.buffer is None:
+ self.writeline('yield ', item)
+ else:
+ self.newline(item)
+
+ close = 0
+
+ if finalize is not None:
+ self.write('environment.finalize(')
+
+ if finalize_context:
+ self.write('context, ')
+
+ close += 1
+
+ self.visit(item, frame)
+
+ if close > 0:
+ self.write(')' * close)
+
+ if frame.buffer is not None:
+ self.write(',')
+
+ if frame.buffer is not None:
+ # close the open parentheses
+ self.outdent()
+ self.writeline(len(body) == 1 and ')' or '))')
+
+ # otherwise we create a format string as this is faster in that case
+ else:
+ format = []
+ arguments = []
+
+ for item in body:
+ if isinstance(item, list):
+ format.append(native_concat(item).replace('%', '%%'))
+ else:
+ format.append('%s')
+ arguments.append(item)
+
+ self.writeline('yield ')
+ self.write(repr(concat(format)) + ' % (')
+ self.indent()
+
+ for argument in arguments:
+ self.newline(argument)
+ close = 0
+
+ if finalize is not None:
+ self.write('environment.finalize(')
+
+ if finalize_context:
+ self.write('context, ')
+ elif finalize_eval:
+ self.write('context.eval_ctx, ')
+ elif finalize_env:
+ self.write('environment, ')
+
+ close += 1
+
+ self.visit(argument, frame)
+ self.write(')' * close + ', ')
+
+ self.outdent()
+ self.writeline(')')
+
+ if outdent_later:
+ self.outdent()
+
+
+class NativeTemplate(Template):
+ def render(self, *args, **kwargs):
+ """Render the template to produce a native Python type. If the result
+ is a single node, its value is returned. Otherwise, the nodes are
+ concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+ string is returned.
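+
+        For example (illustrative)::
+
+            NativeEnvironment().from_string('{{ x + y }}').render(x=4, y=2)
+            # -> 6 (an int, not the string '6')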
+ """
+ vars = dict(*args, **kwargs)
+
+ try:
+ return native_concat(self.root_render_func(self.new_context(vars)))
+ except Exception:
+ exc_info = sys.exc_info()
+
+ return self.environment.handle_exception(exc_info, True)
+
+
+class NativeEnvironment(Environment):
+ """An environment that renders templates to native Python types."""
+
+ code_generator_class = NativeCodeGenerator
+ template_class = NativeTemplate
diff --git a/python/jinja2/nodes.py b/python/jinja2/nodes.py
new file mode 100644
index 0000000..4d9a01a
--- /dev/null
+++ b/python/jinja2/nodes.py
@@ -0,0 +1,999 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.nodes
+ ~~~~~~~~~~~~
+
+ This module implements additional nodes derived from the ast base node.
+
+ It also provides some node tree helper functions like `in_lineno` and
+ `get_nodes` used by the parser and translator in order to normalize
+ python and jinja nodes.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import types
+import operator
+
+from collections import deque
+from jinja2.utils import Markup
+from jinja2._compat import izip, with_metaclass, text_type, PY2
+
+
+#: the types we support for context functions
+_context_function_types = (types.FunctionType, types.MethodType)
+
+
+_binop_to_func = {
+ '*': operator.mul,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '**': operator.pow,
+ '%': operator.mod,
+ '+': operator.add,
+ '-': operator.sub
+}
+
+_uaop_to_func = {
+ 'not': operator.not_,
+ '+': operator.pos,
+ '-': operator.neg
+}
+
+_cmpop_to_func = {
+ 'eq': operator.eq,
+ 'ne': operator.ne,
+ 'gt': operator.gt,
+ 'gteq': operator.ge,
+ 'lt': operator.lt,
+ 'lteq': operator.le,
+ 'in': lambda a, b: a in b,
+ 'notin': lambda a, b: a not in b
+}
+
+
+class Impossible(Exception):
+ """Raised if the node could not perform a requested action."""
+
+
+class NodeType(type):
+ """A metaclass for nodes that handles the field and attribute
+ inheritance. fields and attributes from the parent class are
+ automatically forwarded to the child."""
+
+ def __new__(cls, name, bases, d):
+ for attr in 'fields', 'attributes':
+ storage = []
+ storage.extend(getattr(bases[0], attr, ()))
+ storage.extend(d.get(attr, ()))
+ assert len(bases) == 1, 'multiple inheritance not allowed'
+ assert len(storage) == len(set(storage)), 'layout conflict'
+ d[attr] = tuple(storage)
+ d.setdefault('abstract', False)
+ return type.__new__(cls, name, bases, d)
+
+
+class EvalContext(object):
+ """Holds evaluation time information. Custom attributes can be attached
+ to it in extensions.
+ """
+
+ def __init__(self, environment, template_name=None):
+ self.environment = environment
+ if callable(environment.autoescape):
+ self.autoescape = environment.autoescape(template_name)
+ else:
+ self.autoescape = environment.autoescape
+ self.volatile = False
+
+ def save(self):
+ return self.__dict__.copy()
+
+ def revert(self, old):
+ self.__dict__.clear()
+ self.__dict__.update(old)
+
+
+def get_eval_context(node, ctx):
+ if ctx is None:
+ if node.environment is None:
+ raise RuntimeError('if no eval context is passed, the '
+ 'node must have an attached '
+ 'environment.')
+ return EvalContext(node.environment)
+ return ctx
+
+
+class Node(with_metaclass(NodeType, object)):
+ """Baseclass for all Jinja2 nodes. There are a number of nodes available
+ of different types. There are four major types:
+
+ - :class:`Stmt`: statements
+ - :class:`Expr`: expressions
+ - :class:`Helper`: helper nodes
+ - :class:`Template`: the outermost wrapper node
+
+ All nodes have fields and attributes. Fields may be other nodes, lists,
+ or arbitrary values. Fields are passed to the constructor as regular
+ positional arguments, attributes as keyword arguments. Each node has
+ two attributes: `lineno` (the line number of the node) and `environment`.
+ The `environment` attribute is set at the end of the parsing process for
+ all nodes automatically.
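+
+    For example (illustrative; fields are passed positionally, attributes
+    by keyword)::
+
+        Name('foo', 'load', lineno=1)
+        Const(42, lineno=1)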
+ """
+ fields = ()
+ attributes = ('lineno', 'environment')
+ abstract = True
+
+ def __init__(self, *fields, **attributes):
+ if self.abstract:
+            raise TypeError('abstract nodes are not instantiable')
+ if fields:
+ if len(fields) != len(self.fields):
+ if not self.fields:
+ raise TypeError('%r takes 0 arguments' %
+ self.__class__.__name__)
+ raise TypeError('%r takes 0 or %d argument%s' % (
+ self.__class__.__name__,
+ len(self.fields),
+ len(self.fields) != 1 and 's' or ''
+ ))
+ for name, arg in izip(self.fields, fields):
+ setattr(self, name, arg)
+ for attr in self.attributes:
+ setattr(self, attr, attributes.pop(attr, None))
+ if attributes:
+ raise TypeError('unknown attribute %r' %
+ next(iter(attributes)))
+
+ def iter_fields(self, exclude=None, only=None):
+ """This method iterates over all fields that are defined and yields
+        ``(key, value)`` tuples. By default all fields are returned, but
+ it's possible to limit that to some fields by providing the `only`
+ parameter or to exclude some using the `exclude` parameter. Both
+ should be sets or tuples of field names.
+ """
+ for name in self.fields:
+ if (exclude is only is None) or \
+ (exclude is not None and name not in exclude) or \
+ (only is not None and name in only):
+ try:
+ yield name, getattr(self, name)
+ except AttributeError:
+ pass
+
+ def iter_child_nodes(self, exclude=None, only=None):
+ """Iterates over all direct child nodes of the node. This iterates
+        over all fields and yields the values if they are nodes. If the value
+ of a field is a list all the nodes in that list are returned.
+ """
+ for field, item in self.iter_fields(exclude, only):
+ if isinstance(item, list):
+ for n in item:
+ if isinstance(n, Node):
+ yield n
+ elif isinstance(item, Node):
+ yield item
+
+ def find(self, node_type):
+ """Find the first node of a given type. If no such node exists the
+ return value is `None`.
+ """
+ for result in self.find_all(node_type):
+ return result
+
+ def find_all(self, node_type):
+ """Find all the nodes of a given type. If the type is a tuple,
+ the check is performed for any of the tuple items.
+ """
+ for child in self.iter_child_nodes():
+ if isinstance(child, node_type):
+ yield child
+ for result in child.find_all(node_type):
+ yield result
+
+ def set_ctx(self, ctx):
+ """Reset the context of a node and all child nodes. Per default the
+ parser will all generate nodes that have a 'load' context as it's the
+ most common one. This method is used in the parser to set assignment
+ targets and other nodes to a store context.
+ """
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if 'ctx' in node.fields:
+ node.ctx = ctx
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_lineno(self, lineno, override=False):
+ """Set the line numbers of the node and children."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if 'lineno' in node.attributes:
+ if node.lineno is None or override:
+ node.lineno = lineno
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_environment(self, environment):
+ """Set the environment for all nodes."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ node.environment = environment
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def __eq__(self, other):
+ return type(self) is type(other) and \
+ tuple(self.iter_fields()) == tuple(other.iter_fields())
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ # Restore Python 2 hashing behavior on Python 3
+ __hash__ = object.__hash__
+
+ def __repr__(self):
+ return '%s(%s)' % (
+ self.__class__.__name__,
+ ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
+ arg in self.fields)
+ )
+
+ def dump(self):
+ def _dump(node):
+ if not isinstance(node, Node):
+ buf.append(repr(node))
+ return
+
+ buf.append('nodes.%s(' % node.__class__.__name__)
+ if not node.fields:
+ buf.append(')')
+ return
+ for idx, field in enumerate(node.fields):
+ if idx:
+ buf.append(', ')
+ value = getattr(node, field)
+ if isinstance(value, list):
+ buf.append('[')
+ for idx, item in enumerate(value):
+ if idx:
+ buf.append(', ')
+ _dump(item)
+ buf.append(']')
+ else:
+ _dump(value)
+ buf.append(')')
+ buf = []
+ _dump(self)
+ return ''.join(buf)
+
+
+class Stmt(Node):
+ """Base node for all statements."""
+ abstract = True
+
+
+class Helper(Node):
+ """Nodes that exist in a specific context only."""
+ abstract = True
+
+
+class Template(Node):
+ """Node that represents a template. This must be the outermost node that
+ is passed to the compiler.
+ """
+ fields = ('body',)
+
+
+class Output(Stmt):
+ """A node that holds multiple expressions which are then printed out.
+ This is used both for the `print` statement and the regular template data.
+ """
+ fields = ('nodes',)
+
+
+class Extends(Stmt):
+ """Represents an extends statement."""
+ fields = ('template',)
+
+
+class For(Stmt):
+ """The for loop. `target` is the target for the iteration (usually a
+ :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
+ of nodes that are used as loop-body, and `else_` a list of nodes for the
+ `else` block. If no else node exists it has to be an empty list.
+
+ For filtered nodes an expression can be stored as `test`, otherwise `None`.
+ """
+ fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
+
+
+class If(Stmt):
+ """If `test` is true, `body` is rendered, else `else_`."""
+ fields = ('test', 'body', 'elif_', 'else_')
+
+
+class Macro(Stmt):
+ """A macro definition. `name` is the name of the macro, `args` a list of
+ arguments and `defaults` a list of defaults if there are any. `body` is
+ a list of nodes for the macro body.
+ """
+ fields = ('name', 'args', 'defaults', 'body')
+
+
+class CallBlock(Stmt):
+ """Like a macro without a name but a call instead. `call` is called with
+ the unnamed macro as `caller` argument this node holds.
+ """
+ fields = ('call', 'args', 'defaults', 'body')
+
+
+class FilterBlock(Stmt):
+ """Node for filter sections."""
+ fields = ('body', 'filter')
+
+
+class With(Stmt):
+ """Specific node for with statements. In older versions of Jinja the
+ with statement was implemented on the base of the `Scope` node instead.
+
+ .. versionadded:: 2.9.3
+ """
+ fields = ('targets', 'values', 'body')
+
+
+class Block(Stmt):
+ """A node that represents a block."""
+ fields = ('name', 'body', 'scoped')
+
+
+class Include(Stmt):
+ """A node that represents the include tag."""
+ fields = ('template', 'with_context', 'ignore_missing')
+
+
+class Import(Stmt):
+ """A node that represents the import tag."""
+ fields = ('template', 'target', 'with_context')
+
+
+class FromImport(Stmt):
+ """A node that represents the from import tag. It's important to not
+ pass unsafe names to the name attribute. The compiler translates the
+ attribute lookups directly into getattr calls and does *not* use the
+ subscript callback of the interface. As exported variables may not
+ start with double underscores (which the parser asserts) this is not a
+ problem for regular Jinja code, but if this node is used in an extension
+ extra care must be taken.
+
+ The list of names may contain tuples if aliases are wanted.
+ """
+ fields = ('template', 'names', 'with_context')
+
+
+class ExprStmt(Stmt):
+ """A statement that evaluates an expression and discards the result."""
+ fields = ('node',)
+
+
+class Assign(Stmt):
+ """Assigns an expression to a target."""
+ fields = ('target', 'node')
+
+
+class AssignBlock(Stmt):
+ """Assigns a block to a target."""
+ fields = ('target', 'filter', 'body')
+
+
+class Expr(Node):
+ """Baseclass for all expressions."""
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ """Return the value of the expression as constant or raise
+ :exc:`Impossible` if this was not possible.
+
+ An :class:`EvalContext` can be provided, if none is given
+ a default context is created which requires the nodes to have
+ an attached environment.
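+
+        For example (illustrative; ``env`` is an assumed
+        :class:`~jinja2.Environment` instance)::
+
+            Add(Const(1), Const(2), environment=env).as_const()   # -> 3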
+
+ .. versionchanged:: 2.4
+ the `eval_ctx` parameter was added.
+ """
+ raise Impossible()
+
+ def can_assign(self):
+ """Check if it's possible to assign something to this node."""
+ return False
+
+
+class BinExpr(Expr):
+ """Baseclass for all binary expressions."""
+ fields = ('left', 'right')
+ operator = None
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ # intercepted operators cannot be folded at compile time
+ if self.environment.sandboxed and \
+ self.operator in self.environment.intercepted_binops:
+ raise Impossible()
+ f = _binop_to_func[self.operator]
+ try:
+ return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+
+class UnaryExpr(Expr):
+ """Baseclass for all unary expressions."""
+ fields = ('node',)
+ operator = None
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ # intercepted operators cannot be folded at compile time
+ if self.environment.sandboxed and \
+ self.operator in self.environment.intercepted_unops:
+ raise Impossible()
+ f = _uaop_to_func[self.operator]
+ try:
+ return f(self.node.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+
+class Name(Expr):
+ """Looks up a name or stores a value in a name.
+ The `ctx` of the node can be one of the following values:
+
+ - `store`: store a value in the name
+ - `load`: load that name
+ - `param`: like `store` but if the name was defined as function parameter.
+ """
+ fields = ('name', 'ctx')
+
+ def can_assign(self):
+ return self.name not in ('true', 'false', 'none',
+ 'True', 'False', 'None')
+
+
+class NSRef(Expr):
+ """Reference to a namespace value assignment"""
+ fields = ('name', 'attr')
+
+ def can_assign(self):
+ # We don't need any special checks here; NSRef assignments have a
+ # runtime check to ensure the target is a namespace object which will
+ # have been checked already as it is created using a normal assignment
+ # which goes through a `Name` node.
+ return True
+
+
+class Literal(Expr):
+ """Baseclass for literals."""
+ abstract = True
+
+
+class Const(Literal):
+ """All constant values. The parser will return this node for simple
+ constants such as ``42`` or ``"foo"`` but it can be used to store more
+    complex values such as lists too. Only constants with a safe
+    representation (objects where ``eval(repr(x)) == x`` is true) can be used.
+ """
+ fields = ('value',)
+
+ def as_const(self, eval_ctx=None):
+ rv = self.value
+ if PY2 and type(rv) is text_type and \
+ self.environment.policies['compiler.ascii_str']:
+ try:
+ rv = rv.encode('ascii')
+ except UnicodeError:
+ pass
+ return rv
+
+ @classmethod
+ def from_untrusted(cls, value, lineno=None, environment=None):
+ """Return a const object if the value is representable as
+ constant value in the generated code, otherwise it will raise
+ an `Impossible` exception.
+ """
+ from .compiler import has_safe_repr
+ if not has_safe_repr(value):
+ raise Impossible()
+ return cls(value, lineno=lineno, environment=environment)
+
+
+class TemplateData(Literal):
+ """A constant template string."""
+ fields = ('data',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ if eval_ctx.autoescape:
+ return Markup(self.data)
+ return self.data
+
+
+class Tuple(Literal):
+ """For loop unpacking and some other things like multiple arguments
+    for subscripts. As with :class:`Name`, `ctx` specifies whether the tuple
+    is used for loading or storing the names.
+ """
+ fields = ('items', 'ctx')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return tuple(x.as_const(eval_ctx) for x in self.items)
+
+ def can_assign(self):
+ for item in self.items:
+ if not item.can_assign():
+ return False
+ return True
+
+
+class List(Literal):
+ """Any list literal such as ``[1, 2, 3]``"""
+ fields = ('items',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return [x.as_const(eval_ctx) for x in self.items]
+
+
+class Dict(Literal):
+ """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
+ :class:`Pair` nodes.
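+
+    For example (illustrative)::
+
+        Dict([Pair(Const(1), Const(2)), Pair(Const(3), Const(4))])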
+ """
+ fields = ('items',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return dict(x.as_const(eval_ctx) for x in self.items)
+
+
+class Pair(Helper):
+ """A key, value pair for dicts."""
+ fields = ('key', 'value')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
+
+
+class Keyword(Helper):
+ """A key, value pair for keyword arguments where key is a string."""
+ fields = ('key', 'value')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key, self.value.as_const(eval_ctx)
+
+
+class CondExpr(Expr):
+ """A conditional expression (inline if expression). (``{{
+ foo if bar else baz }}``)
+ """
+ fields = ('test', 'expr1', 'expr2')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.test.as_const(eval_ctx):
+ return self.expr1.as_const(eval_ctx)
+
+ # if we evaluate to an undefined object, we better do that at runtime
+ if self.expr2 is None:
+ raise Impossible()
+
+ return self.expr2.as_const(eval_ctx)
+
+
+def args_as_const(node, eval_ctx):
+ args = [x.as_const(eval_ctx) for x in node.args]
+ kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
+
+ if node.dyn_args is not None:
+ try:
+ args.extend(node.dyn_args.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+ if node.dyn_kwargs is not None:
+ try:
+ kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+ return args, kwargs
+
+
+class Filter(Expr):
+ """This node applies a filter on an expression. `name` is the name of
+ the filter, the rest of the fields are the same as for :class:`Call`.
+
+ If the `node` of a filter is `None` the contents of the last buffer are
+ filtered. Buffers are created by macros and filter blocks.
+ """
+
+ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ if eval_ctx.volatile or self.node is None:
+ raise Impossible()
+
+ # we have to be careful here because we call filter_ below.
+        # if this variable were called filter, 2to3 would wrap the
+        # call in a list because it is assuming we are talking about the
+ # builtin filter function here which no longer returns a list in
+ # python 3. because of that, do not rename filter_ to filter!
+ filter_ = self.environment.filters.get(self.name)
+
+ if filter_ is None or getattr(filter_, 'contextfilter', False):
+ raise Impossible()
+
+        # We cannot constant-fold async filters, so we need to make sure
+        # not to go down this path.
+ if (
+ eval_ctx.environment.is_async
+ and getattr(filter_, 'asyncfiltervariant', False)
+ ):
+ raise Impossible()
+
+ args, kwargs = args_as_const(self, eval_ctx)
+ args.insert(0, self.node.as_const(eval_ctx))
+
+ if getattr(filter_, 'evalcontextfilter', False):
+ args.insert(0, eval_ctx)
+ elif getattr(filter_, 'environmentfilter', False):
+ args.insert(0, self.environment)
+
+ try:
+ return filter_(*args, **kwargs)
+ except Exception:
+ raise Impossible()
+
+
+class Test(Expr):
+ """Applies a test on an expression. `name` is the name of the test, the
+ rest of the fields are the same as for :class:`Call`.
+ """
+
+ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+ def as_const(self, eval_ctx=None):
+ test = self.environment.tests.get(self.name)
+
+ if test is None:
+ raise Impossible()
+
+ eval_ctx = get_eval_context(self, eval_ctx)
+ args, kwargs = args_as_const(self, eval_ctx)
+ args.insert(0, self.node.as_const(eval_ctx))
+
+ try:
+ return test(*args, **kwargs)
+ except Exception:
+ raise Impossible()
+
+
+class Call(Expr):
+ """Calls an expression. `args` is a list of arguments, `kwargs` a list
+ of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+    and `dyn_kwargs` have to be either `None` or a node that is used as
+    the node for dynamic positional (``*args``) or keyword (``**kwargs``)
+ arguments.
+ """
+ fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+
+class Getitem(Expr):
+ """Get an attribute or item from an expression and prefer the item."""
+ fields = ('node', 'arg', 'ctx')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.ctx != 'load':
+ raise Impossible()
+ try:
+ return self.environment.getitem(self.node.as_const(eval_ctx),
+ self.arg.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+ def can_assign(self):
+ return False
+
+
+class Getattr(Expr):
+ """Get an attribute or item from an expression that is a ascii-only
+ bytestring and prefer the attribute.
+ """
+ fields = ('node', 'attr', 'ctx')
+
+ def as_const(self, eval_ctx=None):
+ if self.ctx != 'load':
+ raise Impossible()
+ try:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.environment.getattr(self.node.as_const(eval_ctx),
+ self.attr)
+ except Exception:
+ raise Impossible()
+
+ def can_assign(self):
+ return False
+
+
+class Slice(Expr):
+ """Represents a slice object. This must only be used as argument for
+ :class:`Subscript`.
+ """
+ fields = ('start', 'stop', 'step')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ def const(obj):
+ if obj is None:
+ return None
+ return obj.as_const(eval_ctx)
+ return slice(const(self.start), const(self.stop), const(self.step))
+
+
+class Concat(Expr):
+ """Concatenates the list of expressions provided after converting them to
+ unicode.
+ """
+ fields = ('nodes',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+
+
+class Compare(Expr):
+ """Compares an expression with some other expressions. `ops` must be a
+ list of :class:`Operand`\\s.
+ """
+ fields = ('expr', 'ops')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ result = value = self.expr.as_const(eval_ctx)
+        try:
+            for op in self.ops:
+                new_value = op.expr.as_const(eval_ctx)
+                result = _cmpop_to_func[op.op](value, new_value)
+
+                # short circuit like Python does for chained comparisons,
+                # otherwise e.g. ``3 < 2 < 5`` would fold to True
+                if not result:
+                    return False
+
+                value = new_value
+        except Exception:
+            raise Impossible()
+ return result
+
+
+class Operand(Helper):
+ """Holds an operator and an expression."""
+ fields = ('op', 'expr')
+
+if __debug__:
+ Operand.__doc__ += '\nThe following operators are available: ' + \
+ ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
+ set(_uaop_to_func) | set(_cmpop_to_func)))
+
+
+class Mul(BinExpr):
+ """Multiplies the left with the right node."""
+ operator = '*'
+
+
+class Div(BinExpr):
+ """Divides the left by the right node."""
+ operator = '/'
+
+
+class FloorDiv(BinExpr):
+ """Divides the left by the right node and truncates conver the
+ result into an integer by truncating.
+ """
+ operator = '//'
+
+
+class Add(BinExpr):
+ """Add the left to the right node."""
+ operator = '+'
+
+
+class Sub(BinExpr):
+ """Subtract the right from the left node."""
+ operator = '-'
+
+
+class Mod(BinExpr):
+ """Left modulo right."""
+ operator = '%'
+
+
+class Pow(BinExpr):
+ """Left to the power of right."""
+ operator = '**'
+
+
+class And(BinExpr):
+ """Short circuited AND."""
+ operator = 'and'
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
+
+
+class Or(BinExpr):
+ """Short circuited OR."""
+ operator = 'or'
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
+
+
+class Not(UnaryExpr):
+ """Negate the expression."""
+ operator = 'not'
+
+
+class Neg(UnaryExpr):
+ """Make the expression negative."""
+ operator = '-'
+
+
+class Pos(UnaryExpr):
+ """Make the expression positive (noop for most expressions)"""
+ operator = '+'
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+ """Loads an attribute from the environment object. This is useful for
+ extensions that want to call a callback stored on the environment.
+ """
+ fields = ('name',)
+
+
+class ExtensionAttribute(Expr):
+ """Returns the attribute of an extension bound to the environment.
+ The identifier is the identifier of the :class:`Extension`.
+
+ This node is usually constructed by calling the
+ :meth:`~jinja2.ext.Extension.attr` method on an extension.
+ """
+ fields = ('identifier', 'name')
+
+
+class ImportedName(Expr):
+ """If created with an import name the import name is returned on node
+ access. For example ``ImportedName('cgi.escape')`` returns the `escape`
+ function from the cgi module on evaluation. Imports are optimized by the
+ compiler so there is no need to assign them to local variables.
+ """
+ fields = ('importname',)
+
+
+class InternalName(Expr):
+ """An internal name in the compiler. You cannot create these nodes
+ yourself but the parser provides a
+ :meth:`~jinja2.parser.Parser.free_identifier` method that creates
+ a new identifier for you. This identifier is not available from the
+    template and is not treated specially by the compiler.
+ """
+ fields = ('name',)
+
+ def __init__(self):
+ raise TypeError('Can\'t create internal names. Use the '
+ '`free_identifier` method on a parser.')
+
+
+class MarkSafe(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+ fields = ('expr',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return Markup(self.expr.as_const(eval_ctx))
+
+
+class MarkSafeIfAutoescape(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`) but
+ only if autoescaping is active.
+
+ .. versionadded:: 2.5
+ """
+ fields = ('expr',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ expr = self.expr.as_const(eval_ctx)
+ if eval_ctx.autoescape:
+ return Markup(expr)
+ return expr
+
+
+class ContextReference(Expr):
+ """Returns the current template context. It can be used like a
+ :class:`Name` node, with a ``'load'`` ctx and will return the
+ current :class:`~jinja2.runtime.Context` object.
+
+    Here is an example that assigns the current template name to a
+ variable named `foo`::
+
+ Assign(Name('foo', ctx='store'),
+ Getattr(ContextReference(), 'name'))
+ """
+
+
+class Continue(Stmt):
+ """Continue a loop."""
+
+
+class Break(Stmt):
+ """Break a loop."""
+
+
+class Scope(Stmt):
+ """An artificial scope."""
+ fields = ('body',)
+
+
+class OverlayScope(Stmt):
+ """An overlay scope for extensions. This is a largely unoptimized scope
+ that however can be used to introduce completely arbitrary variables into
+ a sub scope from a dictionary or dictionary like object. The `context`
+ field has to evaluate to a dictionary object.
+
+ Example usage::
+
+ OverlayScope(context=self.call_method('get_context'),
+ body=[...])
+
+ .. versionadded:: 2.10
+ """
+ fields = ('context', 'body')
+
+
+class EvalContextModifier(Stmt):
+ """Modifies the eval context. For each option that should be modified,
+ a :class:`Keyword` has to be added to the :attr:`options` list.
+
+ Example to change the `autoescape` setting::
+
+ EvalContextModifier(options=[Keyword('autoescape', Const(True))])
+ """
+ fields = ('options',)
+
+
+class ScopedEvalContextModifier(EvalContextModifier):
+ """Modifies the eval context and reverts it later. Works exactly like
+ :class:`EvalContextModifier` but will only modify the
+ :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
+ """
+ fields = ('body',)
+
+
+# make sure nobody creates custom nodes
+def _failing_new(*args, **kwargs):
+ raise TypeError('can\'t create custom node types')
+NodeType.__new__ = staticmethod(_failing_new); del _failing_new
diff --git a/python/jinja2/optimizer.py b/python/jinja2/optimizer.py
new file mode 100644
index 0000000..65ab3ce
--- /dev/null
+++ b/python/jinja2/optimizer.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.optimizer
+ ~~~~~~~~~~~~~~~~
+
+ The jinja optimizer is currently trying to constant fold a few expressions
+ and modify the AST in place so that it should be easier to evaluate it.
+
+ Because the AST does not contain all the scoping information and the
+ compiler has to find that out, we cannot do all the optimizations we
+ want. For example loop unrolling doesn't work because unrolled loops would
+ have a different scoping.
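+
+    As an illustrative example, a constant expression such as ``{{ 1 + 2 }}``
+    parses to an ``Add(Const(1), Const(2))`` node, which the optimizer folds
+    into ``Const(3)`` before code generation::
+
+        tree = optimize(environment.parse('{{ 1 + 2 }}'), environment)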
+
+ The solution would be a second syntax tree that has the scoping rules stored.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2 import nodes
+from jinja2.visitor import NodeTransformer
+
+
+def optimize(node, environment):
+ """The context hint can be used to perform an static optimization
+ based on the context given."""
+ optimizer = Optimizer(environment)
+ return optimizer.visit(node)
+
+
+class Optimizer(NodeTransformer):
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def fold(self, node, eval_ctx=None):
+ """Do constant folding."""
+ node = self.generic_visit(node)
+ try:
+ return nodes.Const.from_untrusted(node.as_const(eval_ctx),
+ lineno=node.lineno,
+ environment=self.environment)
+ except nodes.Impossible:
+ return node
+
+ visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
+ visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
+ visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
+ visit_Filter = visit_Test = visit_CondExpr = fold
+ del fold
diff --git a/python/jinja2/parser.py b/python/jinja2/parser.py
new file mode 100644
index 0000000..ed00d97
--- /dev/null
+++ b/python/jinja2/parser.py
@@ -0,0 +1,903 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.parser
+ ~~~~~~~~~~~~~
+
+ Implements the template parser.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
+from jinja2.lexer import describe_token, describe_token_expr
+from jinja2._compat import imap
+
+
+_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
+ 'macro', 'include', 'from', 'import',
+ 'set', 'with', 'autoescape'])
+_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
+
+_math_nodes = {
+ 'add': nodes.Add,
+ 'sub': nodes.Sub,
+ 'mul': nodes.Mul,
+ 'div': nodes.Div,
+ 'floordiv': nodes.FloorDiv,
+ 'mod': nodes.Mod,
+}
+
+
+class Parser(object):
+ """This is the central parsing class Jinja2 uses. It's passed to
+ extensions and can be used to parse expressions or statements.
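+
+    Inside an extension's ``parse()`` method it is typically used roughly
+    like this (illustrative sketch)::
+
+        lineno = next(parser.stream).lineno
+        args = [parser.parse_expression()]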
+ """
+
+ def __init__(self, environment, source, name=None, filename=None,
+ state=None):
+ self.environment = environment
+ self.stream = environment._tokenize(source, name, filename, state)
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.extensions = {}
+ for extension in environment.iter_extensions():
+ for tag in extension.tags:
+ self.extensions[tag] = extension.parse
+ self._last_identifier = 0
+ self._tag_stack = []
+ self._end_token_stack = []
+
+ def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
+ """Convenience method that raises `exc` with the message, passed
+ line number or last line number as well as the current name and
+ filename.
+ """
+ if lineno is None:
+ lineno = self.stream.current.lineno
+ raise exc(msg, lineno, self.name, self.filename)
+
+ def _fail_ut_eof(self, name, end_token_stack, lineno):
+ expected = []
+ for exprs in end_token_stack:
+ expected.extend(imap(describe_token_expr, exprs))
+ if end_token_stack:
+ currently_looking = ' or '.join(
+ "'%s'" % describe_token_expr(expr)
+ for expr in end_token_stack[-1])
+ else:
+ currently_looking = None
+
+ if name is None:
+ message = ['Unexpected end of template.']
+ else:
+ message = ['Encountered unknown tag \'%s\'.' % name]
+
+ if currently_looking:
+ if name is not None and name in expected:
+ message.append('You probably made a nesting mistake. Jinja '
+ 'is expecting this tag, but currently looking '
+ 'for %s.' % currently_looking)
+ else:
+ message.append('Jinja was looking for the following tags: '
+ '%s.' % currently_looking)
+
+ if self._tag_stack:
+ message.append('The innermost block that needs to be '
+ 'closed is \'%s\'.' % self._tag_stack[-1])
+
+ self.fail(' '.join(message), lineno)
+
+ def fail_unknown_tag(self, name, lineno=None):
+ """Called if the parser encounters an unknown tag. Tries to fail
+ with a human readable error message that could help to identify
+ the problem.
+ """
+ return self._fail_ut_eof(name, self._end_token_stack, lineno)
+
+ def fail_eof(self, end_tokens=None, lineno=None):
+ """Like fail_unknown_tag but for end of template situations."""
+ stack = list(self._end_token_stack)
+ if end_tokens is not None:
+ stack.append(end_tokens)
+ return self._fail_ut_eof(None, stack, lineno)
+
+ def is_tuple_end(self, extra_end_rules=None):
+ """Are we at the end of a tuple?"""
+ if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
+ return True
+ elif extra_end_rules is not None:
+ return self.stream.current.test_any(extra_end_rules)
+ return False
+
+ def free_identifier(self, lineno=None):
+ """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+ self._last_identifier += 1
+ rv = object.__new__(nodes.InternalName)
+ nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+ return rv
+
+ def parse_statement(self):
+ """Parse a single statement."""
+ token = self.stream.current
+ if token.type != 'name':
+ self.fail('tag name expected', token.lineno)
+ self._tag_stack.append(token.value)
+ pop_tag = True
+ try:
+ if token.value in _statement_keywords:
+ return getattr(self, 'parse_' + self.stream.current.value)()
+ if token.value == 'call':
+ return self.parse_call_block()
+ if token.value == 'filter':
+ return self.parse_filter_block()
+ ext = self.extensions.get(token.value)
+ if ext is not None:
+ return ext(self)
+
+ # did not work out, remove the token we pushed by accident
+ # from the stack so that the unknown tag fail function can
+ # produce a proper error message.
+ self._tag_stack.pop()
+ pop_tag = False
+ self.fail_unknown_tag(token.value, token.lineno)
+ finally:
+ if pop_tag:
+ self._tag_stack.pop()
+
+ def parse_statements(self, end_tokens, drop_needle=False):
+ """Parse multiple statements into a list until one of the end tokens
+ is reached. This is used to parse the body of statements as it also
+ parses template data if appropriate. The parser checks first if the
+ current token is a colon and skips it if there is one. Then it checks
+        for the block end and parses until one of the `end_tokens` is
+ reached. Per default the active token in the stream at the end of
+ the call is the matched end token. If this is not wanted `drop_needle`
+ can be set to `True` and the end token is removed.
+ """
+ # the first token may be a colon for python compatibility
+ self.stream.skip_if('colon')
+
+ # in the future it would be possible to add whole code sections
+ # by adding some sort of end of statement token and parsing those here.
+ self.stream.expect('block_end')
+ result = self.subparse(end_tokens)
+
+ # we reached the end of the template too early, the subparser
+ # does not check for this, so we do that now
+ if self.stream.current.type == 'eof':
+ self.fail_eof(end_tokens)
+
+ if drop_needle:
+ next(self.stream)
+ return result
+
+ def parse_set(self):
+ """Parse an assign statement."""
+ lineno = next(self.stream).lineno
+ target = self.parse_assign_target(with_namespace=True)
+ if self.stream.skip_if('assign'):
+ expr = self.parse_tuple()
+ return nodes.Assign(target, expr, lineno=lineno)
+ filter_node = self.parse_filter(None)
+ body = self.parse_statements(('name:endset',),
+ drop_needle=True)
+ return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
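+
+    # Illustrative note (not part of the upstream source): ``parse_set``
+    # covers both the expression and the block form of ``set``, e.g.
+    #
+    #     {% set title = page.title|trim %}
+    #     {% set summary %}{{ body|truncate(80) }}{% endset %}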
+
+ def parse_for(self):
+ """Parse a for loop."""
+ lineno = self.stream.expect('name:for').lineno
+ target = self.parse_assign_target(extra_end_rules=('name:in',))
+ self.stream.expect('name:in')
+ iter = self.parse_tuple(with_condexpr=False,
+ extra_end_rules=('name:recursive',))
+ test = None
+ if self.stream.skip_if('name:if'):
+ test = self.parse_expression()
+ recursive = self.stream.skip_if('name:recursive')
+ body = self.parse_statements(('name:endfor', 'name:else'))
+ if next(self.stream).value == 'endfor':
+ else_ = []
+ else:
+ else_ = self.parse_statements(('name:endfor',), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test,
+ recursive, lineno=lineno)
+
+ def parse_if(self):
+ """Parse an if construct."""
+ node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
+ while 1:
+ node.test = self.parse_tuple(with_condexpr=False)
+ node.body = self.parse_statements(('name:elif', 'name:else',
+ 'name:endif'))
+ node.elif_ = []
+ node.else_ = []
+ token = next(self.stream)
+ if token.test('name:elif'):
+ node = nodes.If(lineno=self.stream.current.lineno)
+ result.elif_.append(node)
+ continue
+ elif token.test('name:else'):
+ result.else_ = self.parse_statements(('name:endif',),
+ drop_needle=True)
+ break
+ return result
+
+ def parse_with(self):
+ node = nodes.With(lineno=next(self.stream).lineno)
+ targets = []
+ values = []
+ while self.stream.current.type != 'block_end':
+ lineno = self.stream.current.lineno
+ if targets:
+ self.stream.expect('comma')
+ target = self.parse_assign_target()
+ target.set_ctx('param')
+ targets.append(target)
+ self.stream.expect('assign')
+ values.append(self.parse_expression())
+ node.targets = targets
+ node.values = values
+ node.body = self.parse_statements(('name:endwith',),
+ drop_needle=True)
+ return node
+
+ def parse_autoescape(self):
+ node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
+ node.options = [
+ nodes.Keyword('autoescape', self.parse_expression())
+ ]
+ node.body = self.parse_statements(('name:endautoescape',),
+ drop_needle=True)
+ return nodes.Scope([node])
+
+ def parse_block(self):
+ node = nodes.Block(lineno=next(self.stream).lineno)
+ node.name = self.stream.expect('name').value
+ node.scoped = self.stream.skip_if('name:scoped')
+
+ # common problem people encounter when switching from django
+ # to jinja. we do not support hyphens in block names, so let's
+ # raise a nicer error message in that case.
+ if self.stream.current.type == 'sub':
+ self.fail('Block names in Jinja have to be valid Python '
+ 'identifiers and may not contain hyphens, use an '
+ 'underscore instead.')
+
+ node.body = self.parse_statements(('name:endblock',), drop_needle=True)
+ self.stream.skip_if('name:' + node.name)
+ return node
+
+ def parse_extends(self):
+ node = nodes.Extends(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ return node
+
+ def parse_import_context(self, node, default):
+ if self.stream.current.test_any('name:with', 'name:without') and \
+ self.stream.look().test('name:context'):
+ node.with_context = next(self.stream).value == 'with'
+ self.stream.skip()
+ else:
+ node.with_context = default
+ return node
+
+ def parse_include(self):
+ node = nodes.Include(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ if self.stream.current.test('name:ignore') and \
+ self.stream.look().test('name:missing'):
+ node.ignore_missing = True
+ self.stream.skip(2)
+ else:
+ node.ignore_missing = False
+ return self.parse_import_context(node, True)
+
+ def parse_import(self):
+ node = nodes.Import(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect('name:as')
+ node.target = self.parse_assign_target(name_only=True).name
+ return self.parse_import_context(node, False)
+
+ def parse_from(self):
+ node = nodes.FromImport(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect('name:import')
+ node.names = []
+
+ def parse_context():
+ if self.stream.current.value in ('with', 'without') and \
+ self.stream.look().test('name:context'):
+ node.with_context = next(self.stream).value == 'with'
+ self.stream.skip()
+ return True
+ return False
+
+ while 1:
+ if node.names:
+ self.stream.expect('comma')
+ if self.stream.current.type == 'name':
+ if parse_context():
+ break
+ target = self.parse_assign_target(name_only=True)
+ if target.name.startswith('_'):
+ self.fail('names starting with an underline can not '
+ 'be imported', target.lineno,
+ exc=TemplateAssertionError)
+ if self.stream.skip_if('name:as'):
+ alias = self.parse_assign_target(name_only=True)
+ node.names.append((target.name, alias.name))
+ else:
+ node.names.append(target.name)
+ if parse_context() or self.stream.current.type != 'comma':
+ break
+ else:
+ self.stream.expect('name')
+ if not hasattr(node, 'with_context'):
+ node.with_context = False
+ return node
+
+ def parse_signature(self, node):
+ node.args = args = []
+ node.defaults = defaults = []
+ self.stream.expect('lparen')
+ while self.stream.current.type != 'rparen':
+ if args:
+ self.stream.expect('comma')
+ arg = self.parse_assign_target(name_only=True)
+ arg.set_ctx('param')
+ if self.stream.skip_if('assign'):
+ defaults.append(self.parse_expression())
+ elif defaults:
+ self.fail('non-default argument follows default argument')
+ args.append(arg)
+ self.stream.expect('rparen')
+
+ def parse_call_block(self):
+ node = nodes.CallBlock(lineno=next(self.stream).lineno)
+ if self.stream.current.type == 'lparen':
+ self.parse_signature(node)
+ else:
+ node.args = []
+ node.defaults = []
+
+ node.call = self.parse_expression()
+ if not isinstance(node.call, nodes.Call):
+ self.fail('expected call', node.lineno)
+ node.body = self.parse_statements(('name:endcall',), drop_needle=True)
+ return node
+
+ def parse_filter_block(self):
+ node = nodes.FilterBlock(lineno=next(self.stream).lineno)
+ node.filter = self.parse_filter(None, start_inline=True)
+ node.body = self.parse_statements(('name:endfilter',),
+ drop_needle=True)
+ return node
+
+ def parse_macro(self):
+ node = nodes.Macro(lineno=next(self.stream).lineno)
+ node.name = self.parse_assign_target(name_only=True).name
+ self.parse_signature(node)
+ node.body = self.parse_statements(('name:endmacro',),
+ drop_needle=True)
+ return node
+
+ def parse_print(self):
+ node = nodes.Output(lineno=next(self.stream).lineno)
+ node.nodes = []
+ while self.stream.current.type != 'block_end':
+ if node.nodes:
+ self.stream.expect('comma')
+ node.nodes.append(self.parse_expression())
+ return node
+
+ def parse_assign_target(self, with_tuple=True, name_only=False,
+ extra_end_rules=None, with_namespace=False):
+ """Parse an assignment target. As Jinja2 allows assignments to
+        tuples, this function can parse all allowed assignment targets. Per
+        default assignments to tuples are parsed; that can be disabled by
+        setting `with_tuple` to `False`. If only assignments to names are
+ wanted `name_only` can be set to `True`. The `extra_end_rules`
+ parameter is forwarded to the tuple parsing function. If
+ `with_namespace` is enabled, a namespace assignment may be parsed.
+ """
+ if with_namespace and self.stream.look().type == 'dot':
+ token = self.stream.expect('name')
+ next(self.stream) # dot
+ attr = self.stream.expect('name')
+ target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
+ elif name_only:
+ token = self.stream.expect('name')
+ target = nodes.Name(token.value, 'store', lineno=token.lineno)
+ else:
+ if with_tuple:
+ target = self.parse_tuple(simplified=True,
+ extra_end_rules=extra_end_rules)
+ else:
+ target = self.parse_primary()
+ target.set_ctx('store')
+ if not target.can_assign():
+ self.fail('can\'t assign to %r' % target.__class__.
+ __name__.lower(), target.lineno)
+ return target
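+
+    # Illustrative note (not part of the upstream source): with
+    # ``with_namespace=True`` this also accepts namespace attribute targets,
+    # which become :class:`~jinja2.nodes.NSRef` nodes, e.g.
+    #
+    #     {% set ns = namespace(counter=0) %}
+    #     {% set ns.counter = ns.counter + 1 %}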
+
+ def parse_expression(self, with_condexpr=True):
+ """Parse an expression. Per default all expressions are parsed, if
+ the optional `with_condexpr` parameter is set to `False` conditional
+ expressions are not parsed.
+ """
+ if with_condexpr:
+ return self.parse_condexpr()
+ return self.parse_or()
+
+ def parse_condexpr(self):
+ lineno = self.stream.current.lineno
+ expr1 = self.parse_or()
+ while self.stream.skip_if('name:if'):
+ expr2 = self.parse_or()
+ if self.stream.skip_if('name:else'):
+ expr3 = self.parse_condexpr()
+ else:
+ expr3 = None
+ expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return expr1
+
+ def parse_or(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_and()
+ while self.stream.skip_if('name:or'):
+ right = self.parse_and()
+ left = nodes.Or(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_and(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_not()
+ while self.stream.skip_if('name:and'):
+ right = self.parse_not()
+ left = nodes.And(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_not(self):
+ if self.stream.current.test('name:not'):
+ lineno = next(self.stream).lineno
+ return nodes.Not(self.parse_not(), lineno=lineno)
+ return self.parse_compare()
+
+ def parse_compare(self):
+ lineno = self.stream.current.lineno
+ expr = self.parse_math1()
+ ops = []
+ while 1:
+ token_type = self.stream.current.type
+ if token_type in _compare_operators:
+ next(self.stream)
+ ops.append(nodes.Operand(token_type, self.parse_math1()))
+ elif self.stream.skip_if('name:in'):
+ ops.append(nodes.Operand('in', self.parse_math1()))
+ elif (self.stream.current.test('name:not') and
+ self.stream.look().test('name:in')):
+ self.stream.skip(2)
+ ops.append(nodes.Operand('notin', self.parse_math1()))
+ else:
+ break
+ lineno = self.stream.current.lineno
+ if not ops:
+ return expr
+ return nodes.Compare(expr, ops, lineno=lineno)
+
+ def parse_math1(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_concat()
+ while self.stream.current.type in ('add', 'sub'):
+ cls = _math_nodes[self.stream.current.type]
+ next(self.stream)
+ right = self.parse_concat()
+ left = cls(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_concat(self):
+ lineno = self.stream.current.lineno
+ args = [self.parse_math2()]
+ while self.stream.current.type == 'tilde':
+ next(self.stream)
+ args.append(self.parse_math2())
+ if len(args) == 1:
+ return args[0]
+ return nodes.Concat(args, lineno=lineno)
+
+ def parse_math2(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_pow()
+ while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
+ cls = _math_nodes[self.stream.current.type]
+ next(self.stream)
+ right = self.parse_pow()
+ left = cls(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_pow(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_unary()
+ while self.stream.current.type == 'pow':
+ next(self.stream)
+ right = self.parse_unary()
+ left = nodes.Pow(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_unary(self, with_filter=True):
+ token_type = self.stream.current.type
+ lineno = self.stream.current.lineno
+ if token_type == 'sub':
+ next(self.stream)
+ node = nodes.Neg(self.parse_unary(False), lineno=lineno)
+ elif token_type == 'add':
+ next(self.stream)
+ node = nodes.Pos(self.parse_unary(False), lineno=lineno)
+ else:
+ node = self.parse_primary()
+ node = self.parse_postfix(node)
+ if with_filter:
+ node = self.parse_filter_expr(node)
+ return node
+
+ def parse_primary(self):
+ token = self.stream.current
+ if token.type == 'name':
+ if token.value in ('true', 'false', 'True', 'False'):
+ node = nodes.Const(token.value in ('true', 'True'),
+ lineno=token.lineno)
+ elif token.value in ('none', 'None'):
+ node = nodes.Const(None, lineno=token.lineno)
+ else:
+ node = nodes.Name(token.value, 'load', lineno=token.lineno)
+ next(self.stream)
+ elif token.type == 'string':
+ next(self.stream)
+ buf = [token.value]
+ lineno = token.lineno
+ while self.stream.current.type == 'string':
+ buf.append(self.stream.current.value)
+ next(self.stream)
+ node = nodes.Const(''.join(buf), lineno=lineno)
+ elif token.type in ('integer', 'float'):
+ next(self.stream)
+ node = nodes.Const(token.value, lineno=token.lineno)
+ elif token.type == 'lparen':
+ next(self.stream)
+ node = self.parse_tuple(explicit_parentheses=True)
+ self.stream.expect('rparen')
+ elif token.type == 'lbracket':
+ node = self.parse_list()
+ elif token.type == 'lbrace':
+ node = self.parse_dict()
+ else:
+ self.fail("unexpected '%s'" % describe_token(token), token.lineno)
+ return node
+
+ def parse_tuple(self, simplified=False, with_condexpr=True,
+ extra_end_rules=None, explicit_parentheses=False):
+ """Works like `parse_expression` but if multiple expressions are
+ delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+        This method can also return a plain expression instead of a tuple
+        if no commas were found.
+
+ The default parsing mode is a full tuple. If `simplified` is `True`
+        only names and literals are parsed. The `with_condexpr` parameter is
+ forwarded to :meth:`parse_expression`.
+
+ Because tuples do not require delimiters and may end in a bogus comma
+ an extra hint is needed that marks the end of a tuple. For example
+ for loops support tuples between `for` and `in`. In that case the
+ `extra_end_rules` is set to ``['name:in']``.
+
+ `explicit_parentheses` is true if the parsing was triggered by an
+ expression in parentheses. This is used to figure out if an empty
+ tuple is a valid expression or not.
+ """
+ lineno = self.stream.current.lineno
+ if simplified:
+ parse = self.parse_primary
+ elif with_condexpr:
+ parse = self.parse_expression
+ else:
+ parse = lambda: self.parse_expression(with_condexpr=False)
+ args = []
+ is_tuple = False
+ while 1:
+ if args:
+ self.stream.expect('comma')
+ if self.is_tuple_end(extra_end_rules):
+ break
+ args.append(parse())
+ if self.stream.current.type == 'comma':
+ is_tuple = True
+ else:
+ break
+ lineno = self.stream.current.lineno
+
+ if not is_tuple:
+ if args:
+ return args[0]
+
+ # if we don't have explicit parentheses, an empty tuple is
+ # not a valid expression. This would mean nothing (literally
+ # nothing) in the spot of an expression would be an empty
+ # tuple.
+ if not explicit_parentheses:
+ self.fail('Expected an expression, got \'%s\'' %
+ describe_token(self.stream.current))
+
+ return nodes.Tuple(args, 'load', lineno=lineno)
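+
+    # Illustrative note (not part of the upstream source):
+    #
+    #     {{ 1, 2 }}      {# parsed as a nodes.Tuple with two items #}
+    #     {{ (1) }}       {# no comma: collapses to the wrapped expression #}
+    #     {{ () }}        {# empty tuple, legal only with explicit parens #}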
+
+ def parse_list(self):
+ token = self.stream.expect('lbracket')
+ items = []
+ while self.stream.current.type != 'rbracket':
+ if items:
+ self.stream.expect('comma')
+ if self.stream.current.type == 'rbracket':
+ break
+ items.append(self.parse_expression())
+ self.stream.expect('rbracket')
+ return nodes.List(items, lineno=token.lineno)
+
+ def parse_dict(self):
+ token = self.stream.expect('lbrace')
+ items = []
+ while self.stream.current.type != 'rbrace':
+ if items:
+ self.stream.expect('comma')
+ if self.stream.current.type == 'rbrace':
+ break
+ key = self.parse_expression()
+ self.stream.expect('colon')
+ value = self.parse_expression()
+ items.append(nodes.Pair(key, value, lineno=key.lineno))
+ self.stream.expect('rbrace')
+ return nodes.Dict(items, lineno=token.lineno)
+
+ def parse_postfix(self, node):
+ while 1:
+ token_type = self.stream.current.type
+ if token_type == 'dot' or token_type == 'lbracket':
+ node = self.parse_subscript(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == 'lparen':
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_filter_expr(self, node):
+ while 1:
+ token_type = self.stream.current.type
+ if token_type == 'pipe':
+ node = self.parse_filter(node)
+ elif token_type == 'name' and self.stream.current.value == 'is':
+ node = self.parse_test(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == 'lparen':
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_subscript(self, node):
+ token = next(self.stream)
+ if token.type == 'dot':
+ attr_token = self.stream.current
+ next(self.stream)
+ if attr_token.type == 'name':
+ return nodes.Getattr(node, attr_token.value, 'load',
+ lineno=token.lineno)
+ elif attr_token.type != 'integer':
+ self.fail('expected name or number', attr_token.lineno)
+ arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
+ return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+ if token.type == 'lbracket':
+ args = []
+ while self.stream.current.type != 'rbracket':
+ if args:
+ self.stream.expect('comma')
+ args.append(self.parse_subscribed())
+ self.stream.expect('rbracket')
+ if len(args) == 1:
+ arg = args[0]
+ else:
+ arg = nodes.Tuple(args, 'load', lineno=token.lineno)
+ return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+        self.fail('expected subscript expression', token.lineno)
+
+ def parse_subscribed(self):
+ lineno = self.stream.current.lineno
+
+ if self.stream.current.type == 'colon':
+ next(self.stream)
+ args = [None]
+ else:
+ node = self.parse_expression()
+ if self.stream.current.type != 'colon':
+ return node
+ next(self.stream)
+ args = [node]
+
+ if self.stream.current.type == 'colon':
+ args.append(None)
+ elif self.stream.current.type not in ('rbracket', 'comma'):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+
+ if self.stream.current.type == 'colon':
+ next(self.stream)
+ if self.stream.current.type not in ('rbracket', 'comma'):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+ else:
+ args.append(None)
+
+ return nodes.Slice(lineno=lineno, *args)
+
+ def parse_call(self, node):
+ token = self.stream.expect('lparen')
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ require_comma = False
+
+ def ensure(expr):
+ if not expr:
+ self.fail('invalid syntax for function call expression',
+ token.lineno)
+
+ while self.stream.current.type != 'rparen':
+ if require_comma:
+ self.stream.expect('comma')
+ # support for trailing comma
+ if self.stream.current.type == 'rparen':
+ break
+ if self.stream.current.type == 'mul':
+ ensure(dyn_args is None and dyn_kwargs is None)
+ next(self.stream)
+ dyn_args = self.parse_expression()
+ elif self.stream.current.type == 'pow':
+ ensure(dyn_kwargs is None)
+ next(self.stream)
+ dyn_kwargs = self.parse_expression()
+ else:
+ ensure(dyn_args is None and dyn_kwargs is None)
+ if self.stream.current.type == 'name' and \
+ self.stream.look().type == 'assign':
+ key = self.stream.current.value
+ self.stream.skip(2)
+ value = self.parse_expression()
+ kwargs.append(nodes.Keyword(key, value,
+ lineno=value.lineno))
+ else:
+ ensure(not kwargs)
+ args.append(self.parse_expression())
+
+ require_comma = True
+ self.stream.expect('rparen')
+
+ if node is None:
+ return args, kwargs, dyn_args, dyn_kwargs
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
+ lineno=token.lineno)
+
+ def parse_filter(self, node, start_inline=False):
+ while self.stream.current.type == 'pipe' or start_inline:
+ if not start_inline:
+ next(self.stream)
+ token = self.stream.expect('name')
+ name = token.value
+ while self.stream.current.type == 'dot':
+ next(self.stream)
+ name += '.' + self.stream.expect('name').value
+ if self.stream.current.type == 'lparen':
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ else:
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ node = nodes.Filter(node, name, args, kwargs, dyn_args,
+ dyn_kwargs, lineno=token.lineno)
+ start_inline = False
+ return node
+
+ def parse_test(self, node):
+ token = next(self.stream)
+ if self.stream.current.test('name:not'):
+ next(self.stream)
+ negated = True
+ else:
+ negated = False
+ name = self.stream.expect('name').value
+ while self.stream.current.type == 'dot':
+ next(self.stream)
+ name += '.' + self.stream.expect('name').value
+ dyn_args = dyn_kwargs = None
+ kwargs = []
+ if self.stream.current.type == 'lparen':
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ elif (self.stream.current.type in ('name', 'string', 'integer',
+ 'float', 'lparen', 'lbracket',
+ 'lbrace') and not
+ self.stream.current.test_any('name:else', 'name:or',
+ 'name:and')):
+ if self.stream.current.test('name:is'):
+ self.fail('You cannot chain multiple tests with is')
+ args = [self.parse_primary()]
+ else:
+ args = []
+ node = nodes.Test(node, name, args, kwargs, dyn_args,
+ dyn_kwargs, lineno=token.lineno)
+ if negated:
+ node = nodes.Not(node, lineno=token.lineno)
+ return node
+
+ def subparse(self, end_tokens=None):
+ body = []
+ data_buffer = []
+ add_data = data_buffer.append
+
+ if end_tokens is not None:
+ self._end_token_stack.append(end_tokens)
+
+ def flush_data():
+ if data_buffer:
+ lineno = data_buffer[0].lineno
+ body.append(nodes.Output(data_buffer[:], lineno=lineno))
+ del data_buffer[:]
+
+ try:
+ while self.stream:
+ token = self.stream.current
+ if token.type == 'data':
+ if token.value:
+ add_data(nodes.TemplateData(token.value,
+ lineno=token.lineno))
+ next(self.stream)
+ elif token.type == 'variable_begin':
+ next(self.stream)
+ add_data(self.parse_tuple(with_condexpr=True))
+ self.stream.expect('variable_end')
+ elif token.type == 'block_begin':
+ flush_data()
+ next(self.stream)
+ if end_tokens is not None and \
+ self.stream.current.test_any(*end_tokens):
+ return body
+ rv = self.parse_statement()
+ if isinstance(rv, list):
+ body.extend(rv)
+ else:
+ body.append(rv)
+ self.stream.expect('block_end')
+ else:
+ raise AssertionError('internal parsing error')
+
+ flush_data()
+ finally:
+ if end_tokens is not None:
+ self._end_token_stack.pop()
+
+ return body
+
+ def parse(self):
+ """Parse the whole template into a `Template` node."""
+ result = nodes.Template(self.subparse(), lineno=1)
+ result.set_environment(self.environment)
+ return result
diff --git a/python/jinja2/runtime.py b/python/jinja2/runtime.py
new file mode 100644
index 0000000..f9d7a68
--- /dev/null
+++ b/python/jinja2/runtime.py
@@ -0,0 +1,813 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.runtime
+ ~~~~~~~~~~~~~~
+
+ Runtime helpers.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+import sys
+
+from itertools import chain
+from types import MethodType
+
+from jinja2.nodes import EvalContext, _context_function_types
+from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
+ internalcode, object_type_repr, evalcontextfunction, Namespace
+from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
+ TemplateNotFound
+from jinja2._compat import imap, text_type, iteritems, \
+ implements_iterator, implements_to_string, string_types, PY2, \
+ with_metaclass
+
+
+# these variables are exported to the template runtime
+__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
+ 'TemplateRuntimeError', 'missing', 'concat', 'escape',
+ 'markup_join', 'unicode_join', 'to_string', 'identity',
+ 'TemplateNotFound', 'Namespace']
+
+#: the name of the function that is used to convert something into
+#: a string. We can just use the text type here.
+to_string = text_type
+
+#: the identity function. Useful for certain things in the environment
+identity = lambda x: x
+
+_first_iteration = object()
+_last_iteration = object()
+
+
+def markup_join(seq):
+ """Concatenation that escapes if necessary and converts to unicode."""
+ buf = []
+ iterator = imap(soft_unicode, seq)
+ for arg in iterator:
+ buf.append(arg)
+ if hasattr(arg, '__html__'):
+ return Markup(u'').join(chain(buf, iterator))
+ return concat(buf)
+
+
+def unicode_join(seq):
+ """Simple args to unicode conversion and concatenation."""
+ return concat(imap(text_type, seq))
+
+
+def new_context(environment, template_name, blocks, vars=None,
+ shared=None, globals=None, locals=None):
+ """Internal helper to for context creation."""
+ if vars is None:
+ vars = {}
+ if shared:
+ parent = vars
+ else:
+ parent = dict(globals or (), **vars)
+ if locals:
+ # if the parent is shared a copy should be created because
+ # we don't want to modify the dict passed
+ if shared:
+ parent = dict(parent)
+ for key, value in iteritems(locals):
+ if value is not missing:
+ parent[key] = value
+ return environment.context_class(environment, parent, template_name,
+ blocks)
+
+
+class TemplateReference(object):
+ """The `self` in templates."""
+
+ def __init__(self, context):
+ self.__context = context
+
+ def __getitem__(self, name):
+ blocks = self.__context.blocks[name]
+ return BlockReference(name, self.__context, blocks, 0)
+
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self.__context.name
+ )
+
+
+def _get_func(x):
+ return getattr(x, '__func__', x)
+
+
+class ContextMeta(type):
+
+ def __new__(cls, name, bases, d):
+ rv = type.__new__(cls, name, bases, d)
+ if bases == ():
+ return rv
+
+ resolve = _get_func(rv.resolve)
+ default_resolve = _get_func(Context.resolve)
+ resolve_or_missing = _get_func(rv.resolve_or_missing)
+ default_resolve_or_missing = _get_func(Context.resolve_or_missing)
+
+        # If resolve was overridden but resolve_or_missing was not, fall
+        # back to the legacy call logic; if neither was overridden, the
+        # fast resolve mode can be used.
+ if resolve is not default_resolve and \
+ resolve_or_missing is default_resolve_or_missing:
+ rv._legacy_resolve_mode = True
+ elif resolve is default_resolve and \
+ resolve_or_missing is default_resolve_or_missing:
+ rv._fast_resolve_mode = True
+
+ return rv
+
+
+def resolve_or_missing(context, key, missing=missing):
+ if key in context.vars:
+ return context.vars[key]
+ if key in context.parent:
+ return context.parent[key]
+ return missing
+
+
+class Context(with_metaclass(ContextMeta)):
+ """The template context holds the variables of a template. It stores the
+ values passed to the template and also the names the template exports.
+ Creating instances is neither supported nor useful as it's created
+ automatically at various stages of the template evaluation and should not
+ be created by hand.
+
+ The context is immutable. Modifications on :attr:`parent` **must not**
+ happen and modifications on :attr:`vars` are allowed from generated
+ template code only. Template filters and global functions marked as
+ :func:`contextfunction`\\s get the active context passed as first argument
+ and are allowed to access the context read-only.
+
+ The template context supports read only dict operations (`get`,
+ `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
+ `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
+ method that doesn't fail with a `KeyError` but returns an
+ :class:`Undefined` object for missing variables.
+ """
+ # XXX: we want to eventually make this be a deprecation warning and
+ # remove it.
+ _legacy_resolve_mode = False
+ _fast_resolve_mode = False
+
+ def __init__(self, environment, parent, name, blocks):
+ self.parent = parent
+ self.vars = {}
+ self.environment = environment
+ self.eval_ctx = EvalContext(self.environment, name)
+ self.exported_vars = set()
+ self.name = name
+
+ # create the initial mapping of blocks. Whenever template inheritance
+ # takes place the runtime will update this mapping with the new blocks
+ # from the template.
+ self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
+
+ # In case we detect the fast resolve mode we can set up an alias
+ # here that bypasses the legacy code logic.
+ if self._fast_resolve_mode:
+ self.resolve_or_missing = MethodType(resolve_or_missing, self)
+
+ def super(self, name, current):
+ """Render a parent block."""
+ try:
+ blocks = self.blocks[name]
+ index = blocks.index(current) + 1
+ blocks[index]
+ except LookupError:
+ return self.environment.undefined('there is no parent block '
+ 'called %r.' % name,
+ name='super')
+ return BlockReference(name, self, blocks, index)
+
+ def get(self, key, default=None):
+ """Returns an item from the template context, if it doesn't exist
+ `default` is returned.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def resolve(self, key):
+ """Looks up a variable like `__getitem__` or `get` but returns an
+        :class:`Undefined` object with the name of the variable looked up.
+ """
+ if self._legacy_resolve_mode:
+ rv = resolve_or_missing(self, key)
+ else:
+ rv = self.resolve_or_missing(key)
+ if rv is missing:
+ return self.environment.undefined(name=key)
+ return rv
+
+ def resolve_or_missing(self, key):
+ """Resolves a variable like :meth:`resolve` but returns the
+ special `missing` value if it cannot be found.
+ """
+ if self._legacy_resolve_mode:
+ rv = self.resolve(key)
+ if isinstance(rv, Undefined):
+ rv = missing
+ return rv
+ return resolve_or_missing(self, key)
+
+ def get_exported(self):
+ """Get a new dict with the exported variables."""
+ return dict((k, self.vars[k]) for k in self.exported_vars)
+
+ def get_all(self):
+ """Return the complete context as dict including the exported
+        variables. For optimization reasons this might not return an
+        actual copy, so be careful with using it.
+ """
+ if not self.vars:
+ return self.parent
+ if not self.parent:
+ return self.vars
+ return dict(self.parent, **self.vars)
+
+ @internalcode
+ def call(__self, __obj, *args, **kwargs):
+ """Call the callable with the arguments and keyword arguments
+ provided but inject the active context or environment as first
+ argument if the callable is a :func:`contextfunction` or
+ :func:`environmentfunction`.
+ """
+ if __debug__:
+ __traceback_hide__ = True # noqa
+
+ # Allow callable classes to take a context
+ if hasattr(__obj, '__call__'):
+ fn = __obj.__call__
+ for fn_type in ('contextfunction',
+ 'evalcontextfunction',
+ 'environmentfunction'):
+ if hasattr(fn, fn_type):
+ __obj = fn
+ break
+
+ if isinstance(__obj, _context_function_types):
+ if getattr(__obj, 'contextfunction', 0):
+ args = (__self,) + args
+ elif getattr(__obj, 'evalcontextfunction', 0):
+ args = (__self.eval_ctx,) + args
+ elif getattr(__obj, 'environmentfunction', 0):
+ args = (__self.environment,) + args
+ try:
+ return __obj(*args, **kwargs)
+ except StopIteration:
+ return __self.environment.undefined('value was undefined because '
+ 'a callable raised a '
+ 'StopIteration exception')
+
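+    # Illustrative sketch (not part of the upstream source): a callable marked
+    # with :func:`jinja2.contextfunction` gets the active context injected by
+    # this method when invoked from a template::
+    #
+    #     from jinja2 import contextfunction
+    #
+    #     @contextfunction
+    #     def current_user(context):
+    #         return context.get('user', 'anonymous')
+    #
+    # The compiled template code routes such calls through ``Context.call``.
+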
+ def derived(self, locals=None):
+ """Internal helper function to create a derived context. This is
+ used in situations where the system needs a new context in the same
+ template that is independent.
+ """
+ context = new_context(self.environment, self.name, {},
+ self.get_all(), True, None, locals)
+ context.eval_ctx = self.eval_ctx
+ context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
+ return context
+
+ def _all(meth):
+ proxy = lambda self: getattr(self.get_all(), meth)()
+ proxy.__doc__ = getattr(dict, meth).__doc__
+ proxy.__name__ = meth
+ return proxy
+
+ keys = _all('keys')
+ values = _all('values')
+ items = _all('items')
+
+ # not available on python 3
+ if PY2:
+ iterkeys = _all('iterkeys')
+ itervalues = _all('itervalues')
+ iteritems = _all('iteritems')
+ del _all
+
+ def __contains__(self, name):
+ return name in self.vars or name in self.parent
+
+ def __getitem__(self, key):
+ """Lookup a variable or raise `KeyError` if the variable is
+ undefined.
+ """
+ item = self.resolve_or_missing(key)
+ if item is missing:
+ raise KeyError(key)
+ return item
+
+ def __repr__(self):
+ return '<%s %s of %r>' % (
+ self.__class__.__name__,
+ repr(self.get_all()),
+ self.name
+ )
+
+
+# register the context as mapping if possible
+try:
+ from collections import Mapping
+ Mapping.register(Context)
+except ImportError:
+ pass
+
+
+class BlockReference(object):
+ """One block on a template reference."""
+
+ def __init__(self, name, context, stack, depth):
+ self.name = name
+ self._context = context
+ self._stack = stack
+ self._depth = depth
+
+ @property
+ def super(self):
+ """Super the block."""
+ if self._depth + 1 >= len(self._stack):
+ return self._context.environment. \
+ undefined('there is no parent block called %r.' %
+ self.name, name='super')
+ return BlockReference(self.name, self._context, self._stack,
+ self._depth + 1)
+
+ @internalcode
+ def __call__(self):
+ rv = concat(self._stack[self._depth](self._context))
+ if self._context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+class LoopContextBase(object):
+ """A loop context for dynamic iteration."""
+
+ _before = _first_iteration
+ _current = _first_iteration
+ _after = _last_iteration
+ _length = None
+
+ def __init__(self, undefined, recurse=None, depth0=0):
+ self._undefined = undefined
+ self._recurse = recurse
+ self.index0 = -1
+ self.depth0 = depth0
+ self._last_checked_value = missing
+
+ def cycle(self, *args):
+ """Cycles among the arguments with the current loop index."""
+ if not args:
+ raise TypeError('no items for cycling given')
+ return args[self.index0 % len(args)]
+
+ def changed(self, *value):
+ """Checks whether the value has changed since the last call."""
+ if self._last_checked_value != value:
+ self._last_checked_value = value
+ return True
+ return False
+
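+    # Illustrative note (not part of the upstream source): in templates these
+    # helpers surface on the special ``loop`` variable, e.g.
+    #
+    #     {% for row in rows %}
+    #       <tr class="{{ loop.cycle('odd', 'even') }}">{{ row }}</tr>
+    #     {% endfor %}
+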
+ first = property(lambda x: x.index0 == 0)
+ last = property(lambda x: x._after is _last_iteration)
+ index = property(lambda x: x.index0 + 1)
+ revindex = property(lambda x: x.length - x.index0)
+ revindex0 = property(lambda x: x.length - x.index)
+ depth = property(lambda x: x.depth0 + 1)
+
+ @property
+ def previtem(self):
+ if self._before is _first_iteration:
+ return self._undefined('there is no previous item')
+ return self._before
+
+ @property
+ def nextitem(self):
+ if self._after is _last_iteration:
+ return self._undefined('there is no next item')
+ return self._after
+
+ def __len__(self):
+ return self.length
+
+ @internalcode
+ def loop(self, iterable):
+ if self._recurse is None:
+ raise TypeError('Tried to call non recursive loop. Maybe you '
+ "forgot the 'recursive' modifier.")
+ return self._recurse(iterable, self._recurse, self.depth0 + 1)
+
+    # a nifty trick to enhance the error message if someone tries to call
+    # the loop without or with too many arguments.
+ __call__ = loop
+ del loop
+
+ def __repr__(self):
+ return '<%s %r/%r>' % (
+ self.__class__.__name__,
+ self.index,
+ self.length
+ )
+
+
+class LoopContext(LoopContextBase):
+
+ def __init__(self, iterable, undefined, recurse=None, depth0=0):
+ LoopContextBase.__init__(self, undefined, recurse, depth0)
+ self._iterator = iter(iterable)
+
+ # try to get the length of the iterable early. This must be done
+        # here because there are some broken iterators around where their
+        # __len__ is the number of iterations left (I'm looking at you,
+        # listreverseiterator!).
+ try:
+ self._length = len(iterable)
+ except (TypeError, AttributeError):
+ self._length = None
+ self._after = self._safe_next()
+
+ @property
+ def length(self):
+ if self._length is None:
+            # if it was not possible to get the length of the iterator when
+            # the loop context was created (i.e. iterating over a generator)
+ # we have to convert the iterable into a sequence and use the
+ # length of that + the number of iterations so far.
+ iterable = tuple(self._iterator)
+ self._iterator = iter(iterable)
+ iterations_done = self.index0 + 2
+ self._length = len(iterable) + iterations_done
+ return self._length
+
+ def __iter__(self):
+ return LoopContextIterator(self)
+
+ def _safe_next(self):
+ try:
+ return next(self._iterator)
+ except StopIteration:
+ return _last_iteration
+
+
+@implements_iterator
+class LoopContextIterator(object):
+ """The iterator for a loop context."""
+ __slots__ = ('context',)
+
+ def __init__(self, context):
+ self.context = context
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ ctx = self.context
+ ctx.index0 += 1
+ if ctx._after is _last_iteration:
+ raise StopIteration()
+ ctx._before = ctx._current
+ ctx._current = ctx._after
+ ctx._after = ctx._safe_next()
+ return ctx._current, ctx
+
+
+class Macro(object):
+ """Wraps a macro function."""
+
+ def __init__(self, environment, func, name, arguments,
+ catch_kwargs, catch_varargs, caller,
+ default_autoescape=None):
+ self._environment = environment
+ self._func = func
+ self._argument_count = len(arguments)
+ self.name = name
+ self.arguments = arguments
+ self.catch_kwargs = catch_kwargs
+ self.catch_varargs = catch_varargs
+ self.caller = caller
+ self.explicit_caller = 'caller' in arguments
+ if default_autoescape is None:
+ default_autoescape = environment.autoescape
+ self._default_autoescape = default_autoescape
+
+ @internalcode
+ @evalcontextfunction
+ def __call__(self, *args, **kwargs):
+        # This requires a bit of explanation. In the past we used to
+        # decide largely based on compile-time information whether a macro
+        # is safe or unsafe. While there was a volatile mode, it was
+        # largely unused for deciding on escaping. This turns out to be
+        # problematic for macros because whether a macro is safe depends
+        # not so much on the escape mode when it was defined as on the
+        # mode in effect when it is used.
+ #
+ # Because however we export macros from the module system and
+ # there are historic callers that do not pass an eval context (and
+ # will continue to not pass one), we need to perform an instance
+ # check here.
+ #
+ # This is considered safe because an eval context is not a valid
+        # argument to callables otherwise anyway. Worst case here is
+ # that if no eval context is passed we fall back to the compile
+ # time autoescape flag.
+ if args and isinstance(args[0], EvalContext):
+ autoescape = args[0].autoescape
+ args = args[1:]
+ else:
+ autoescape = self._default_autoescape
+
+ # try to consume the positional arguments
+ arguments = list(args[:self._argument_count])
+ off = len(arguments)
+
+ # For information why this is necessary refer to the handling
+ # of caller in the `macro_body` handler in the compiler.
+ found_caller = False
+
+ # if the number of arguments consumed is not the number of
+ # arguments expected we start filling in keyword arguments
+ # and defaults.
+ if off != self._argument_count:
+ for idx, name in enumerate(self.arguments[len(arguments):]):
+ try:
+ value = kwargs.pop(name)
+ except KeyError:
+ value = missing
+ if name == 'caller':
+ found_caller = True
+ arguments.append(value)
+ else:
+ found_caller = self.explicit_caller
+
+ # it's important that the order of these arguments does not change
+        # unless it's also changed in the compiler's `function_scoping` method.
+ # the order is caller, keyword arguments, positional arguments!
+ if self.caller and not found_caller:
+ caller = kwargs.pop('caller', None)
+ if caller is None:
+ caller = self._environment.undefined('No caller defined',
+ name='caller')
+ arguments.append(caller)
+
+ if self.catch_kwargs:
+ arguments.append(kwargs)
+ elif kwargs:
+ if 'caller' in kwargs:
+ raise TypeError('macro %r was invoked with two values for '
+ 'the special caller argument. This is '
+ 'most likely a bug.' % self.name)
+ raise TypeError('macro %r takes no keyword argument %r' %
+ (self.name, next(iter(kwargs))))
+ if self.catch_varargs:
+ arguments.append(args[self._argument_count:])
+ elif len(args) > self._argument_count:
+ raise TypeError('macro %r takes not more than %d argument(s)' %
+ (self.name, len(self.arguments)))
+
+ return self._invoke(arguments, autoescape)
+
+ def _invoke(self, arguments, autoescape):
+ """This method is being swapped out by the async implementation."""
+ rv = self._func(*arguments)
+ if autoescape:
+ rv = Markup(rv)
+ return rv
+
+ def __repr__(self):
+ return '<%s %s>' % (
+ self.__class__.__name__,
+ self.name is None and 'anonymous' or repr(self.name)
+ )
+
+
+@implements_to_string
+class Undefined(object):
+ """The default undefined type. This undefined type can be printed and
+ iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
+
+ >>> foo = Undefined(name='foo')
+ >>> str(foo)
+ ''
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+ __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
+ '_undefined_exception')
+
+ def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
+ self._undefined_hint = hint
+ self._undefined_obj = obj
+ self._undefined_name = name
+ self._undefined_exception = exc
+
+ @internalcode
+ def _fail_with_undefined_error(self, *args, **kwargs):
+ """Regular callback function for undefined objects that raises an
+ `jinja2.exceptions.UndefinedError` on call.
+ """
+ if self._undefined_hint is None:
+ if self._undefined_obj is missing:
+ hint = '%r is undefined' % self._undefined_name
+ elif not isinstance(self._undefined_name, string_types):
+ hint = '%s has no element %r' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ else:
+ hint = '%r has no attribute %r' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ else:
+ hint = self._undefined_hint
+ raise self._undefined_exception(hint)
+
+ @internalcode
+ def __getattr__(self, name):
+ if name[:2] == '__':
+ raise AttributeError(name)
+ return self._fail_with_undefined_error()
+
+ __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
+ __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
+ __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
+ __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
+ __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
+ __rsub__ = _fail_with_undefined_error
+
+ def __eq__(self, other):
+ return type(self) is type(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return id(type(self))
+
+ def __str__(self):
+ return u''
+
+ def __len__(self):
+ return 0
+
+ def __iter__(self):
+ if 0:
+ yield None
+
+ def __nonzero__(self):
+ return False
+ __bool__ = __nonzero__
+
+ def __repr__(self):
+ return 'Undefined'
+
+
+def make_logging_undefined(logger=None, base=None):
+ """Given a logger object this returns a new undefined class that will
+ log certain failures. It will log iterations and printing. If no
+ logger is given a default logger is created.
+
+ Example::
+
+ logger = logging.getLogger(__name__)
+ LoggingUndefined = make_logging_undefined(
+ logger=logger,
+ base=Undefined
+ )
+
+ .. versionadded:: 2.8
+
+ :param logger: the logger to use. If not provided, a default logger
+ is created.
+ :param base: the base class to add logging functionality to. This
+ defaults to :class:`Undefined`.
+ """
+ if logger is None:
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.StreamHandler(sys.stderr))
+ if base is None:
+ base = Undefined
+
+ def _log_message(undef):
+ if undef._undefined_hint is None:
+ if undef._undefined_obj is missing:
+ hint = '%s is undefined' % undef._undefined_name
+ elif not isinstance(undef._undefined_name, string_types):
+ hint = '%s has no element %s' % (
+ object_type_repr(undef._undefined_obj),
+ undef._undefined_name)
+ else:
+ hint = '%s has no attribute %s' % (
+ object_type_repr(undef._undefined_obj),
+ undef._undefined_name)
+ else:
+ hint = undef._undefined_hint
+ logger.warning('Template variable warning: %s', hint)
+
+ class LoggingUndefined(base):
+
+ def _fail_with_undefined_error(self, *args, **kwargs):
+ try:
+ return base._fail_with_undefined_error(self, *args, **kwargs)
+ except self._undefined_exception as e:
+ logger.error('Template variable error: %s', str(e))
+ raise e
+
+ def __str__(self):
+ rv = base.__str__(self)
+ _log_message(self)
+ return rv
+
+ def __iter__(self):
+ rv = base.__iter__(self)
+ _log_message(self)
+ return rv
+
+ if PY2:
+ def __nonzero__(self):
+ rv = base.__nonzero__(self)
+ _log_message(self)
+ return rv
+
+ def __unicode__(self):
+ rv = base.__unicode__(self)
+ _log_message(self)
+ return rv
+ else:
+ def __bool__(self):
+ rv = base.__bool__(self)
+ _log_message(self)
+ return rv
+
+ return LoggingUndefined
+
+
+@implements_to_string
+class DebugUndefined(Undefined):
+ """An undefined that returns the debug info when printed.
+
+ >>> foo = DebugUndefined(name='foo')
+ >>> str(foo)
+ '{{ foo }}'
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+ __slots__ = ()
+
+ def __str__(self):
+ if self._undefined_hint is None:
+ if self._undefined_obj is missing:
+ return u'{{ %s }}' % self._undefined_name
+ return '{{ no such element: %s[%r] }}' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ return u'{{ undefined value printed: %s }}' % self._undefined_hint
+
+
+@implements_to_string
+class StrictUndefined(Undefined):
+ """An undefined that barks on print and iteration as well as boolean
+ tests and all kinds of comparisons. In other words: you can do nothing
+ with it except checking if it's defined using the `defined` test.
+
+ >>> foo = StrictUndefined(name='foo')
+ >>> str(foo)
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> not foo
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+ __slots__ = ()
+ __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
+ __ne__ = __bool__ = __hash__ = \
+ Undefined._fail_with_undefined_error
+
+
+# remove remaining slots attributes, after the metaclass did the magic they
+# are unneeded and irritating as they contain wrong data for the subclasses.
+del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
diff --git a/python/jinja2/sandbox.py b/python/jinja2/sandbox.py
new file mode 100644
index 0000000..752e812
--- /dev/null
+++ b/python/jinja2/sandbox.py
@@ -0,0 +1,486 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.sandbox
+ ~~~~~~~~~~~~~~
+
+ Adds a sandbox layer to Jinja as it was the default behavior in the old
+ Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
+ default behavior is easier to use.
+
+ The behavior can be changed by subclassing the environment.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+import types
+import operator
+from collections import Mapping
+from jinja2.environment import Environment
+from jinja2.exceptions import SecurityError
+from jinja2._compat import string_types, PY2
+from jinja2.utils import Markup
+
+from markupsafe import EscapeFormatter
+from string import Formatter
+
+
+#: maximum number of items a range may produce
+MAX_RANGE = 100000
+
+#: attributes of function objects that are considered unsafe.
+if PY2:
+ UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
+ 'func_defaults', 'func_globals'])
+else:
+    # On Python 3 the special attributes on functions are gone,
+ # but they remain on methods and generators for whatever reason.
+ UNSAFE_FUNCTION_ATTRIBUTES = set()
+
+
+#: unsafe method attributes. function attributes are unsafe for methods too
+UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
+
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
+
+#: unsafe attributes on coroutines
+UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
+
+#: unsafe attributes on async generators
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
+
+import warnings
+
+# make sure we don't warn in python 2.6 about stuff we don't care about
+warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
+ module='jinja2.sandbox')
+
+from collections import deque
+
+_mutable_set_types = (set,)
+_mutable_mapping_types = (dict,)
+_mutable_sequence_types = (list,)
+
+
+# on python 2.x we can register the user collection types
+try:
+ from UserDict import UserDict, DictMixin
+ from UserList import UserList
+ _mutable_mapping_types += (UserDict, DictMixin)
+ _mutable_set_types += (UserList,)
+except ImportError:
+ pass
+
+# if sets is still available, register the mutable set from there as well
+try:
+ from sets import Set
+ _mutable_set_types += (Set,)
+except ImportError:
+ pass
+
+#: register Python 2.6 abstract base classes
+from collections import MutableSet, MutableMapping, MutableSequence
+_mutable_set_types += (MutableSet,)
+_mutable_mapping_types += (MutableMapping,)
+_mutable_sequence_types += (MutableSequence,)
+
+
+_mutable_spec = (
+ (_mutable_set_types, frozenset([
+ 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
+ 'symmetric_difference_update', 'update'
+ ])),
+ (_mutable_mapping_types, frozenset([
+ 'clear', 'pop', 'popitem', 'setdefault', 'update'
+ ])),
+ (_mutable_sequence_types, frozenset([
+ 'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
+ ])),
+ (deque, frozenset([
+ 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
+ 'popleft', 'remove', 'rotate'
+ ]))
+)
+
+
+class _MagicFormatMapping(Mapping):
+ """This class implements a dummy wrapper to fix a bug in the Python
+ standard library for string formatting.
+
+ See https://bugs.python.org/issue13598 for information about why
+ this is necessary.
+ """
+
+ def __init__(self, args, kwargs):
+ self._args = args
+ self._kwargs = kwargs
+ self._last_index = 0
+
+ def __getitem__(self, key):
+ if key == '':
+ idx = self._last_index
+ self._last_index += 1
+ try:
+ return self._args[idx]
+ except LookupError:
+ pass
+ key = str(idx)
+ return self._kwargs[key]
+
+ def __iter__(self):
+ return iter(self._kwargs)
+
+ def __len__(self):
+ return len(self._kwargs)
+
+
+def inspect_format_method(callable):
+ if not isinstance(callable, (types.MethodType,
+ types.BuiltinMethodType)) or \
+ callable.__name__ not in ('format', 'format_map'):
+ return None
+ obj = callable.__self__
+ if isinstance(obj, string_types):
+ return obj
+
+
+def safe_range(*args):
+ """A range that can't generate ranges with a length of more than
+ MAX_RANGE items.
+ """
+ rng = range(*args)
+ if len(rng) > MAX_RANGE:
+ raise OverflowError('range too big, maximum size for range is %d' %
+ MAX_RANGE)
+ return rng
+
+
+def unsafe(f):
+ """Marks a function or method as unsafe.
+
+ ::
+
+ @unsafe
+ def delete(self):
+ pass
+ """
+ f.unsafe_callable = True
+ return f
+
+
+def is_internal_attribute(obj, attr):
+ """Test if the attribute given is an internal python attribute. For
+ example this function returns `True` for the `func_code` attribute of
+ python objects. This is useful if the environment method
+ :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
+
+ >>> from jinja2.sandbox import is_internal_attribute
+ >>> is_internal_attribute(str, "mro")
+ True
+ >>> is_internal_attribute(str, "upper")
+ False
+ """
+ if isinstance(obj, types.FunctionType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES:
+ return True
+ elif isinstance(obj, types.MethodType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
+ attr in UNSAFE_METHOD_ATTRIBUTES:
+ return True
+ elif isinstance(obj, type):
+ if attr == 'mro':
+ return True
+ elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
+ return True
+ elif isinstance(obj, types.GeneratorType):
+ if attr in UNSAFE_GENERATOR_ATTRIBUTES:
+ return True
+ elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
+ if attr in UNSAFE_COROUTINE_ATTRIBUTES:
+ return True
+ elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
+ if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
+ return True
+ return attr.startswith('__')
+
+
+def modifies_known_mutable(obj, attr):
+ """This function checks if an attribute on a builtin mutable object
+ (list, dict, set or deque) would modify it if called. It also supports
+ the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
+ with Python 2.6 onwards the abstract base classes `MutableSet`,
+ `MutableMapping`, and `MutableSequence`.
+
+ >>> modifies_known_mutable({}, "clear")
+ True
+ >>> modifies_known_mutable({}, "keys")
+ False
+ >>> modifies_known_mutable([], "append")
+ True
+ >>> modifies_known_mutable([], "index")
+ False
+
+ If called with an unsupported object (such as unicode) `False` is
+ returned.
+
+ >>> modifies_known_mutable("foo", "upper")
+ False
+ """
+ for typespec, unsafe in _mutable_spec:
+ if isinstance(obj, typespec):
+ return attr in unsafe
+ return False
+
+
+class SandboxedEnvironment(Environment):
+ """The sandboxed environment. It works like the regular environment but
+ tells the compiler to generate sandboxed code. Additionally subclasses of
+ this environment may override the methods that tell the runtime what
+ attributes or functions are safe to access.
+
+ If the template tries to access insecure code a :exc:`SecurityError` is
+    raised. However, other exceptions may also occur during rendering, so
+ the caller has to ensure that all exceptions are caught.
+ """
+ sandboxed = True
+
+ #: default callback table for the binary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`binop_table`
+ default_binop_table = {
+ '+': operator.add,
+ '-': operator.sub,
+ '*': operator.mul,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '**': operator.pow,
+ '%': operator.mod
+ }
+
+ #: default callback table for the unary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`unop_table`
+ default_unop_table = {
+ '+': operator.pos,
+ '-': operator.neg
+ }
+
+ #: a set of binary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_binop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`binop_table`.
+ #:
+ #: The following binary operators are interceptable:
+ #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_binops = frozenset()
+
+ #: a set of unary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_unop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`unop_table`.
+ #:
+ #: The following unary operators are interceptable: ``+``, ``-``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_unops = frozenset()
+
+ def intercept_unop(self, operator):
+ """Called during template compilation with the name of a unary
+ operator to check if it should be intercepted at runtime. If this
+        method returns `True`, :meth:`call_unop` is executed for this unary
+ operator. The default implementation of :meth:`call_unop` will use
+ the :attr:`unop_table` dictionary to perform the operator with the
+ same logic as the builtin one.
+
+ The following unary operators are interceptable: ``+`` and ``-``
+
+ Intercepted calls are always slower than the native operator call,
+ so make sure only to intercept the ones you are interested in.
+
+ .. versionadded:: 2.6
+ """
+ return False
+
+ def __init__(self, *args, **kwargs):
+ Environment.__init__(self, *args, **kwargs)
+ self.globals['range'] = safe_range
+ self.binop_table = self.default_binop_table.copy()
+ self.unop_table = self.default_unop_table.copy()
+
+ def is_safe_attribute(self, obj, attr, value):
+ """The sandboxed environment will call this method to check if the
+        attribute of an object is safe to access. By default all attributes
+        starting with an underscore are considered private, as are the
+        special attributes of internal Python objects as returned by the
+ :func:`is_internal_attribute` function.
+ """
+ return not (attr.startswith('_') or is_internal_attribute(obj, attr))
+
+ def is_safe_callable(self, obj):
+ """Check if an object is safely callable. Per default a function is
+ considered safe unless the `unsafe_callable` attribute exists and is
+ True. Override this method to alter the behavior, but this won't
+ affect the `unsafe` decorator from this module.
+ """
+ return not (getattr(obj, 'unsafe_callable', False) or
+ getattr(obj, 'alters_data', False))
+
+ def call_binop(self, context, operator, left, right):
+ """For intercepted binary operator calls (:meth:`intercepted_binops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.binop_table[operator](left, right)
+
+ def call_unop(self, context, operator, arg):
+ """For intercepted unary operator calls (:meth:`intercepted_unops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.unop_table[operator](arg)
+
+ def getitem(self, obj, argument):
+ """Subscribe an object from sandboxed code."""
+ try:
+ return obj[argument]
+ except (TypeError, LookupError):
+ if isinstance(argument, string_types):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ value = getattr(obj, attr)
+ except AttributeError:
+ pass
+ else:
+ if self.is_safe_attribute(obj, argument, value):
+ return value
+ return self.unsafe_undefined(obj, argument)
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj, attribute):
+ """Subscribe an object from sandboxed code and prefer the
+ attribute. The attribute passed *must* be a bytestring.
+ """
+ try:
+ value = getattr(obj, attribute)
+ except AttributeError:
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError):
+ pass
+ else:
+ if self.is_safe_attribute(obj, attribute, value):
+ return value
+ return self.unsafe_undefined(obj, attribute)
+ return self.undefined(obj=obj, name=attribute)
+
+ def unsafe_undefined(self, obj, attribute):
+ """Return an undefined object for unsafe attributes."""
+ return self.undefined('access to attribute %r of %r '
+ 'object is unsafe.' % (
+ attribute,
+ obj.__class__.__name__
+ ), name=attribute, obj=obj, exc=SecurityError)
+
+ def format_string(self, s, args, kwargs, format_func=None):
+ """If a format call is detected, then this is routed through this
+ method so that our safety sandbox can be used for it.
+ """
+ if isinstance(s, Markup):
+ formatter = SandboxedEscapeFormatter(self, s.escape)
+ else:
+ formatter = SandboxedFormatter(self)
+
+ if format_func is not None and format_func.__name__ == 'format_map':
+            if len(args) != 1 or kwargs:
+                raise TypeError(
+                    'format_map() takes exactly one argument (%d given)'
+                    % (len(args) + bool(kwargs))
+                )
+
+ kwargs = args[0]
+ args = None
+
+ kwargs = _MagicFormatMapping(args, kwargs)
+ rv = formatter.vformat(s, args, kwargs)
+ return type(s)(rv)
+
+ def call(__self, __context, __obj, *args, **kwargs):
+ """Call an object from sandboxed code."""
+ fmt = inspect_format_method(__obj)
+ if fmt is not None:
+ return __self.format_string(fmt, args, kwargs, __obj)
+
+ # the double prefixes are to avoid double keyword argument
+ # errors when proxying the call.
+ if not __self.is_safe_callable(__obj):
+ raise SecurityError('%r is not safely callable' % (__obj,))
+ return __context.call(__obj, *args, **kwargs)
+
+
+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
+ """Works exactly like the regular `SandboxedEnvironment` but does not
+ permit modifications on the builtin mutable objects `list`, `set`, and
+ `dict` by using the :func:`modifies_known_mutable` function.
+ """
+
+ def is_safe_attribute(self, obj, attr, value):
+ if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
+ return False
+ return not modifies_known_mutable(obj, attr)
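A quick sketch contrasting the two environments defined in this file (the template string and the ``seq`` variable are invented for illustration): the plain sandbox allows a template to call ``list.append``, while the immutable variant turns the same call into a ``SecurityError`` through ``modifies_known_mutable``:

    from jinja2.sandbox import (ImmutableSandboxedEnvironment,
                                SandboxedEnvironment, SecurityError)

    template = '{{ seq.append(4) }}{{ seq }}'

    # the regular sandbox lets the template mutate the list
    print(SandboxedEnvironment().from_string(template).render(seq=[1, 2, 3]))

    # the immutable sandbox rejects the modifying call
    try:
        ImmutableSandboxedEnvironment().from_string(template).render(seq=[1, 2, 3])
    except SecurityError as exc:
        print(exc)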
+
+
+# This really is not a public API, apparently.
+try:
+ from _string import formatter_field_name_split
+except ImportError:
+ def formatter_field_name_split(field_name):
+ return field_name._formatter_field_name_split()
+
+
+class SandboxedFormatterMixin(object):
+
+ def __init__(self, env):
+ self._env = env
+
+ def get_field(self, field_name, args, kwargs):
+ first, rest = formatter_field_name_split(field_name)
+ obj = self.get_value(first, args, kwargs)
+ for is_attr, i in rest:
+ if is_attr:
+ obj = self._env.getattr(obj, i)
+ else:
+ obj = self._env.getitem(obj, i)
+ return obj, first
+
+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+
+ def __init__(self, env):
+ SandboxedFormatterMixin.__init__(self, env)
+ Formatter.__init__(self)
+
+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+
+ def __init__(self, env, escape):
+ SandboxedFormatterMixin.__init__(self, env)
+ EscapeFormatter.__init__(self, escape)
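To make the ``intercepted_binops`` machinery above concrete, here is a rough sketch of a subclass that caps the exponent of ``**`` (the class name and the limit of 32 are invented for the example):

    from jinja2.sandbox import SandboxedEnvironment, SecurityError

    class PowLimitedEnvironment(SandboxedEnvironment):
        # only ** is routed through call_binop; every other operator keeps
        # its native (faster) implementation
        intercepted_binops = frozenset(['**'])

        def call_binop(self, context, operator, left, right):
            if operator == '**' and right > 32:
                raise SecurityError('exponent %r is too large' % right)
            return SandboxedEnvironment.call_binop(
                self, context, operator, left, right)

    env = PowLimitedEnvironment()
    print(env.from_string('{{ 2 ** 8 }}').render())   # 256
    env.from_string('{{ 2 ** 4096 }}').render()       # raises SecurityError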
diff --git a/python/jinja2/tests.py b/python/jinja2/tests.py
new file mode 100644
index 0000000..0adc3d4
--- /dev/null
+++ b/python/jinja2/tests.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.tests
+ ~~~~~~~~~~~~
+
+ Jinja test functions. Used with the "is" operator.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import operator
+import re
+try:
+    from collections.abc import Mapping  # moved here in Python 3.3
+except ImportError:
+    from collections import Mapping
+from jinja2.runtime import Undefined
+from jinja2._compat import text_type, string_types, integer_types
+import decimal
+
+number_re = re.compile(r'^-?\d+(\.\d+)?$')
+regex_type = type(number_re)
+
+
+test_callable = callable
+
+
+def test_odd(value):
+ """Return true if the variable is odd."""
+ return value % 2 == 1
+
+
+def test_even(value):
+ """Return true if the variable is even."""
+ return value % 2 == 0
+
+
+def test_divisibleby(value, num):
+ """Check if a variable is divisible by a number."""
+ return value % num == 0
+
+
+def test_defined(value):
+ """Return true if the variable is defined:
+
+ .. sourcecode:: jinja
+
+ {% if variable is defined %}
+ value of variable: {{ variable }}
+ {% else %}
+ variable is not defined
+ {% endif %}
+
+ See the :func:`default` filter for a simple way to set undefined
+ variables.
+ """
+ return not isinstance(value, Undefined)
+
+
+def test_undefined(value):
+ """Like :func:`defined` but the other way round."""
+ return isinstance(value, Undefined)
+
+
+def test_none(value):
+ """Return true if the variable is none."""
+ return value is None
+
+
+def test_lower(value):
+ """Return true if the variable is lowercased."""
+ return text_type(value).islower()
+
+
+def test_upper(value):
+ """Return true if the variable is uppercased."""
+ return text_type(value).isupper()
+
+
+def test_string(value):
+ """Return true if the object is a string."""
+ return isinstance(value, string_types)
+
+
+def test_mapping(value):
+ """Return true if the object is a mapping (dict etc.).
+
+ .. versionadded:: 2.6
+ """
+ return isinstance(value, Mapping)
+
+
+def test_number(value):
+ """Return true if the variable is a number."""
+ return isinstance(value, integer_types + (float, complex, decimal.Decimal))
+
+
+def test_sequence(value):
+ """Return true if the variable is a sequence. Sequences are variables
+    that have a length and support item access.
+ """
+ try:
+ len(value)
+ value.__getitem__
+    except Exception:
+ return False
+ return True
+
+
+def test_sameas(value, other):
+ """Check if an object points to the same memory address than another
+ object:
+
+ .. sourcecode:: jinja
+
+ {% if foo.attribute is sameas false %}
+ the foo attribute really is the `False` singleton
+ {% endif %}
+ """
+ return value is other
+
+
+def test_iterable(value):
+ """Check if it's possible to iterate over an object."""
+ try:
+ iter(value)
+ except TypeError:
+ return False
+ return True
+
+
+def test_escaped(value):
+ """Check if the value is escaped."""
+ return hasattr(value, '__html__')
+
+
+def test_in(value, seq):
+ """Check if value is in seq.
+
+ .. versionadded:: 2.10
+ """
+ return value in seq
+
+
+TESTS = {
+ 'odd': test_odd,
+ 'even': test_even,
+ 'divisibleby': test_divisibleby,
+ 'defined': test_defined,
+ 'undefined': test_undefined,
+ 'none': test_none,
+ 'lower': test_lower,
+ 'upper': test_upper,
+ 'string': test_string,
+ 'mapping': test_mapping,
+ 'number': test_number,
+ 'sequence': test_sequence,
+ 'iterable': test_iterable,
+ 'callable': test_callable,
+ 'sameas': test_sameas,
+ 'escaped': test_escaped,
+ 'in': test_in,
+ '==': operator.eq,
+ 'eq': operator.eq,
+ 'equalto': operator.eq,
+ '!=': operator.ne,
+ 'ne': operator.ne,
+ '>': operator.gt,
+ 'gt': operator.gt,
+ 'greaterthan': operator.gt,
+ 'ge': operator.ge,
+ '>=': operator.ge,
+ '<': operator.lt,
+ 'lt': operator.lt,
+ 'lessthan': operator.lt,
+ '<=': operator.le,
+ 'le': operator.le,
+}
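The ``TESTS`` table above is what the ``is`` operator resolves names against; a small sketch of how it is reached from a template (the custom ``prime`` test is hypothetical and registered only for the example):

    from jinja2 import Environment

    env = Environment()
    print(env.from_string('{{ 41 is odd }}').render())         # True
    print(env.from_string('{{ value is defined }}').render())  # False

    # additional tests can be registered on the environment's `tests` mapping
    env.tests['prime'] = lambda n: n > 1 and all(n % i for i in range(2, n))
    print(env.from_string('{{ 7 is prime }}').render())        # True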
diff --git a/python/jinja2/utils.py b/python/jinja2/utils.py
new file mode 100644
index 0000000..502a311
--- /dev/null
+++ b/python/jinja2/utils.py
@@ -0,0 +1,647 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.utils
+ ~~~~~~~~~~~~
+
+ Utility functions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+import json
+import errno
+from collections import deque
+from threading import Lock
+from jinja2._compat import text_type, string_types, implements_iterator, \
+ url_quote
+
+
+_word_split_re = re.compile(r'(\s+)')
+_punctuation_re = re.compile(
+ '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
+ '|'.join(map(re.escape, ('(', '<', '&lt;'))),
+ '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
+ )
+)
+_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
+_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
+_entity_re = re.compile(r'&([^;]+);')
+_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+_digits = '0123456789'
+
+# special singleton representing missing values for the runtime
+missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
+
+# internal code
+internal_code = set()
+
+concat = u''.join
+
+_slash_escape = '\\/' not in json.dumps('/')
+
+
+def contextfunction(f):
+ """This decorator can be used to mark a function or method context callable.
+ A context callable is passed the active :class:`Context` as first argument when
+ called from the template. This is useful if a function wants to get access
+ to the context or functions provided on the context object. For example
+ a function that returns a sorted list of template variables the current
+ template exports could look like this::
+
+ @contextfunction
+ def get_exported_names(context):
+ return sorted(context.exported_vars)
+ """
+ f.contextfunction = True
+ return f
+
+
+def evalcontextfunction(f):
+ """This decorator can be used to mark a function or method as an eval
+ context callable. This is similar to the :func:`contextfunction`
+ but instead of passing the context, an evaluation context object is
+ passed. For more information about the eval context, see
+ :ref:`eval-context`.
+
+ .. versionadded:: 2.4
+ """
+ f.evalcontextfunction = True
+ return f
+
+
+def environmentfunction(f):
+ """This decorator can be used to mark a function or method as environment
+ callable. This decorator works exactly like the :func:`contextfunction`
+    decorator, except that the first argument is the active :class:`Environment`
+    rather than the context.
+ """
+ f.environmentfunction = True
+ return f
+
+
+def internalcode(f):
+ """Marks the function as internally used"""
+ internal_code.add(f.__code__)
+ return f
+
+
+def is_undefined(obj):
+ """Check if the object passed is undefined. This does nothing more than
+ performing an instance check against :class:`Undefined` but looks nicer.
+ This can be used for custom filters or tests that want to react to
+ undefined variables. For example a custom default filter can look like
+ this::
+
+ def default(var, default=''):
+ if is_undefined(var):
+ return default
+ return var
+ """
+ from jinja2.runtime import Undefined
+ return isinstance(obj, Undefined)
+
+
+def consume(iterable):
+ """Consumes an iterable without doing anything with it."""
+ for event in iterable:
+ pass
+
+
+def clear_caches():
+ """Jinja2 keeps internal caches for environments and lexers. These are
+ used so that Jinja2 doesn't have to recreate environments and lexers all
+ the time. Normally you don't have to care about that but if you are
+ measuring memory consumption you may want to clean the caches.
+ """
+ from jinja2.environment import _spontaneous_environments
+ from jinja2.lexer import _lexer_cache
+ _spontaneous_environments.clear()
+ _lexer_cache.clear()
+
+
+def import_string(import_name, silent=False):
+ """Imports an object based on a string. This is useful if you want to
+ use import paths as endpoints or something similar. An import path can
+ be specified either in dotted notation (``xml.sax.saxutils.escape``)
+ or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+    If `silent` is True the return value will be `None` if the import
+ fails.
+
+ :return: imported object
+ """
+ try:
+ if ':' in import_name:
+ module, obj = import_name.split(':', 1)
+ elif '.' in import_name:
+ items = import_name.split('.')
+ module = '.'.join(items[:-1])
+ obj = items[-1]
+ else:
+ return __import__(import_name)
+ return getattr(__import__(module, None, None, [obj]), obj)
+ except (ImportError, AttributeError):
+ if not silent:
+ raise
+
+
+def open_if_exists(filename, mode='rb'):
+ """Returns a file descriptor for the filename if that file exists,
+ otherwise `None`.
+ """
+ try:
+ return open(filename, mode)
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
+ raise
+
+
+def object_type_repr(obj):
+ """Returns the name of the object's type. For some recognized
+ singletons the name of the object is returned instead. (For
+ example for `None` and `Ellipsis`).
+ """
+ if obj is None:
+ return 'None'
+ elif obj is Ellipsis:
+ return 'Ellipsis'
+ # __builtin__ in 2.x, builtins in 3.x
+ if obj.__class__.__module__ in ('__builtin__', 'builtins'):
+ name = obj.__class__.__name__
+ else:
+ name = obj.__class__.__module__ + '.' + obj.__class__.__name__
+ return '%s object' % name
+
+
+def pformat(obj, verbose=False):
+ """Prettyprint an object. Either use the `pretty` library or the
+ builtin `pprint`.
+ """
+ try:
+ from pretty import pretty
+ return pretty(obj, verbose=verbose)
+ except ImportError:
+ from pprint import pformat
+ return pformat(obj)
+
+
+def urlize(text, trim_url_limit=None, rel=None, target=None):
+ """Converts any URLs in text into clickable links. Works on http://,
+ https:// and www. links. Links can have trailing punctuation (periods,
+ commas, close-parens) and leading punctuation (opening parens) and
+ it'll still do the right thing.
+
+ If trim_url_limit is not None, the URLs in link text will be limited
+ to trim_url_limit characters.
+
+    If rel is not None, its value is added to the generated links as a
+    ``rel`` attribute (for example ``nofollow``).
+
+ If target is not None, a target attribute will be added to the link.
+ """
+    def trim_url(x, limit=trim_url_limit):
+        if limit is None:
+            return x
+        return x[:limit] + ('...' if len(x) >= limit else '')
+ words = _word_split_re.split(text_type(escape(text)))
+ rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
+ target_attr = target and ' target="%s"' % escape(target) or ''
+
+ for i, word in enumerate(words):
+ match = _punctuation_re.match(word)
+ if match:
+ lead, middle, trail = match.groups()
+ if middle.startswith('www.') or (
+ '@' not in middle and
+ not middle.startswith('http://') and
+ not middle.startswith('https://') and
+ len(middle) > 0 and
+ middle[0] in _letters + _digits and (
+ middle.endswith('.org') or
+ middle.endswith('.net') or
+ middle.endswith('.com')
+ )):
+ middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
+ rel_attr, target_attr, trim_url(middle))
+ if middle.startswith('http://') or \
+ middle.startswith('https://'):
+ middle = '<a href="%s"%s%s>%s</a>' % (middle,
+ rel_attr, target_attr, trim_url(middle))
+ if '@' in middle and not middle.startswith('www.') and \
+ not ':' in middle and _simple_email_re.match(middle):
+ middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+ if lead + middle + trail != word:
+ words[i] = lead + middle + trail
+ return u''.join(words)
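For reference, roughly what the function above produces for a plain string (the exact markup shown in the comment is approximate):

    from jinja2.utils import urlize

    print(urlize('see www.example.org, thanks'))
    # see <a href="http://www.example.org">www.example.org</a>, thanks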
+
+
+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+ """Generate some lorem ipsum for the template."""
+ from jinja2.constants import LOREM_IPSUM_WORDS
+ from random import choice, randrange
+ words = LOREM_IPSUM_WORDS.split()
+ result = []
+
+ for _ in range(n):
+ next_capitalized = True
+ last_comma = last_fullstop = 0
+ word = None
+ last = None
+ p = []
+
+        # each paragraph contains 20 to 100 words.
+ for idx, _ in enumerate(range(randrange(min, max))):
+ while True:
+ word = choice(words)
+ if word != last:
+ last = word
+ break
+ if next_capitalized:
+ word = word.capitalize()
+ next_capitalized = False
+ # add commas
+ if idx - randrange(3, 8) > last_comma:
+ last_comma = idx
+ last_fullstop += 2
+ word += ','
+ # add end of sentences
+ if idx - randrange(10, 20) > last_fullstop:
+ last_comma = last_fullstop = idx
+ word += '.'
+ next_capitalized = True
+ p.append(word)
+
+ # ensure that the paragraph ends with a dot.
+ p = u' '.join(p)
+ if p.endswith(','):
+ p = p[:-1] + '.'
+ elif not p.endswith('.'):
+ p += '.'
+ result.append(p)
+
+ if not html:
+ return u'\n\n'.join(result)
+ return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+
+
+def unicode_urlencode(obj, charset='utf-8', for_qs=False):
+ """URL escapes a single bytestring or unicode string with the
+ given charset if applicable to URL safe quoting under all rules
+ that need to be considered under all supported Python versions.
+
+ If non strings are provided they are converted to their unicode
+ representation first.
+ """
+ if not isinstance(obj, string_types):
+ obj = text_type(obj)
+ if isinstance(obj, text_type):
+ obj = obj.encode(charset)
+    safe = b'' if for_qs else b'/'
+ rv = text_type(url_quote(obj, safe))
+ if for_qs:
+ rv = rv.replace('%20', '+')
+ return rv
+
+
+class LRUCache(object):
+ """A simple LRU Cache implementation."""
+
+ # this is fast for small capacities (something below 1000) but doesn't
+ # scale. But as long as it's only used as storage for templates this
+ # won't do any harm.
+
+ def __init__(self, capacity):
+ self.capacity = capacity
+ self._mapping = {}
+ self._queue = deque()
+ self._postinit()
+
+ def _postinit(self):
+ # alias all queue methods for faster lookup
+ self._popleft = self._queue.popleft
+ self._pop = self._queue.pop
+ self._remove = self._queue.remove
+ self._wlock = Lock()
+ self._append = self._queue.append
+
+ def __getstate__(self):
+ return {
+ 'capacity': self.capacity,
+ '_mapping': self._mapping,
+ '_queue': self._queue
+ }
+
+ def __setstate__(self, d):
+ self.__dict__.update(d)
+ self._postinit()
+
+ def __getnewargs__(self):
+ return (self.capacity,)
+
+ def copy(self):
+ """Return a shallow copy of the instance."""
+ rv = self.__class__(self.capacity)
+ rv._mapping.update(self._mapping)
+ rv._queue = deque(self._queue)
+ return rv
+
+ def get(self, key, default=None):
+ """Return an item from the cache dict or `default`"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def setdefault(self, key, default=None):
+ """Set `default` if the key is not in the cache otherwise
+ leave unchanged. Return the value of this key.
+ """
+ self._wlock.acquire()
+ try:
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+ finally:
+ self._wlock.release()
+
+ def clear(self):
+ """Clear the cache."""
+ self._wlock.acquire()
+ try:
+ self._mapping.clear()
+ self._queue.clear()
+ finally:
+ self._wlock.release()
+
+ def __contains__(self, key):
+ """Check if a key exists in this cache."""
+ return key in self._mapping
+
+ def __len__(self):
+ """Return the current size of the cache."""
+ return len(self._mapping)
+
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self._mapping
+ )
+
+ def __getitem__(self, key):
+ """Get an item from the cache. Moves the item up so that it has the
+        highest priority.
+
+ Raise a `KeyError` if it does not exist.
+ """
+ self._wlock.acquire()
+ try:
+ rv = self._mapping[key]
+ if self._queue[-1] != key:
+ try:
+ self._remove(key)
+ except ValueError:
+ # if something removed the key from the container
+ # when we read, ignore the ValueError that we would
+ # get otherwise.
+ pass
+ self._append(key)
+ return rv
+ finally:
+ self._wlock.release()
+
+ def __setitem__(self, key, value):
+ """Sets the value for an item. Moves the item up so that it
+        has the highest priority.
+ """
+ self._wlock.acquire()
+ try:
+ if key in self._mapping:
+ self._remove(key)
+ elif len(self._mapping) == self.capacity:
+ del self._mapping[self._popleft()]
+ self._append(key)
+ self._mapping[key] = value
+ finally:
+ self._wlock.release()
+
+ def __delitem__(self, key):
+ """Remove an item from the cache dict.
+ Raise a `KeyError` if it does not exist.
+ """
+ self._wlock.acquire()
+ try:
+ del self._mapping[key]
+ try:
+ self._remove(key)
+ except ValueError:
+ # __getitem__ is not locked, it might happen
+ pass
+ finally:
+ self._wlock.release()
+
+ def items(self):
+ """Return a list of items."""
+ result = [(key, self._mapping[key]) for key in list(self._queue)]
+ result.reverse()
+ return result
+
+ def iteritems(self):
+ """Iterate over all items."""
+ return iter(self.items())
+
+ def values(self):
+ """Return a list of all values."""
+ return [x[1] for x in self.items()]
+
+    def itervalues(self):
+        """Iterate over all values."""
+        return iter(self.values())
+
+    # keep the historical (misspelled) name as an alias for compatibility
+    itervalue = itervalues
+
+ def keys(self):
+ """Return a list of all keys ordered by most recent usage."""
+ return list(self)
+
+ def iterkeys(self):
+ """Iterate over all keys in the cache dict, ordered by
+ the most recent usage.
+ """
+ return reversed(tuple(self._queue))
+
+ __iter__ = iterkeys
+
+ def __reversed__(self):
+ """Iterate over the values in the cache dict, oldest items
+ coming first.
+ """
+ return iter(tuple(self._queue))
+
+ __copy__ = copy
+
+
+# register the LRU cache as a mutable mapping if possible
+try:
+    # MutableMapping lives in collections.abc on Python 3.3+
+    from collections.abc import MutableMapping
+except ImportError:
+    from collections import MutableMapping
+MutableMapping.register(LRUCache)
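A small sketch of the eviction behaviour of the cache above (keys, values and the capacity of 2 are arbitrary):

    from jinja2.utils import LRUCache

    cache = LRUCache(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']            # touching 'a' makes it the most recently used entry
    cache['c'] = 3        # over capacity: the least recently used 'b' is evicted
    print('b' in cache)   # False
    print(cache.keys())   # ['c', 'a']  (most recently used first)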
+
+
+def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
+ disabled_extensions=(),
+ default_for_string=True,
+ default=False):
+ """Intelligently sets the initial value of autoescaping based on the
+ filename of the template. This is the recommended way to configure
+ autoescaping if you do not want to write a custom function yourself.
+
+ If you want to enable it for all templates created from strings or
+ for all templates with `.html` and `.xml` extensions::
+
+ from jinja2 import Environment, select_autoescape
+ env = Environment(autoescape=select_autoescape(
+ enabled_extensions=('html', 'xml'),
+ default_for_string=True,
+ ))
+
+ Example configuration to turn it on at all times except if the template
+ ends with `.txt`::
+
+ from jinja2 import Environment, select_autoescape
+ env = Environment(autoescape=select_autoescape(
+ disabled_extensions=('txt',),
+ default_for_string=True,
+ default=True,
+ ))
+
+    `enabled_extensions` is an iterable of all the extensions that
+    autoescaping should be enabled for. Likewise `disabled_extensions` is
+    a list of all the extensions it should be disabled for. If a template is
+ loaded from a string then the default from `default_for_string` is used.
+ If nothing matches then the initial value of autoescaping is set to the
+ value of `default`.
+
+    For security reasons this function operates case insensitively.
+
+ .. versionadded:: 2.9
+ """
+ enabled_patterns = tuple('.' + x.lstrip('.').lower()
+ for x in enabled_extensions)
+ disabled_patterns = tuple('.' + x.lstrip('.').lower()
+ for x in disabled_extensions)
+ def autoescape(template_name):
+ if template_name is None:
+ return default_for_string
+ template_name = template_name.lower()
+ if template_name.endswith(enabled_patterns):
+ return True
+ if template_name.endswith(disabled_patterns):
+ return False
+ return default
+ return autoescape
+
+
+def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
+ """Works exactly like :func:`dumps` but is safe for use in ``<script>``
+ tags. It accepts the same arguments and returns a JSON string. Note that
+ this is available in templates through the ``|tojson`` filter which will
+ also mark the result as safe. Due to how this function escapes certain
+ characters this is safe even if used outside of ``<script>`` tags.
+
+ The following characters are escaped in strings:
+
+ - ``<``
+ - ``>``
+ - ``&``
+ - ``'``
+
+ This makes it safe to embed such strings in any place in HTML with the
+ notable exception of double quoted attributes. In that case single
+ quote your attributes or HTML escape it in addition.
+ """
+ if dumper is None:
+ dumper = json.dumps
+ rv = dumper(obj, **kwargs) \
+ .replace(u'<', u'\\u003c') \
+ .replace(u'>', u'\\u003e') \
+ .replace(u'&', u'\\u0026') \
+ .replace(u"'", u'\\u0027')
+ return Markup(rv)
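To illustrate the escaping described above (the payload is made up; the output shown is roughly what the default ``json.dumps`` dumper yields after the replacements):

    from jinja2.utils import htmlsafe_json_dumps

    payload = {'title': "</script><script>alert('x')</script>"}
    print(htmlsafe_json_dumps(payload))
    # {"title": "\u003c/script\u003e\u003cscript\u003ealert(\u0027x\u0027)\u003c/script\u003e"}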
+
+
+@implements_iterator
+class Cycler(object):
+ """A cycle helper for templates."""
+
+ def __init__(self, *items):
+ if not items:
+ raise RuntimeError('at least one item has to be provided')
+ self.items = items
+ self.reset()
+
+ def reset(self):
+ """Resets the cycle."""
+ self.pos = 0
+
+ @property
+ def current(self):
+ """Returns the current item."""
+ return self.items[self.pos]
+
+ def next(self):
+ """Goes one item ahead and returns it."""
+ rv = self.current
+ self.pos = (self.pos + 1) % len(self.items)
+ return rv
+
+ __next__ = next
+
+
+class Joiner(object):
+ """A joining helper for templates."""
+
+ def __init__(self, sep=u', '):
+ self.sep = sep
+ self.used = False
+
+ def __call__(self):
+ if not self.used:
+ self.used = True
+ return u''
+ return self.sep
+
+
+class Namespace(object):
+ """A namespace object that can hold arbitrary attributes. It may be
+    initialized from a dictionary or with keyword arguments."""
+
+ def __init__(*args, **kwargs):
+ self, args = args[0], args[1:]
+ self.__attrs = dict(*args, **kwargs)
+
+ def __getattribute__(self, name):
+ if name == '_Namespace__attrs':
+ return object.__getattribute__(self, name)
+ try:
+ return self.__attrs[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __setitem__(self, name, value):
+ self.__attrs[name] = value
+
+ def __repr__(self):
+ return '<Namespace %r>' % self.__attrs
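This class backs the ``namespace()`` global added in Jinja 2.10, which lets a template carry writable state across scopes; a minimal sketch (the template and the ``seq`` variable are invented):

    from jinja2 import Environment

    tmpl = Environment().from_string(
        '{% set ns = namespace(count=0) %}'
        '{% for item in seq %}{% set ns.count = ns.count + 1 %}{% endfor %}'
        '{{ ns.count }}'
    )
    print(tmpl.render(seq=['a', 'b', 'c']))  # 3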
+
+
+# does this Python version support "async for" and async generators?
+try:
+ exec('async def _():\n async for _ in ():\n yield _')
+ have_async_gen = True
+except SyntaxError:
+ have_async_gen = False
+
+
+# Imported here because that's where it was in the past
+from markupsafe import Markup, escape, soft_unicode
diff --git a/python/jinja2/visitor.py b/python/jinja2/visitor.py
new file mode 100644
index 0000000..ba526df
--- /dev/null
+++ b/python/jinja2/visitor.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.visitor
+ ~~~~~~~~~~~~~~
+
+ This module implements a visitor for the nodes.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2.nodes import Node
+
+
+class NodeVisitor(object):
+ """Walks the abstract syntax tree and call visitor functions for every
+ node found. The visitor functions may return values which will be
+ forwarded by the `visit` method.
+
+    By default the visitor functions for the nodes are ``'visit_'`` +
+ class name of the node. So a `TryFinally` node visit function would
+ be `visit_TryFinally`. This behavior can be changed by overriding
+ the `get_visitor` function. If no visitor function exists for a node
+ (return value `None`) the `generic_visit` visitor is used instead.
+ """
+
+ def get_visitor(self, node):
+ """Return the visitor function for this node or `None` if no visitor
+ exists for this node. In that case the generic visit function is
+ used instead.
+ """
+ method = 'visit_' + node.__class__.__name__
+ return getattr(self, method, None)
+
+ def visit(self, node, *args, **kwargs):
+ """Visit a node."""
+ f = self.get_visitor(node)
+ if f is not None:
+ return f(node, *args, **kwargs)
+ return self.generic_visit(node, *args, **kwargs)
+
+ def generic_visit(self, node, *args, **kwargs):
+ """Called if no explicit visitor function exists for a node."""
+ for node in node.iter_child_nodes():
+ self.visit(node, *args, **kwargs)
+
+
+class NodeTransformer(NodeVisitor):
+ """Walks the abstract syntax tree and allows modifications of nodes.
+
+ The `NodeTransformer` will walk the AST and use the return value of the
+ visitor functions to replace or remove the old node. If the return
+ value of the visitor function is `None` the node will be removed
+ from the previous location otherwise it's replaced with the return
+ value. The return value may be the original node in which case no
+ replacement takes place.
+ """
+
+ def generic_visit(self, node, *args, **kwargs):
+ for field, old_value in node.iter_fields():
+ if isinstance(old_value, list):
+ new_values = []
+ for value in old_value:
+ if isinstance(value, Node):
+ value = self.visit(value, *args, **kwargs)
+ if value is None:
+ continue
+ elif not isinstance(value, Node):
+ new_values.extend(value)
+ continue
+ new_values.append(value)
+ old_value[:] = new_values
+ elif isinstance(old_value, Node):
+ new_node = self.visit(old_value, *args, **kwargs)
+ if new_node is None:
+ delattr(node, field)
+ else:
+ setattr(node, field, new_node)
+ return node
+
+ def visit_list(self, node, *args, **kwargs):
+ """As transformers may return lists in some places this method
+ can be used to enforce a list as return value.
+ """
+ rv = self.visit(node, *args, **kwargs)
+ if not isinstance(rv, list):
+ rv = [rv]
+ return rv
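A brief sketch of using the transformer above to rewrite a parsed template's AST (the ``ConstUpper`` class is illustrative and not part of the module):

    from jinja2 import Environment
    from jinja2._compat import string_types
    from jinja2.visitor import NodeTransformer

    class ConstUpper(NodeTransformer):
        # upper-case every constant string literal in the tree
        def visit_Const(self, node):
            if isinstance(node.value, string_types):
                node.value = node.value.upper()
            return node

    env = Environment()
    tree = env.parse("{{ 'hello ' ~ name }}")
    ConstUpper().visit(tree)
    print(tree)  # the Const node in the tree now holds 'HELLO '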