desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def scan_up(self, top, left, bottom, right):
    """Walk the left edge upward, recording row-separator rows ('+').

    Returns a dict {row: [left]} of separator rows, or None if the edge
    is broken (a character other than '+' or '|').
    """
    seps = {}
    for row in range(bottom - 1, top, -1):
        edge_char = self.block[row][left]
        if edge_char == '+':
            seps[row] = [left]
        elif edge_char != '|':
            return None
    return seps
def structure_from_cells(self):
    """From the data collected by `scan_cell()`, convert to the final
    (colspecs, headrows, bodyrows) data structure."""
    # Map each row-separator coordinate to its ordinal index.
    # NOTE: Python 2 — keys() returns a list, sorted in place below.
    rowseps = self.rowseps.keys()
    rowseps.sort()
    rowindex = {}
    for i in range(len(rowseps)):
        rowindex[rowseps[i]] = i
    # Same for column separators.
    colseps = self.colseps.keys()
    colseps.sort()
    colindex = {}
    for i in range(len(colseps)):
        colindex[colseps[i]] = i
    # Width of each column, excluding the separator character itself.
    colspecs = [((colseps[i] - colseps[(i - 1)]) - 1)
                for i in range(1, len(colseps))]
    # Build an empty (rows x cols) grid of cell slots.
    onerow = [None for i in range((len(colseps) - 1))]
    rows = [onerow[:] for i in range((len(rowseps) - 1))]
    # Number of grid slots not yet covered by any cell (spans cover several).
    remaining = ((len(rowseps) - 1) * (len(colseps) - 1))
    for (top, left, bottom, right, block) in self.cells:
        rownum = rowindex[top]
        colnum = colindex[left]
        assert (rows[rownum][colnum] is None), ('Cell (row %s, column %s) already used.' % ((rownum + 1), (colnum + 1)))
        # Extra rows/columns this cell spans beyond its own slot.
        morerows = ((rowindex[bottom] - rownum) - 1)
        morecols = ((colindex[right] - colnum) - 1)
        remaining -= ((morerows + 1) * (morecols + 1))
        # Store (rowspan, colspan, offset of first content line, text block).
        rows[rownum][colnum] = (morerows, morecols, (top + 1), block)
    assert (remaining == 0), 'Unused cells remaining.'
    if self.head_body_sep:
        # Split the grid at the header/body separator line, if one was seen.
        numheadrows = rowindex[self.head_body_sep]
        headrows = rows[:numheadrows]
        bodyrows = rows[numheadrows:]
    else:
        headrows = []
        bodyrows = rows
    return (colspecs, headrows, bodyrows)
def parse_table(self):
    """Determine column boundaries from the top border, then accumulate
    lines until a row is complete and hand it to `self.parse_row`."""
    # Column boundaries come from the top border line.
    self.columns = self.parse_columns(self.block[0], 0)
    self.border_end = self.columns[(-1)][1]
    (firststart, firstend) = self.columns[0]
    offset = 1
    start = 1
    text_found = None
    while (offset < len(self.block)):
        line = self.block[offset]
        if self.span_pat.match(line):
            # A column-span underline terminates the current row.
            self.parse_row(self.block[start:offset], start, (line.rstrip(), offset))
            start = (offset + 1)
            text_found = None
        elif line[firststart:firstend].strip():
            # Text in the first column starts a new row.
            if (text_found and (offset != start)):
                self.parse_row(self.block[start:offset], start)
                start = offset
            text_found = 1
        elif (not text_found):
            # Blank leading lines before any text: move the row start down.
            start = (offset + 1)
        offset += 1
def parse_columns(self, line, offset):
    """Given a column span underline, return a list of (begin, end) pairs."""
    boundaries = []
    end = 0
    while True:
        begin = line.find('-', end)
        end = line.find(' ', begin)
        if begin < 0:
            break
        if end < 0:
            end = len(line)
        boundaries.append((begin, end))
    if self.columns:
        if boundaries[-1][1] != self.border_end:
            raise TableMarkupError('Column span incomplete at line offset %s.' % offset)
        # The last column span extends to the table border.
        boundaries[-1] = (boundaries[-1][0], self.columns[-1][1])
    return boundaries
def parse_row(self, lines, start, spanline=None):
    """Parse the text `lines` of one row and append the result to `self.table`.

    Columns come from `spanline` when given, else from `self.columns`.
    """
    if not (lines or spanline):
        return
    if spanline:
        columns = self.parse_columns(*spanline)
        span_offset = spanline[1]
    else:
        columns = self.columns[:]
        span_offset = start
    self.check_columns(lines, start, columns)
    row = self.init_row(columns, start)
    for index, (cell_start, cell_end) in enumerate(columns):
        cellblock = lines.get_2D_block(0, cell_start, len(lines), cell_end)
        cellblock.disconnect()
        # Strip the padding used for double-width (east-asian) characters.
        cellblock.replace(self.double_width_pad_char, '')
        row[index][3] = cellblock
    self.table.append(row)
def check_columns(self, lines, first_line, columns):
    """Check for text in column margins and text overflow in the last column.

    Raises TableMarkupError for text in a margin; widens the last column
    (and the stored main columns) on overflow.
    """
    # Sentinel so the last real column has a "next" boundary (Python 2: sys.maxint).
    columns.append((sys.maxint, None))
    lastcol = (len(columns) - 2)
    for i in range((len(columns) - 1)):
        (start, end) = columns[i]
        nextstart = columns[(i + 1)][0]
        offset = 0
        for line in lines:
            if ((i == lastcol) and line[end:].strip()):
                # Overflowing text in the last column: widen it to fit.
                text = line[start:].rstrip()
                new_end = (start + len(text))
                columns[i] = (start, new_end)
                (main_start, main_end) = self.columns[(-1)]
                if (new_end > main_end):
                    self.columns[(-1)] = (main_start, new_end)
            elif line[end:nextstart].strip():
                raise TableMarkupError(('Text in column margin at line offset %s.' % (first_line + offset)))
            offset += 1
    # Remove the sentinel again; `columns` belongs to the caller.
    columns.pop()
'Test a token against a token expression. This can either be a token type or ``\'token_type:token_value\'``. This can only test against string values and types.'
def test(self, expr):
if (self.type == expr): return True elif (':' in expr): return (expr.split(':', 1) == [self.type, self.value]) return False
'Test against multiple token expressions.'
def test_any(self, *iterable):
for expr in iterable: if self.test(expr): return True return False
def push(self, token):
    """Push `token` back onto the stream so it is returned again."""
    self._pushed.append(token)
def look(self):
    """Return the next token without consuming it."""
    prev_token = next(self)
    lookahead = self.current
    # Park the lookahead on the pushback queue and restore the old current.
    self.push(lookahead)
    self.current = prev_token
    return lookahead
def skip(self, n=1):
    """Advance the stream by `n` tokens, discarding them."""
    for _ in xrange(n):
        next(self)
def next_if(self, expr):
    """Consume and return the current token if it matches `expr`, else None."""
    if self.current.test(expr):
        return next(self)
    return None
def skip_if(self, expr):
    """Like :meth:`next_if` but only report whether a token was consumed."""
    return self.next_if(expr) is not None
def next(self):
    """Go one token ahead and return the old one."""
    rv = self.current
    if self._pushed:
        # Pushed-back tokens take priority over the underlying iterator.
        self.current = self._pushed.popleft()
    elif (self.current.type is not TOKEN_EOF):
        try:
            self.current = self._next()
        except StopIteration:
            # Underlying iterator exhausted: switch the stream to EOF.
            self.close()
    return rv
def close(self):
    """Terminate the stream: current becomes an EOF token, iterator dropped."""
    self.current = Token(self.current.lineno, TOKEN_EOF, '')
    self._next = None
    self.closed = True
def expect(self, expr):
    """Expect a given token type and return it, advancing the stream.

    Accepts the same argument as :meth:`jinja2.lexer.Token.test`.
    Raises TemplateSyntaxError when the current token does not match.
    """
    if (not self.current.test(expr)):
        if (':' in expr):
            # Report only the value part of 'type:value' expressions.
            expr = expr.split(':')[1]
        if (self.current.type is TOKEN_EOF):
            raise TemplateSyntaxError(('unexpected end of template, expected %r.' % expr), self.current.lineno, self.name, self.filename)
        raise TemplateSyntaxError(('expected token %r, got %r' % (expr, str(self.current))), self.current.lineno, self.name, self.filename)
    try:
        # Return the matched token; the finally clause advances past it.
        return self.current
    finally:
        next(self)
def _normalize_newlines(self, value):
    """Rewrite every newline style in `value` to the configured sequence."""
    return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
    """Tokenize `source` and wrap the result in a :class:`TokenStream`."""
    raw_stream = self.tokeniter(source, name, filename, state)
    wrapped = self.wrap(raw_stream, name, filename)
    return TokenStream(wrapped, name, filename)
def wrap(self, stream, name=None, filename=None):
    """Wrap every (lineno, token, value) triple from `stream` in a
    :class:`Token`, converting and normalizing values on the way."""
    for (lineno, token, value) in stream:
        if (token in ignored_tokens):
            continue
        elif (token == 'linestatement_begin'):
            # Line statements are presented as ordinary blocks.
            token = 'block_begin'
        elif (token == 'linestatement_end'):
            token = 'block_end'
        elif (token in ('raw_begin', 'raw_end')):
            continue
        elif (token == 'data'):
            value = self._normalize_newlines(value)
        elif (token == 'keyword'):
            token = value
        elif (token == 'name'):
            value = str(value)
        elif (token == 'string'):
            # Strip the quotes and resolve backslash escapes; bad escapes
            # become syntax errors with the interpreter's message.
            try:
                value = self._normalize_newlines(value[1:(-1)]).encode('ascii', 'backslashreplace').decode('unicode-escape')
            except Exception as e:
                msg = str(e).split(':')[(-1)].strip()
                raise TemplateSyntaxError(msg, lineno, name, filename)
            # Prefer a native str when the value is plain ASCII (Python 2).
            try:
                value = str(value)
            except UnicodeError:
                pass
        elif (token == 'integer'):
            value = int(value)
        elif (token == 'float'):
            value = float(value)
        elif (token == 'operator'):
            token = operators[value]
        (yield Token(lineno, token, value))
def tokeniter(self, source, name, filename=None, state=None):
    """Tokenize the text and yield raw (lineno, token, value) triples.

    Use this method if you just want to tokenize a template.
    """
    # Normalize newlines; lineno/pos are tracked manually below.
    source = '\n'.join(unicode(source).splitlines())
    pos = 0
    lineno = 1
    stack = ['root']
    if ((state is not None) and (state != 'root')):
        assert (state in ('variable', 'block')), 'invalid state'
        stack.append((state + '_begin'))
    else:
        state = 'root'
    statetokens = self.rules[stack[(-1)]]
    source_length = len(source)
    # Open (, [, { brackets; suppresses *_end tokens while unbalanced.
    balancing_stack = []
    while 1:
        for (regex, tokens, new_state) in statetokens:
            m = regex.match(source, pos)
            if (m is None):
                continue
            # Inside unbalanced brackets, end-of-expression rules do not fire.
            if (balancing_stack and (tokens in ('variable_end', 'block_end', 'linestatement_end'))):
                continue
            if isinstance(tokens, tuple):
                # One token per regex group, or a dynamic lookup by group name.
                for (idx, token) in enumerate(tokens):
                    if (token.__class__ is Failure):
                        # Rule represents a lexing failure: raise it.
                        raise token(lineno, filename)
                    elif (token == '#bygroup'):
                        # Token name is the name of the group that matched.
                        for (key, value) in m.groupdict().iteritems():
                            if (value is not None):
                                (yield (lineno, key, value))
                                lineno += value.count('\n')
                                break
                        else:
                            raise RuntimeError(('%r wanted to resolve the token dynamically but no group matched' % regex))
                    else:
                        data = m.group((idx + 1))
                        if (data or (token not in ignore_if_empty)):
                            (yield (lineno, token, data))
                        lineno += data.count('\n')
            else:
                data = m.group()
                if (tokens == 'operator'):
                    # Track bracket balance and report mismatches eagerly.
                    if (data == '{'):
                        balancing_stack.append('}')
                    elif (data == '('):
                        balancing_stack.append(')')
                    elif (data == '['):
                        balancing_stack.append(']')
                    elif (data in ('}', ')', ']')):
                        if (not balancing_stack):
                            raise TemplateSyntaxError(('unexpected "%s"' % data), lineno, name, filename)
                        expected_op = balancing_stack.pop()
                        if (expected_op != data):
                            raise TemplateSyntaxError(('unexpected "%s", expected "%s"' % (data, expected_op)), lineno, name, filename)
                if (data or (tokens not in ignore_if_empty)):
                    (yield (lineno, tokens, data))
                lineno += data.count('\n')
            pos2 = m.end()
            if (new_state is not None):
                # State transition: pop, resolve by group name, or push literally.
                if (new_state == '#pop'):
                    stack.pop()
                elif (new_state == '#bygroup'):
                    for (key, value) in m.groupdict().iteritems():
                        if (value is not None):
                            stack.append(key)
                            break
                    else:
                        raise RuntimeError(('%r wanted to resolve the new state dynamically but no group matched' % regex))
                else:
                    stack.append(new_state)
                statetokens = self.rules[stack[(-1)]]
            elif (pos2 == pos):
                # Guard against rules matching the empty string in place.
                raise RuntimeError(('%r yielded empty string without stack change' % regex))
            pos = pos2
            break
        else:
            # No rule matched: done at end of source, otherwise a lex error.
            if (pos >= source_length):
                return
            raise TemplateSyntaxError(('unexpected char %r at %d' % (source[pos], pos)), lineno, name, filename)
def unescape(self):
    """Unescape markup again into a unicode string, resolving known
    HTML4 and XHTML entities as well as numeric character references."""
    from jinja2.constants import HTML_ENTITIES

    def handle_match(m):
        # Entity body without the surrounding '&' and ';'.
        name = m.group(1)
        if (name in HTML_ENTITIES):
            return unichr(HTML_ENTITIES[name])
        try:
            if (name[:2] in ('#x', '#X')):
                # Hexadecimal character reference.
                return unichr(int(name[2:], 16))
            elif name.startswith('#'):
                # Decimal character reference.
                return unichr(int(name[1:]))
        except ValueError:
            pass
        # Unknown entities are dropped entirely.
        return u''
    return _entity_re.sub(handle_match, unicode(self))
def striptags(self):
    """Strip all tags, collapse whitespace to single spaces and resolve entities."""
    without_tags = _striptags_re.sub('', self)
    normalized = u' '.join(without_tags.split())
    return Markup(normalized).unescape()
@classmethod
def escape(cls, s):
    """Escape `s`, coercing the result into this Markup subclass."""
    rv = escape(s)
    if rv.__class__ is not cls:
        return cls(rv)
    return rv
'Python 2.4 compatibility.'
def _remove(self, obj):
for (idx, item) in enumerate(self._queue): if (item == obj): del self._queue[idx] break
def copy(self):
    """Return a shallow copy of the cache instance."""
    duplicate = self.__class__(self.capacity)
    duplicate._mapping.update(self._mapping)
    duplicate._queue = deque(self._queue)
    return duplicate
def get(self, key, default=None):
    """Return the cached value for `key`, or `default` when missing."""
    try:
        return self[key]
    except KeyError:
        return default
def setdefault(self, key, default=None):
    """Return the value for `key`, inserting `default` first if it is missing."""
    try:
        return self[key]
    except KeyError:
        self[key] = default
        return default
def clear(self):
    """Remove every entry from the cache while holding the write lock."""
    self._wlock.acquire()
    try:
        self._mapping.clear()
        self._queue.clear()
    finally:
        self._wlock.release()
'Check if a key exists in this cache.'
def __contains__(self, key):
return (key in self._mapping)
'Return the current size of the cache.'
def __len__(self):
return len(self._mapping)
'Get an item from the cache. Moves the item up so that it has the highest priority then. Raise an `KeyError` if it does not exist.'
def __getitem__(self, key):
rv = self._mapping[key] if (self._queue[(-1)] != key): try: self._remove(key) except ValueError: pass self._append(key) return rv
'Sets the value for an item. Moves the item up so that it has the highest priority then.'
def __setitem__(self, key, value):
self._wlock.acquire() try: if (key in self._mapping): self._remove(key) elif (len(self._mapping) == self.capacity): del self._mapping[self._popleft()] self._append(key) self._mapping[key] = value finally: self._wlock.release()
'Remove an item from the cache dict. Raise an `KeyError` if it does not exist.'
def __delitem__(self, key):
self._wlock.acquire() try: del self._mapping[key] try: self._remove(key) except ValueError: pass finally: self._wlock.release()
def items(self):
    """Return ``(key, value)`` pairs, most recently used first."""
    pairs = [(k, self._mapping[k]) for k in list(self._queue)]
    pairs.reverse()
    return pairs
def iteritems(self):
    """Iterate over all ``(key, value)`` pairs, most recently used first."""
    return iter(self.items())
def values(self):
    """Return all cached values, most recently used first."""
    return [pair[1] for pair in self.items()]
def itervalue(self):
    """Iterate over all values (historic method name, kept for compatibility)."""
    return iter(self.values())
def keys(self):
    """Return all keys ordered by most recent usage."""
    return list(self)
def iterkeys(self):
    """Iterate over the keys, most recently used first."""
    return reversed(tuple(self._queue))
'Iterate over the values in the cache dict, oldest items coming first.'
def __reversed__(self):
return iter(tuple(self._queue))
def reset(self):
    """Rewind the cycle to its first item."""
    self.pos = 0
@property
def current(self):
    """The item the cycle currently points at."""
    return self.items[self.pos]
def next(self):
    """Return the current item and advance the cycle by one (wrapping)."""
    item = self.current
    self.pos = (self.pos + 1) % len(self.items)
    return item
def bind(self, environment):
    """Return a copy of this extension bound to another environment."""
    bound = object.__new__(self.__class__)
    bound.__dict__.update(self.__dict__)
    bound.environment = environment
    return bound
def preprocess(self, source, name, filename=None):
    """Hook invoked before lexing to transform the source; default is identity."""
    return source
def filter_stream(self, stream):
    """Hook to filter the token stream; the default passes it through untouched."""
    return stream
def parse(self, parser):
    """Subclasses must override this to build nodes for their registered tags."""
    raise NotImplementedError()
def attr(self, name, lineno=None):
    """Return an ExtensionAttribute node referencing `name` on this extension."""
    return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None):
    """Shortcut for :meth:`attr` wrapped in a :class:`jinja2.nodes.Call`."""
    if args is None:
        args = []
    if kwargs is None:
        kwargs = []
    target = self.attr(name, lineno=lineno)
    return nodes.Call(target, args, kwargs, dyn_args, dyn_kwargs, lineno=lineno)
def parse(self, parser):
    """Parse a {% trans %} ... {% endtrans %} block into a gettext call."""
    lineno = next(parser.stream).lineno
    plural_expr = None
    variables = {}
    # Collect the optional "name" or "name = expr" assignments in the tag.
    while (parser.stream.current.type != 'block_end'):
        if variables:
            parser.stream.expect('comma')
        # A colon terminates the variable list early.
        if parser.stream.skip_if('colon'):
            break
        name = parser.stream.expect('name')
        if (name.value in variables):
            parser.fail(('translatable variable %r defined twice.' % name.value), name.lineno, exc=TemplateAssertionError)
        if (parser.stream.current.type == 'assign'):
            next(parser.stream)
            variables[name.value] = var = parser.parse_expression()
        else:
            variables[name.value] = var = nodes.Name(name.value, 'load')
        # The first variable doubles as the default plural expression.
        if (plural_expr is None):
            plural_expr = var
    parser.stream.expect('block_end')
    plural = plural_names = None
    have_plural = False
    referenced = set()
    # Singular section body.
    (singular_names, singular) = self._parse_block(parser, True)
    if singular_names:
        referenced.update(singular_names)
        if (plural_expr is None):
            plural_expr = nodes.Name(singular_names[0], 'load')
    # Optional {% pluralize [name] %} section.
    if parser.stream.current.test('name:pluralize'):
        have_plural = True
        next(parser.stream)
        if (parser.stream.current.type != 'block_end'):
            name = parser.stream.expect('name')
            if (name.value not in variables):
                parser.fail(('unknown variable %r for pluralization' % name.value), name.lineno, exc=TemplateAssertionError)
            plural_expr = variables[name.value]
        parser.stream.expect('block_end')
        (plural_names, plural) = self._parse_block(parser, False)
        next(parser.stream)
        referenced.update(plural_names)
    else:
        next(parser.stream)
    # Make sure every name referenced in the body has a variable entry.
    for var in referenced:
        if (var not in variables):
            variables[var] = nodes.Name(var, 'load')
    # Without references no %-formatting happens: un-double the percents.
    if (not referenced):
        singular = singular.replace('%%', '%')
        if plural:
            plural = plural.replace('%%', '%')
    if (not have_plural):
        plural_expr = None
    elif (plural_expr is None):
        parser.fail('pluralize without variables', lineno)
    if variables:
        variables = nodes.Dict([nodes.Pair(nodes.Const(x, lineno=lineno), y) for (x, y) in variables.items()])
    else:
        variables = None
    node = self._make_node(singular, plural, variables, plural_expr)
    node.set_lineno(lineno)
    return node
def _parse_block(self, parser, allow_pluralize):
    """Parse until endtrans (or pluralize, when allowed); return
    (referenced variable names, joined %-format source string)."""
    referenced = []
    buf = []
    while 1:
        if (parser.stream.current.type == 'data'):
            # Literal data: escape % so it survives later string formatting.
            buf.append(parser.stream.current.value.replace('%', '%%'))
            next(parser.stream)
        elif (parser.stream.current.type == 'variable_begin'):
            # Only bare names are allowed inside translatable sections.
            next(parser.stream)
            name = parser.stream.expect('name').value
            referenced.append(name)
            buf.append(('%%(%s)s' % name))
            parser.stream.expect('variable_end')
        elif (parser.stream.current.type == 'block_begin'):
            next(parser.stream)
            if parser.stream.current.test('name:endtrans'):
                break
            elif parser.stream.current.test('name:pluralize'):
                if allow_pluralize:
                    break
                parser.fail('a translatable section can have only one pluralize section')
            parser.fail('control structures in translatable sections are not allowed')
        elif parser.stream.eos:
            parser.fail('unclosed translation block')
        else:
            assert False, 'internal parser error'
    return (referenced, concat(buf))
def _make_node(self, singular, plural, variables, plural_expr):
    """Build the Output node that calls gettext/ngettext for the parsed strings."""
    if (plural_expr is None):
        gettext = nodes.Name('gettext', 'load')
        node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
    else:
        ngettext = nodes.Name('ngettext', 'load')
        node = nodes.Call(ngettext, [nodes.Const(singular), nodes.Const(plural), plural_expr], [], None, None)
    # In autoescape mode the translated string is considered safe markup.
    if self.environment.autoescape:
        node = nodes.MarkSafe(node)
    # Apply %-formatting with the collected variables, if any.
    if variables:
        node = nodes.Mod(node, variables)
    return nodes.Output([node])
def iter_fields(self, exclude=None, only=None):
    """Yield ``(name, value)`` for each defined field.

    `exclude` / `only` (sets or tuples of field names) restrict which
    fields are yielded; unset fields are silently skipped.
    """
    for name in self.fields:
        wanted = (
            (exclude is only is None)
            or (exclude is not None and name not in exclude)
            or (only is not None and name in only)
        )
        if wanted:
            try:
                yield name, getattr(self, name)
            except AttributeError:
                pass
def iter_child_nodes(self, exclude=None, only=None):
    """Yield every direct child node, expanding list-valued fields."""
    for _, value in self.iter_fields(exclude, only):
        if isinstance(value, list):
            for entry in value:
                if isinstance(entry, Node):
                    yield entry
        elif isinstance(value, Node):
            yield value
def find(self, node_type):
    """Return the first descendant node of `node_type`, or None if absent."""
    for match in self.find_all(node_type):
        return match
    return None
def find_all(self, node_type):
    """Yield all descendant nodes that are instances of `node_type`
    (which may be a tuple of types)."""
    for child in self.iter_child_nodes():
        if isinstance(child, node_type):
            yield child
        for match in child.find_all(node_type):
            yield match
def set_ctx(self, ctx):
    """Breadth-first set the `ctx` attribute ('store', 'load', ...) on this
    node and every descendant that has a ctx field."""
    pending = deque([self])
    while pending:
        node = pending.popleft()
        if 'ctx' in node.fields:
            node.ctx = ctx
        pending.extend(node.iter_child_nodes())
    return self
def set_lineno(self, lineno, override=False):
    """Set `lineno` on this node and all children that lack one
    (or on all of them when `override` is true)."""
    pending = deque([self])
    while pending:
        node = pending.popleft()
        if 'lineno' in node.attributes:
            if node.lineno is None or override:
                node.lineno = lineno
        pending.extend(node.iter_child_nodes())
    return self
def set_environment(self, environment):
    """Attach `environment` to this node and every descendant."""
    pending = deque([self])
    while pending:
        node = pending.popleft()
        node.environment = environment
        pending.extend(node.iter_child_nodes())
    return self
def as_const(self):
    """Evaluate the node as a compile-time constant.

    The base implementation always raises :exc:`Impossible`; constant-
    foldable node types override it.
    """
    raise Impossible()
def can_assign(self):
    """Nodes are not assignment targets unless a subclass says otherwise."""
    return False
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
    """Build a Const from `value` if it has a safe repr, else raise Impossible."""
    from compiler import has_safe_repr
    if not has_safe_repr(value):
        raise Impossible()
    return cls(value, lineno=lineno, environment=environment)
def pull_locals(self, frame):
    """Record every undeclared identifier of `frame`."""
    self.undeclared_identifiers.update(frame.identifiers.undeclared)
def super(self, name, current):
    """Return a reference to the parent of block `current` under `name`,
    or an undefined object when no parent block exists."""
    try:
        blocks = self.blocks[name]
        index = (blocks.index(current) + 1)
        # Probe access: raises IndexError (a LookupError) when there is
        # no block after `current` in the inheritance chain.
        blocks[index]
    except LookupError:
        return self.environment.undefined(('there is no parent block called %r.' % name), name='super')
    return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
    """Return the context variable `key`, or `default` when it is missing."""
    try:
        return self[key]
    except KeyError:
        return default
def resolve(self, key):
    """Look up `key` in local vars, then the parent context; fall back to
    an Undefined object carrying the looked-up name."""
    if key in self.vars:
        return self.vars[key]
    if key in self.parent:
        return self.parent[key]
    return self.environment.undefined(name=key)
def get_exported(self):
    """Return a new dict containing only the exported variables."""
    return dict((name, self.vars[name]) for name in self.exported_vars)
def get_all(self):
    """Return one dict merging parent and local variables (locals win)."""
    return dict(self.parent, **self.vars)
@internalcode
def call(__self, __obj, *args, **kwargs):
    """Call `__obj` with the given arguments, injecting the active context
    (contextfunction) or environment (environmentfunction) as first argument.

    Parameters are name-mangled-style (`__self`, `__obj`) so they cannot
    collide with user-supplied keyword arguments.
    """
    if __debug__:
        __traceback_hide__ = True  # hide this frame in template tracebacks
    if isinstance(__obj, _context_function_types):
        if getattr(__obj, 'contextfunction', 0):
            args = ((__self,) + args)
        elif getattr(__obj, 'environmentfunction', 0):
            args = ((__self.environment,) + args)
    return __obj(*args, **kwargs)
def derived(self, locals=None):
    """Internal helper: create a derived context sharing parent and blocks."""
    context = new_context(self.environment, self.name, {}, self.parent,
                          True, None, locals)
    # Copy the block chains so the derived context can extend them safely.
    context.blocks.update(((k, list(v)) for (k, v) in self.blocks.iteritems()))
    return context
def __getitem__(self, key):
    """Resolve `key`; raise KeyError when it resolves to an Undefined."""
    value = self.resolve(key)
    if isinstance(value, Undefined):
        raise KeyError(key)
    return value
@property
def super(self):
    """Reference to the next block up the inheritance stack, or undefined."""
    parent_depth = self._depth + 1
    if parent_depth >= len(self._stack):
        return self._context.environment.undefined(
            ('there is no parent block called %r.' % self.name), name='super')
    return BlockReference(self.name, self._context, self._stack, parent_depth)
def cycle(self, *args):
    """Cycle among `args` based on the current loop index."""
    if not args:
        raise TypeError('no items for cycling given')
    return args[self.index0 % len(args)]
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
    """Raise the configured undefined exception with a descriptive hint."""
    if self._undefined_hint is None:
        # Build a hint from what is known about the failed lookup.
        if self._undefined_obj is None:
            hint = '%r is undefined' % self._undefined_name
        elif not isinstance(self._undefined_name, basestring):
            hint = '%r object has no element %r' % (
                self._undefined_obj.__class__.__name__, self._undefined_name)
        else:
            hint = '%r object has no attribute %r' % (
                self._undefined_obj.__class__.__name__, self._undefined_name)
    else:
        hint = self._undefined_hint
    raise self._undefined_exception(hint)
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
    """Raise `exc` with `msg`, the given (or current) line number,
    plus the parser's template name and filename."""
    if lineno is None:
        lineno = self.stream.current.lineno
    raise exc(msg, lineno, self.name, self.filename)
def is_tuple_end(self, extra_end_rules=None):
    """Return True when the current token terminates a tuple expression."""
    token = self.stream.current
    if token.type in ('variable_end', 'block_end', 'rparen'):
        return True
    if extra_end_rules is not None:
        return token.test_any(extra_end_rules)
    return False
def free_identifier(self, lineno=None):
    """Return a fresh InternalName node unique within this parser."""
    self._last_identifier += 1
    # Bypass InternalName's constructor guard via object.__new__.
    identifier = object.__new__(nodes.InternalName)
    nodes.Node.__init__(identifier, 'fi%d' % self._last_identifier, lineno=lineno)
    return identifier
def parse_statement(self):
    """Parse a single statement tag, dispatching on the tag name."""
    token = self.stream.current
    if (token.type != 'name'):
        self.fail('tag name expected', token.lineno)
    if (token.value in _statement_keywords):
        # Core statements dispatch to the matching parse_<name> method.
        return getattr(self, ('parse_' + self.stream.current.value))()
    if (token.value == 'call'):
        return self.parse_call_block()
    if (token.value == 'filter'):
        return self.parse_filter_block()
    # Otherwise the tag may belong to a registered extension.
    ext = self.extensions.get(token.value)
    if (ext is not None):
        return ext(self)
    self.fail(('unknown tag %r' % token.value), token.lineno)
def parse_statements(self, end_tokens, drop_needle=False):
    """Parse a statement body until one of `end_tokens` is reached.

    An optional leading colon is skipped; with `drop_needle` the matched
    end token is consumed as well.
    """
    self.stream.skip_if('colon')
    self.stream.expect('block_end')
    body = self.subparse(end_tokens)
    if drop_needle:
        next(self.stream)
    return body
def parse_set(self):
    """Parse a ``{% set target = expr %}`` statement into an Assign node."""
    lineno = next(self.stream).lineno
    target = self.parse_assign_target()
    self.stream.expect('assign')
    expr = self.parse_tuple()
    return nodes.Assign(target, expr, lineno=lineno)
def parse_for(self):
    """Parse a for loop: {% for target in iter [if test] [recursive] %}."""
    lineno = self.stream.expect('name:for').lineno
    target = self.parse_assign_target(extra_end_rules=('name:in',))
    self.stream.expect('name:in')
    # condexpr is disabled so a trailing "if" starts the loop filter test.
    iter = self.parse_tuple(with_condexpr=False, extra_end_rules=('name:recursive',))
    test = None
    if self.stream.skip_if('name:if'):
        test = self.parse_expression()
    recursive = self.stream.skip_if('name:recursive')
    body = self.parse_statements(('name:endfor', 'name:else'))
    if (next(self.stream).value == 'endfor'):
        else_ = []
    else:
        # {% else %} branch runs when the iterable was empty.
        else_ = self.parse_statements(('name:endfor',), drop_needle=True)
    return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
def parse_if(self):
    """Parse an if/elif/else construct into a chain of If nodes."""
    node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
    while 1:
        node.test = self.parse_tuple(with_condexpr=False)
        node.body = self.parse_statements(('name:elif', 'name:else', 'name:endif'))
        token = next(self.stream)
        if token.test('name:elif'):
            # elif: chain a fresh If node into else_ and keep looping.
            new_node = nodes.If(lineno=self.stream.current.lineno)
            node.else_ = [new_node]
            node = new_node
            continue
        elif token.test('name:else'):
            node.else_ = self.parse_statements(('name:endif',), drop_needle=True)
        else:
            node.else_ = []
        break
    return result
def parse_assign_target(self, with_tuple=True, name_only=False, extra_end_rules=None):
    """Parse an assignment target (a name, or a tuple unless disabled).

    `name_only` restricts targets to plain names; `extra_end_rules` is
    forwarded to the tuple parser. Fails when the target is not assignable.
    """
    if name_only:
        token = self.stream.expect('name')
        target = nodes.Name(token.value, 'store', lineno=token.lineno)
    else:
        if with_tuple:
            target = self.parse_tuple(simplified=True, extra_end_rules=extra_end_rules)
        else:
            target = self.parse_primary(with_postfix=False)
        # Switch the parsed expression into store context.
        target.set_ctx('store')
    if (not target.can_assign()):
        self.fail(("can't assign to %r" % target.__class__.__name__.lower()), target.lineno)
    return target
def parse_expression(self, with_condexpr=True):
    """Parse an expression; conditional expressions are skipped when
    `with_condexpr` is False."""
    if with_condexpr:
        return self.parse_condexpr()
    return self.parse_or()
def parse_tuple(self, simplified=False, with_condexpr=True, extra_end_rules=None):
    """Parse comma-delimited expressions into a Tuple node, or return the
    single expression when no comma was found.

    `simplified` limits parsing to primaries; `extra_end_rules` marks extra
    tokens that terminate the tuple (e.g. 'name:in' inside for loops).
    """
    lineno = self.stream.current.lineno
    # Choose the sub-parser according to the requested restrictions.
    if simplified:
        parse = (lambda: self.parse_primary(with_postfix=False))
    elif with_condexpr:
        parse = self.parse_expression
    else:
        parse = (lambda: self.parse_expression(with_condexpr=False))
    args = []
    is_tuple = False
    while 1:
        if args:
            self.stream.expect('comma')
        if self.is_tuple_end(extra_end_rules):
            break
        args.append(parse())
        if (self.stream.current.type == 'comma'):
            # A comma (even trailing) forces tuple semantics.
            is_tuple = True
        else:
            break
        lineno = self.stream.current.lineno
    if ((not is_tuple) and args):
        # Exactly one expression without a comma: not a tuple.
        return args[0]
    return nodes.Tuple(args, 'load', lineno=lineno)
def parse(self):
    """Parse the whole template into a `Template` node."""
    template = nodes.Template(self.subparse(), lineno=1)
    template.set_environment(self.environment)
    return template
def get_visitor(self, node):
    """Return the visitor function for *node*, or ``None`` if no
    ``visit_<ClassName>`` method exists (the generic visitor is then
    used instead).
    """
    return getattr(self, 'visit_' + node.__class__.__name__, None)
def visit(self, node, *args, **kwargs):
    """Visit a node, dispatching to its specific visitor function or
    falling back to :meth:`generic_visit`.
    """
    handler = self.get_visitor(node)
    if handler is None:
        return self.generic_visit(node, *args, **kwargs)
    return handler(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
    """Fallback visitor: visit every child node of *node* in order."""
    for child in node.iter_child_nodes():
        self.visit(child, *args, **kwargs)
def visit_list(self, node, *args, **kwargs):
    """Visit *node* and coerce the result into a list.

    Transformers may legitimately return either a single node or a list
    of nodes; this wrapper guarantees the caller always gets a list.
    """
    result = self.visit(node, *args, **kwargs)
    if isinstance(result, list):
        return result
    return [result]
def is_safe_attribute(self, obj, attr, value):
    """Return ``True`` if sandboxed code may access ``obj.attr``.

    By default every attribute starting with an underscore is private,
    and the special attributes of internal Python objects (as reported
    by :func:`is_internal_attribute`) are off-limits too.
    """
    if attr.startswith('_'):
        return False
    return not is_internal_attribute(obj, attr)
def is_safe_callable(self, obj):
    """Return ``True`` if *obj* may be called from sandboxed code.

    A callable is considered unsafe when it carries a truthy
    ``unsafe_callable`` or ``alters_data`` attribute; everything else is
    allowed.  Override to change the policy (this does not affect the
    ``unsafe`` decorator from this module).
    """
    if getattr(obj, 'unsafe_callable', False):
        return False
    return not getattr(obj, 'alters_data', False)
def getitem(self, obj, argument):
    """Subscribe an object from sandboxed code.

    Item access (``obj[argument]``) is tried first.  When that fails
    with ``TypeError`` or ``LookupError`` and the argument is a string,
    the attribute of the same name is tried as a fallback; unsafe
    attributes produce an undefined object instead of the value.
    """
    try:
        return obj[argument]
    except (TypeError, LookupError):
        if isinstance(argument, basestring):  # NOTE: py2 name -- file targets Python 2
            try:
                # str() may fail (e.g. non-ASCII unicode on py2); then
                # there is simply no attribute fallback.
                attr = str(argument)
            except Exception:
                # was a bare ``except:`` -- narrowed so SystemExit and
                # KeyboardInterrupt are no longer silently swallowed
                pass
            else:
                try:
                    value = getattr(obj, attr)
                except AttributeError:
                    pass
                else:
                    if self.is_safe_attribute(obj, argument, value):
                        return value
                    return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
    """Subscribe an object from sandboxed code, preferring the
    attribute over item access.  The attribute passed *must* be a
    bytestring.
    """
    try:
        value = getattr(obj, attribute)
    except AttributeError:
        # No such attribute -- fall back to item lookup.
        try:
            return obj[attribute]
        except (TypeError, LookupError):
            pass
    else:
        if self.is_safe_attribute(obj, attribute, value):
            return value
        return self.unsafe_undefined(obj, attribute)
    return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
    """Return an undefined object carrying a security hint for an
    attribute that is unsafe to access.
    """
    hint = ('access to attribute %r of %r object is unsafe.'
            % (attribute, obj.__class__.__name__))
    return self.undefined(hint, name=attribute, obj=obj, exc=SecurityError)
def call(__self, __context, __obj, *args, **kwargs):
    """Call an object from sandboxed code.

    Raises :exc:`SecurityError` when the callable is not considered
    safe.  The dunder-prefixed parameters avoid collisions with keyword
    arguments destined for the wrapped callable.
    """
    if __self.is_safe_callable(__obj):
        return __context.call(__obj, *args, **kwargs)
    raise SecurityError('%r is not safely callable' % (__obj,))
def reset(self):
    """Reset the bucket by discarding any loaded bytecode."""
    self.code = None
def load_bytecode(self, f):
    """Load bytecode from a file or file-like object.

    The bucket is reset (left empty) when the magic header or the
    source checksum does not match.
    """
    # Reject streams that do not start with our magic header.
    magic = f.read(len(bc_magic))
    if magic != bc_magic:
        self.reset()
        return
    # The pickled checksum guards against bytecode for stale source.
    checksum = pickle.load(f)
    if self.checksum != checksum:
        self.reset()
        return
    # Real files go straight to marshal; other file-likes must be
    # drained into a string first.  NOTE: ``file`` is the py2 builtin.
    if isinstance(f, file):
        self.code = marshal.load(f)
    else:
        self.code = marshal.loads(f.read())
def write_bytecode(self, f):
    """Dump the bucket's bytecode into a file or file-like object.

    Raises :exc:`TypeError` when the bucket holds no code.
    """
    if self.code is None:
        raise TypeError("can't write empty bucket")
    # Header: magic bytes followed by the pickled source checksum.
    f.write(bc_magic)
    pickle.dump(self.checksum, f, 2)
    # Real files can be marshalled directly; other file-likes receive
    # the serialized string.  NOTE: ``file`` is the py2 builtin.
    if isinstance(f, file):
        marshal.dump(self.code, f)
    else:
        f.write(marshal.dumps(self.code))
def bytecode_from_string(self, string):
    """Load bytecode from an in-memory string."""
    buf = StringIO(string)
    self.load_bytecode(buf)