This article collects typical usage examples of the jinja2.lexer.Token class in Python. If you are wondering what lexer.Token is for, how to use it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the module it lives in, jinja2.lexer.
Four code examples of lexer.Token are shown below, ordered roughly by popularity.
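Before the examples, a minimal sketch of what a Token looks like on its own (the values are illustrative): Token is a lightweight (lineno, type, value) tuple, and its test() method is how the stream-filtering examples below check token types.

from jinja2.lexer import Token

tok = Token(1, 'name', 'gettext')
print(tok.lineno, tok.type, tok.value)    # -> 1 name gettext
print(tok.test('name'))                   # True: matches on type alone
print(tok.test('name:gettext'))           # True: matches on type and value
print(tok.test('string'))                 # False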
Example 1: interpolate
# Required import: from jinja2 import lexer [as alias]
# Or: from jinja2.lexer import Token [as alias]
def interpolate(self, token):
    pos = 0
    end = len(token.value)
    lineno = token.lineno
    while 1:
        match = _gettext_re.search(token.value, pos)
        if match is None:
            break
        value = token.value[pos:match.start()]
        if value:
            yield Token(lineno, 'data', value)
        lineno += count_newlines(token.value)
        yield Token(lineno, 'variable_begin', None)
        yield Token(lineno, 'name', 'gettext')
        yield Token(lineno, 'lparen', None)
        yield Token(lineno, 'string', match.group(1))
        yield Token(lineno, 'rparen', None)
        yield Token(lineno, 'variable_end', None)
        pos = match.end()
    if pos < end:
        yield Token(lineno, 'data', token.value[pos:])
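This generator relies on a module-level _gettext_re pattern and a count_newlines() helper that are not shown above. A sketch of how it could be exercised, with stand-in definitions for those two names (the marker syntax is an assumption, not the original project's), assuming the interpolate definition above is in scope:

import re
from jinja2.lexer import Token

_gettext_re = re.compile(r'_\("([^"]*)"\)')   # stand-in marker pattern

def count_newlines(value):                    # stand-in helper
    return value.count('\n')

# interpolate() never touches self, so it can be driven as a plain function:
for tok in interpolate(None, Token(1, 'data', 'Hi _("World")!')):
    print(tok.type, repr(tok.value))
# Yields the token sequence of:  Hi {{ gettext("World") }}!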
Example 2: filter_stream
# Required import: from jinja2 import lexer [as alias]
# Or: from jinja2.lexer import Token [as alias]
def filter_stream(self, stream):
    ctx = StreamProcessContext(stream)
    for token in stream:
        if token.type != 'data':
            yield token
            continue
        ctx.token = token
        value = self.normalize(ctx)
        yield Token(token.lineno, 'data', value)
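StreamProcessContext and self.normalize() belong to the surrounding extension and are not shown. For context, filter_stream is the standard jinja2.ext.Extension hook for rewriting the token stream between lexing and parsing; below is a minimal, self-contained sketch of the same pattern that simply collapses whitespace in data tokens (the class name and the normalization rule are illustrative, not the original project's):

import re
from jinja2 import Environment
from jinja2.ext import Extension
from jinja2.lexer import Token

class CollapseWhitespaceExtension(Extension):
    # Illustrative stand-in: squeeze runs of whitespace in literal template data.
    def filter_stream(self, stream):
        for token in stream:
            if token.type != 'data':
                yield token
            else:
                yield Token(token.lineno, 'data', re.sub(r'\s+', ' ', token.value))

env = Environment(extensions=[CollapseWhitespaceExtension])
print(env.from_string("<p>\n  hello   {{ name }}\n</p>").render(name="world"))
# -> <p> hello world </p>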
Example 3: filter_stream
# Required import: from jinja2 import lexer [as alias]
# Or: from jinja2.lexer import Token [as alias]
def filter_stream(self, stream):
    """
    We convert
        {{ some.variable | filter1 | filter2 }}
    to
        {{ some.variable | filter1 | filter2 | yaml }}
    ... for all variable declarations in the template.

    This function is called by Jinja2 immediately after the lexing
    stage, but before the parser is called.
    """
    while not stream.eos:
        token = next(stream)
        if token.test("variable_begin"):
            var_expr = []
            while not token.test("variable_end"):
                var_expr.append(token)
                token = next(stream)
            variable_end = token

            last_token = var_expr[-1]
            if last_token.test("name") and last_token.value == "yaml":
                # don't yaml twice
                continue

            # Wrap the whole expression between the `variable_begin`
            # and `variable_end` marks in parens:
            var_expr.insert(1, Token(var_expr[0].lineno, "lparen", None))
            var_expr.append(Token(var_expr[-1].lineno, "rparen", None))

            var_expr.append(Token(token.lineno, "pipe", "|"))
            var_expr.append(Token(token.lineno, "name", "yaml"))

            var_expr.append(variable_end)
            for token in var_expr:
                yield token
        else:
            yield token
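Assuming the method above lives in a jinja2.ext.Extension subclass and a yaml filter is registered on the environment (and that PyYAML is available), the net effect is that every output expression is serialized as YAML. A usage sketch, where YamlWrappingExtension and the filter body are stand-ins for whatever the original project defines:

import yaml
from jinja2 import Environment

# Stand-in name for the Extension subclass that owns the filter_stream above.
env = Environment(extensions=[YamlWrappingExtension])
env.filters['yaml'] = lambda value: yaml.safe_dump(value, default_flow_style=True).strip()

# {{ config.ports }} is rewritten to {{ ( config.ports ) | yaml }} before parsing:
print(env.from_string("ports: {{ config.ports }}").render(config={"ports": [80, 443]}))
# -> ports: [80, 443]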
Example 4: filter_stream
# Required import: from jinja2 import lexer [as alias]
# Or: from jinja2.lexer import Token [as alias]
def filter_stream(self, stream):
    """
    We convert
        {{ some.variable | filter1 | filter2 }}
    to
        {{ ( some.variable | filter1 | filter2 ) | bind }}
    ... for all variable declarations in the template.

    Note the extra ( and ): we want the | bind to apply to the entire
    expression, not just the value of the last filter. The parentheses
    are mostly redundant, except in expressions like {{ '%' ~ myval ~ '%' }}.

    This function is called by Jinja2 immediately after the lexing
    stage, but before the parser is called.
    """
    while not stream.eos:
        token = next(stream)
        if token.test("variable_begin"):
            var_expr = []
            while not token.test("variable_end"):
                var_expr.append(token)
                token = next(stream)
            variable_end = token

            last_token = var_expr[-1]
            lineno = last_token.lineno
            # don't bind twice
            if (not last_token.test("name")
                    or last_token.value not in ('bind', 'inclause', 'sqlsafe')):
                param_name = self.extract_param_name(var_expr)

                var_expr.insert(1, Token(lineno, 'lparen', u'('))
                var_expr.append(Token(lineno, 'rparen', u')'))
                var_expr.append(Token(lineno, 'pipe', u'|'))
                var_expr.append(Token(lineno, 'name', u'bind'))
                var_expr.append(Token(lineno, 'lparen', u'('))
                var_expr.append(Token(lineno, 'string', param_name))
                var_expr.append(Token(lineno, 'rparen', u')'))

            var_expr.append(variable_end)

            for token in var_expr:
                yield token
        else:
            yield token
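This example leaves the bind filter and the extract_param_name() helper to the rest of the extension. A simplified sketch of what a bind filter in this style might do, with assumed names and a plain module-level list instead of per-render state:

_bind_params = []   # simplified: a real implementation keeps per-render state

def bind(value, name):
    # Record the bound value and emit a placeholder for the database driver.
    _bind_params.append(value)
    return '%s'

# After the stream rewrite, a template such as
#     SELECT * FROM users WHERE id = {{ user.id }}
# behaves as if it had been written as
#     SELECT * FROM users WHERE id = {{ ( user.id ) | bind('user.id') }}
# so the rendered SQL contains the placeholder while _bind_params carries the value.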