本文整理匯總了Python中jinja2.lexer.Token方法的典型用法代碼示例。如果您正苦於以下問題:Python lexer.Token方法的具體用法?Python lexer.Token怎麽用?Python lexer.Token使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類jinja2.lexer
的用法示例。
在下文中一共展示了lexer.Token方法的4個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: interpolate
# 需要導入模塊: from jinja2 import lexer [as 別名]
# 或者: from jinja2.lexer import Token [as 別名]
def interpolate(self, token):
    """Split a ``data`` token on ``_(...)`` markers, yielding plain
    ``data`` tokens interleaved with ``gettext(...)`` call tokens.

    For each match of ``_gettext_re`` the text before the match is
    emitted as ``data`` and the match itself is rewritten as the token
    sequence ``{{ gettext('...') }}``.

    :param token: a ``data`` token from the lexer stream.
    :yields: replacement :class:`~jinja2.lexer.Token` objects.
    """
    pos = 0
    end = len(token.value)
    lineno = token.lineno
    while True:
        match = _gettext_re.search(token.value, pos)
        if match is None:
            break
        value = token.value[pos:match.start()]
        if value:
            yield Token(lineno, 'data', value)
        # BUG FIX: the original did ``count_newlines(token.value)`` here,
        # adding the newline count of the ENTIRE token once per match and
        # over-incrementing lineno.  Only the text consumed in this
        # iteration may advance the line counter.
        lineno += count_newlines(value)
        yield Token(lineno, 'variable_begin', None)
        yield Token(lineno, 'name', 'gettext')
        yield Token(lineno, 'lparen', None)
        yield Token(lineno, 'string', match.group(1))
        yield Token(lineno, 'rparen', None)
        yield Token(lineno, 'variable_end', None)
        # Newlines inside the matched ``_(...)`` expression also move us
        # down the source, so account for them before the next chunk.
        lineno += count_newlines(match.group())
        pos = match.end()
    if pos < end:
        yield Token(lineno, 'data', token.value[pos:])
示例2: filter_stream
# 需要導入模塊: from jinja2 import lexer [as 別名]
# 或者: from jinja2.lexer import Token [as 別名]
def filter_stream(self, stream):
    """Rewrite every ``data`` token through :meth:`normalize`.

    Non-``data`` tokens pass through untouched; ``data`` tokens are
    handed to ``self.normalize`` via a :class:`StreamProcessContext`
    and re-emitted with the normalized value.
    """
    ctx = StreamProcessContext(stream)
    for tok in stream:
        if tok.type == 'data':
            ctx.token = tok
            yield Token(tok.lineno, 'data', self.normalize(ctx))
        else:
            yield tok
示例3: filter_stream
# 需要導入模塊: from jinja2 import lexer [as 別名]
# 或者: from jinja2.lexer import Token [as 別名]
def filter_stream(self, stream):
    """
    We convert
    {{ some.variable | filter1 | filter 2}}
    to
    {{ some.variable | filter1 | filter 2 | yaml}}
    ... for all variable declarations in the template
    This function is called by jinja2 immediately
    after the lexing stage, but before the parser is called.
    """
    while not stream.eos:
        token = next(stream)
        if token.test("variable_begin"):
            var_expr = []
            while not token.test("variable_end"):
                var_expr.append(token)
                token = next(stream)
            variable_end = token
            last_token = var_expr[-1]
            # BUG FIX: the original used ``continue`` when the expression
            # already ended in ``| yaml``, which dropped the collected
            # tokens entirely and erased the whole ``{{ ... | yaml }}``
            # expression from the output stream.  We must still yield the
            # tokens unmodified; only the wrapping is conditional.
            if not (last_token.test("name") and last_token.value == "yaml"):
                # Wrap the whole expression between the `variable_begin`
                # and `variable_end` marks in parens, then pipe to yaml:
                var_expr.insert(1, Token(var_expr[0].lineno, "lparen", None))
                var_expr.append(Token(var_expr[-1].lineno, "rparen", None))
                var_expr.append(Token(token.lineno, "pipe", "|"))
                var_expr.append(Token(token.lineno, "name", "yaml"))
            var_expr.append(variable_end)
            for token in var_expr:
                yield token
        else:
            yield token
示例4: filter_stream
# 需要導入模塊: from jinja2 import lexer [as 別名]
# 或者: from jinja2.lexer import Token [as 別名]
def filter_stream(self, stream):
    """
    We convert
    {{ some.variable | filter1 | filter 2}}
    to
    {{ ( some.variable | filter1 | filter 2 ) | bind}}
    ... for all variable declarations in the template
    Note the extra ( and ). We want the | bind to apply to the entire value, not just the last value.
    The parentheses are mostly redundant, except in expressions like {{ '%' ~ myval ~ '%' }}
    This function is called by jinja2 immediately
    after the lexing stage, but before the parser is called.
    """
    while not stream.eos:
        token = next(stream)
        if not token.test("variable_begin"):
            yield token
            continue
        # Collect every token up to (but excluding) the closing mark.
        expr_tokens = []
        while not token.test("variable_end"):
            expr_tokens.append(token)
            token = next(stream)
        closing = token
        tail = expr_tokens[-1]
        lineno = tail.lineno
        # don't bind twice
        already_bound = (tail.test("name")
                         and tail.value in ('bind', 'inclause', 'sqlsafe'))
        if not already_bound:
            param_name = self.extract_param_name(expr_tokens)
            # Parenthesize the full expression, then append `| bind(name)`.
            expr_tokens.insert(1, Token(lineno, 'lparen', u'('))
            expr_tokens.extend([
                Token(lineno, 'rparen', u')'),
                Token(lineno, 'pipe', u'|'),
                Token(lineno, 'name', u'bind'),
                Token(lineno, 'lparen', u'('),
                Token(lineno, 'string', param_name),
                Token(lineno, 'rparen', u')'),
            ])
        expr_tokens.append(closing)
        yield from expr_tokens