This article collects typical usage examples of the tokenizer.Tokenizer.next_token method in Python. If you are wondering how to use Python's Tokenizer.next_token method, or are looking for examples of it in practice, the hand-picked code samples below may help. You can also browse further usage examples of the containing class, tokenizer.Tokenizer.
Two code examples of the Tokenizer.next_token method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: truncate
# Required import: from tokenizer import Tokenizer [as alias]
# Alternatively: from tokenizer.Tokenizer import next_token [as alias]
# Note: CloseTag and UnbalancedError used below are assumed to be provided by the same tokenizer module.
def truncate(cls, value, target_len=200, ellipsis='...'):
    """Returns a copy of str truncated to target_len characters,
    preserving HTML markup (which does not count towards the length).
    Any tags that would be left open by truncation will be closed at
    the end of the returned string. Optionally append ellipsis if
    the string was truncated."""
    # open tags are pushed on here, then popped when
    # the matching close tag is found
    stack = []
    # string to be returned
    retval = []
    # number of characters (not counting markup) placed in retval so far
    length = 0
    tokens = Tokenizer(value)
    tok = tokens.next_token()
    while tok != tokens.token_end:
        if not length < target_len:
            retval.append(ellipsis)
            break
        if tok.__class__.__name__ == 'OpenTag':
            stack.append(tok)
            retval.append(tok.as_string())
        elif tok.__class__.__name__ == 'CloseTag':
            if stack[-1].tag == tok.tag:
                stack.pop()
                retval.append(tok.as_string())
            else:
                raise UnbalancedError(tok.as_string())
        elif tok.__class__.__name__ == 'SelfClosingTag':
            retval.append(tok.as_string())
        else:
            retval.append(tok)
            length += 1
        tok = tokens.next_token()
    while len(stack) > 0:
        tok = CloseTag(stack.pop().tag)
        retval.append(tok.as_string())
    return ''.join(retval)
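A minimal usage sketch for this helper, under stated assumptions: truncate is exposed as a classmethod on a hypothetical HTMLText class, and the tokenizer module behaves as described in the docstring. Neither name nor behaviour is confirmed by the original example.

# Hypothetical usage sketch; HTMLText and the tokenizer module's behaviour
# are assumptions, not part of the original example.
html = '<p>Hello <b>world</b>, and some trailing text</p>'
short = HTMLText.truncate(html, target_len=11)
# Markup is preserved and does not count toward target_len; any tag left
# open at the cut-off point is closed, and the ellipsis is appended.
print(short)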
Example 2: Shunter
# Required import: from tokenizer import Tokenizer [as alias]
# Alternatively: from tokenizer.Tokenizer import next_token [as alias]
# Also requires: import sys (sys.exit is used below)
class Shunter():
    '''
    Converts infix to postfix, storing tokens and the progress of the
    computation as object data.
    '''
    def __init__(self, expression, **kwargs):
        '''
        Initialize the shunter with the expression tokens to convert.
        '''
        self.expr = ' '.join(expression)
        self.output = []
        self.operators = {
            '+': lambda x, y: x + y,
            '-': lambda x, y: x - y,
            '/': lambda x, y: x / y,
            '*': lambda x, y: x * y,
            '^': lambda x, y: x ** y
        }
        self.tokenizer = Tokenizer(self.expr, '+-*/^')
        self.precedence = {'+': 0, '-': 0, '*': 1, '/': 1, '^': 2}

    def _has_higher_precedence(self, op1, op2):
        '''
        Return True if op1 has higher precedence than op2, False otherwise.
        '''
        return self.precedence[op1] > self.precedence[op2]

    def convert(self):
        '''
        Convert the stored infix expression to postfix using the
        shunting-yard algorithm.
        '''
        stack = []
        while self.tokenizer.has_token():
            token = self.tokenizer.next_token().strip()
            if token.isdigit():
                self.output.append(token)
            elif token in self.operators:
                # Pop operators of strictly higher precedence before pushing
                # this one (equal-precedence operators are left on the stack).
                while stack and self._has_higher_precedence(stack[-1], token):
                    self.output.append(stack.pop())
                stack.append(token)
            else:
                print('Parser error: invalid character "{0}"'.format(token))
                sys.exit(1)
        # Flush any operators still waiting on the stack.
        while stack:
            self.output.append(stack.pop())
        return ' '.join(self.output)

    def eval(self):
        '''
        Evaluate the postfix expression produced by convert().
        '''
        stack = []
        for token in self.output:
            if token.isdigit():
                # Convert to int so the operator lambdas do arithmetic,
                # not string operations.
                stack.append(int(token))
            elif token in self.operators:
                try:
                    y, x = stack.pop(), stack.pop()
                    stack.append(self.operators[token](x, y))
                except IndexError:
                    print('Evaluator error: invalid postfix. Sorry.')
                    sys.exit(1)
        # After processing every token, exactly one value should remain.
        if len(stack) != 1:
            print('Invalid postfix.')
            sys.exit(1)
        return stack.pop()
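A minimal usage sketch, assuming tokenizer.Tokenizer(expr, '+-*/^') yields one number or operator per next_token() call; that behaviour is not shown in the original example.

# Hypothetical usage; the exact tokenization behaviour is an assumption.
shunter = Shunter(['3', '+', '4', '*', '2'])
print(shunter.convert())   # expected postfix: 3 4 2 * +
print(shunter.eval())      # expected result: 11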