本文整理汇总了Python中tokenizer.Tokenizer.get_next_token方法的典型用法代码示例。如果您正苦于以下问题:Python Tokenizer.get_next_token方法的具体用法?Python Tokenizer.get_next_token怎么用?Python Tokenizer.get_next_token使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tokenizer.Tokenizer
的用法示例。
在下文中一共展示了Tokenizer.get_next_token方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: input
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import get_next_token [as 别名]
# Prompt for a source-file name, read that file, and dump its token
# stream to stdout (one token per line) until the EOF token appears.
from tokenizer import Tokenizer, Token
from scanner import Scanner

filename = input(">> ")
with open(filename, "r") as source:
    program_text = source.read()

tokenizer = Tokenizer(Scanner(program_text))
token = tokenizer.get_next_token()
while token.get_type() != Token.EOF:
    print(token)
    token = tokenizer.get_next_token()
示例2: Parser
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import get_next_token [as 别名]
class Parser(object):
def __init__(self, expression):
    """Build the parser: eagerly tokenize *expression*.

    The complete token sequence is generated up front so the
    recursive-descent methods can consume it as a plain list.
    """
    self.sc = Scanner(expression)
    self.tok = Tokenizer(self.sc)
    # Must be None before get_token_sequence() runs so its cache check
    # falls through and the sequence actually gets built.
    self.tokens = None
    self.tokens = self.get_token_sequence()
    # AST root; populated later by generate_tree().
    self.root = None
def get_token_sequence(self):
    """Return a shallow copy of the program's full token list.

    Tokenizes lazily on the first call and caches the result in
    self.tokens; later calls serve copies from the cache.
    """
    if self.tokens is not None:
        return list(self.tokens)
    collected = []
    while True:
        collected.append(self.tok.get_next_token())
        # the EOF token is included as the final element
        if collected[-1].get_type() == Token.EOF:
            break
    self.tokens = collected
    return list(collected)
def generate_tree(self):
    """Build the AST for the whole program and store its root in self.root.

    The program is a SEPARATOR-joined list of expressions; each separator
    becomes a BinOp whose left child is everything parsed so far, and the
    final tree is wrapped in a UnOp holding the EOF token.

    Calls self.error() if the stream does not end with EOF.
    """
    # get the expression subtree
    subtree = self.get_expr()
    while self.tokens[0].get_type() is Token.SEPARATOR:
        tok = self.tokens[0]
        # the subtree we already have becomes the left node
        self.tokens = self.tokens[1:]
        # if we got to the end there is no right child
        if self.tokens[0].get_type() is Token.EOF:
            right = None
        # fetch the subtree to the right of the separator node
        else:
            right = self.get_expr()
        subtree = BinOp(tok, parent=None, left_child=subtree,
                        right_child=right)
    # the subtree we got so far is the only child of the EOF node
    if self.tokens[0].get_type() is Token.EOF:
        # FIX: wrap the actual EOF *token object* (self.tokens[0]), not the
        # Token.EOF type constant — every other node (BinOp above, and the
        # sibling parser's get_program) receives a token instance.
        self.root = UnOp(self.tokens[0], parent=None, child=subtree)
    # something weird happened! :D
    else:
        self.error("Program did not terminate with expected EOF")
def get_expr(self):
    """expr := term ((PLUS | MINUS) term)*; return the subtree root."""
    node = self.get_term()
    # fold additive operators into a left-leaning chain of BinOp nodes
    while self.tokens[0].get_type() in (Token.PLUS, Token.MINUS):
        op_tok, self.tokens = self.tokens[0], self.tokens[1:]
        node = BinOp(op_tok, parent=None,
                     left_child=node, right_child=self.get_term())
    return node
def get_term(self):
    """term := power ((PRODUCT | DIVISION) power)*; return the subtree."""
    node = self.get_power()
    # multiplicative operators bind tighter than +/- but looser than ^
    while self.tokens[0].get_type() in (Token.PRODUCT, Token.DIVISION):
        op_tok, self.tokens = self.tokens[0], self.tokens[1:]
        node = BinOp(op_tok, parent=None,
                     left_child=node, right_child=self.get_power())
    return node
def get_power(self):
    """power := base (POWER base)*; return the subtree.

    NOTE(review): chained POWER operators are built left-associatively
    here, unlike the conventional right-associative exponent — preserved
    as-is.
    """
    node = self.get_base()
    while self.tokens[0].get_type() is Token.POWER:
        op_tok, self.tokens = self.tokens[0], self.tokens[1:]
        node = BinOp(op_tok, parent=None,
                     left_child=node, right_child=self.get_base())
    return node
def get_base(self):
tok = self.tokens[0]
self.tokens = self.tokens[1:]
# if we find a ( we must get the corresponding subtree
if tok.get_type() in (Token.INTEGER, Token.FLOAT):
return Literal(tok)
elif tok.get_type() is Token.LGROUP:
# get the subtree
subtree = self.get_expr()
tok = self.tokens[0]
self.tokens = self.tokens[1:]
# right enclosing parenthesis was found, return subtree
if tok.get_type() is not Token.RGROUP:
#.........这里部分代码省略.........
示例3: Parser
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import get_next_token [as 别名]
class Parser(object):
"""Implement a recursive-descent parser;
The parser will attempt to build an AST of the program;
Its only argument is the string to be parsed; Upon success, it will
store the AST's root (the EOF Node) in self.root.
The show() method provides pretty tree-like printing for debugging"""
def __init__(self, expression):
    """Initialize the parser by generating the token sequence"""
    self.sc = Scanner(expression)
    self.tok = Tokenizer(self.sc)
    # Must be None before get_token_sequence() runs so its cache check
    # falls through and the sequence actually gets built.
    self.tokens = None
    self.tokens = self.get_token_sequence()
    # AST root (the EOF UnOp node); populated by get_program().
    self.root = None
def get_token_sequence(self):
    """Tokenize the whole program once; return a copy of the token list."""
    # prevent redundant calls: serve from the cache when already built
    if self.tokens is not None:
        return list(self.tokens)
    sequence = []
    while True:
        sequence.append(self.tok.get_next_token())
        # the EOF token is kept as the final element
        if sequence[-1].get_type() == Token.EOF:
            break
    self.tokens = sequence
    return list(sequence)
def get_program(self):
    """Entry point: parse self.tokens into an AST rooted at the EOF node.

    Raises ParserException when no suite can be parsed or when the
    remaining input does not end with EOF; on success stores the root
    UnOp in self.root.
    """
    remaining = self.tokens[::]
    suite, remaining = self.get_suite(remaining)
    if suite is None:
        raise ParserException("Could not parse the program correctly")
    if remaining[0].get_type() == Token.EOF:
        self.root = UnOp(remaining[0], parent=None, child=suite)
    else:
        raise ParserException("Could not parse the program")
def get_suite(self, token_list):
    """suite := stmt (SEPARATOR stmt)*; return (subtree, rest-of-tokens).

    Returns (None, original-token-list) when not even one statement could
    be parsed, so the caller can retry another production.
    """
    snapshot = token_list[::]
    subtree, token_list = self.get_stmt(token_list)
    while token_list and token_list[0].get_type() is Token.SEPARATOR:
        sep = token_list[0]
        token_list = token_list[1:]
        if token_list[0].get_type() is Token.EOF:
            # trailing separator before EOF: pad the right side with Null
            right = Literal(Token(Token.NULL, "Null"), parent=None)
        else:
            right, token_list = self.get_stmt(token_list)
        if right is None:
            break
        subtree = BinOp(sep, parent=None, left_child=subtree,
                        right_child=right)
    # hand the caller's token list back untouched when nothing was parsed
    return (None, snapshot) if subtree is None else (subtree, token_list)
def get_stmt(self, token_list):
    """stmt := io_stmt | control | assignment | compound | expression.

    Tries each alternative in order (each failed sub-parser hands back an
    unconsumed token list); returns (None, original-list) when all fail.
    """
    snapshot = token_list[::]
    alternatives = (self.get_io_stmt,
                    self.get_control,
                    self.get_assignment,
                    self.get_compound,
                    self.get_expression)
    for parse in alternatives:
        subtree, token_list = parse(token_list)
        if subtree is not None:
            return subtree, token_list
    return None, snapshot
def get_io_stmt(self, token_list):
    """io_stmt := in_stmt | out_stmt; (None, original-list) on failure."""
    snapshot = token_list[::]
    subtree, token_list = self.get_in_stmt(token_list)
    if subtree is None:
        # no input statement here — try an output statement instead
        subtree, token_list = self.get_out_stmt(token_list)
    return (None, snapshot) if subtree is None else (subtree, token_list)
def get_control(self, token_list):
tokens = token_list[::]
if token_list[0].get_type() not in [Token.STOP, Token.RETURN,
Token.JUMPOVER, Token.HALT]:
#.........这里部分代码省略.........
示例4: ParserInterpreter
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import get_next_token [as 别名]
class ParserInterpreter(object):
    """Recursive-descent parser that evaluates arithmetic expressions
    on the fly instead of building an AST.

    Grammar (each level is evaluated left-associatively):
        program := expr (SEPARATOR expr)* EOF
        expr    := term ((PLUS | MINUS) term)*
        term    := power ((PRODUCT | DIVISION) power)*
        power   := base (POWER base)*
        base    := INTEGER | FLOAT | LGROUP expr RGROUP
    """

    def __init__(self, expression):
        """Tokenize *expression* eagerly, like the Parser classes do."""
        self.sc = Scanner(expression)
        self.tok = Tokenizer(self.sc)
        # FIX: build the token sequence here (mirroring Parser.__init__)
        # so get_program() does not crash with `None[0]` on a fresh
        # instance whose caller never called get_token_sequence().
        # get_token_sequence() still serves cached copies afterwards.
        self.tokens = None
        self.tokens = self.get_token_sequence()

    def get_token_sequence(self):
        """Return a copy of the full token list, building and caching it
        (EOF token included) on the first call."""
        if self.tokens is not None:
            return self.tokens[::]
        self.tokens = []
        self.tokens.append(self.tok.get_next_token())
        while self.tokens[-1].get_type() != Token.EOF:
            self.tokens.append(self.tok.get_next_token())
        return self.tokens[::]

    def get_program(self):
        """Evaluate every SEPARATOR-joined expression in the program and
        return the value of the last one.

        Prints each intermediate result; calls self.error() if the token
        stream does not terminate with EOF.
        """
        result = self.get_expr()
        print("pre-result: {}".format(result))
        # token types are compared with == throughout for consistency
        # (`is` only works when the type constants are interned singletons)
        while self.tokens[0].get_type() == Token.SEPARATOR:
            self.tokens = self.tokens[1:]
            if self.tokens[0].get_type() == Token.EOF:
                break
            result = self.get_expr()
            print("pre-result: {}".format(result))
        if self.tokens[0].get_type() == Token.EOF:
            return result
        else:
            self.error("Program did not terminate with expected EOF")

    def get_expr(self):
        """expr := term ((PLUS | MINUS) term)* — evaluate left to right."""
        result = self.get_term()
        while self.tokens[0].get_type() in (Token.PLUS, Token.MINUS):
            tok = self.tokens[0]
            self.tokens = self.tokens[1:]
            other = self.get_term()
            if tok.get_type() == Token.PLUS:
                result += other
            elif tok.get_type() == Token.MINUS:
                result -= other
        return result

    def get_term(self):
        """term := power ((PRODUCT | DIVISION) power)*."""
        result = self.get_power()
        while self.tokens[0].get_type() in (Token.PRODUCT, Token.DIVISION):
            tok = self.tokens[0]
            self.tokens = self.tokens[1:]
            other = self.get_power()
            if tok.get_type() == Token.PRODUCT:
                result *= other
            elif tok.get_type() == Token.DIVISION:
                result /= other
        return result

    def get_power(self):
        """power := base (POWER base)* — evaluated left-associatively."""
        result = self.get_base()
        while self.tokens[0].get_type() == Token.POWER:
            self.tokens = self.tokens[1:]
            result = pow(result, self.get_base())
        return result

    def get_base(self):
        """base := INTEGER | FLOAT | LGROUP expr RGROUP.

        Returns the literal's value or the value of the parenthesized
        sub-expression; calls self.error() on anything else.
        """
        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        # numeric literals evaluate to their own value
        if tok.get_type() in (Token.INTEGER, Token.FLOAT):
            return tok.get_value()
        if tok.get_type() != Token.LGROUP:
            self.error("Could not get a valid base")
        # parenthesized sub-expression: evaluate it, then require ')'
        value = self.get_expr()
        closing = self.tokens[0]
        self.tokens = self.tokens[1:]
        if closing.get_type() != Token.RGROUP:
            self.error("Unmatched left parenthesis")
        return value

    def error(self, msg):
        """Abort parsing/evaluation by raising with *msg*."""
        raise Exception(msg)