Python Lexer.next_token Method Code Examples

This article collects typical usage examples of the Python method Lexer.Lexer.next_token: what it does, how to call it, and how it is used in real code. You can also explore further usage examples of the containing class, Lexer.Lexer.


The following presents 9 code examples of the Lexer.next_token method, sorted by popularity by default.
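All of the test examples below compare tokens with ==, so the project's Token type must implement value equality. The parsec project's own definitions are not reproduced on this page; what follows is a minimal sketch of the Token and TokenTypes definitions the tests appear to assume (the names come from the examples below, the implementation details are guesses):

from enum import Enum

class TokenTypes(Enum):
    """Token kinds referenced by the examples below (assumed to be an enum)."""
    INT = 'INT'
    ADD = 'ADD'
    SUB = 'SUB'
    MUL = 'MUL'
    DIV = 'DIV'
    L_PAREN = 'L_PAREN'
    R_PAREN = 'R_PAREN'
    EOF = 'EOF'

class Token(object):
    def __init__(self, type, value=None):
        self.type = type
        self.value = value

    def __eq__(self, other):
        # Value equality is what the asserts in the tests rely on.
        return (isinstance(other, Token)
                and self.type == other.type
                and self.value == other.value)

    def __repr__(self):
        return '<Token {type} {value}>'.format(type=self.type, value=self.value)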

Example 1: test_math_symbols

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_math_symbols():
    lexer = Lexer('+ - * /')

    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.SUB)
    assert lexer.next_token() == Token(TokenTypes.MUL)
    assert lexer.next_token() == Token(TokenTypes.DIV)
Author: Demotivated, Project: parsec, Lines: 9, Source: test_lexer.py

Example 2: test_next_token

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_next_token():
    lexer = Lexer('1+ 3')

    assert lexer.next_token() == Token(TokenTypes.INT, 1)
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.INT, 3)
    assert lexer.next_token() == Token(TokenTypes.EOF)
Author: Demotivated, Project: parsec, Lines: 9, Source: test_lexer.py

Example 3: test_skip_whitespace

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_skip_whitespace():
    lexer = Lexer('1   +3 9')

    assert lexer.next_token() == Token(TokenTypes.INT, 1)
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.INT, 3)
    assert lexer.next_token() == Token(TokenTypes.INT, 9)
    assert lexer.next_token() == Token(TokenTypes.EOF)
Author: Demotivated, Project: parsec, Lines: 10, Source: test_lexer.py

Example 4: Parser

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
class Parser(object):

    def __init__(self, path_to_file):
        self.lexer = Lexer(path_to_file)
        self.path_to_file = path_to_file
        self.ast_nodes = {}

    def parse_file(self):
        self.current_token = self.lexer.next_token()
        self.parse()
        return self.ast_nodes

    def print_nodes(self):
        for node, data in self.ast_nodes.items():
            print(node, data)

    def add_parent_child(self, id_node, current_token):
        self.add_node(id_node)
        self.add_node(current_token)
        if current_token not in self.ast_nodes[id_node]["children"]:
            self.ast_nodes[id_node]["children"].append(current_token)

    def add_node(self, id_node):
        if id_node not in self.ast_nodes:
            self.ast_nodes[id_node] = {"children": []}

    def parse(self):
        self.parse_strict()
        if self.current_token.kind == "DIGRAPH":
            self.parse_digraph()
        elif self.current_token.kind == "GRAPH":
            self.parse_graph()
        else:
            raise ParserException(self.current_token)

    def accept(self, token, token_kind):
        if token.kind == token_kind:
            self.current_token = self.lexer.next_token()
        else:
            raise ParserException(self.current_token)

    def parse_digraph(self):
        if self.current_token.kind == "DIGRAPH":
            self.accept(self.current_token, "DIGRAPH")
            self.parse_optional_id()
            self.accept(self.current_token, "LEFT_CB")
            self.parse_statement_list()
            self.accept(self.current_token, "RIGHT_CB")
        else:
            raise ParserException(self.current_token)

    def parse_graph(self):
        if self.current_token.kind == "GRAPH":
            self.accept(self.current_token, "GRAPH")
            self.parse_optional_id()
            self.accept(self.current_token, "LEFT_CB")
            self.parse_statement_list()
            self.accept(self.current_token, "RIGHT_CB")
        else:
            raise ParserException(self.current_token)

    def parse_statement(self):
        if (self.current_token.kind == "NODE") or (self.current_token.kind == "EDGE") or (self.current_token.kind == "GRAPH"):
            self.parse_node_statement()
        elif self.current_token.kind == "ID":
            id_node = self.current_token.value
            self.accept(self.current_token, "ID")
            if (self.current_token.kind == "DIRECTED_EDGE") or (self.current_token.kind == "UNDIRECTED_EDGE"):
                self.parse_edge_statement(id_node)
            elif self.current_token.kind == "LEFT_SB":
                self.parse_node_creation(id_node)
            elif self.current_token.kind == "EQUALS":
                self.parse_single_assignment()
        elif self.current_token.kind == "SUBGRAPH":
            self.parse_subgraph()
        else:
            raise ParserException(self.current_token)

    def parse_single_assignment(self):
        if self.current_token.kind == "EQUALS":
            self.accept(self.current_token, "EQUALS")
            self.accept(self.current_token, "ID")
        else:
            raise ParserException(self.current_token)

    def parse_subgraph(self):
        if self.current_token.kind == "SUBGRAPH":
            self.accept(self.current_token, "SUBGRAPH")
            self.parse_optional_id()
            self.accept(self.current_token, "LEFT_CB")
            self.parse_statement_list()
            self.accept(self.current_token, "RIGHT_CB")
        else:
            raise ParserException(self.current_token)

    def parse_node_creation(self, id_node):
        self.add_node(id_node)
        if self.current_token.kind == "LEFT_SB":
            self.parse_atttribute_list()
#......... part of the code omitted here .........
Author: DanLindeman, Project: luthor, Lines: 103, Source: Parser.py
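For context, a hypothetical usage of this DOT-file parser might look like the following ('graph.dot' is an illustrative file name; parse_strict, parse_statement_list, and the attribute-list handling live in the omitted part of the source, so this is a sketch under those assumptions, not a verified run):

# Hypothetical usage sketch for the DOT parser above.
# Assumes the omitted methods behave as their names suggest.
parser = Parser('graph.dot')     # e.g. a file containing: digraph { a -> b }
ast_nodes = parser.parse_file()  # adjacency dict, e.g. {'a': {'children': ['b']}, 'b': {'children': []}}
parser.print_nodes()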

Example 5: Parser

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
class Parser(object):

    def __init__(self, source_code):
        self.lexer = Lexer(source_code)
        self.current_token = self.lexer.next_token()

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return '<Parser {lexer} {token}>'.format(
            lexer=self.lexer,
            token=self.lexer.current_token
        )

    def error(self, message):
        raise Exception(message)

    def eat(self, token_type):
        if self.current_token.type != token_type:
            self.error('Attempted to eat {expected} but found {actual}'.format(
                expected=token_type,
                actual=self.current_token
            ))
        self.current_token = self.lexer.next_token()

    def parse(self):
        """Returns the complete Abstract Syntax Tree"""
        return self.expression()

    def expression(self):
        """expression: term ((ADD | SUB) term)*"""
        node = self.term()

        while self.lexer.current_token.type in (
            TokenTypes.ADD,
            TokenTypes.SUB
        ):
            current_token = self.lexer.current_token

            if current_token.type == TokenTypes.ADD:
                self.eat(TokenTypes.ADD)
                node = BinOp(node, current_token, self.term())

            elif current_token.type == TokenTypes.SUB:
                self.eat(TokenTypes.SUB)
                node = BinOp(node, current_token, self.term())

        return node

    def term(self):
        """term : factor ((MUL | DIV) factor)*"""
        node = self.factor()

        while self.lexer.current_token.type in (
            TokenTypes.MUL,
            TokenTypes.DIV
        ):
            current_token = self.lexer.current_token

            if current_token.type == TokenTypes.MUL:
                self.eat(TokenTypes.MUL)
                node = BinOp(node, current_token, self.factor())

            elif current_token.type == TokenTypes.DIV:
                self.eat(TokenTypes.DIV)
                node = BinOp(node, current_token, self.factor())

        return node

    def factor(self):
        """factor : INTEGER | L_PAREN expression R_PAREN"""
        token = self.current_token

        if token.type == TokenTypes.INT:
            self.eat(TokenTypes.INT)
            node = Num(token)

        elif token.type == TokenTypes.L_PAREN:
            self.eat(TokenTypes.L_PAREN)
            node = self.expression()
            self.eat(TokenTypes.R_PAREN)

        else:
            # Without this branch a stray token would surface as an
            # UnboundLocalError on `node` instead of a parse error.
            self.error('Unexpected token in factor: {token}'.format(token=token))

        return node
Author: Demotivated, Project: parsec, Lines: 86, Source: Parser.py
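Once parse() returns, the AST is just a tree of BinOp and Num nodes, and evaluating it is a small recursive walk. The sketch below is not from the parsec source; it assumes BinOp(node, current_token, self.term()) stores its arguments as .left, .op, and .right, and that Num(token) stores the token as .token:

def evaluate(node):
    """Recursively fold a BinOp/Num tree into a number (assumed attribute names)."""
    if isinstance(node, Num):
        return node.token.value

    left = evaluate(node.left)
    right = evaluate(node.right)
    if node.op.type == TokenTypes.ADD:
        return left + right
    if node.op.type == TokenTypes.SUB:
        return left - right
    if node.op.type == TokenTypes.MUL:
        return left * right
    if node.op.type == TokenTypes.DIV:
        return left / right
    raise Exception('Unknown operator: {op}'.format(op=node.op))

# Hypothetical usage:
#   ast = Parser('1 + 2 * 3').parse()
#   evaluate(ast)  # => 7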

Example 6: test_negative_numbers

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_negative_numbers():
    lexer = Lexer('-3 * -2')

    assert lexer.next_token() == Token(TokenTypes.INT, -3)
    assert lexer.next_token() == Token(TokenTypes.MUL)
    assert lexer.next_token() == Token(TokenTypes.INT, -2)
Author: Demotivated, Project: parsec, Lines: 8, Source: test_lexer.py

Example 7: test_bad_input

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_bad_input():
    lexer = Lexer('&')

    with raises(Exception):
        lexer.next_token()
Author: Demotivated, Project: parsec, Lines: 7, Source: test_lexer.py

Example 8: test_empty_program

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_empty_program():
    lexer = Lexer('')

    assert repr(lexer) == '<Lexer EOF>'
    assert lexer.next_token() == Token(TokenTypes.EOF)
Author: Demotivated, Project: parsec, Lines: 7, Source: test_lexer.py

Example 9: test_minus_as_final_token_crash

# Required import: from Lexer import Lexer [as alias]
# Or: from Lexer.Lexer import next_token [as alias]
def test_minus_as_final_token_crash():
    lexer = Lexer('-')

    assert lexer.next_token() == Token(TokenTypes.SUB)
Author: Demotivated, Project: parsec, Lines: 6, Source: test_lexer.py


Note: The Lexer.Lexer.next_token method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.