

Python Lexer.next_token Method Code Examples

This article collects typical usage examples of the lexer.Lexer.next_token method in Python. If you have been wondering exactly what Lexer.next_token does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, lexer.Lexer.


The following sections present four code examples of the Lexer.next_token method, ordered by popularity.
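
Before the project excerpts, here is a minimal, self-contained sketch of the calling pattern that all four examples share: next_token is called repeatedly until an end-of-input token comes back. The Lexer and Token classes below are hypothetical stand-ins invented for illustration, not the APIs of the projects quoted later.

# Hypothetical whitespace lexer illustrating the next_token loop pattern.
class Token:
    def __init__(self, name, value):
        self.name = name
        self.value = value

class Lexer:
    def __init__(self, text):
        self.words = text.split()

    def next_token(self):
        # Return the next token, or an EOF token once input is exhausted.
        if self.words:
            return Token('WORD', self.words.pop(0))
        return Token('EOF', None)

lexer = Lexer('SELECT id FROM users')
token = lexer.next_token()
while token.name != 'EOF':
    print(token.name, token.value)
    token = lexer.next_token()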

Example 1: helper

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import next_token [as alias]
def helper(self, sql):
    # Print the SQL, then scan and print every token until EOF.
    print("SQL: ", sql)
    lexer = Lexer(sql)
    lexer.next_token()
    while lexer.token.name != EOF.name:
        print(lexer.info())
        lexer.next_token()
Developer: xumingming, Project: pysqlparser, Lines: 9, Source file: expr_parser_test.py

Example 2: Parser

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import next_token [as alias]
class Parser(object):
    mode_lexer = 0
    mode_parser = 1
    mode_stable = 2
    mode_compile = 3
    mode_execute = 4

    def __init__(self, stdin, stdout=sys.stdout, stderr=sys.stderr, mode=mode_execute):
        """
        Those streams will be closed at last.
        :param stdin: the source code input stream
        :param stdout: the standard output stream
        :param stderr: the standard error stream
        :param mode: mode
        """
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.lexer = Lexer(stdin, stdout=stdout, stderr=stderr)

        self.mode = mode

        self.tokenTree = TokenTree()
        self.rootNode = None
        self.stable = STable()
        self.ahead = None  # the most recently read token
        self.buff = []  # unget buffer for pushed-back tokens
        self.currentLine = 0  # tracks the current line when printing lexer output

    def lexse(self):
        """
        Run the lexer over the whole input and build the token tree.
        :return: token_tree_root_node
        """
        echo = io.StringIO()
        self.ahead = self.lexer.next_token()
        while self.ahead:
            echo.write(self._build_token_tree())
            try:
                self.ahead = self.lexer.next_token()
            except InvalidTokenError:
                return
        self.stdout.write(echo.getvalue())
        echo.close()
        return self.tokenTree.rootNode

    def parse(self):
        """
        Run the parser.
        :return: syntax_tree_root_node, token_tree_root_node
        """
        try:
            self.rootNode = self._parse_exter_stmts()
        except InvalidTokenError:
            return None
        else:
            if self.mode == Parser.mode_parser:
                self.stdout.write('%s\n' % self.rootNode.gen_tree())
            return self.rootNode, self.tokenTree.rootNode
        finally:
            self.stdin.close()

    def semantic(self):
        """
        Semantic analysis using a DFS traversal of the syntax tree.
        :return: root_stable, root_node, root_token_node
        """

        # do parse first
        parse_result = self.parse()
        if not parse_result:
            return None

        # add `read` and `write` function to stable
        self.stable.symbol_append(Symbol('read', STypeFunc(SType(tokens.Token_INT), [])))
        self.stable.symbol_append(Symbol('write', STypeFunc(SType(tokens.Token_VOID), [SType(tokens.Token_INT)])))

        stack = [(self.rootNode, self.stable)]  # each entry pairs a node with the symbol table that directly encloses it
        while len(stack) > 0:
            node, stable = stack.pop()
            try:
                table = node.gen_stable(stable)
            except SemanticsError as e:
                self.stderr.write('%s %s\n' % (str(e), node.gen_location()))
                return None
            else:
                children = list(node.childItems)
                children.reverse()
                children = [(child, table or stable) for child in children]
                stack += children

        # check main function
        error = self.stable.check_main()
        if error:
            self.stderr.write('%s\n' % error)
            return None
        elif self.mode == Parser.mode_stable:
            self.stdout.write(self.stable.gen_tree())

        return self.stable, parse_result[0], parse_result[1]
Developer: YieldNull, Project: cinter, Lines: 102, Source file: parser.py
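
For context, a hypothetical driver for the class above could look like the following. The source file name and the chosen mode are assumptions for illustration; only the Parser API shown in the excerpt is relied on.

# Hypothetical usage sketch for the Parser above (file name and mode are assumed).
import sys

with open('program.c') as source:
    parser = Parser(source, stdout=sys.stdout, stderr=sys.stderr, mode=Parser.mode_stable)
    result = parser.semantic()  # runs parse() first, then the DFS semantic pass
    if result is not None:
        stable, root_node, root_token_node = result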

Example 3: open

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import next_token [as alias]
from tokens import Token
from lexer import Lexer

with open('lextest.txt') as f:
    lexer = Lexer(f)
    while True:
        # Peek first so EOF is detected without consuming a token.
        if lexer.peek_token() == Token.EOF:
            break
        print(lexer.next_token())
Developer: andars, Project: ll_compiler, Lines: 11, Source file: lex.py
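
Example 3 relies on a peek_token method alongside next_token. If a lexer only exposes next_token, a one-token lookahead buffer is a common way to build peeking on top of it; the sketch below is a generic wrapper written for illustration, not code from the quoted project.

# Generic one-token lookahead wrapper (hypothetical; not from the quoted project).
class Lookahead:
    def __init__(self, lexer):
        self.lexer = lexer
        self.buffered = None

    def peek_token(self):
        # Read one token ahead and hold it until next_token is called.
        if self.buffered is None:
            self.buffered = self.lexer.next_token()
        return self.buffered

    def next_token(self):
        # Hand out the buffered token (filling the buffer if needed) and clear it.
        token, self.buffered = self.peek_token(), None
        return token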

Example 4: Parser

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import next_token [as alias]
class Parser(object):
    def expect(self, *a, **kw):
        # Consume the next token if its type matches any of the given names.
        t, _ = self.tokens.peak()
        if t == "EOF":
            return False
        if t in list(a) + [v for k, v in kw.items()]:
            self.tokens.next()
            return True
        return False

    def expect_not(self, *a, **kw):
        # Consume the next token if its type does NOT match any given name.
        t, _ = self.tokens.peak()
        if t == "EOF":
            return False
        if t not in list(a) + [v for k, v in kw.items()]:
            self.tokens.next()
            return True
        return False

    def must(self, *a, **kw):
        # Like expect(), but raise if the next token does not match.
        t, _ = self.tokens.peak()
        if t in list(a) + [v for k, v in kw.items()]:
            self.tokens.next()
            return True
        raise Exception(
            "Expected {}, but got {}".format(
                ",".join(list(a) + [v for k, v in kw.items()]), t
            )
        )

    def must_be(self, typ, val):
        t, v = self.tokens.peak()
        if t == typ and v == val:
            self.tokens.next()
            return True
        raise Exception(
            "Expected {} ({}), but got {} ({})".format(
                val, typ,
                v, t,
            )
        )

    def parse(self, text):
        self.lexer = Lexer(text)
        # next_token() is iterated here, so it is expected to yield tokens lazily;
        # materialize them so Peep can provide lookahead.
        self.tokens = Peep(list(self.lexer.next_token()))

        ast = Steps()

        retval = self.instruction()
        assert retval and retval.instruction == "FROM"
        while retval:  # read instructions
            if type(retval) != bool:
                ast.steps.append(retval)
            retval = self.instruction()

        return ast

    def instruction(self):
        while self.expect("WHITESPACE"):
            pass
        if self.expect("NEWLINE"):
            return True
        step = Step()
        if self.expect("COMMENT"):
            return True
        elif self.expect("KEYWORD"):
            step.instruction = self.tokens.last()[1]
            self.expect("WHITESPACE")

            if step.instruction in KEYWORDS_JSON_SUPPORT \
                    and self.expect("BRACKET_OPEN"):
                # expect json array
                self.must("STRING")
                step.arguments.append(self.tokens.last()[1])
                while self.expect("COMMA"):
                    self.expect("WHITESPACE")
                    self.must("STRING")
                    step.arguments.append(self.tokens.last()[1])
                    self.expect("WHITESPACE")
                self.must("BRACKET_CLOSE")
                while self.expect("WHITESPACE"):
                    pass
                self.expect("NEWLINE")
            else:
                while self.expect_not("NEWLINE"):
                    if self.tokens.last()[0] == "WHITESPACE":  # skip spaces
                        continue
                    step.arguments.append(self.tokens.last()[1])
                self.expect("NEWLINE")
            return step
        return False
Developer: jebjerg, Project: dockerlint, Lines: 93, Source file: parser.py
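
Example 4 depends on a Peep helper that is not shown in the excerpt. Judging from the calls above (peak, next, last), it wraps the token list with one-token lookahead; the sketch below is a minimal reconstruction under that assumption, not the project's actual implementation.

# Minimal reconstruction of the Peep helper assumed by Example 4.
# Inferred from the calls used above: peak(), next(), last().
class Peep:
    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0
        self.consumed = None

    def peak(self):
        # Return the next (type, value) pair without consuming it.
        if self.pos < len(self.tokens):
            return self.tokens[self.pos]
        return ("EOF", None)

    def next(self):
        # Consume and return the next token, remembering it for last().
        self.consumed = self.peak()
        self.pos += 1
        return self.consumed

    def last(self):
        # The most recently consumed (type, value) pair.
        return self.consumed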


Note: The lexer.Lexer.next_token examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Consult each project's license before redistributing or using the code. Do not reproduce this article without permission.