

Python token.ENDMARKER Attribute Code Examples

This article collects typical usage examples of the token.ENDMARKER attribute in Python. If you are wondering what token.ENDMARKER is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples for the token module, where this attribute is defined.


A total of 10 code examples of the token.ENDMARKER attribute are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Python code examples.
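
Before looking at the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what token.ENDMARKER is: the constant for the token that tokenize appends at the very end of every token stream, which is why it is so often used as an end-of-input check.

import token
import tokenize
from io import StringIO

# tokenize always emits a final ENDMARKER token at the end of the stream.
toks = list(tokenize.generate_tokens(StringIO("x = 1\n").readline))
assert toks[-1].type == token.ENDMARKER
print(token.tok_name[token.ENDMARKER])  # prints: ENDMARKER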

Example 1: parse_kwargs

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def parse_kwargs(self):
        kwargs = {}

        tok = self.current

        while tok.exact_type != token.ENDMARKER:
            if tok.exact_type == token.NEWLINE:
                tok = self.next()
                continue

            if tok.exact_type != token.NAME:
                raise SyntaxError(f"Expected name, found {tok}")
            name = tok.string
            tok = self.next()

            if tok.exact_type != token.EQUAL:
                raise SyntaxError(f"Expected =, found {tok}")
            tok = self.next()

            kwargs[name] = self._parse()

            tok = self.next()

        return kwargs 
Developer: funkybob, Project: stencil, Lines of code: 26, Source file: stencil.py

Example 2: file_input

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def file_input(self, nodelist):
        doc = self.get_docstring(nodelist, symbol.file_input)
        if doc is not None:
            i = 1
        else:
            i = 0
        stmts = []
        for node in nodelist[i:]:
            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
                self.com_append_stmt(stmts, node)
        return Module(doc, Stmt(stmts)) 
Developer: IronLanguages, Project: ironpython2, Lines of code: 13, Source file: transformer.py

Example 3: _SnippetizeNode

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def _SnippetizeNode(node, tokens):
  # The parser module gives a syntax tree that discards comments,
  # non-terminating newlines, and whitespace information. Use the tokens given
  # by the tokenize module to annotate the syntax tree with the information
  # needed to exactly reproduce the original source code.
  node_type = node[0]

  if node_type >= token.NT_OFFSET:
    # Symbol.
    children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
    return Symbol(node_type, children)
  else:
    # Token.
    grabbed_tokens = []
    while tokens and (
        tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
      grabbed_tokens.append(tokens.popleft())

    # parser has 2 NEWLINEs right before the end.
    # tokenize has 0 or 1 depending on if the file has one.
    # Create extra nodes without consuming tokens to account for this.
    if node_type == token.NEWLINE:
      for tok in tokens:
        if tok.type == token.ENDMARKER:
          return TokenSnippet(node_type, grabbed_tokens)
        if tok.type != token.DEDENT:
          break

    assert tokens[0].type == token.OP or node_type == tokens[0].type

    grabbed_tokens.append(tokens.popleft())
    return TokenSnippet(node_type, grabbed_tokens) 
Developer: FSecureLABS, Project: Jandroid, Lines of code: 34, Source file: snippet.py

Example 4: tokenize_code

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def tokenize_code(string, concat_symbol=None):
    tokens = []
    string = string.strip().decode('utf-8').encode('ascii', 'strict') #.decode('string_escape')
    for toknum, tokval, _, _, _  in tokenize.generate_tokens(StringIO(string).readline):
        # We ignore these tokens during evaluation.
        if toknum not in [token.ENDMARKER, token.INDENT, token.DEDENT]:
            tokens.append(tokval.lower())

    return tokens 
Developer: pcyin, Project: tranX, Lines of code: 11, Source file: conala_eval.py
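
The decode/encode line above assumes Python 2 byte strings. A minimal Python 3 sketch of the same idea (filter out token.ENDMARKER, token.INDENT and token.DEDENT so only the visible tokens remain) might look like this; the function name and sample input are illustrative only:

import token
import tokenize
from io import StringIO

def tokenize_code_py3(source):
    # Same filtering idea as above, without the Python 2 bytes handling.
    tokens = []
    for toknum, tokval, _, _, _ in tokenize.generate_tokens(StringIO(source).readline):
        if toknum not in (token.ENDMARKER, token.INDENT, token.DEDENT):
            tokens.append(tokval.lower())
    return tokens

print(tokenize_code_py3("X = Foo(1)\n"))
# e.g. ['x', '=', 'foo', '(', '1', ')', '\n']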

Example 5: tokenize_code

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def tokenize_code(code, mode=None):
    token_stream = generate_tokens(StringIO(code).readline)
    tokens = []
    for toknum, tokval, (srow, scol), (erow, ecol), _ in token_stream:
        if toknum == tk.ENDMARKER:
            break

        if mode == 'decoder':
            if toknum == tk.STRING:
                quote = tokval[0]
                tokval = tokval[1:-1]
                tokens.append(quote)
                tokens.append(tokval)
                tokens.append(quote)
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        elif mode == 'canonicalize':
            if toknum == tk.STRING:
                tokens.append('_STR_')
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        else:
            tokens.append(tokval)

    return tokens 
Developer: pcyin, Project: tranX, Lines of code: 31, Source file: py_utils.py
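
A hypothetical call to the helper above (it assumes from io import StringIO, import token as tk and from tokenize import generate_tokens): because the loop breaks on tk.ENDMARKER, the end-of-stream marker never appears in the returned list.

print(tokenize_code("x = 'hello'\n", mode='canonicalize'))
# e.g. ['x', '=', '_STR_', '\n']  (string literal canonicalized, ENDMARKER dropped)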

Example 6: create_mark_checker

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def create_mark_checker(self, source, verify=True):
    atok = self.create_asttokens(source)
    checker = tools.MarkChecker(atok)

    # The last token should always be an ENDMARKER
    # None of the nodes should contain that token
    assert atok.tokens[-1].type == token.ENDMARKER
    if atok.text:  # except for empty files
      for node in checker.all_nodes:
        assert node.last_token.type != token.ENDMARKER

    if verify:
      checker.verify_all_nodes(self)
    return checker 
Developer: gristlabs, Project: asttokens, Lines of code: 16, Source file: test_mark_tokens.py

Example 7: parse

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def parse(s):
        p = Expression(s)
        result = p._parse()

        if p.current.exact_type not in (token.NEWLINE, token.ENDMARKER):
            raise SyntaxError(f"Parse ended unexpectedly: {p.current}")

        return result 
Developer: funkybob, Project: stencil, Lines of code: 10, Source file: stencil.py
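
The same end-of-expression check can be sketched against a plain tokenize stream instead of stencil's Expression wrapper (the sample input is illustrative): once an expression has been consumed, the only tokens allowed to remain are NEWLINE and ENDMARKER.

import token
import tokenize
from io import StringIO

toks = list(tokenize.generate_tokens(StringIO("user.name\n").readline))
# Only NEWLINE and ENDMARKER follow the expression tokens.
assert all(t.exact_type in (token.NEWLINE, token.ENDMARKER) for t in toks[-2:])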

Example 8: _simulate_compile_singlemode

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def _simulate_compile_singlemode(self, s):
        # Calculate line offsets
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos)+1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Update the paren level.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # Are we starting a statement?
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or
                 tok==':') and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output += stmt[0]
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output) 
Developer: rafasashi, Project: razzy-spinner, Lines of code: 54, Source file: doctest_driver.py

Example 9: __init__

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def __init__(self, error_info):
        import tokenize

        super().__init__(error_info)

        self.tokens = []
        self.token_error = None

        if self.error_info["message"] == "EOL while scanning string literal":
            self.intro_text = (
                "You haven't properly closed the string on line %s." % self.error_info["lineno"]
                + "\n(If you want a multi-line string, then surround it with"
                + " `'''` or `\"\"\"` at both ends.)"
            )

        elif self.error_info["message"] == "EOF while scanning triple-quoted string literal":
            # lineno is not useful, as it is at the end of the file and user probably
            # didn't want the string to end there
            self.intro_text = "You haven't properly closed a triple-quoted string"

        else:
            if self.error_info["filename"] and os.path.isfile(self.error_info["filename"]):
                with open(self.error_info["filename"], mode="rb") as fp:
                    try:
                        for t in tokenize.tokenize(fp.readline):
                            self.tokens.append(t)
                    except tokenize.TokenError as e:
                        self.token_error = e
                    except IndentationError as e:
                        self.indentation_error = e

                if not self.tokens or self.tokens[-1].type not in [
                    token.ERRORTOKEN,
                    token.ENDMARKER,
                ]:
                    self.tokens.append(tokenize.TokenInfo(token.ERRORTOKEN, "", None, None, ""))
            else:
                self.tokens = []

            unbalanced = self._sug_unbalanced_parens()
            if unbalanced:
                self.intro_text = (
                    "Unbalanced parentheses, brackets or braces:\n\n" + unbalanced.body
                )
                self.intro_confidence = 5
            else:
                self.intro_text = "Python doesn't know how to read your program."

                if "^" in str(self.error_info):
                    self.intro_text += (
                        "\n\nSmall `^` in the original error message shows where it gave up,"
                        + " but the actual mistake can be before this."
                    )

                self.suggestions = [self._sug_missing_or_misplaced_colon()] 
Developer: thonny, Project: thonny, Lines of code: 57, Source file: stdlib_error_helpers.py

Example 10: _parse

# Required import: import token [as alias]
# Or: from token import ENDMARKER [as alias]
def _parse(self):
        tok = self.current

        if tok.exact_type in (token.ENDMARKER, token.COMMA):
            return  # TODO

        if tok.exact_type == token.STRING:
            self.next()
            return AstLiteral(tok.string[1:-1])

        if tok.exact_type == token.NUMBER:
            self.next()
            try:
                value = int(tok.string)
            except ValueError:
                value = float(tok.string)
            return AstLiteral(value)

        if tok.exact_type == token.NAME:
            state = AstContext(tok.string)

            while True:
                tok = self.next()

                if tok.exact_type == token.DOT:
                    tok = self.next()
                    if tok.exact_type != token.NAME:
                        raise SyntaxError(f"Invalid attr lookup: {tok}")
                    state = AstAttr(state, tok.string)

                elif tok.exact_type == token.LSQB:
                    self.next()
                    right = self._parse()
                    state = AstLookup(state, right)
                    if self.current.exact_type != token.RSQB:
                        raise SyntaxError(f"Expected ] but found {self.current}")

                elif tok.exact_type == token.LPAR:
                    state = AstCall(state)
                    self.next()
                    while self.current.exact_type != token.RPAR:
                        arg = self._parse()
                        state.add_arg(arg)
                        if self.current.exact_type != token.COMMA:
                            break
                        self.next()

                    if self.current.exact_type != token.RPAR:
                        raise SyntaxError(f"Expected ( but found {self.current}")

                    self.next()

                else:
                    break

            return state

        raise SyntaxError(
            f"Error parsing expression {tok.line !r}: Unexpected token {tok.string!r} at position {tok.start[0]}."
        ) 
Developer: funkybob, Project: stencil, Lines of code: 62, Source file: stencil.py


Note: The token.ENDMARKER attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.