

Python tokenize.tok_name code examples

This article collects typical usage examples of tokenize.tok_name in Python. Strictly speaking, tok_name is not a method but a dictionary mapping integer token types to their printable names. If you have been wondering what tokenize.tok_name does, how to use it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the tokenize module it belongs to.


Below are 13 code examples of tokenize.tok_name, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
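Before diving into the examples, here is a minimal sketch (standard library only) of what tok_name actually is: a plain dict exposed by the tokenize and token modules that maps integer token codes to their names.

import tokenize

print(tokenize.tok_name[tokenize.NAME])    # 'NAME'
print(tokenize.tok_name[tokenize.NUMBER])  # 'NUMBER'
print(tokenize.tok_name[tokenize.OP])      # 'OP'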

Example 1: handle_token

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def handle_token(self, *args):
        # dispatch incoming tokens to the current handler
        if DEBUG > 1:
            print(self.handler.__func__.__name__, self.indent,
                  tokenize.tok_name[args[0]], repr(args[1]))
        if args[0] == tokenize.DEDENT:
            self.indent = self.indent - 1
            while self.scope and self.scope[-1][0] >= self.indent:
                del self.scope[-1]
                del self.stack[-1]
        # each handler returns the handler for the next token (a state machine)
        self.handler = self.handler(*args)
        if args[0] == tokenize.INDENT:
            self.indent = self.indent + 1

    ##
    # (Token handler) Scans for encoding directive. 
Author: alexfeng, Project: InternationalizationScript-iOS, Lines: 18, Source: pythondoc.py
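For reference, a minimal standalone sketch of the INDENT/DEDENT bookkeeping this handler reacts to, printing each token's name via tok_name:

import io
import tokenize

SRC = "if x:\n    y = 1\n"
for tok in tokenize.generate_tokens(io.StringIO(SRC).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# the indented block produces one INDENT and one matching DEDENT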

Example 2: visit

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def visit(self, token: tokenize.TokenInfo) -> None:
        """
        Runs custom defined handlers in a visitor for each specific token type.

        Uses ``.exact_type`` property to fetch the token name.
        So, you have to be extra careful with tokens
        like ``->`` and other operators,
        since they might resolve in just ``OP`` name.

        Does nothing if handler for any token type is not defined.

        Inspired by ``NodeVisitor`` class.

        See also:
            https://docs.python.org/3/library/tokenize.html

        """
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 23, Source: base.py
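A quick standalone check of the .type vs .exact_type distinction the docstring warns about: the arrow operator reports the generic OP type but the exact RARROW type.

import io
import tokenize

toks = tokenize.generate_tokens(io.StringIO("def f() -> int: ...").readline)
arrow = next(t for t in toks if t.string == "->")
print(tokenize.tok_name[arrow.type])        # 'OP'
print(tokenize.tok_name[arrow.exact_type])  # 'RARROW'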

Example 3: log_token

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def log_token(log, token):
    """Log a token to a provided logging object."""
    if token[2][0] == token[3][0]:
        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
    else:
        pos = 'l.%s' % token[3][0]
    log.log(flake8._EXTRA_VERBOSE, 'l.%s\t%s\t%s\t%r' %
            (token[2][0], pos, tokenize.tok_name[token[0]],
                token[1]))


# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle 
Author: AtomLinter, Project: linter-pylama, Lines: 15, Source: processor.py
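A hedged usage sketch: log_token relies on flake8._EXTRA_VERBOSE, a custom very-low log level, so flake8 must be installed and the logger configured to let such records through.

import io
import logging
import tokenize

logging.basicConfig(level=1)  # low enough for flake8's extra-verbose level
log = logging.getLogger("demo")
for tok in tokenize.generate_tokens(io.StringIO("a = 1\n").readline):
    log_token(log, tok)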

Example 4: __repr__

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def __repr__(self):
        return "tk.{}".format(tk.tok_name[self]) 
Author: AtomLinter, Project: linter-pylama, Lines: 4, Source: parser.py
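Here self is used directly as the dictionary key, which only works because the enclosing class (presumably an int subclass wrapping a token code) makes the instance itself a valid index into tk.tok_name.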

Example 5: to_rbnf_token

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def to_rbnf_token(tk: tokenize.TokenInfo) -> Tokenizer:
    # cast and Tokenizer come from the project's own modules;
    # kwlist is keyword.kwlist
    name = cast(tokenize.tok_name[tk.type])
    if name == 'NAME' and tk.string in kwlist:
        value = cast(tk.string)
        name = cast('KEYWORD')
    else:
        value = cast(tk.string) if name not in ('NAME', 'STRING', 'NUMBER') else tk.string
    return Tokenizer(name, value, *tk.start) 
Author: Xython, Project: YAPyPy, Lines: 10, Source: parser.py
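The same NAME-to-KEYWORD reclassification without the project's cast/Tokenizer machinery, as a minimal standard-library sketch:

import io
import keyword
import tokenize

for tok in tokenize.generate_tokens(io.StringIO("for x in y: pass").readline):
    name = tokenize.tok_name[tok.type]
    if name == "NAME" and tok.string in keyword.kwlist:
        name = "KEYWORD"
    print(name, tok.string)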

Example 6: _check_opendaylight_lowercase

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def _check_opendaylight_lowercase(logical_line, tokens, noqa, token_type):
    """ND01 - Enforce using OpenDaylight in given token."""
    if noqa:
        return

    for _token_type, text, start_index, _, _ in tokens:
        if _token_type == token_type:
            pos = text.find(_ND01_OPENDAYLIGHT)
            if pos >= 0:
                msg = "{} in {}".format(
                    _ND01_MSG, tokenize.tok_name[token_type].lower())
                yield (start_index[1] + pos, msg) 
Author: openstack, Project: networking-odl, Lines: 14, Source: checks.py
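A hedged usage sketch; the _ND01_* constants are defined elsewhere in checks.py, so the values below are assumptions for illustration only.

import io
import tokenize

_ND01_OPENDAYLIGHT = "opendaylight"            # assumed value
_ND01_MSG = "ND01 use 'OpenDaylight' instead"  # assumed value

line = 'x = "opendaylight"\n'
tokens = list(tokenize.generate_tokens(io.StringIO(line).readline))
for offset, msg in _check_opendaylight_lowercase(
        line, tokens, noqa=False, token_type=tokenize.STRING):
    print(offset, msg)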

Example 7: getLineOfTokens

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def getLineOfTokens(gen):
    tokens = []
    nextNeg = 0
    token = next(gen)
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL:
        if token[0] == tokenize.COMMENT:
            pass
        elif token[0] == tokenize.OP and token[1] == '-':
            nextNeg = 1
        elif token[0] == tokenize.NUMBER:
            if nextNeg:
                tokens.append(-eval(token[1]))
                nextNeg = 0
            else:
                tokens.append(eval(token[1]))
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        else:
            # notify: the project's logger object, defined elsewhere
            notify.warning('Ignored token type: %s on line: %s' % (tokenize.tok_name[token[0]], token[2][0]))
        token = next(gen)

    return tokens 
Author: PiratesOnlineRewritten, Project: Pirates-Online-Rewritten, Lines: 28, Source: QuestParser.py
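A minimal usage sketch: feed the function a generate_tokens generator and it returns one line's worth of evaluated tokens (notify is only hit for unrecognized token types).

import io
import tokenize

gen = tokenize.generate_tokens(io.StringIO("setHealth 100 'ok'\n").readline)
print(getLineOfTokens(gen))  # ['setHealth', 100, 'ok']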

Example 8: _error

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def _error(tok, msg):
    raise SyntaxError("%s token(%s, '%s') at %s = from/to [row, col]"
                      % (msg, tokenize.tok_name[tok[0]],
                         tok[1], str(tok[2:4]))) 
Author: ActiveState, Project: code, Lines: 6, Source: recipe-511473.py
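Calling it on the first token of a snippet shows the message format:

import io
import tokenize

tok = next(tokenize.generate_tokens(io.StringIO("pass\n").readline))
try:
    _error(tok, "unexpected")
except SyntaxError as exc:
    print(exc)  # unexpected token(NAME, 'pass') at ((1, 0), (1, 4)) = from/to [row, col]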

Example 9: next_of_type

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def next_of_type(self, token_type):
    """Parse a token of the given type and return it."""
    token = self.next()
    if token.type != token_type:
      raise ValueError("Expected %r but found %r\nline %d: %s" % (
          tokenize.tok_name[token_type], token.src, token.start[0],
          self.lines[token.start[0] - 1]))
    return token 
Author: google, Project: pasta, Lines: 10, Source: token_generator.py
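Here self.next() and self.lines belong to pasta's token-generator class; the tok_name lookup simply turns the expected token code into a readable name for the error message.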

Example 10: check_all

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.previous_unindented_logical_line = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results() 
Author: AtomLinter, Project: linter-pylama, Lines: 54, Source: pycodestyle.py
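In pycodestyle this method is normally reached through the Checker front end; a hedged usage sketch (a verbose level of 3 or more activates the tok_name trace above; the filename is a placeholder):

import pycodestyle

checker = pycodestyle.Checker("some_file.py", verbose=3)  # placeholder filename
checker.check_all()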

Example 11: check_all

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.previous_unindented_logical_line = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results() 
Author: sofia-netsurv, Project: python-netsurv, Lines: 46, Source: pycodestyle.py

Example 12: check_all

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results() 
Author: jmwright, Project: cadquery-freecad-module, Lines: 53, Source: pep8.py

Example 13: check_all

# Required import: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """
        Run all checks on the input file.
        """
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = 0
        self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = blank_lines_before_comment = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type == tokenize.NEWLINE:
                    if self.blank_lines < blank_lines_before_comment:
                        self.blank_lines = blank_lines_before_comment
                    self.check_logical()
                    self.blank_lines = blank_lines_before_comment = 0
                elif token_type == tokenize.NL:
                    if len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
                    if blank_lines_before_comment < self.blank_lines:
                        blank_lines_before_comment = self.blank_lines
                    self.blank_lines = 0
                    if COMMENT_WITH_NL:
                        # The comment also ends a physical line
                        text = text.rstrip('\r\n')
                        self.tokens = [(token_type, text) + token[2:]]
                        self.check_logical()
        return self.report.get_file_results() 
Author: zchengquan, Project: TextDetector, Lines: 55, Source: pep8.py


Note: The tokenize.tok_name examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code, and do not repost without permission.