

Python tokenize.tok_name Code Examples

This article collects typical usage examples of tokenize.tok_name in Python (strictly speaking, tok_name is a dictionary attribute of the tokenize module rather than a method: it maps integer token type codes to their names). If you are wondering how tokenize.tok_name is used in practice, or just want to see concrete examples, the hand-picked code samples below may help. You can also explore further usage examples of the tokenize module itself.


A total of 13 code examples of tokenize.tok_name are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
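Before diving into the examples, the following minimal sketch (not taken from any of the projects below) shows the basic code-to-name lookup on a one-line source string:

import io
import tokenize

# tok_name maps integer token type codes (tokenize.NAME, tokenize.OP, ...)
# to human-readable names such as 'NAME' and 'OP'.
source = "x = 1 + 2\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))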

Example 1: handle_token

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def handle_token(self, *args):
        # dispatch incoming tokens to the current handler
        if DEBUG > 1:
            print self.handler.im_func.func_name, self.indent,
            print tokenize.tok_name[args[0]], repr(args[1])
        if args[0] == tokenize.DEDENT:
            self.indent = self.indent - 1
            while self.scope and self.scope[-1][0] >= self.indent:
                del self.scope[-1]
                del self.stack[-1]
        self.handler = apply(self.handler, args)
        if args[0] == tokenize.INDENT:
            self.indent = self.indent + 1

    ##
    # (Token handler) Scans for encoding directive. 
Developer: alexfeng, Project: InternationalizationScript-iOS, Lines: 18, Source file: pythondoc.py

Example 2: visit

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def visit(self, token: tokenize.TokenInfo) -> None:
        """
        Runs custom defined handlers in a visitor for each specific token type.

        Uses ``.exact_type`` property to fetch the token name.
        So, you have to be extra careful with tokens
        like ``->`` and other operators,
        since they might resolve in just ``OP`` name.

        Does nothing if handler for any token type is not defined.

        Inspired by ``NodeVisitor`` class.

        See also:
            https://docs.python.org/3/library/tokenize.html

        """
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines: 23, Source file: base.py
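As a usage illustration (not part of wemake-python-styleguide), the sketch below subclasses the same dispatch idea: the class name PrintingVisitor and the visit_name/visit_number handlers are hypothetical, chosen only to show how the lower-cased token names map onto handler methods.

import io
import tokenize

class PrintingVisitor:
    """Illustrative visitor; handler names follow the visit_<token_name> scheme."""

    def visit(self, token: tokenize.TokenInfo) -> None:
        # Same dispatch as above: the lower-cased exact token name
        # selects an optional visit_<name> handler.
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token)

    def visit_name(self, token: tokenize.TokenInfo) -> None:
        print('identifier:', token.string)

    def visit_number(self, token: tokenize.TokenInfo) -> None:
        print('number:', token.string)

visitor = PrintingVisitor()
for tok in tokenize.generate_tokens(io.StringIO('answer = 42\n').readline):
    visitor.visit(tok)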

Example 3: log_token

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def log_token(log, token):
    """Log a token to a provided logging object."""
    if token[2][0] == token[3][0]:
        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
    else:
        pos = 'l.%s' % token[3][0]
    log.log(flake8._EXTRA_VERBOSE, 'l.%s\t%s\t%s\t%r' %
            (token[2][0], pos, tokenize.tok_name[token[0]],
                token[1]))


# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle 
Developer: AtomLinter, Project: linter-pylama, Lines: 15, Source file: processor.py
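For reference, token[0] is the type code, token[1] the text, and token[2]/token[3] the (row, col) start/end pairs used above. Below is a hedged, standalone sketch of the same logging format, substituting the standard logging.DEBUG level for flake8's internal _EXTRA_VERBOSE:

import io
import logging
import tokenize

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('token-demo')

for token in tokenize.generate_tokens(io.StringIO('a = b\n').readline):
    # token[2]/token[3] are (row, col) start/end tuples;
    # tok_name[token[0]] turns the type code into a readable name.
    if token[2][0] == token[3][0]:
        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
    else:
        pos = 'l.%s' % token[3][0]
    log.debug('l.%s\t%s\t%s\t%r', token[2][0], pos,
              tokenize.tok_name[token[0]], token[1])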

Example 4: __repr__

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def __repr__(self):
        return "tk.{}".format(tk.tok_name[self]) 
Developer: AtomLinter, Project: linter-pylama, Lines: 4, Source file: parser.py

Example 5: to_rbnf_token

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def to_rbnf_token(tk: tokenize.TokenInfo) -> Tokenizer:
    name = cast(tokenize.tok_name[tk.type])
    if name == 'NAME' and tk.string in kwlist:
        value = cast(tk.string)
        name = cast('KEYWORD')
    else:
        value = cast(tk.string) if name not in ('NAME', 'STRING', 'NUMBER') else tk.string
    return Tokenizer(name, value, *tk.start) 
Developer: Xython, Project: YAPyPy, Lines: 10, Source file: parser.py
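The NAME/kwlist check above works because Python's tokenizer reports keywords as ordinary NAME tokens. Here is a standalone sketch of the same reclassification idea, without YAPyPy's cast and Tokenizer helpers:

import io
import tokenize
from keyword import kwlist

def classify(tok: tokenize.TokenInfo) -> str:
    # Keywords arrive as NAME tokens; relabel them using keyword.kwlist.
    name = tokenize.tok_name[tok.type]
    if name == 'NAME' and tok.string in kwlist:
        return 'KEYWORD'
    return name

for tok in tokenize.generate_tokens(io.StringIO('if x:\n    pass\n').readline):
    print(classify(tok), repr(tok.string))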

Example 6: _check_opendaylight_lowercase

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def _check_opendaylight_lowercase(logical_line, tokens, noqa, token_type):
    """ND01 - Enforce using OpenDaylight in given token."""
    if noqa:
        return

    for _token_type, text, start_index, _, _ in tokens:
        if _token_type == token_type:
            pos = text.find(_ND01_OPENDAYLIGHT)
            if pos >= 0:
                msg = "{} in {}".format(
                    _ND01_MSG, tokenize.tok_name[token_type].lower())
                yield (start_index[1] + pos, msg) 
Developer: openstack, Project: networking-odl, Lines: 14, Source file: checks.py

Example 7: getLineOfTokens

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def getLineOfTokens(gen):
    tokens = []
    nextNeg = 0
    token = gen.next()
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL:
        if token[0] == tokenize.COMMENT:
            pass
        elif token[0] == tokenize.OP and token[1] == '-':
            nextNeg = 1
        elif token[0] == tokenize.NUMBER:
            if nextNeg:
                tokens.append(-eval(token[1]))
                nextNeg = 0
            else:
                tokens.append(eval(token[1]))
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        else:
            notify.warning('Ignored token type: %s on line: %s' % (tokenize.tok_name[token[0]], token[2][0]))
        token = gen.next()

    return tokens 
Developer: PiratesOnlineRewritten, Project: Pirates-Online-Rewritten, Lines: 28, Source file: QuestParser.py
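Note that this excerpt is Python 2 code: gen.next() became next(gen) in Python 3, and notify is a logger object from the surrounding project. A simplified Python 3 sketch of the same idea, collecting token strings up to the end of the current line (without the negative-number and eval handling of the original), might look like this:

import io
import tokenize

def line_of_tokens(gen):
    """Collect token strings until the current logical line ends."""
    tokens = []
    token = next(gen)  # Python 3 spelling of gen.next()
    if token.type == tokenize.ENDMARKER:
        return None
    while token.type not in (tokenize.NEWLINE, tokenize.NL):
        if token.type != tokenize.COMMENT:
            tokens.append(token.string)
        token = next(gen)
    return tokens

gen = tokenize.generate_tokens(io.StringIO('spam 1 2\n').readline)
print(line_of_tokens(gen))  # -> ['spam', '1', '2']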

Example 8: _error

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def _error(tok, msg):
    raise SyntaxError("%s token(%s, '%s') at %s = from/to [row, col]"
                      % (msg, tokenize.tok_name[tok[0]],
                         tok[1], str(tok[2:4]))) 
Developer: ActiveState, Project: code, Lines: 6, Source file: recipe-511473.py

Example 9: next_of_type

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def next_of_type(self, token_type):
    """Parse a token of the given type and return it."""
    token = self.next()
    if token.type != token_type:
      raise ValueError("Expected %r but found %r\nline %d: %s" % (
          tokenize.tok_name[token_type], token.src, token.start[0],
          self.lines[token.start[0] - 1]))
    return token 
Developer: google, Project: pasta, Lines: 10, Source file: token_generator.py

Example 10: check_all

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.previous_unindented_logical_line = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results() 
Developer: AtomLinter, Project: linter-pylama, Lines: 54, Source file: pycodestyle.py

Example 11: check_all

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.previous_unindented_logical_line = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results() 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 46, Source file: pycodestyle.py

Example 12: check_all

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results() 
Developer: jmwright, Project: cadquery-freecad-module, Lines: 53, Source file: pep8.py

Example 13: check_all

# Required module: import tokenize [as alias]
# Or: from tokenize import tok_name [as alias]
def check_all(self, expected=None, line_offset=0):
        """
        Run all checks on the input file.
        """
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = 0
        self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = blank_lines_before_comment = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type == tokenize.NEWLINE:
                    if self.blank_lines < blank_lines_before_comment:
                        self.blank_lines = blank_lines_before_comment
                    self.check_logical()
                    self.blank_lines = blank_lines_before_comment = 0
                elif token_type == tokenize.NL:
                    if len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
                    if blank_lines_before_comment < self.blank_lines:
                        blank_lines_before_comment = self.blank_lines
                    self.blank_lines = 0
                    if COMMENT_WITH_NL:
                        # The comment also ends a physical line
                        text = text.rstrip('\r\n')
                        self.tokens = [(token_type, text) + token[2:]]
                        self.check_logical()
        return self.report.get_file_results() 
Developer: zchengquan, Project: TextDetector, Lines: 55, Source file: pep8.py


Note: The tokenize.tok_name examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Please do not reproduce without permission.