

Python tokenize.TokenInfo Code Examples

This article collects typical usage examples of tokenize.TokenInfo, the named tuple that Python's tokenize module yields for each token. If you are wondering what tokenize.TokenInfo is, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tokenize module, where it is defined.


Below are 15 code examples of tokenize.TokenInfo, sorted by popularity by default. You can upvote the examples you find useful; your votes help the site recommend better Python code examples.
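
For orientation before the examples: TokenInfo is a named tuple with the fields type, string, start, end and line. A minimal stdlib-only sketch that prints each token of a small snippet:

import io
import tokenize

# Tokenize a small snippet and inspect the resulting TokenInfo named tuples.
source = "x = 1 + 2\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # Each tok is a tokenize.TokenInfo(type, string, start, end, line).
    print(tokenize.tok_name[tok.exact_type], repr(tok.string), tok.start, tok.end)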

Example 1: __init__

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def __init__(self, model, lstm_layers, lstm_size):
        self.__model = model

        # Initial state
        self._h0 = torch.zeros((lstm_layers, 1, lstm_size), device=device)
        self._c0 = torch.zeros((lstm_layers, 1, lstm_size), device=device)

        # Last line of source code read
        self._last_line = ""

        self._tokens: List[tokenize.TokenInfo] = []

        # Last token, because we need to input that to the model for inference
        self._last_token = 0

        # Last bit of the input string
        self._untokenized = ""

        # For timing
        self.time_add = 0
        self.time_predict = 0
        self.time_check = 0 
Developer: vpj, Project: python_autocomplete, Lines of code: 24, Source file: evaluate.py

Example 2: __get_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def __get_tokens(it):
        tokens: List[tokenize.TokenInfo] = []

        try:
            for t in it:
                if t.type in tokenizer.SKIP_TOKENS:
                    continue
                if t.type == tokenize.NEWLINE and t.string == '':
                    continue
                if t.type == tokenize.DEDENT:
                    continue
                if t.type == tokenize.ERRORTOKEN:
                    continue
                tokens.append(t)
        except tokenize.TokenError as e:
            if not e.args[0].startswith('EOF in'):
                print(e)
        except IndentationError as e:
            print(e)

        return tokens 
Developer: vpj, Project: python_autocomplete, Lines of code: 23, Source file: evaluate.py
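
To see this kind of filtering in action outside the project, here is a stdlib-only sketch; SKIP_TOKENS below is an assumption standing in for tokenizer.SKIP_TOKENS in the original code:

import io
import tokenize

# Assumed stand-in for tokenizer.SKIP_TOKENS from the project.
SKIP_TOKENS = {tokenize.COMMENT, tokenize.NL, tokenize.ENDMARKER}

def get_tokens(source: str):
    tokens = []
    try:
        for t in tokenize.generate_tokens(io.StringIO(source).readline):
            if t.type in SKIP_TOKENS:
                continue
            if t.type in (tokenize.DEDENT, tokenize.ERRORTOKEN):
                continue
            tokens.append(t)
    except tokenize.TokenError as e:
        # Incomplete input ("EOF in multi-line statement") is tolerated silently.
        if not e.args[0].startswith('EOF in'):
            print(e)
    return tokens

print([tokenize.tok_name[t.type] for t in get_tokens("def f(x):\n    return x\n")])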

Example 3: convert_toplevel_docstring

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def convert_toplevel_docstring(tokens):
    for token in tokens:
        # For each string
        if token.type == tokenize.STRING:
            text = token.string
            # Must be a docstring
            if text.startswith('"""') or text.startswith("'''"):
                startline, startcol = token.start
                # Starting column MUST be 0
                if startcol == 0:
                    endline, endcol = token.end
                    lines = ['# ' + line
                             for line in text.strip('"\' \n').split('\n')]
                    text = '\n'.join(lines)
                    fmt = '# <markdowncell>\n{0}\n# <codecell>'.format(text)
                    yield TokenInfo(type=tokenize.COMMENT,
                                    start=(startline, startcol),
                                    end=(endline, endcol),
                                    string=fmt,
                                    line='#')
                    # To next token
                    continue
        # Return untouched
        yield token 
Developer: sklam, Project: py2nb, Lines of code: 26, Source file: reader.py
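
A rough driver sketch, assuming the function above (and its from tokenize import TokenInfo import) is in scope, showing how a module-level docstring is rewritten into a comment token:

import io
import tokenize

source = '"""Hello\nworld."""\nx = 1\n'
converted = convert_toplevel_docstring(
    tokenize.generate_tokens(io.StringIO(source).readline))
for tok in converted:
    print(tokenize.tok_name[tok.type], repr(tok.string))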

Example 4: fix_newlines

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def fix_newlines(tokens):
    first = True
    curline = 1
    for token in tokens:
        if first:
            first = False
            curline = token.end[0] + 1
        else:
            # Fill NEWLINE token in between
            while curline < token.start[0]:
                yield TokenInfo(type=tokenize.NEWLINE,
                                string='\n',
                                start=(curline, 0),
                                end=(curline, 0),
                                line='\n', )
                curline += 1

            curline = token.end[0] + 1
        yield token 
Developer: sklam, Project: py2nb, Lines of code: 21, Source file: reader.py
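
A small sketch of the gap-filling behaviour; dropping the NL tokens first is an assumption about how the surrounding py2nb pipeline feeds this generator:

import io
import tokenize

source = "a = 1\n\n\nb = 2\n"
toks = [t for t in tokenize.generate_tokens(io.StringIO(source).readline)
        if t.type != tokenize.NL]  # assume blank-line NL tokens were dropped upstream
filled = list(fix_newlines(toks))
print(sum(t.type == tokenize.NEWLINE for t in filled))  # blank lines are filled with NEWLINE tokens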

Example 5: __init__

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def __init__(
        self,
        tree: ast.AST,
        file_tokens: Sequence[tokenize.TokenInfo],
        filename: str = constants.STDIN,
    ) -> None:
        """
        Creates new checker instance.

        These parameter names should not be changed.
        ``flake8`` has special API that passes concrete parameters to
        the plugins that ask for them.

        ``flake8`` also decides how to execute this plugin
        based on its parameters. This one is executed once per module.

        Arguments:
            tree: ``ast`` tree parsed by ``flake8``.
            file_tokens: ``tokenize.tokenize`` parsed file tokens.
            filename: module file name, might be empty if piping is used.

        """
        self.tree = transform(tree)
        self.filename = filename
        self.file_tokens = file_tokens 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 27, Source file: checker.py
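
The docstring says flake8 supplies these arguments by name; for a rough idea of what they contain, here is a stdlib-only sketch of equivalent inputs (the commented-out constructor call is illustrative only, and the class name Checker is assumed):

import ast
import io
import tokenize

source = "x = 1\n"
tree = ast.parse(source)  # what flake8 passes as ``tree``
file_tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))  # ``file_tokens``
# checker = Checker(tree=tree, file_tokens=file_tokens, filename='example.py')  # hypothetical call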

Example 6: visit

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def visit(self, token: tokenize.TokenInfo) -> None:
        """
        Runs custom defined handlers in a visitor for each specific token type.

        Uses ``.exact_type`` property to fetch the token name.
        So, you have to be extra careful with tokens
        like ``->`` and other operators,
        since they might resolve to just the ``OP`` name.

        Does nothing if no handler is defined for the token type.

        Inspired by ``NodeVisitor`` class.

        See also:
            https://docs.python.org/3/library/tokenize.html

        """
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 23, Source file: base.py
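
The dispatch idea can be reproduced with the stdlib alone; the class and handler below are hypothetical and not part of the plugin:

import io
import tokenize

class CommaCounter:
    """Dispatch on tokenize.tok_name[token.exact_type], like visit() above."""

    def __init__(self):
        self.commas = 0

    def visit(self, token):
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token)

    def visit_comma(self, token):
        self.commas += 1

counter = CommaCounter()
for tok in tokenize.generate_tokens(io.StringIO("f(1, 2, 3)\n").readline):
    counter.visit(tok)
print(counter.commas)  # 2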

Example 7: _check_executable_mismatch

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_executable_mismatch(
        self,
        token: tokenize.TokenInfo,
        *,
        is_shebang: bool,
    ) -> None:
        if is_windows() or self.filename == STDIN:
            # Windows does not have this concept of "executable" file.
            # The same for STDIN inputs.
            return

        is_executable = is_executable_file(self.filename)
        if is_executable and not is_shebang:
            self.add_violation(
                ShebangViolation(
                    text='file is executable but no shebang is present',
                ),
            )
        elif not is_executable and is_shebang:
            self.add_violation(
                ShebangViolation(
                    text='shebang is present but the file is not executable',
                ),
            ) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 26, Source file: comments.py
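
is_executable_file is a project helper; a plausible stdlib approximation, shown only as an assumption rather than the styleguide's actual implementation:

import os

def is_executable_file(filename: str) -> bool:
    # True when the current user has execute permission on the file.
    return os.access(filename, os.X_OK)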

Example 8: _check_valid_shebang

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_valid_shebang(self, token: tokenize.TokenInfo) -> None:
        if self._python_executable not in token.line:
            self.add_violation(
                ShebangViolation(
                    text='shebang is present but does not contain `python`',
                ),
            )

        if token.start[1] != 0:
            self.add_violation(
                ShebangViolation(
                    text='there is a whitespace before shebang',
                ),
            )

        if token.start[0] != 1:
            self.add_violation(
                ShebangViolation(
                    text='there are blank or comment lines before shebang',
                ),
            ) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 23, Source file: comments.py
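
For context, a shebang reaches these checks as a COMMENT token on line 1, column 0; a quick stdlib demonstration:

import io
import tokenize

src = "#!/usr/bin/env python\nprint('hi')\n"
first = next(tokenize.generate_tokens(io.StringIO(src).readline))
print(tokenize.tok_name[first.type], first.start, repr(first.line))
# prints: COMMENT (1, 0) '#!/usr/bin/env python\n'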

Example 9: visit_number

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def visit_number(self, token: tokenize.TokenInfo) -> None:
        """
        Checks number declarations.

        Raises:
            UnderscoredNumberViolation
            PartialFloatViolation
            BadNumberSuffixViolation
            BadComplexNumberSuffixViolation
            NumberWithMeaninglessZeroViolation
            PositiveExponentViolation
            FloatZeroViolation

        Regressions:
        https://github.com/wemake-services/wemake-python-styleguide/issues/557

        """
        self._check_complex_suffix(token)
        self._check_underscored_number(token)
        self._check_partial_float(token)
        self._check_bad_number_suffixes(token)
        self._check_float_zeros(token) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 24, Source file: primitives.py
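
For reference, the handler receives NUMBER tokens such as these (stdlib demo; the literals are arbitrary):

import io
import tokenize

for tok in tokenize.generate_tokens(io.StringIO("x = 1_000 + 10e+3 + 0j\n").readline):
    if tok.type == tokenize.NUMBER:
        print(repr(tok.string), tok.start)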

Example 10: visit_string

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def visit_string(self, token: tokenize.TokenInfo) -> None:
        """
        Finds incorrect string usages.

        ``u`` can only appear as the sole prefix:
        you cannot combine it with ``r``, ``b``, or ``f``,
        since that raises a ``SyntaxError`` while parsing.

        Raises:
            UnicodeStringViolation
            WrongMultilineStringViolation
            ImplicitRawStringViolation
            WrongUnicodeEscapeViolation

        """
        self._check_correct_multiline(token)
        self._check_string_modifiers(token)
        self._check_implicit_raw_string(token)
        self._check_wrong_unicode_escape(token) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 21, Source file: primitives.py

Example 11: _check_string_modifiers

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_string_modifiers(self, token: tokenize.TokenInfo) -> None:
        modifiers, _ = split_prefixes(token.string)

        if 'u' in modifiers.lower():
            self.add_violation(
                consistency.UnicodeStringViolation(token, text=token.string),
            )

        for mod in modifiers:
            if mod in self._bad_string_modifiers:
                self.add_violation(
                    consistency.UppercaseStringModifierViolation(
                        token,
                        text=mod,
                    ),
                ) 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 18, Source file: primitives.py
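
split_prefixes comes from the project's helper module; a rough approximation of what it presumably returns for a string token, included only as an assumption to make the example self-explanatory:

def split_prefixes(string: str):
    # Everything before the first quote character is the prefix block.
    index = min(i for i, char in enumerate(string) if char in '\'"')
    return string[:index], string[index:]

print(split_prefixes("Rb'data'"))  # ('Rb', "'data'")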

Example 12: _check_wrong_unicode_escape

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_wrong_unicode_escape(self, token: tokenize.TokenInfo) -> None:
        # See: http://docs.python.org/reference/lexical_analysis.html
        modifiers, string_body = split_prefixes(token.string)

        index = 0
        while True:
            index = string_body.find('\\', index)
            if index == -1:
                break

            next_char = string_body[index + 1]
            if 'b' in modifiers.lower() and next_char in self._unicode_escapes:
                self.add_violation(
                    WrongUnicodeEscapeViolation(token, text=token.string),
                )

            # Whether it was a valid escape or not, backslash followed by
            # another character can always be consumed whole: the second
            # character can never be the start of a new backslash escape.
            index += 2 
Developer: wemake-services, Project: wemake-python-styleguide, Lines of code: 22, Source file: primitives.py

Example 13: is_trailing_comma

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False
    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0
    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False 
Developer: AtomLinter, Project: linter-pylama, Lines of code: 40, Source file: refactoring.py
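
A quick check of the behaviour (assuming is_trailing_comma and its itertools import are in scope): only a comma that ends a complete expression on its own line counts, while commas inside a bracketed, multi-line collection are followed by NL rather than NEWLINE tokens and are rejected:

import io
import tokenize

src = "x = 1,\nfoo = [\n    1,\n    2,\n]\n"
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
commas = [i for i, t in enumerate(tokens) if t.exact_type == tokenize.COMMA]
print([is_trailing_comma(tokens, i) for i in commas])  # [True, False, False]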

Example 14: to_rbnf_token

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def to_rbnf_token(tk: tokenize.TokenInfo) -> Tokenizer:
    name = cast(tokenize.tok_name[tk.type])
    if name == 'NAME' and tk.string in kwlist:
        value = cast(tk.string)
        name = cast('KEYWORD')
    else:
        value = cast(tk.string) if name not in ('NAME', 'STRING', 'NUMBER') else tk.string
    return Tokenizer(name, value, *tk.start) 
Developer: Xython, Project: YAPyPy, Lines of code: 10, Source file: parser.py

Example 15: not_to_ignore

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def not_to_ignore(tk: tokenize.TokenInfo) -> bool:
    return tk.type not in tokens_to_ignore 
Developer: Xython, Project: YAPyPy, Lines of code: 4, Source file: parser.py
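
A self-contained usage sketch; the tokens_to_ignore set is an assumption standing in for the one defined elsewhere in YAPyPy's parser.py:

import io
import tokenize

tokens_to_ignore = {tokenize.COMMENT, tokenize.NL}  # assumed stand-in for the project's set

def not_to_ignore(tk: tokenize.TokenInfo) -> bool:
    return tk.type not in tokens_to_ignore

kept = [t for t in tokenize.generate_tokens(io.StringIO("x = 1  # hi\n").readline)
        if not_to_ignore(t)]
print([tokenize.tok_name[t.type] for t in kept])  # ['NAME', 'OP', 'NUMBER', 'NEWLINE', 'ENDMARKER']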


Note: The tokenize.TokenInfo examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.