

Python tokenize.TokenInfo Code Examples

This article collects typical usage examples of tokenize.TokenInfo in Python (the named tuple that the tokenize module yields for each token). If you are wondering what tokenize.TokenInfo is used for, how to use it, or want concrete examples, the curated code samples below may help. You can also explore further usage examples from the tokenize module.


The following presents 15 code examples of tokenize.TokenInfo, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
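
Before the examples, here is a minimal self-contained sketch of where tokenize.TokenInfo values come from: tokenize.generate_tokens takes a readline callable and yields one TokenInfo named tuple per token. The sample source string is made up for illustration.

import io
import tokenize

source = "x = 1  # a comment\n"

# generate_tokens takes a readline callable and yields TokenInfo named tuples
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # Each TokenInfo has the fields: type, string, start, end, line
    print(tokenize.tok_name[tok.type], repr(tok.string), tok.start, tok.end)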

Example 1: __init__

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def __init__(self, model, lstm_layers, lstm_size):
        self.__model = model

        # Initial state
        self._h0 = torch.zeros((lstm_layers, 1, lstm_size), device=device)
        self._c0 = torch.zeros((lstm_layers, 1, lstm_size), device=device)

        # Last line of source code read
        self._last_line = ""

        self._tokens: List[tokenize.TokenInfo] = []

        # Last token, because we need to input that to the model for inference
        self._last_token = 0

        # Last bit of the input string
        self._untokenized = ""

        # For timing
        self.time_add = 0
        self.time_predict = 0
        self.time_check = 0 
Author: vpj, Project: python_autocomplete, Lines: 24, Source: evaluate.py

Example 2: __get_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def __get_tokens(it):
        tokens: List[tokenize.TokenInfo] = []

        try:
            for t in it:
                if t.type in tokenizer.SKIP_TOKENS:
                    continue
                if t.type == tokenize.NEWLINE and t.string == '':
                    continue
                if t.type == tokenize.DEDENT:
                    continue
                if t.type == tokenize.ERRORTOKEN:
                    continue
                tokens.append(t)
        except tokenize.TokenError as e:
            if not e.args[0].startswith('EOF in'):
                print(e)
        except IndentationError as e:
            print(e)

        return tokens 
Author: vpj, Project: python_autocomplete, Lines: 23, Source: evaluate.py
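
A short usage sketch for __get_tokens above, assuming the function is available at module level. tokenizer.SKIP_TOKENS is a constant from the original project, so a hypothetical stand-in is defined here.

import io
import tokenize
import types

# Hypothetical stand-in for the project's tokenizer.SKIP_TOKENS constant.
tokenizer = types.SimpleNamespace(SKIP_TOKENS={tokenize.COMMENT, tokenize.NL})

# An unfinished editor buffer still tokenizes: the "EOF in ..." TokenError
# caused by the open bracket is swallowed inside __get_tokens.
buffer = "result = my_func(1,\n"
tokens = __get_tokens(tokenize.generate_tokens(io.StringIO(buffer).readline))
print([tokenize.tok_name[t.type] for t in tokens])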

Example 3: convert_toplevel_docstring

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def convert_toplevel_docstring(tokens):
    for token in tokens:
        # For each string
        if token.type == tokenize.STRING:
            text = token.string
            # Must be a docstring
            if text.startswith('"""') or text.startswith("'''"):
                startline, startcol = token.start
                # Starting column MUST be 0
                if startcol == 0:
                    endline, endcol = token.end
                    lines = ['# ' + line
                             for line in text.strip('"\' \n').split('\n')]
                    text = '\n'.join(lines)
                    fmt = '# <markdowncell>\n{0}\n# <codecell>'.format(text)
                    yield TokenInfo(type=tokenize.COMMENT,
                                    start=(startline, startcol),
                                    end=(endline, endcol),
                                    string=fmt,
                                    line='#')
                    # To next token
                    continue
        # Return untouched
        yield token 
Author: sklam, Project: py2nb, Lines: 26, Source: reader.py

Example 4: fix_newlines

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def fix_newlines(tokens):
    first = True
    curline = 1
    for token in tokens:
        if first:
            first = False
            curline = token.end[0] + 1
        else:
            # Fill NEWLINE token in between
            while curline < token.start[0]:
                yield TokenInfo(type=tokenize.NEWLINE,
                                string='\n',
                                start=(curline, 0),
                                end=(curline, 0),
                                line='\n', )
                curline += 1

            curline = token.end[0] + 1
        yield token 
Author: sklam, Project: py2nb, Lines: 21, Source: reader.py
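
A hedged sketch of how the two generators from Examples 3 and 4 could be chained over a tokenized file; the file name is a placeholder and the wiring is illustrative, not necessarily how py2nb assembles them.

import tokenize

with open('script.py') as fobj:  # placeholder path
    tokens = list(tokenize.generate_tokens(fobj.readline))

# Rewrite top-level docstrings into <markdowncell> comments, then fill the
# missing NEWLINE tokens so the stream can be turned back into source text.
converted = fix_newlines(convert_toplevel_docstring(tokens))
new_source = tokenize.untokenize(converted)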

Example 5: __init__

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def __init__(
        self,
        tree: ast.AST,
        file_tokens: Sequence[tokenize.TokenInfo],
        filename: str = constants.STDIN,
    ) -> None:
        """
        Creates a new checker instance.

        These parameter names should not be changed.
        ``flake8`` has a special API that passes concrete parameters to
        the plugins that ask for them.

        ``flake8`` also decides how to execute this plugin
        based on its parameters. This one is executed once per module.

        Arguments:
            tree: ``ast`` tree parsed by ``flake8``.
            file_tokens: ``tokenize.tokenize`` parsed file tokens.
            filename: module file name, might be empty if piping is used.

        """
        self.tree = transform(tree)
        self.filename = filename
        self.file_tokens = file_tokens 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 27, Source: checker.py

Example 6: visit

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def visit(self, token: tokenize.TokenInfo) -> None:
        """
        Runs custom defined handlers in a visitor for each specific token type.

        Uses the ``.exact_type`` property to fetch the token name.
        So, you have to be extra careful with tokens
        like ``->`` and other operators,
        since they might resolve to just the ``OP`` name.

        Does nothing if no handler is defined for the given token type.

        Inspired by ``NodeVisitor`` class.

        See also:
            https://docs.python.org/3/library/tokenize.html

        """
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 23, Source: base.py
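
To make the dispatch concrete, here is a standalone re-implementation of the pattern above with one handler defined; the class name and handler are illustrative and not part of wemake-python-styleguide.

import io
import tokenize

class CommentCounter:
    """Counts COMMENT tokens via the same exact_type-based dispatch."""

    def __init__(self) -> None:
        self.comments = 0

    def visit(self, token: tokenize.TokenInfo) -> None:
        token_type = tokenize.tok_name[token.exact_type].lower()
        method = getattr(self, 'visit_{0}'.format(token_type), None)
        if method is not None:
            method(token)

    def visit_comment(self, token: tokenize.TokenInfo) -> None:
        self.comments += 1

counter = CommentCounter()
for tok in tokenize.generate_tokens(io.StringIO("x = 1  # hi\n").readline):
    counter.visit(tok)
print(counter.comments)  # 1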

Example 7: _check_executable_mismatch

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_executable_mismatch(
        self,
        token: tokenize.TokenInfo,
        *,
        is_shebang: bool,
    ) -> None:
        if is_windows() or self.filename == STDIN:
            # Windows does not have this concept of "executable" file.
            # The same for STDIN inputs.
            return

        is_executable = is_executable_file(self.filename)
        if is_executable and not is_shebang:
            self.add_violation(
                ShebangViolation(
                    text='file is executable but no shebang is present',
                ),
            )
        elif not is_executable and is_shebang:
            self.add_violation(
                ShebangViolation(
                    text='shebang is present but the file is not executable',
                ),
            ) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 26, Source: comments.py

Example 8: _check_valid_shebang

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_valid_shebang(self, token: tokenize.TokenInfo) -> None:
        if self._python_executable not in token.line:
            self.add_violation(
                ShebangViolation(
                    text='shebang is present but does not contain `python`',
                ),
            )

        if token.start[1] != 0:
            self.add_violation(
                ShebangViolation(
                    text='there is a whitespace before shebang',
                ),
            )

        if token.start[0] != 1:
            self.add_violation(
                ShebangViolation(
                    text='there are blank or comment lines before shebang',
                ),
            ) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 23, Source: comments.py

Example 9: visit_number

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def visit_number(self, token: tokenize.TokenInfo) -> None:
        """
        Checks number declarations.

        Raises:
            UnderscoredNumberViolation
            PartialFloatViolation
            BadNumberSuffixViolation
            BadComplexNumberSuffixViolation
            NumberWithMeaninglessZeroViolation
            PositiveExponentViolation
            FloatZeroViolation

        Regressions:
        https://github.com/wemake-services/wemake-python-styleguide/issues/557

        """
        self._check_complex_suffix(token)
        self._check_underscored_number(token)
        self._check_partial_float(token)
        self._check_bad_number_suffixes(token)
        self._check_float_zeros(token) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 24, Source: primitives.py

Example 10: visit_string

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def visit_string(self, token: tokenize.TokenInfo) -> None:
        """
        Finds incorrect string usages.

        ``u`` can only be used as the sole prefix.
        You cannot combine it with ``r``, ``b``, or ``f``,
        since doing so raises a ``SyntaxError`` while parsing.

        Raises:
            UnicodeStringViolation
            WrongMultilineStringViolation
            ImplicitRawStringViolation
            WrongUnicodeEscapeViolation

        """
        self._check_correct_multiline(token)
        self._check_string_modifiers(token)
        self._check_implicit_raw_string(token)
        self._check_wrong_unicode_escape(token) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 21, Source: primitives.py

Example 11: _check_string_modifiers

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_string_modifiers(self, token: tokenize.TokenInfo) -> None:
        modifiers, _ = split_prefixes(token.string)

        if 'u' in modifiers.lower():
            self.add_violation(
                consistency.UnicodeStringViolation(token, text=token.string),
            )

        for mod in modifiers:
            if mod in self._bad_string_modifiers:
                self.add_violation(
                    consistency.UppercaseStringModifierViolation(
                        token,
                        text=mod,
                    ),
                ) 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 18, Source: primitives.py

Example 12: _check_wrong_unicode_escape

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def _check_wrong_unicode_escape(self, token: tokenize.TokenInfo) -> None:
        # See: http://docs.python.org/reference/lexical_analysis.html
        modifiers, string_body = split_prefixes(token.string)

        index = 0
        while True:
            index = string_body.find('\\', index)
            if index == -1:
                break

            next_char = string_body[index + 1]
            if 'b' in modifiers.lower() and next_char in self._unicode_escapes:
                self.add_violation(
                    WrongUnicodeEscapeViolation(token, text=token.string),
                )

            # Whether it was a valid escape or not, backslash followed by
            # another character can always be consumed whole: the second
            # character can never be the start of a new backslash escape.
            index += 2 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 22, Source: primitives.py

Example 13: is_trailing_comma

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False
    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0
    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False 
Author: AtomLinter, Project: linter-pylama, Lines: 40, Source: refactoring.py
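
A usage sketch for is_trailing_comma; the snippet being checked is made up, and the function itself also needs itertools and tokenize imported.

import io
import tokenize

source = "x = 1, 2,\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
for index, tok in enumerate(tokens):
    if tok.exact_type == tokenize.COMMA:
        # Only the final comma on the line is reported as trailing.
        print(tok.start, is_trailing_comma(tokens, index))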

Example 14: to_rbnf_token

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def to_rbnf_token(tk: tokenize.TokenInfo) -> Tokenizer:
    name = cast(tokenize.tok_name[tk.type])
    if name == 'NAME' and tk.string in kwlist:
        value = cast(tk.string)
        name = cast('KEYWORD')
    else:
        value = cast(tk.string) if name not in ('NAME', 'STRING', 'NUMBER') else tk.string
    return Tokenizer(name, value, *tk.start) 
Author: Xython, Project: YAPyPy, Lines: 10, Source: parser.py

Example 15: not_to_ignore

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenInfo [as alias]
def not_to_ignore(tk: tokenize.TokenInfo) -> bool:
    return tk.type not in tokens_to_ignore 
Author: Xython, Project: YAPyPy, Lines: 4, Source: parser.py


Note: The tokenize.TokenInfo examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.