

Python tokenize.OP Attribute Code Examples

This article collects and summarizes typical usage examples of the tokenize.OP attribute in Python. If you are wondering what exactly tokenize.OP is, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the tokenize module, which this attribute belongs to.


The following shows 15 code examples of the tokenize.OP attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
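Before the examples, here is a minimal orientation sketch (not taken from any of the projects below): tokenize.OP is the integer token type that Python's tokenizer assigns to operator and punctuation tokens, so comparing a token's type against it is how the code below recognizes symbols such as '(', '+', or '@'.

import io
import tokenize

source = "total = (a + b) * 2"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.OP:
        # prints each operator/punctuation token and its (row, col) position
        print(tok.string, tok.start)   # '=', '(', '+', ')', '*'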

Example 1: _parse_from_import_source

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _parse_from_import_source(self):
        """Parse the 'from x import' part in a 'from x import y' statement.

        Return true iff `x` is __future__.
        """
        assert self.current.value == 'from', self.current.value
        self.stream.move()
        is_future_import = self.current.value == '__future__'
        self.stream.move()
        while (self.current is not None and
               self.current.kind in (tk.DOT, tk.NAME, tk.OP) and
               self.current.value != 'import'):
            self.stream.move()
        if self.current is None or self.current.value != 'import':
            return False
        self.check_current(value='import')
        assert self.current.value == 'import', self.current.value
        self.stream.move()
        return is_future_import 
Developer: AtomLinter | Project: linter-pylama | Lines: 21 | Source: parser.py

Example 2: _replace_booleans

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _replace_booleans(tok):
    """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
    precedence is changed to boolean precedence.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input token or the replacement values
    """
    toknum, tokval = tok
    if toknum == tokenize.OP:
        if tokval == '&':
            return tokenize.NAME, 'and'
        elif tokval == '|':
            return tokenize.NAME, 'or'
        return toknum, tokval
    return toknum, tokval 
Developer: Frank-qlu | Project: recruit | Lines: 24 | Source: expr.py
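A hedged usage sketch for _replace_booleans (the tokenization plumbing below is illustrative and not part of the pandas source): feed (type, string) pairs through the function and rebuild the expression with tokenize.untokenize.

import io
import tokenize

expr = "a & b | c"
pairs = [(t.type, t.string)
         for t in tokenize.generate_tokens(io.StringIO(expr).readline)]
# untokenize in 2-tuple mode does not preserve exact spacing
print(tokenize.untokenize(_replace_booleans(p) for p in pairs))  # roughly "a and b or c"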

Example 3: _replace_locals

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _replace_locals(tok):
    """Replace local variables with a syntactically valid name.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input token or the replacement values

    Notes
    -----
    This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
    ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
    is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
    """
    toknum, tokval = tok
    if toknum == tokenize.OP and tokval == '@':
        return tokenize.OP, _LOCAL_TAG
    return toknum, tokval 
Developer: Frank-qlu | Project: recruit | Lines: 25 | Source: expr.py
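The snippet references _LOCAL_TAG, which is defined elsewhere in pandas' expr module; the value assumed in the sketch below mirrors the docstring above and is for illustration only.

import io
import tokenize

_LOCAL_TAG = '__pd_eval_local_'  # assumed value, matching the docstring

pairs = [(t.type, t.string)
         for t in tokenize.generate_tokens(io.StringIO("@a + 1").readline)]
# the '@' OP token is replaced by the local-variable prefix; everything else passes through
print([_replace_locals(p) for p in pairs][:2])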

Example 4: _check_for_locals

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _check_for_locals(expr, stack_level, parser):
    from pandas.core.computation.expr import tokenize_string

    at_top_of_stack = stack_level == 0
    not_pandas_parser = parser != 'pandas'

    if not_pandas_parser:
        msg = "The '@' prefix is only supported by the pandas parser"
    elif at_top_of_stack:
        msg = ("The '@' prefix is not allowed in "
               "top-level eval calls, \nplease refer to "
               "your variables by name without the '@' "
               "prefix")

    if at_top_of_stack or not_pandas_parser:
        for toknum, tokval in tokenize_string(expr):
            if toknum == tokenize.OP and tokval == '@':
                raise SyntaxError(msg) 
Developer: Frank-qlu | Project: recruit | Lines: 20 | Source: eval.py
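To get a self-contained feel for what this check guards against, here is a standalone variant (not the pandas helper, which goes through tokenize_string) that scans an expression for the '@' operator token:

import io
import tokenize

def has_local_prefix(expr):
    # True if any OP token in the expression is the '@' local-variable marker
    return any(t.type == tokenize.OP and t.string == '@'
               for t in tokenize.generate_tokens(io.StringIO(expr).readline))

print(has_local_prefix("@x + 1"), has_local_prefix("x + 1"))  # True False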

Example 5: _find_logical

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Developer: sofia-netsurv | Project: python-netsurv | Lines: 26 | Source: autopep8.py
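The autopep8 snippet calls a module-level generate_tokens(source) helper rather than tokenize.generate_tokens directly; a compatible stand-in for experimenting with _find_logical might look like this (the helper below is an assumption, not autopep8's cached implementation).

import io
import tokenize

def generate_tokens(source):
    # stand-in for autopep8's helper: tokenize a source string
    return list(tokenize.generate_tokens(io.StringIO(source).readline))

lines = ["x = (1 +\n", "     2)\n", "y = 3\n"]
starts, ends = _find_logical(lines)
print(starts)  # roughly [(0, 0), (2, 0)] -- one entry per logical line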

Example 6: test_annotated_tokens

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def test_annotated_tokens():
    tokens_without_origins = [(token_type, token, props)
                              for (token_type, token, origin, props)
                              in (annotated_tokens("a(b) + c.d"))]
    assert (tokens_without_origins
            == [(tokenize.NAME, "a", {"bare_ref": True, "bare_funcall": True}),
                (tokenize.OP, "(", {"bare_ref": False, "bare_funcall": False}),
                (tokenize.NAME, "b", {"bare_ref": True, "bare_funcall": False}),
                (tokenize.OP, ")", {"bare_ref": False, "bare_funcall": False}),
                (tokenize.OP, "+", {"bare_ref": False, "bare_funcall": False}),
                (tokenize.NAME, "c", {"bare_ref": True, "bare_funcall": False}),
                (tokenize.OP, ".", {"bare_ref": False, "bare_funcall": False}),
                (tokenize.NAME, "d",
                    {"bare_ref": False, "bare_funcall": False}),
                ])

    # This was a bug:
    assert len(list(annotated_tokens("x"))) == 1 
Developer: birforce | Project: vnpy_crypto | Lines: 20 | Source: eval.py
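Assuming patsy is installed, annotated_tokens can be imported and exercised directly; the import path below reflects patsy's internal layout and may differ between versions.

import tokenize
from patsy.eval import annotated_tokens  # internal module path, assumed

for token_type, token, origin, props in annotated_tokens("a(b) + c.d"):
    print(tokenize.tok_name[token_type], token, props)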

Example 7: decistmt

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def decistmt(tokens):
    """Substitute Decimals for floats in a string of statements.

    Based on an example from the tokenize module docs.
    """
    result = []
    for toknum, tokval, _, _, _  in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            for newtok in [
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ]:
                yield newtok
        else:
            yield (toknum, tokval) 
Developer: ktraunmueller | Project: Computable | Lines: 19 | Source: test_inputtransformer.py
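A hedged usage sketch: tokenize a statement, push it through decistmt, and rebuild the source (2-tuple untokenize does not preserve exact spacing).

import io
import tokenize

src = "total = 1.1 + 2.2\n"
toks = tokenize.generate_tokens(io.StringIO(src).readline)
print(tokenize.untokenize(decistmt(toks)))
# roughly: total = Decimal ('1.1') + Decimal ('2.2')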

Example 8: __openseen

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything other than STRING or whitespace
            print >> sys.stderr, _(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting 
Developer: aliyun | Project: oss-ftp | Lines: 24 | Source: pygettext.py

Example 9: process_tokens

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def process_tokens(self):
        """Process tokens and trigger checks.

        This can raise a :class:`flake8.exceptions.InvalidSyntax` exception.
        Instead of using this directly, you should use
        :meth:`flake8.checker.FileChecker.run_checks`.
        """
        parens = 0
        statistics = self.statistics
        file_processor = self.processor
        for token in file_processor.generate_tokens():
            statistics['tokens'] += 1
            self.check_physical_eol(token)
            token_type, text = token[0:2]
            processor.log_token(LOG, token)
            if token_type == tokenize.OP:
                parens = processor.count_parentheses(parens, text)
            elif parens == 0:
                if processor.token_is_newline(token):
                    self.handle_newline(token_type)
                elif (processor.token_is_comment(token) and
                        len(file_processor.tokens) == 1):
                    self.handle_comment(token, text)

        if file_processor.tokens:
            # If any tokens are left over, process them
            self.run_physical_checks(file_processor.lines[-1])
            self.run_logical_checks() 
Developer: AtomLinter | Project: linter-pylama | Lines: 30 | Source: checker.py

Example 10: parse_definitions

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def parse_definitions(self, class_, all=False):
        """Parse multiple definitions and yield them."""
        while self.current is not None:
            self.log.debug("parsing definition list, current token is %r (%s)",
                           self.current.kind, self.current.value)
            self.log.debug('got_newline: %s', self.stream.got_logical_newline)
            if all and self.current.value == '__all__':
                self.parse_all()
            elif (self.current.kind == tk.OP and
                  self.current.value == '@' and
                  self.stream.got_logical_newline):
                self.consume(tk.OP)
                self.parse_decorators()
            elif self.current.value in ['def', 'class']:
                yield self.parse_definition(class_._nest(self.current.value))
            elif self.current.kind == tk.INDENT:
                self.consume(tk.INDENT)
                for definition in self.parse_definitions(class_):
                    yield definition
            elif self.current.kind == tk.DEDENT:
                self.consume(tk.DEDENT)
                return
            elif self.current.value == 'from':
                self.parse_from_import_statement()
            else:
                self.stream.move() 
Developer: AtomLinter | Project: linter-pylama | Lines: 28 | Source: parser.py

Example 11: parse_all

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def parse_all(self):
        """Parse the __all__ definition in a module."""
        assert self.current.value == '__all__'
        self.consume(tk.NAME)
        if self.current.value != '=':
            raise AllError('Could not evaluate contents of __all__. ')
        self.consume(tk.OP)
        if self.current.value not in '([':
            raise AllError('Could not evaluate contents of __all__. ')
        self.consume(tk.OP)

        self.all = []
        all_content = "("
        while self.current.kind != tk.OP or self.current.value not in ")]":
            if self.current.kind in (tk.NL, tk.COMMENT):
                pass
            elif (self.current.kind == tk.STRING or
                    self.current.value == ','):
                all_content += self.current.value
            else:
                raise AllError('Unexpected token kind in  __all__: {!r}. '
                               .format(self.current.kind))
            self.stream.move()
        self.consume(tk.OP)
        all_content += ")"
        try:
            self.all = eval(all_content, {})
        except BaseException as e:
            raise AllError('Could not evaluate contents of __all__.'
                           '\bThe value was {}. The exception was:\n{}'
                           .format(all_content, e)) 
Developer: AtomLinter | Project: linter-pylama | Lines: 33 | Source: parser.py

Example 12: _parse_from_import_names

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _parse_from_import_names(self, is_future_import):
        """Parse the 'y' part in a 'from x import y' statement."""
        if self.current.value == '(':
            self.consume(tk.OP)
            expected_end_kinds = (tk.OP, )
        else:
            expected_end_kinds = (tk.NEWLINE, tk.ENDMARKER)
        while self.current.kind not in expected_end_kinds and not (
                    self.current.kind == tk.OP and self.current.value == ';'):
            if self.current.kind != tk.NAME:
                self.stream.move()
                continue
            self.log.debug("parsing import, token is %r (%s)",
                           self.current.kind, self.current.value)
            if is_future_import:
                self.log.debug('found future import: %s', self.current.value)
                self.future_imports.add(self.current.value)
            self.consume(tk.NAME)
            self.log.debug("parsing import, token is %r (%s)",
                           self.current.kind, self.current.value)
            if self.current.kind == tk.NAME and self.current.value == 'as':
                self.consume(tk.NAME)  # as
                if self.current.kind == tk.NAME:
                    self.consume(tk.NAME)  # new name, irrelevant
            if self.current.value == ',':
                self.consume(tk.OP)
            self.log.debug("parsing import, token is %r (%s)",
                           self.current.kind, self.current.value) 
Developer: AtomLinter | Project: linter-pylama | Lines: 30 | Source: parser.py

Example 13: whitespace_before_parameters

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def whitespace_before_parameters(logical_line, tokens):
    r"""Avoid extraneous whitespace.

    Avoid extraneous whitespace in the following situations:
    - before the open parenthesis that starts the argument list of a
      function call.
    - before the open parenthesis that starts an indexing or slicing.

    Okay: spam(1)
    E211: spam (1)

    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index in range(1, len(tokens)):
        token_type, text, start, end, __ = tokens[index]
        if (token_type == tokenize.OP and
            text in '([' and
            start != prev_end and
            (prev_type == tokenize.NAME or prev_text in '}])') and
            # Syntax "class A (B):" is allowed, but avoid it
            (index < 2 or tokens[index - 2][1] != 'class') and
                # Allow "return (a.foo for a in range(5))"
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type = token_type
        prev_text = text
        prev_end = end 
Developer: AtomLinter | Project: linter-pylama | Lines: 32 | Source: pycodestyle.py
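An illustrative driver (not part of pycodestyle's public API) that builds the token list for one logical line and collects the E211 offsets; note the check also relies on the keyword module, which pycodestyle imports at module level.

import io
import keyword  # used by the check above
import tokenize

line = "spam (1)\n"
toks = list(tokenize.generate_tokens(io.StringIO(line).readline))
print(list(whitespace_before_parameters(line, toks)))
# roughly: [((1, 4), "E211 whitespace before '('")]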

Example 14: explicit_line_join

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def explicit_line_join(logical_line, tokens):
    r"""Avoid explicit line join between brackets.

    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces.  Long lines can be
    broken over multiple lines by wrapping expressions in parentheses.  These
    should be used in preference to using a backslash for line continuation.

    E502: aaa = [123, \\n       123]
    E502: aaa = ("bbb " \\n       "ccc")

    Okay: aaa = [123,\n       123]
    Okay: aaa = ("bbb "\n       "ccc")
    Okay: aaa = "bbb " \\n    "ccc"
    Okay: aaa = 123  # \\
    """
    prev_start = prev_end = parens = 0
    comment = False
    backslash = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            comment = True
        if start[0] != prev_start and parens and backslash and not comment:
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            if line.rstrip('\r\n').endswith('\\'):
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in ')]}':
                parens -= 1 
Developer: AtomLinter | Project: linter-pylama | Lines: 39 | Source: pycodestyle.py
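A similar illustrative driver for E502, using a backslash continuation inside brackets (the reported offset is approximate and depends on the exact input).

import io
import tokenize

line = "aaa = [123, \\\n       123]\n"
toks = list(tokenize.generate_tokens(io.StringIO(line).readline))
print(list(explicit_line_join(line, toks)))
# roughly: [((1, 12), 'E502 the backslash is redundant between brackets')]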

Example 15: _is_binary_operator

# Required import: import tokenize [optionally as an alias]
# Or: from tokenize import OP [optionally as an alias]
def _is_binary_operator(token_type, text):
    is_op_token = token_type == tokenize.OP
    is_conjunction = text in ['and', 'or']
    # NOTE(sigmavirus24): Previously the not_a_symbol check was executed
    # conditionally. Since it is now *always* executed, text may be None.
    # In that case we get a TypeError for `text not in str`.
    not_a_symbol = text and text not in "()[]{},:.;@=%~"
    # The % character is strictly speaking a binary operator, but the
    # common usage seems to be to put it next to the format parameters,
    # after a line break.
    return ((is_op_token or is_conjunction) and not_a_symbol) 
Developer: AtomLinter | Project: linter-pylama | Lines: 13 | Source: pycodestyle.py
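A quick check of _is_binary_operator with hand-built (token_type, text) pairs:

import tokenize

print(_is_binary_operator(tokenize.OP, '+'))      # True: a real operator
print(_is_binary_operator(tokenize.OP, ','))      # False: listed punctuation
print(_is_binary_operator(tokenize.NAME, 'and'))  # True: boolean conjunction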


Note: The tokenize.OP attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.