

Python token.OP Attribute Code Examples

This article collects typical usage examples of the token.OP attribute in Python. If you are wondering what token.OP is, how to use it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore usage examples of the other attributes in the token module.


The following 15 code examples of the token.OP attribute are shown below, drawn from open-source projects and ordered by popularity by default.
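
For orientation before the examples, here is a minimal, self-contained sketch (standard library only): token.OP is the coarse token type the tokenizer assigns to all operators and delimiters, and in Python 3 TokenInfo.exact_type refines it to a specific operator.

import io
import token
import tokenize

# Tokenize a one-line expression and pick out the operator/delimiter tokens.
for tok in tokenize.generate_tokens(io.StringIO("a = (b + c) * 2\n").readline):
    if tok.type == token.OP:
        # exact_type narrows OP to e.g. token.EQUAL, token.LPAR, token.PLUS, ...
        print(tok.string, token.tok_name[tok.exact_type])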

Example 1: _find_logical

import tokenize

# Note: `generate_tokens` below is autopep8's own helper, which accepts a
# source string (the stdlib tokenize.generate_tokens takes a readline
# callable instead).
def _find_logical(source_lines):
    # Build indexes of the start and end positions of logical lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Author: sofia-netsurv | Project: python-netsurv | Source: autopep8.py
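
A hedged usage sketch: the `generate_tokens` stand-in below is an assumption (autopep8's real helper also caches results), but it reproduces just enough behavior to run _find_logical on a few source lines.

import io
import tokenize

def generate_tokens(source):
    # Minimal stand-in for autopep8's string-based tokenizer helper.
    return tokenize.generate_tokens(io.StringIO(source).readline)

source_lines = ['x = (1 +\n', '     2)\n', 'y = 3\n']
starts, ends = _find_logical(source_lines)
print(starts)  # [(0, 0), (2, 0)] -- two logical lines (0-based rows)
print(ends)    # [(1, 7), (2, 5)]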

Example 2: op_

import token

# `some` is a parser combinator; judging by the API shape it comes from
# funcparserlib.parser in the source project (an assumption).
def op_(s):
    return some(lambda tok: tok.type == token.OP and tok.string == s) 
Author: pyta-uoft | Project: pyta | Source: __init__.py

Example 3: s_

import token

# `some` and `skip` are parser combinators, presumably funcparserlib.parser's
# (an assumption based on the API shape).
def s_(string):
    return skip(some(lambda tok: tok.type == token.OP and tok.string == string)) 
Author: pyta-uoft | Project: pyta | Source: __init__.py
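
Both helpers wrap a predicate over tokens that expose .type and .string, which is exactly what the stdlib tokenizer produces. The predicate itself can be exercised directly, independent of whichever combinator library the project uses (a minimal sketch):

import io
import token
import tokenize

toks = list(tokenize.generate_tokens(io.StringIO("a + b\n").readline))
is_plus = lambda tok: tok.type == token.OP and tok.string == '+'
print([t.string for t in toks if is_plus(t)])  # ['+']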

Example 4: ending_of_bad_tuple

import token
def ending_of_bad_tuple(x):
    return x.type == token.OP and x.string == ',' 
Author: ar4s | Project: flake8_tuple | Source: flake8_tuple.py

Example 5: check_for_wrong_tuple

import ast
import token
import tokenize
from tokenize import TokenInfo
def check_for_wrong_tuple(tree, code, noqa):
    errors = []
    candidates = []
    for assign in ast.walk(tree):
        if not isinstance(assign, (ast.Assign, ast.Return)):
            continue
        elif assign.lineno in noqa:
            continue
        elif isinstance(assign.value, ast.Call):
            continue
        for tuple_el in ast.walk(assign):
            if isinstance(tuple_el, ast.Tuple) and len(tuple_el.elts) == 1:
                candidates.append((assign.lineno, assign.col_offset))
                break
    if not candidates:
        return []
    for candidate in candidates:
        number_nl = 0  # count non-logical (NL) newlines within the statement
        tokens = tokenize.generate_tokens(
            lambda L=iter(code): next(L)
        )
        previous_token = None
        for t in tokens:
            if previous_token is not None and previous_token.type == tokenize.NEWLINE:
                number_nl = 0
            x = TokenInfo(*t)
            if x.start[0] - number_nl != candidate[0]:
                previous_token = x
                continue
            if x.type == tokenize.NL:
                number_nl += 1
            if x.type == token.NEWLINE and ending_of_bad_tuple(previous_token):
                errors.append(x.start)
            if x.type == token.OP and x.string == '=' and previous_token.type != token.NAME:
                x = TokenInfo(*next(tokens))
                if x.type != token.OP and x.string != '(':
                    x_next = TokenInfo(*next(tokens))
                    if ending_of_bad_tuple(x_next):
                        errors.append(x.start)
            previous_token = x
    return errors 
Author: ar4s | Project: flake8_tuple | Source: flake8_tuple.py
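
For context, this is the mistake the checker reports: a stray trailing comma silently turns an assignment or return value into a one-element tuple.

x = 1,          # x == (1,) -- almost certainly meant x = 1

def f():
    return 0,   # returns (0,), not 0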

Example 6: _is_binary_operator

import tokenize
def _is_binary_operator(token_type, text):
    return ((token_type == tokenize.OP or text in ['and', 'or']) and
            text not in '()[]{},:.;@=%~')


Author: sofia-netsurv | Project: python-netsurv | Source: autopep8.py
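
A quick check of the predicate. Note that keyword operators arrive as NAME tokens rather than OP, which is why the text is tested against 'and'/'or':

import tokenize

print(_is_binary_operator(tokenize.OP, '+'))      # True
print(_is_binary_operator(tokenize.NAME, 'and'))  # True -- keyword operator
print(_is_binary_operator(tokenize.OP, ','))      # False -- delimiters excluded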

Example 7: __call__

import keyword
import os
import token

# Method of IPython's PyColorize.Parser class: `self.colors`, `self.out`,
# `self.pos`, `self.lines`, and `self.raw`, as well as the module constants
# _KEYWORD and _TEXT, are defined elsewhere in that module.
def __call__(self, toktype, toktext, start_pos, end_pos, line):
    """Token handler, with syntax highlighting."""
    (srow, scol) = start_pos
    (erow, ecol) = end_pos
    colors = self.colors
    owrite = self.out.write

    # line separator, so this works across platforms
    linesep = os.linesep

    # calculate new positions
    oldpos = self.pos
    newpos = self.lines[srow] + scol
    self.pos = newpos + len(toktext)

    # send the original whitespace, if needed
    if newpos > oldpos:
        owrite(self.raw[oldpos:newpos])

    # skip indenting tokens
    if toktype in [token.INDENT, token.DEDENT]:
        self.pos = newpos
        return

    # map token type to a color group
    if token.LPAR <= toktype <= token.OP:
        toktype = token.OP
    elif toktype == token.NAME and keyword.iskeyword(toktext):
        toktype = _KEYWORD
    color = colors.get(toktype, colors[_TEXT])

    # Triple quoted strings must be handled carefully so that backtracking
    # in pagers works correctly. We need color terminators on _each_ line.
    if linesep in toktext:
        toktext = toktext.replace(linesep, '%s%s%s' %
                                  (colors.normal, linesep, color))

    # send text
    owrite('%s%s%s' % (color, toktext, colors.normal))
Author: ktraunmueller | Project: Computable | Source: PyColorize.py

Example 8: tokeneater

import cgi
import keyword
import token
import tokenize

# Method of the HTML-emitting tokenizer in pyx's pt2html tool: `self.output`,
# `self.col`, `self.tokclass`, and the `tokclasses` mapping are defined
# elsewhere in that class/module. Note that `cgi.escape` was removed in
# Python 3.8; `html.escape` is the modern replacement.
def tokeneater(self, toktype, toktext, start_pos, end_pos, line):
    (srow, scol) = start_pos
    (erow, ecol) = end_pos
    if toktype == token.ERRORTOKEN:
        raise RuntimeError("ErrorToken occurred")
    if toktype in [token.NEWLINE, tokenize.NL]:
        self.output.write('\n')
        self.col = 0
    else:
        # map token type to a color group
        if token.LPAR <= toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD

        # restore whitespace
        assert scol >= self.col
        self.output.write(" " * (scol - self.col))

        try:
            tokclass = tokclasses[toktype]
        except KeyError:
            tokclass = None
        if self.tokclass is not None and tokclass != self.tokclass:
            self.output.write('</span>')
        if tokclass is not None and tokclass != self.tokclass:
            self.output.write('<span class="%s">' % tokclass)
        self.output.write(cgi.escape(toktext))
        self.tokclass = tokclass

        # calculate new column position
        self.col = scol + len(toktext)
        newline = toktext.rfind("\n")
        if newline != -1:
            self.col = len(toktext) - newline - 1
Author: pyx-project | Project: pyx | Source: pt2html.py

Example 9: __init__

# Constructor of the TokenSnippet class defined in the same module (snippet.py).
def __init__(self, token_type, tokens):
    # For operators and delimiters, the TokenSnippet's type may be more specific
    # than the type of the constituent token. E.g. the TokenSnippet type is
    # token.DOT, but the token type is token.OP. This is because the parser
    # has more context than the tokenizer.
    self._type = token_type
    self._tokens = tokens
    self._modified = False 
Author: FSecureLABS | Project: Jandroid | Source: snippet.py
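
The distinction described in the comment is easy to see with the stdlib tokenizer: a '.' is a generic token.OP as far as the tokenizer's type field goes, while its exact_type (the parser's more specific view) is token.DOT.

import io
import token
import tokenize

dot = list(tokenize.generate_tokens(io.StringIO("a.b\n").readline))[1]
print(dot.type == token.OP)         # True -- the tokenizer's coarse type
print(dot.exact_type == token.DOT)  # True -- the more specific type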

Example 10: _SnippetizeNode

import token
import tokenize

# `tokens` is a deque of TokenInfo objects; Symbol and TokenSnippet are
# classes defined in the same module. The stdlib `parser` module whose syntax
# trees this walks was deprecated in Python 3.9 and removed in 3.10.
def _SnippetizeNode(node, tokens):
  # The parser module gives a syntax tree that discards comments,
  # non-terminating newlines, and whitespace information. Use the tokens given
  # by the tokenize module to annotate the syntax tree with the information
  # needed to exactly reproduce the original source code.
  node_type = node[0]

  if node_type >= token.NT_OFFSET:
    # Symbol.
    children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
    return Symbol(node_type, children)
  else:
    # Token.
    grabbed_tokens = []
    while tokens and (
        tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
      grabbed_tokens.append(tokens.popleft())

    # parser has 2 NEWLINEs right before the end.
    # tokenize has 0 or 1 depending on if the file has one.
    # Create extra nodes without consuming tokens to account for this.
    if node_type == token.NEWLINE:
      for tok in tokens:
        if tok.type == token.ENDMARKER:
          return TokenSnippet(node_type, grabbed_tokens)
        if tok.type != token.DEDENT:
          break

    assert tokens[0].type == token.OP or node_type == tokens[0].type

    grabbed_tokens.append(tokens.popleft())
    return TokenSnippet(node_type, grabbed_tokens) 
Author: FSecureLABS | Project: Jandroid | Source: snippet.py
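
The `node_type >= token.NT_OFFSET` branch relies on a convention of the old parser module: numbers below token.NT_OFFSET are terminal token types such as token.OP, and numbers at or above it are grammar symbols (non-terminals).

import token

print(token.NT_OFFSET)             # 256
print(token.OP < token.NT_OFFSET)  # True -- OP is a terminal token type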

Example 11: is_percent_op

import token

# Method of flynt's PyToken wrapper; `self.toknum` and `self.tokval` mirror a
# tokenize (type, string) pair.
def is_percent_op(self):
    return self.toknum == token.OP and self.tokval == "%" 
Author: ikamensh | Project: flynt | Source: PyToken.py

Example 12: is_sq_brack_op

import token

# Method of flynt's PyToken wrapper (see Example 11).
def is_sq_brack_op(self):
    return self.toknum == token.OP and self.tokval == "[" 
Author: ikamensh | Project: flynt | Source: PyToken.py

Example 13: is_dot_op

import token

# Method of flynt's PyToken wrapper (see Example 11).
def is_dot_op(self):
    return self.toknum == token.OP and self.tokval == "." 
Author: ikamensh | Project: flynt | Source: PyToken.py

Example 14: is_paren_op

import token

# Method of flynt's PyToken wrapper (see Example 11).
def is_paren_op(self):
    return self.toknum == token.OP and self.tokval == "(" 
Author: ikamensh | Project: flynt | Source: PyToken.py

Example 15: is_exponentiation_op

import token

# Method of flynt's PyToken wrapper (see Example 11).
def is_exponentiation_op(self):
    return self.toknum == token.OP and self.tokval == "**" 
Author: ikamensh | Project: flynt | Source: PyToken.py
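
These predicates all test the same (toknum, tokval) pair. A hedged sketch of the underlying check using only the stdlib tokenizer (flynt's PyToken wraps exactly this kind of pair):

import io
import token
import tokenize

# Find the %-format operator that flynt would consider converting to an f-string.
for tok in tokenize.generate_tokens(io.StringIO("msg = '%s!' % name\n").readline):
    if tok.type == token.OP and tok.string == "%":
        print("percent-format operator at", tok.start)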


Note: The token.OP examples in this article were collected by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms, with snippets selected from projects contributed by their original authors. Copyright in each snippet remains with its original authors; consult each project's License before redistributing or reusing the code.