

Python token.OP Attribute Code Examples

This article collects typical usage examples of Python's token.OP attribute: the token type that the tokenizer assigns to operators and delimiters such as =, +, ( and ). If you are wondering what token.OP is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore the other attributes of the token module for related examples.


The sections below present 15 code examples of the token.OP attribute, drawn from open-source projects and ordered by popularity.
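
Before the examples, a minimal sketch of where token.OP comes from: Python's tokenize module assigns this single token type to every operator and delimiter, and the token's string distinguishes which one it is (this snippet is illustrative and not taken from any of the projects below).

import io
import token
import tokenize

# Tokenize a one-line statement and show which tokens are typed as OP.
code = "result = (a + b) * 2\n"
for tok in tokenize.generate_tokens(io.StringIO(code).readline):
    if tok.type == token.OP:
        print(tok.string, tok.start)
# Prints '=', '(', '+', ')' and '*' with their (row, column) positions.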

Example 1: _find_logical

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: autopep8.py
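
A quick way to exercise this helper (a hedged sketch: autopep8's own generate_tokens is a small wrapper that feeds a plain source string to tokenize, so a stand-in with that behaviour is defined here):

import io
import tokenize

def generate_tokens(source):
    # Stand-in for autopep8's wrapper: accept a source string directly.
    return tokenize.generate_tokens(io.StringIO(source).readline)

source_lines = ['x = (1 +\n', '     2)\n', 'y = 3\n']
print(_find_logical(source_lines))
# ([(0, 0), (2, 0)], [(1, 7), (2, 5)]) -- two logical lines, rows 0-based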

Example 2: op_

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def op_(s):
    return some(lambda tok: tok.type == token.OP and tok.string == s) 
Developer: pyta-uoft, Project: pyta, Lines: 4, Source: __init__.py

Example 3: s_

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def s_(string):
    return skip(some(lambda tok: tok.type == token.OP and tok.string == string)) 
Developer: pyta-uoft, Project: pyta, Lines: 4, Source: __init__.py
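
Both helpers build funcparserlib parsers over tokenize output; a minimal sketch of combining them (assuming funcparserlib's some/skip combinators and + sequencing, which is the API pyta imports):

import io
import token
import tokenize
from funcparserlib.parser import some, skip

# '(', NAME 'x', ')' -- drop the NEWLINE/ENDMARKER tail for this sketch.
toks = list(tokenize.generate_tokens(io.StringIO('(x)\n').readline))[:3]
name = some(lambda tok: tok.type == token.NAME)
parser = s_('(') + name + s_(')')  # s_ drops the parens from the result
print(parser.parse(toks).string)   # 'x'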

Example 4: ending_of_bad_tuple

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def ending_of_bad_tuple(x):
    return x.type == token.OP and x.string == ',' 
Developer: ar4s, Project: flake8_tuple, Lines: 4, Source: flake8_tuple.py

Example 5: check_for_wrong_tuple

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def check_for_wrong_tuple(tree, code, noqa):
    errors = []
    candidates = []
    for assign in ast.walk(tree):
        if not isinstance(assign, (ast.Assign, ast.Return)):
            continue
        elif assign.lineno in noqa:
            continue
        elif isinstance(assign.value, ast.Call):
            continue
        for tuple_el in ast.walk(assign):
            if isinstance(tuple_el, ast.Tuple) and len(tuple_el.elts) == 1:
                candidates.append((assign.lineno, assign.col_offset))
                break
    if not candidates:
        return []
    for candidate in candidates:
        number_nl = 0  # account for logical newlines within statements
        tokens = tokenize.generate_tokens(
            lambda L=iter(code): next(L)
        )
        previous_token = None
        for t in tokens:
            if previous_token is not None and previous_token.type == tokenize.NEWLINE:
                number_nl = 0
            x = TokenInfo(*t)
            if x.start[0] - number_nl != candidate[0]:
                previous_token = x
                continue
            if x.type == tokenize.NL:
                number_nl += 1
            if x.type == token.NEWLINE and ending_of_bad_tuple(previous_token):
                errors.append(x.start)
            if x.type == token.OP and x.string == '=' and previous_token.type != token.NAME:
                x = TokenInfo(*next(tokens))
                if x.type != token.OP and x.string != '(':
                    x_next = TokenInfo(*next(tokens))
                    if ending_of_bad_tuple(x_next):
                        errors.append(x.start)
            previous_token = x
    return errors 
Developer: ar4s, Project: flake8_tuple, Lines: 43, Source: flake8_tuple.py
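
A minimal sketch of driving the checker (assuming TokenInfo here is tokenize.TokenInfo, as flake8_tuple uses it): it flags the trailing comma that silently turns a return value into a one-element tuple.

import ast

code = ['def f():\n', '    return 1,\n']
tree = ast.parse(''.join(code))
# `return 1,` returns the tuple (1,) -- almost always an accident.
print(check_for_wrong_tuple(tree, code, noqa=set()))  # [(2, 13)]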

Example 6: _is_binary_operator

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def _is_binary_operator(token_type, text):
    return ((token_type == tokenize.OP or text in ['and', 'or']) and
            text not in '()[]{},:.;@=%~')


# A convenient way to handle tokens. 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 8, Source: autopep8.py
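
A few spot checks of the predicate. Note the exclusion test is a substring check against '()[]{},:.;@=%~', so single-character delimiters are filtered out, while the keyword operators 'and'/'or' count as binary operators even though they arrive as NAME tokens:

import tokenize

print(_is_binary_operator(tokenize.OP, '+'))      # True
print(_is_binary_operator(tokenize.OP, '='))      # False: '=' is excluded
print(_is_binary_operator(tokenize.NAME, 'and'))  # True: keyword operator
print(_is_binary_operator(tokenize.NAME, 'x'))    # False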

Example 7: __call__

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def __call__(self, toktype, toktext, start_pos, end_pos, line):
        """ Token handler, with syntax highlighting."""
        (srow,scol) = start_pos
        (erow,ecol) = end_pos
        colors = self.colors
        owrite = self.out.write

        # line separator, so this works across platforms
        linesep = os.linesep

        # calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)

        # send the original whitespace, if needed
        if newpos > oldpos:
            owrite(self.raw[oldpos:newpos])

        # skip indenting tokens
        if toktype in [token.INDENT, token.DEDENT]:
            self.pos = newpos
            return

        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        color = colors.get(toktype, colors[_TEXT])

        #print '<%s>' % toktext,    # dbg

        # Triple quoted strings must be handled carefully so that backtracking
        # in pagers works correctly. We need color terminators on _each_ line.
        if linesep in toktext:
            toktext = toktext.replace(linesep, '%s%s%s' %
                                      (colors.normal,linesep,color))

        # send text
        owrite('%s%s%s' % (color,toktext,colors.normal)) 
Developer: ktraunmueller, Project: Computable, Lines: 43, Source: PyColorize.py
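
The token.LPAR <= toktype <= token.OP test above leans on a CPython detail: all operator and delimiter token ids are numbered contiguously from LPAR up to OP, so the whole family collapses into one colour group. A small sketch (the exact ids vary across Python versions):

import token

print(token.LPAR, token.PLUS, token.OP)      # e.g. 7 14 54 on CPython 3.8
print(token.LPAR <= token.PLUS <= token.OP)  # True: '+' falls inside the range
print(token.LPAR <= token.NAME <= token.OP)  # False: names sit outside it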

Example 8: tokeneater

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def tokeneater(self, toktype, toktext, start_pos, end_pos, line):
        (srow, scol) = start_pos
        (erow, ecol) = end_pos
        if toktype == token.ERRORTOKEN:
            raise RuntimeError("ErrorToken occurred")
        if toktype in [token.NEWLINE, tokenize.NL]:
            self.output.write('\n')
            self.col = 0
        else:
            # map token type to a color group
            if token.LPAR <= toktype and toktype <= token.OP:
                toktype = token.OP
            elif toktype == token.NAME and keyword.iskeyword(toktext):
                toktype = _KEYWORD

            # restore whitespace
            assert scol >= self.col
            self.output.write(" "*(scol-self.col))

            try:
                tokclass = tokclasses[toktype]
            except KeyError:
                tokclass = None
            if self.tokclass is not None and tokclass != self.tokclass:
                self.output.write('</span>')
            if tokclass is not None and tokclass != self.tokclass:
                self.output.write('<span class="%s">' % tokclass)
            self.output.write(cgi.escape(toktext))
            self.tokclass = tokclass

            # calculate new column position
            self.col = scol + len(toktext)
            newline = toktext.rfind("\n")
            if newline != -1:
                self.col = len(toktext) - newline - 1 
Developer: pyx-project, Project: pyx, Lines: 37, Source: pt2html.py
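
This snippet predates Python 3.8, where cgi.escape was removed; on modern Python the equivalent call is html.escape with quoting disabled:

import html

# cgi.escape() defaulted to quote=False, so this reproduces the old behaviour.
escaped = html.escape('a < b', quote=False)
print(escaped)  # a &lt; b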

Example 9: __init__

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def __init__(self, token_type, tokens):
    # For operators and delimiters, the TokenSnippet's type may be more specific
    # than the type of the constituent token. E.g. the TokenSnippet type is
    # token.DOT, but the token type is token.OP. This is because the parser
    # has more context than the tokenizer.
    self._type = token_type
    self._tokens = tokens
    self._modified = False 
Developer: FSecureLABS, Project: Jandroid, Lines: 10, Source: snippet.py
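
A small sketch illustrating the comment in the constructor: the tokenizer reports '.' generically as token.OP, while parser-level code can label the very same token with the more specific token.DOT:

import io
import token
import tokenize

dot = next(t for t in tokenize.generate_tokens(io.StringIO('a.b\n').readline)
           if t.string == '.')
print(dot.type == token.OP)  # True: the tokenizer's generic view
print(token.DOT)             # the more specific id used at parser level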

Example 10: _SnippetizeNode

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def _SnippetizeNode(node, tokens):
  # The parser module gives a syntax tree that discards comments,
  # non-terminating newlines, and whitespace information. Use the tokens given
  # by the tokenize module to annotate the syntax tree with the information
  # needed to exactly reproduce the original source code.
  node_type = node[0]

  if node_type >= token.NT_OFFSET:
    # Symbol.
    children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
    return Symbol(node_type, children)
  else:
    # Token.
    grabbed_tokens = []
    while tokens and (
        tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
      grabbed_tokens.append(tokens.popleft())

    # parser has 2 NEWLINEs right before the end.
    # tokenize has 0 or 1 depending on if the file has one.
    # Create extra nodes without consuming tokens to account for this.
    if node_type == token.NEWLINE:
      for tok in tokens:
        if tok.type == token.ENDMARKER:
          return TokenSnippet(node_type, grabbed_tokens)
        if tok.type != token.DEDENT:
          break

    assert tokens[0].type == token.OP or node_type == tokens[0].type

    grabbed_tokens.append(tokens.popleft())
    return TokenSnippet(node_type, grabbed_tokens) 
Developer: FSecureLABS, Project: Jandroid, Lines: 34, Source: snippet.py
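
The node_type >= token.NT_OFFSET test works because terminal token ids (NAME, OP, NEWLINE, ...) all sit below token.NT_OFFSET, while grammar-symbol ids sit at or above it. A quick check (note that the parser module this example relies on was removed in Python 3.10):

import token

print(token.NT_OFFSET)             # 256
print(token.OP < token.NT_OFFSET)  # True: every terminal id is below 256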

Example 11: is_percent_op

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def is_percent_op(self):
        return self.toknum == token.OP and self.tokval == "%" 
Developer: ikamensh, Project: flynt, Lines: 4, Source: PyToken.py

Example 12: is_sq_brack_op

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def is_sq_brack_op(self):
        return self.toknum == token.OP and self.tokval == "[" 
Developer: ikamensh, Project: flynt, Lines: 4, Source: PyToken.py

Example 13: is_dot_op

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def is_dot_op(self):
        return self.toknum == token.OP and self.tokval == "." 
Developer: ikamensh, Project: flynt, Lines: 4, Source: PyToken.py

Example 14: is_paren_op

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def is_paren_op(self):
        return self.toknum == token.OP and self.tokval == "(" 
Developer: ikamensh, Project: flynt, Lines: 4, Source: PyToken.py

Example 15: is_exponentiation_op

# Required import: import token [as alias]
# Or: from token import OP [as alias]
def is_exponentiation_op(self):
        return self.toknum == token.OP and self.tokval == "**" 
Developer: ikamensh, Project: flynt, Lines: 4, Source: PyToken.py
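
A minimal sketch exercising these predicates, treating the methods shown above as plain functions; SimpleNamespace is a hypothetical stand-in for flynt's PyToken, exposing the same toknum/tokval fields:

import io
import token
import tokenize
from types import SimpleNamespace

def wrap(t):
    # Mirror the toknum/tokval attributes the predicates expect.
    return SimpleNamespace(toknum=t.type, tokval=t.string)

toks = [wrap(t) for t in
        tokenize.generate_tokens(io.StringIO("'%d' % n\n").readline)]
print(any(is_percent_op(t) for t in toks))         # True: the '%' operator
print(any(is_exponentiation_op(t) for t in toks))  # False: no '**' here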


Note: The token.OP attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.