

Python token.DEDENT Attribute Code Examples

This article collects typical usage examples of the token.DEDENT attribute in Python. If you are wondering what token.DEDENT is for, how to use it, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples for the token module, which defines this attribute.


The following shows 8 code examples of the token.DEDENT attribute, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
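
Before the examples, here is a minimal standalone sketch (a Python 3 illustration written for this article, not taken from any of the projects below) of where token.DEDENT appears in a token stream:

import io
import token
import tokenize

source = "def f():\n    return 1\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == token.DEDENT:
        # DEDENT carries no text; it marks the point where an indented block closes.
        print("DEDENT at line", tok.start[0])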

Example 1: __openseen

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, _(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting 
Developer: aliyun, Project: oss-ftp, Lines: 24, Source: pygettext.py

Example 2: __openseen

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING and is_literal_string(tstring):
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print(_(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }, file=sys.stderr)
            self.__state = self.__waiting 
Developer: guohuadeng, Project: odoo13-x64, Lines: 24, Source: pygettext.py
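
The two pygettext snippets above share the same idea: while inside a _( ... ) call, collect string literals and treat COMMENT, INDENT, DEDENT, NEWLINE, and NL as ignorable whitespace tokens. Below is a condensed standalone sketch of that idea (Python 3, written for this article; it is not the pygettext implementation and only handles the simplest case):

import ast
import io
import token
import tokenize

IGNORABLE = {tokenize.COMMENT, token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}

def extract_messages(source):
    messages, buf, inside = [], [], False
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if not inside:
            # Crude trigger: assumes the tokens after '_' are '(' ... ')'.
            inside = tok.type == token.NAME and tok.string == "_"
        elif tok.type == tokenize.OP and tok.string == ")":
            if buf:
                messages.append("".join(buf))
            buf, inside = [], False
        elif tok.type == tokenize.STRING:
            buf.append(ast.literal_eval(tok.string))
        elif tok.type not in IGNORABLE:
            pass  # the real pygettext warns about unexpected tokens here
    return messages

print(extract_messages('x = _("hello ")  # comment\ny = _("world")\n'))  # ['hello ', 'world']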

Example 3: __call__

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def __call__(self, toktype, toktext, start_pos, end_pos, line):
        """ Token handler, with syntax highlighting."""
        (srow,scol) = start_pos
        (erow,ecol) = end_pos
        colors = self.colors
        owrite = self.out.write

        # line separator, so this works across platforms
        linesep = os.linesep

        # calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)

        # send the original whitespace, if needed
        if newpos > oldpos:
            owrite(self.raw[oldpos:newpos])

        # skip indenting tokens
        if toktype in [token.INDENT, token.DEDENT]:
            self.pos = newpos
            return

        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        color = colors.get(toktype, colors[_TEXT])

        #print '<%s>' % toktext,    # dbg

        # Triple quoted strings must be handled carefully so that backtracking
        # in pagers works correctly. We need color terminators on _each_ line.
        if linesep in toktext:
            toktext = toktext.replace(linesep, '%s%s%s' %
                                      (colors.normal,linesep,color))

        # send text
        owrite('%s%s%s' % (color,toktext,colors.normal)) 
Developer: ktraunmueller, Project: Computable, Lines: 43, Source: PyColorize.py

Example 4: _SnippetizeNode

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def _SnippetizeNode(node, tokens):
  # The parser module gives a syntax tree that discards comments,
  # non-terminating newlines, and whitespace information. Use the tokens given
  # by the tokenize module to annotate the syntax tree with the information
  # needed to exactly reproduce the original source code.
  node_type = node[0]

  if node_type >= token.NT_OFFSET:
    # Symbol.
    children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
    return Symbol(node_type, children)
  else:
    # Token.
    grabbed_tokens = []
    while tokens and (
        tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
      grabbed_tokens.append(tokens.popleft())

    # parser has 2 NEWLINEs right before the end.
    # tokenize has 0 or 1 depending on if the file has one.
    # Create extra nodes without consuming tokens to account for this.
    if node_type == token.NEWLINE:
      for tok in tokens:
        if tok.type == token.ENDMARKER:
          return TokenSnippet(node_type, grabbed_tokens)
        if tok.type != token.DEDENT:
          break

    assert tokens[0].type == token.OP or node_type == tokens[0].type

    grabbed_tokens.append(tokens.popleft())
    return TokenSnippet(node_type, grabbed_tokens) 
Developer: FSecureLABS, Project: Jandroid, Lines: 34, Source: snippet.py

Example 5: tokenize_code

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def tokenize_code(string, concat_symbol=None):
    tokens = []
    string = string.strip().decode('utf-8').encode('ascii', 'strict') #.decode('string_escape')
    for toknum, tokval, _, _, _  in tokenize.generate_tokens(StringIO(string).readline):
        # We ignore these tokens during evaluation.
        if toknum not in [token.ENDMARKER, token.INDENT, token.DEDENT]:
            tokens.append(tokval.lower())

    return tokens 
Developer: pcyin, Project: tranX, Lines: 11, Source: conala_eval.py
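
A hypothetical Python 3 adaptation of the function above (the original targets Python 2), showing what dropping ENDMARKER, INDENT, and DEDENT leaves in the flattened token list:

import io
import token
import tokenize

def tokenize_code_py3(string):
    tokens = []
    for toknum, tokval, _, _, _ in tokenize.generate_tokens(io.StringIO(string).readline):
        # INDENT/DEDENT (and the final ENDMARKER) describe layout, not content,
        # so they are skipped during evaluation.
        if toknum not in (token.ENDMARKER, token.INDENT, token.DEDENT):
            tokens.append(tokval.lower())
    return tokens

print(tokenize_code_py3("if x:\n    y = 1\n"))
# ['if', 'x', ':', '\n', 'y', '=', '1', '\n'] -- the indentation tokens are gone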

Example 6: tokenize_code

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def tokenize_code(code, mode=None):
    token_stream = generate_tokens(StringIO(code).readline)
    tokens = []
    for toknum, tokval, (srow, scol), (erow, ecol), _ in token_stream:
        if toknum == tk.ENDMARKER:
            break

        if mode == 'decoder':
            if toknum == tk.STRING:
                quote = tokval[0]
                tokval = tokval[1:-1]
                tokens.append(quote)
                tokens.append(tokval)
                tokens.append(quote)
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        elif mode == 'canonicalize':
            if toknum == tk.STRING:
                tokens.append('_STR_')
            elif toknum == tk.DEDENT:
                continue
            else:
                tokens.append(tokval)
        else:
            tokens.append(tokval)

    return tokens 
Developer: pcyin, Project: tranX, Lines: 31, Source: py_utils.py
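
A hypothetical call to the function above (assuming the module's other imports are from io import StringIO, from tokenize import generate_tokens, and import token as tk), illustrating the 'canonicalize' mode:

tokens = tokenize_code("name = 'Alice'\n", mode='canonicalize')
# -> ['name', '=', '_STR_', '\n']: the string literal is replaced by the
#    placeholder '_STR_', and DEDENT tokens (which carry no text) are skipped
#    rather than appended as empty strings.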

Example 7: _simulate_compile_singlemode

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def _simulate_compile_singlemode(self, s):
        # Calculate line offsets
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos)+1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Update the paren level.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # Are we starting a statement?
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or
                 tok==':') and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output += stmt[0]
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output) 
Developer: rafasashi, Project: razzy-spinner, Lines: 54, Source: doctest_driver.py
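
A minimal standalone sketch (Python 3, written for this article) of the INDENT/DEDENT bookkeeping used above, where a counter goes up on INDENT and down on DEDENT to track nesting depth:

import io
import token
import tokenize

def max_indent_depth(source):
    depth = max_depth = 0
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == token.INDENT:
            depth += 1
            max_depth = max(max_depth, depth)
        elif tok.type == token.DEDENT:
            depth -= 1  # every INDENT is eventually balanced by a DEDENT
    return max_depth

print(max_indent_depth("def f():\n    if True:\n        pass\n"))  # 2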

Example 8: source_token_lines

# Required import: import token [as alias]
# Or: from token import DEDENT [as alias]
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """

    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0

    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)

    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", u" " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME and keyword.iskeyword(ttext):
                    tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line 
Developer: nedbat, Project: coveragepy-bbmirror, Lines: 52, Source: phystokens.py
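
A tiny sketch (Python 3, written for this article) of the token-class naming used above: the class is the lowercased first three letters of the token name, keyword NAME tokens become "key", and DEDENT never reaches this mapping because it is filtered out as whitespace:

import keyword
import token
import tokenize

def tok_class(ttype, ttext):
    if ttype == token.NAME and keyword.iskeyword(ttext):
        return "key"
    return tokenize.tok_name.get(ttype, "xx").lower()[:3]

print(tok_class(token.NAME, "def"))    # 'key'
print(tok_class(token.NAME, "hello"))  # 'nam'
print(tok_class(token.OP, "("))        # 'op'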


Note: The token.DEDENT attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.