

Python tokenize.DEDENT attribute: code examples

This article collects typical usage examples of the tokenize.DEDENT attribute in Python. If you are wondering what tokenize.DEDENT does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore other usage examples from the tokenize module, where this attribute is defined.

The following 14 code examples all use tokenize.DEDENT; by default they are ordered by popularity.
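Before the examples themselves, here is a minimal, self-contained sketch (not taken from any of the projects below) that shows where DEDENT tokens appear when the standard library tokenizes an indented block:

import io
import tokenize

source = (
    "def f():\n"
    "    if True:\n"
    "        return 1\n"
    "x = 2\n"
)

# generate_tokens() consumes a readline callable; tok_name maps the numeric
# token type back to its symbolic name (INDENT, DEDENT, NEWLINE, ...).
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))

# The two nested blocks produce two consecutive DEDENT tokens (each with an
# empty string) immediately before the "x = 2" line is tokenized.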

Example 1: _find_logical

# Required import: import tokenize
# Or: from tokenize import DEDENT
# Note: the generate_tokens() called below is autopep8's own helper, which takes
# a full source string (tokenize.generate_tokens() itself takes a readline callable).
def _find_logical(source_lines):
    # Collect the (row, column) positions where each logical line starts and ends.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Author: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: autopep8.py

Example 2: __get_tokens

# Required import: import tokenize
# Or: from tokenize import DEDENT
# Also needed here: from typing import List
def __get_tokens(it):
        tokens: List[tokenize.TokenInfo] = []

        try:
            for t in it:
                if t.type in tokenizer.SKIP_TOKENS:
                    continue
                if t.type == tokenize.NEWLINE and t.string == '':
                    continue
                if t.type == tokenize.DEDENT:
                    continue
                if t.type == tokenize.ERRORTOKEN:
                    continue
                tokens.append(t)
        except tokenize.TokenError as e:
            if not e.args[0].startswith('EOF in'):
                print(e)
        except IndentationError as e:
            print(e)

        return tokens 
Author: vpj, Project: python_autocomplete, Lines: 23, Source: evaluate.py

Example 3: handle_token

# Required import: import tokenize
# Or: from tokenize import DEDENT
def handle_token(self, *args):
        # dispatch incoming tokens to the current handler
        if DEBUG > 1:
            print self.handler.im_func.func_name, self.indent,
            print tokenize.tok_name[args[0]], repr(args[1])
        if args[0] == tokenize.DEDENT:
            self.indent = self.indent - 1
            while self.scope and self.scope[-1][0] >= self.indent:
                del self.scope[-1]
                del self.stack[-1]
        self.handler = apply(self.handler, args)
        if args[0] == tokenize.INDENT:
            self.indent = self.indent + 1

    ##
    # (Token handler) Scans for encoding directive. 
Author: alexfeng, Project: InternationalizationScript-iOS, Lines: 18, Source: pythondoc.py

Example 4: reindent

# Required import: import tokenize
# Or: from tokenize import DEDENT
def reindent(tokens, indent=' '):
    """
    Replace existing indentation in a token stream with `indent`.
    """
    old_levels = []
    old_level = 0
    new_level = 0
    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
        if typ == tokenize.INDENT:
            old_levels.append(old_level)
            old_level = len(tok)
            new_level += 1
            tok = indent * new_level
        elif typ == tokenize.DEDENT:
            old_level = old_levels.pop()
            new_level -= 1
        start_col = max(0, start_col - old_level + new_level)
        if start_row == end_row:
            end_col = start_col + len(tok)
        yield typ, tok, (start_row, start_col), (end_row, end_col), line 
Author: dw, Project: mitogen, Lines: 22, Source: minify.py
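A hedged usage sketch (not part of the minify.py excerpt above): the reindent() generator can be fed tokens from tokenize.generate_tokens(), and in the project its output is ultimately handed to tokenize.untokenize() to rebuild the source with the new indentation. The input file name here is purely illustrative.

import io
import tokenize

with open("some_module.py") as f:      # hypothetical input file
    source = f.read()

tokens = tokenize.generate_tokens(io.StringIO(source).readline)
# With the default indent=' ', each indentation level collapses to a single
# space, which is how minify.py shrinks embedded source.
print(tokenize.untokenize(reindent(tokens)))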

Example 5: _does_else_belong_to_if

# Required import: import tokenize
# Or: from tokenize import DEDENT
def _does_else_belong_to_if(self, start_index: int) -> bool:
        previous_token = self.file_tokens[start_index - 1]

        if previous_token.type != tokenize.DEDENT:
            # This is not the first token on the line, which means that it can
            # also be "embedded" else: x if A else B
            return False

        for token in reversed(self.file_tokens[:start_index - 1]):
            if token.type != tokenize.NAME:
                continue

            # Here we rely on the intuition that in Python `else` has to be
            # at the same indentation level as its parent statement.
            if token.start[1] == previous_token.start[1]:
                return token.string in {'if', 'elif'}

        return False 
Author: wemake-services, Project: wemake-python-styleguide, Lines: 20, Source: conditions.py

Example 6: read_data

# Required import: import tokenize
# Or: from tokenize import DEDENT
# Also needed here: import os (preprocess() is defined elsewhere in the project)
def read_data(path, listfile):
    if isinstance(listfile, list):
        python_files = [os.path.join(path, f) for f in listfile]
    else:
        with open(listfile) as f:
            python_files = [os.path.join(path, x) for x in f.read().splitlines()]

    for filename in python_files:
        try:
            with open(filename) as f:
                tokens = list(tokenize.generate_tokens(f.readline))

                yield [preprocess(tokenType, tokenVal) for tokenType, tokenVal, _, _, _
                       in tokens
                       if tokenType != tokenize.COMMENT and
                       not tokenVal.startswith("'''") and
                       not tokenVal.startswith('"""') and
                       (tokenType == tokenize.DEDENT or tokenVal != "")]
        except:
            pass 
Author: uclnlp, Project: pycodesuggest, Lines: 22, Source: reader.py

Example 7: tokeneater

# Required import: import tokenize
# Or: from tokenize import DEDENT
def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock 
Author: war-and-code, Project: jawfish, Lines: 31, Source: inspect.py
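For context (based on the snippet's origin in inspect.py, not shown in the excerpt itself): this tokeneater belongs to inspect's internal block finder, which counts INDENT/DEDENT pairs to decide where a def or class body ends, and it is what ultimately powers inspect.getsource(). A quick way to observe the effect, assuming the code below lives in a real .py file (getsource() needs access to the source file):

import inspect

def outer():
    def inner():
        return 1
    return inner

# getsource() returns exactly the lines of outer(); the DEDENT tokens that
# close its body mark where the returned block stops.
print(inspect.getsource(outer))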

Example 8: parse_definitions

# Required import: import tokenize
# Or: from tokenize import DEDENT
# Here tokenize is imported under an alias: import tokenize as tk
def parse_definitions(self, class_, all=False):
        """Parse multiple definitions and yield them."""
        while self.current is not None:
            self.log.debug("parsing definition list, current token is %r (%s)",
                           self.current.kind, self.current.value)
            self.log.debug('got_newline: %s', self.stream.got_logical_newline)
            if all and self.current.value == '__all__':
                self.parse_all()
            elif (self.current.kind == tk.OP and
                  self.current.value == '@' and
                  self.stream.got_logical_newline):
                self.consume(tk.OP)
                self.parse_decorators()
            elif self.current.value in ['def', 'class']:
                yield self.parse_definition(class_._nest(self.current.value))
            elif self.current.kind == tk.INDENT:
                self.consume(tk.INDENT)
                for definition in self.parse_definitions(class_):
                    yield definition
            elif self.current.kind == tk.DEDENT:
                self.consume(tk.DEDENT)
                return
            elif self.current.value == 'from':
                self.parse_from_import_statement()
            else:
                self.stream.move() 
Author: AtomLinter, Project: linter-pylama, Lines: 28, Source: parser.py

Example 9: tokeneater

# Required import: import tokenize
# Or: from tokenize import DEDENT
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
        srow, scol = srow_scol
        erow, ecol = erow_ecol
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srow
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock 
Author: glmcdona, Project: meddle, Lines: 33, Source: inspect.py

Example 10: tokeneater

# Required import: import tokenize
# Or: from tokenize import DEDENT
def tokeneater(self, type, token, pos, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):
        sline, scol = pos
        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1

        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1

        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1

        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((sline, -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone

        elif type == NL:
            pass

        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:   # not endmarker
                self.stats.append((sline, self.level))

# Count number of leading blanks. 
Author: muhanzhang, Project: D-VAE, Lines: 41, Source: reindent.py

Example 11: tokeneater

# Required import: import tokenize
# Or: from tokenize import DEDENT
def tokeneater(self, type, token, slinecol, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):

        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1

        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1

        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1

        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((slinecol[0], -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone

        elif type == NL:
            pass

        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:   # not endmarker
                self.stats.append((slinecol[0], self.level))


# Count number of leading blanks. 
Author: cclauss, Project: Upgrade-to-Python3, Lines: 42, Source: reindent.py

Example 12: tokeneater

# Required import: import tokenize
# Or: from tokenize import DEDENT
# Note: the tuple parameter (sline, scol) is Python 2 syntax; this excerpt shows
# only the signature.
def tokeneater(self, type, token, (sline, scol), end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL): 
Author: aliyun, Project: oss-ftp, Lines: 8, Source: reindent.py
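Examples 10 through 12 are variants of the reindent.py tool's tokeneater and all expect to be called once per token. Python 3's tokenize module has no callback interface, so such a method is typically driven by unpacking each TokenInfo yourself. A minimal sketch under stated assumptions: the Driver class is a hypothetical stand-in for the class that owns the method, and it reuses the Python 3-compatible tokeneater from Example 10 or 11 as if that function were defined at module level here.

import io
import tokenize

class Driver:
    # Hypothetical stand-in for the Reindenter-style class that owns tokeneater().
    def __init__(self):
        self.find_stmt = 1
        self.level = 0
        self.stats = []

Driver.tokeneater = tokeneater   # attach the function from Example 10/11

source = "if True:\n    x = 1\n"
r = Driver()
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    r.tokeneater(*tok)           # TokenInfo unpacks into (type, token, start, end, line)

print(r.stats)                   # (line_number, indent_level) pairs recorded by the callback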

Example 13: tabify

# Required import: import tokenize
# Or: from tokenize import DEDENT
# Also needed here: import os; from stat import ST_MODE
def tabify(filename):
	mode = os.stat(filename)[ST_MODE]
	os.rename(filename, filename+".bak")
	
	infile = file(filename+".bak")
	outfile = file(filename,"w")
	tokens = tokenize.generate_tokens(infile.readline)
		
	text = []
	indent = 0
	minlineno = 0
	for (toktype, token, start, end, line) in tokens:
		y, x = end
		
		if toktype == tokenize.INDENT:
			indent += 1
		elif toktype == tokenize.DEDENT:
			indent -= 1
		elif y > minlineno:
			minlineno = y
			text += "%s%s\n" % ("\t"*indent,line.strip())
			
	outfile.write("".join(text))
	
	infile.close()
	outfile.close()
	os.chmod(filename, mode) 
Author: ActiveState, Project: code, Lines: 29, Source: recipe-496893.py

Example 14: _skip_whitespace_and_comments

# Required import: import tokenize
# Or: from tokenize import DEDENT
def _skip_whitespace_and_comments(self):
    skippable_token_kinds = [
        tokenize.COMMENT, tokenize.NL, tokenize.INDENT, tokenize.DEDENT
    ]
    while self._current_token.kind in skippable_token_kinds:
      self._advance_one_token() 
Author: google, Project: gin-config, Lines: 8, Source: config_parser.py


Note: The tokenize.DEDENT examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors, who retain copyright; consult the corresponding project's license before using or redistributing them, and do not republish without permission.