

Python tokenize.ENCODING Attribute Code Examples

This article collects typical usage examples of the tokenize.ENCODING attribute in Python. If you are unsure what tokenize.ENCODING does or how to use it in practice, the curated examples below should help; they are also a good starting point for exploring other uses of the tokenize module.


Five code examples of the tokenize.ENCODING attribute are shown below, sorted by popularity.
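Before the examples, a minimal standalone sketch (standard library only, not taken from any of the projects below) showing where the ENCODING token comes from: tokenize.tokenize() reads a byte stream and always yields an ENCODING token first, carrying the detected source encoding.

import io
import tokenize

# tokenize.tokenize() works on bytes and emits ENCODING first, carrying
# the encoding detected from a BOM or a PEP 263 coding cookie.
source = b"# -*- coding: utf-8 -*-\nx = 1\n"
tokens = list(tokenize.tokenize(io.BytesIO(source).readline))

first = tokens[0]
print(first.type == tokenize.ENCODING)  # True
print(first.string)                     # 'utf-8'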

Example 1: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def process_tokens(self, tokens):
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0])
                # We figure the next token, ignoring comments & newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: strings.py
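A standalone illustration (not part of the checker) of the character-to-byte column conversion in the snippet above: tokenize reports columns in characters, while astroid's .col_offset counts bytes, so the two diverge as soon as non-ASCII text precedes the token.

encoding = "utf-8"
line = "ж = 'x'"
char_col = 4  # tokenize's column for the string token "'x'"

# Same conversion as in process_tokens() above:
byte_col = len(line[:char_col].encode(encoding))
print(char_col, byte_col)  # 4 5 -- 'ж' occupies two bytes in UTF-8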

Example 2: test_encoding_token

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def test_encoding_token(self):
        """Make sure the encoding token doesn't change the checker's behavior

        _tokenize_str doesn't produce an encoding token, but
        reading a file does
        """
        with self.assertNoMessages():
            encoding_token = tokenize.TokenInfo(
                tokenize.ENCODING, "utf-8", (0, 0), (0, 0), ""
            )
            tokens = [encoding_token] + _tokenize_str(
                "if (\n        None):\n    pass\n"
            )
            self.checker.process_tokens(tokens) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 16, Source: unittest_checker_format.py
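_tokenize_str is a helper from pylint's test suite; a rough stand-in built from the standard library could look like this (the helper name is kept only for symmetry with the test above).

import io
import tokenize

def _tokenize_str(code):
    # generate_tokens() reads str and, unlike tokenize.tokenize(),
    # never yields an ENCODING token -- which is why the test above
    # has to prepend one by hand.
    return list(tokenize.generate_tokens(io.StringIO(code).readline))

encoding_token = tokenize.TokenInfo(tokenize.ENCODING, "utf-8", (0, 0), (0, 0), "")
tokens = [encoding_token] + _tokenize_str("if (\n        None):\n    pass\n")
print(tokens[0].type == tokenize.ENCODING)  # True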

Example 3: printtoken

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
        if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
            self.encoding = token
            return

        (srow, scol) = sCoordinates
        (erow, ecol) = eCoordinates
        if self.currentLine < srow:
            self.writer('\n'*(srow-self.currentLine))
            self.currentLine, self.currentCol = srow, 0
        self.writer(' '*(scol-self.currentCol))
        if self.lastIdentifier:
            type = "identifier"
            self.parameters = 1
        elif type == tokenize.NAME:
            if keyword.iskeyword(token):
                type = 'keyword'
            else:
                if self.parameters:
                    type = 'parameter'
                else:
                    type = 'variable'
        else:
            type = tokenize.tok_name.get(type).lower()
        self.writer(token, type)
        self.currentCol = ecol
        self.currentLine += token.count('\n')
        if self.currentLine != erow:
            self.currentCol = 0
        self.lastIdentifier = token in ('def', 'class')
        if token == ':':
            self.parameters = 0 
Developer: proxysh, Project: Safejumper-for-Desktop, Lines: 34, Source: htmlizer.py
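The hasattr() guard exists because tokenize.ENCODING is only defined on Python 3; Python 2's tokenizer has no such token type. The value the token carries is what tokenize.detect_encoding() reports, as this small standalone check shows:

import io
import tokenize

# detect_encoding() inspects a BOM and any PEP 263 coding cookie in the
# first two lines; its result becomes the ENCODING token's string.
source = b"# -*- coding: cp1252 -*-\nx = 1\n"
encoding, lines_read = tokenize.detect_encoding(io.BytesIO(source).readline)
print(encoding)  # 'cp1252'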

Example 4: printtoken

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
        if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
            self.encoding = token
            return

        if not isinstance(token, bytes):
            token = token.encode(self.encoding)

        (srow, scol) = sCoordinates
        (erow, ecol) = eCoordinates
        if self.currentLine < srow:
            self.writer(b'\n' * (srow-self.currentLine))
            self.currentLine, self.currentCol = srow, 0
        self.writer(b' ' * (scol-self.currentCol))
        if self.lastIdentifier:
            type = "identifier"
            self.parameters = 1
        elif type == tokenize.NAME:
            if keyword.iskeyword(token):
                type = 'keyword'
            else:
                if self.parameters:
                    type = 'parameter'
                else:
                    type = 'variable'
        else:
            type = tokenize.tok_name.get(type).lower()
        self.writer(token, type)
        self.currentCol = ecol
        self.currentLine += token.count(b'\n')
        if self.currentLine != erow:
            self.currentCol = 0
        self.lastIdentifier = token in (b'def', b'class')
        if token == b':':
            self.parameters = 0 
Developer: wistbean, Project: learn_python3_spider, Lines: 37, Source: htmlizer.py
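The only functional difference from Example 3 is the bytes handling: this writer emits bytes, so each str token coming from the Python 3 tokenizer is first encoded with the encoding announced by the ENCODING token. A two-line standalone illustration of that conversion:

# The detected source encoding drives the str -> bytes conversion
# performed by printtoken() above.
token, encoding = "héllo", "utf-8"
print(token.encode(encoding))  # b'h\xc3\xa9llo'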

Example 5: highlight

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def highlight(self, source):
        style = self._style
        row, column = 0, 0
        output = ""

        for token in self.tokenize(source):
            type_, string, start, end, line = token

            if type_ == tokenize.NAME:
                if string in self._constants:
                    color = style["constant"]
                elif keyword.iskeyword(string):
                    color = style["keyword"]
                elif string in self._builtins:
                    color = style["builtin"]
                else:
                    color = style["identifier"]
            elif type_ == tokenize.OP:
                if string in self._punctation:
                    color = style["punctuation"]
                else:
                    color = style["operator"]
            elif type_ == tokenize.NUMBER:
                color = style["number"]
            elif type_ == tokenize.STRING:
                color = style["string"]
            elif type_ == tokenize.COMMENT:
                color = style["comment"]
            else:
                color = style["other"]

            start_row, start_column = start
            _, end_column = end

            if start_row != row:
                # Drop the fully processed previous row (`column` ends at
                # its newline) so the final `source[column:]` slice below
                # stays aligned with the current row.
                source = source[column:]
                row, column = start_row, 0

            if type_ != tokenize.ENCODING:
                output += line[column:start_column]
                output += color.format(string)

            column = end_column

        output += source[column:]

        return output 
Developer: Delgan, Project: loguru, Lines: 49, Source: _better_exceptions.py
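The `type_ != tokenize.ENCODING` guard matters because the ENCODING token has pseudo-coordinates (0, 0) and no corresponding source text, as this standalone loop over tokenize.tokenize() shows:

import io
import tokenize

# The ENCODING token reports (0, 0) for both start and end and an empty
# line, so slicing source text around it would misbehave.
source = b"x = 1\n"
for tok in tokenize.tokenize(io.BytesIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string), tok.start, tok.end)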


Note: The tokenize.ENCODING attribute examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.