

Python tokenize.ENCODING Attribute Code Examples

This article collects typical usage examples of Python's tokenize.ENCODING attribute. If you are wondering what tokenize.ENCODING is for, or how to use it in practice, the curated examples below should help. You can also explore other usage examples from the tokenize module, where this attribute is defined.


The following presents 5 code examples of the tokenize.ENCODING attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
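Before diving in, here is a minimal self-contained sketch (using only the standard library's tokenize module) showing where the ENCODING token comes from: when tokenizing a bytes stream, it is always the very first token emitted, and its string field carries the detected source encoding.

import io
import tokenize

source = b"# -*- coding: utf-8 -*-\nx = 1\n"
tokens = list(tokenize.tokenize(io.BytesIO(source).readline))

first = tokens[0]
assert first.type == tokenize.ENCODING  # always the first token emitted
print(first.string)                     # 'utf-8' (the detected encoding)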

Example 1: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def process_tokens(self, tokens):
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0])
                # Find the next token, ignoring comments and newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: strings.py
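The character-to-byte conversion at the end of process_tokens matters whenever a line contains non-ASCII text: tokenize reports columns in characters, while astroid's .col_offset counts bytes. A small illustration (the line content here is made up for demonstration):

line = 'é = "x"'   # 'é' occupies 2 bytes in UTF-8
char_col = 4       # tokenize's character column for the '"x"' token
byte_col = len(line[:char_col].encode("utf-8"))
print(char_col, byte_col)  # 4 5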

Example 2: test_encoding_token

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def test_encoding_token(self):
        """Make sure the encoding token doesn't change the checker's behavior

        _tokenize_str doesn't produce an encoding token, but
        reading a file does
        """
        with self.assertNoMessages():
            encoding_token = tokenize.TokenInfo(
                tokenize.ENCODING, "utf-8", (0, 0), (0, 0), ""
            )
            tokens = [encoding_token] + _tokenize_str(
                "if (\n        None):\n    pass\n"
            )
            self.checker.process_tokens(tokens) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 16, Source: unittest_checker_format.py
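The docstring's claim is easy to verify: tokenize.generate_tokens (which consumes str) never yields an ENCODING token, while tokenize.tokenize (which consumes bytes) always yields one first. A quick sketch:

import io
import tokenize

src = "pass\n"
from_str = list(tokenize.generate_tokens(io.StringIO(src).readline))
from_bytes = list(tokenize.tokenize(io.BytesIO(src.encode()).readline))

print(from_str[0].type == tokenize.ENCODING)    # False (it is NAME 'pass')
print(from_bytes[0].type == tokenize.ENCODING)  # True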

Example 3: printtoken

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
        if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
            self.encoding = token
            return

        (srow, scol) = sCoordinates
        (erow, ecol) = eCoordinates
        if self.currentLine < srow:
            self.writer('\n'*(srow-self.currentLine))
            self.currentLine, self.currentCol = srow, 0
        self.writer(' '*(scol-self.currentCol))
        if self.lastIdentifier:
            type = "identifier"
            self.parameters = 1
        elif type == tokenize.NAME:
            if keyword.iskeyword(token):
                type = 'keyword'
            else:
                if self.parameters:
                    type = 'parameter'
                else:
                    type = 'variable'
        else:
            type = tokenize.tok_name.get(type).lower()
        self.writer(token, type)
        self.currentCol = ecol
        self.currentLine += token.count('\n')
        if self.currentLine != erow:
            self.currentCol = 0
        self.lastIdentifier = token in ('def', 'class')
        if token == ':':
            self.parameters = 0 
Developer: proxysh, Project: Safejumper-for-Desktop, Lines: 34, Source: htmlizer.py
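Twisted drives printtoken from its own tokenizer loop, which is not shown here. As a rough sketch (this drive helper is a hypothetical harness, not Twisted's actual API), each TokenInfo produced by tokenize.tokenize maps directly onto printtoken's five parameters:

import io
import tokenize

def drive(source_bytes, printer):
    # Hypothetical harness: feed every token to a printtoken-style callback.
    for tok in tokenize.tokenize(io.BytesIO(source_bytes).readline):
        printer.printtoken(tok.type, tok.string, tok.start, tok.end, tok.line)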

Example 4: printtoken

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
        if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
            self.encoding = token
            return

        if not isinstance(token, bytes):
            token = token.encode(self.encoding)

        (srow, scol) = sCoordinates
        (erow, ecol) = eCoordinates
        if self.currentLine < srow:
            self.writer(b'\n' * (srow-self.currentLine))
            self.currentLine, self.currentCol = srow, 0
        self.writer(b' ' * (scol-self.currentCol))
        if self.lastIdentifier:
            type = "identifier"
            self.parameters = 1
        elif type == tokenize.NAME:
            if keyword.iskeyword(token):
                type = 'keyword'
            else:
                if self.parameters:
                    type = 'parameter'
                else:
                    type = 'variable'
        else:
            type = tokenize.tok_name.get(type).lower()
        self.writer(token, type)
        self.currentCol = ecol
        self.currentLine += token.count(b'\n')
        if self.currentLine != erow:
            self.currentCol = 0
        self.lastIdentifier = token in (b'def', b'class')
        if token == b':':
            self.parameters = 0 
Developer: wistbean, Project: learn_python3_spider, Lines: 37, Source: htmlizer.py
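The only difference from Example 3 is that this writer operates on bytes: each token is re-encoded with the encoding captured from the ENCODING token, and every literal handed to the writer (newlines, padding spaces, b'def', b'class', b':') is a bytes object. This is why recording the ENCODING token before emitting any output matters here: it determines how all subsequent tokens are encoded.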

Example 5: highlight

# Required import: import tokenize [as alias]
# Or: from tokenize import ENCODING [as alias]
def highlight(self, source):
        style = self._style
        row, column = 0, 0
        output = ""

        for token in self.tokenize(source):
            type_, string, start, end, line = token

            if type_ == tokenize.NAME:
                if string in self._constants:
                    color = style["constant"]
                elif keyword.iskeyword(string):
                    color = style["keyword"]
                elif string in self._builtins:
                    color = style["builtin"]
                else:
                    color = style["identifier"]
            elif type_ == tokenize.OP:
                if string in self._punctation:
                    color = style["punctuation"]
                else:
                    color = style["operator"]
            elif type_ == tokenize.NUMBER:
                color = style["number"]
            elif type_ == tokenize.STRING:
                color = style["string"]
            elif type_ == tokenize.COMMENT:
                color = style["comment"]
            else:
                color = style["other"]

            start_row, start_column = start
            _, end_column = end

            if start_row != row:
                source = source[:column]
                row, column = start_row, 0

            if type_ != tokenize.ENCODING:
                output += line[column:start_column]
                output += color.format(string)

            column = end_column

        output += source[column:]

        return output 
Developer: Delgan, Project: loguru, Lines: 49, Source: _better_exceptions.py
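highlight() expects self.tokenize to yield standard TokenInfo tuples, possibly including an ENCODING token, hence the type_ != tokenize.ENCODING guard (that token has no source text to render). A minimal stand-in for such a tokenizer, assuming the input is a str (this wrapper is a sketch, not loguru's actual implementation):

import io
import tokenize

def tokenize_source(source):
    # Sketch: encode the str and tokenize the bytes, so an ENCODING token
    # is produced first, matching the guard inside highlight().
    readline = io.BytesIO(source.encode("utf-8")).readline
    yield from tokenize.tokenize(readline)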


Note: The tokenize.ENCODING attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code. Do not reproduce without permission.