當前位置: 首頁>>代碼示例>>Python>>正文


Python pygments.token方法代碼示例

本文整理匯總了Python中pygments.token方法的典型用法代碼示例。如果您正苦於以下問題:Python pygments.token方法的具體用法?Python pygments.token怎麽用?Python pygments.token使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在pygments的用法示例。


在下文中一共展示了pygments.token方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: get_blocks

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def get_blocks(tokens):
    """Group a reversed ``(token, value)`` stream into paren-balanced blocks.

    ``tokens`` is expected in *reverse* source order (it is fed from
    ``get_tokens_reversed``), which is why a ``")"`` opens a group
    (level += 1) and a ``"("`` closes it (level -= 1).  Whenever the
    nesting level returns to zero, the accumulated group is yielded
    restored to forward (source) order via ``[::-1]``.

    Outside parens every single token forms its own one-element block.
    A trailing unbalanced group (level never returns to 0) is silently
    dropped, matching the original behavior.
    """
    block = []
    level = 0

    # NOTE: the original imported pygments.token.Token here but never
    # used it; the dead import has been removed.
    for token, value in tokens:
        block.append((token, value))

        if value == ")":
            level += 1
        elif value == "(":
            level -= 1

        if level == 0:
            yield block[::-1]
            block = []
開發者ID:johncsnyder,項目名稱:SwiftKitten,代碼行數:21,代碼來源:SwiftKitten.py

示例2: __init__

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def __init__(self, app):
        """Store the host application and build the pygments-to-scope map.

        The map translates pygments token types into the editor's
        TextMate-style scope names used for theming.
        """
        self.app = app
        tok = pygments.token  # shorthand for the token namespace
        self.token_map = {
            tok.Comment: "comment",
            tok.Comment.Single: "comment",
            tok.Operator: "keyword",
            tok.Name.Function: "entity.name.function",
            tok.Name.Class: "entity.name.class",
            tok.Name.Tag: "entity.name.tag",
            tok.Name.Attribute: "entity.other.attribute-name",
            tok.Name.Variable: "variable",
            tok.Name.Builtin.Pseudo: "constant.language",
            tok.Literal.String: "string",
            tok.Literal.String.Doc: "string",
            tok.Punctuation: "punctuation",
            tok.Literal.Number: "constant.numeric",
            tok.Name: "entity.name",
            tok.Keyword: "keyword",
            tok.Generic.Deleted: "invalid",
        }
開發者ID:richrd,項目名稱:suplemon,代碼行數:22,代碼來源:lexer.py

示例3: _serialize_token

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def _serialize_token(self, pair):
        """Return a stable string key for a ``(token, value)`` pair.

        Numeric and string literals collapse to their token *type* so
        autocompletion depends only on the kind of argument, not its
        concrete value; every other token keys on its text.
        """
        from pygments.token import Token

        token, value = pair
        literal_types = [
            Token.Literal.Number.Float,
            Token.Literal.Number.Integer,
            Token.Literal.String,
        ]
        if token in literal_types:
            # e.g. "Token.Literal.Number.Float"
            return str(token)
        return value
開發者ID:johncsnyder,項目名稱:SwiftKitten,代碼行數:15,代碼來源:SwiftKitten.py

示例4: get_autocomplete_stub

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def get_autocomplete_stub(lexer, text):
    """Return the token block that autocompletion should describe.

    Scans *text* backwards with *lexer*: if the text ends with ``"."``,
    the preceding block is returned.  When that block is a parenthesized
    call ``(...)`` and it is preceded by a single ``Token.Name`` block,
    the name block is prepended so ``name(...)`` is returned whole.
    Returns ``[]`` when the text does not end in a ``"."``.
    """
    from pygments.token import Token

    # tokens that never affect the autocomplete context
    ignored = [Token.Comment, Token.Text, Token.Text.Whitespace, Token.Comment.Single]
    filtered = lambda pair: pair[0] not in ignored  # pair = (token, value)

    tokens = filter(filtered, get_tokens_reversed(lexer, text))
    blocks = get_blocks(tokens)
    block = next(blocks, [])

    # autocomplete only applies immediately after a "."
    if len(block) == 1 and block[0][1] == ".":
        block = next(blocks, [])

        if len(block) > 0 and block[0][1] == "(":
            block_ = next(blocks, [])

            # BUG FIX: the original tested block[0][0], which at this
            # point is always the "(" punctuation token, so the branch
            # could never fire; the single-token *name* block is block_.
            if len(block_) == 1 and block_[0][0] is Token.Name:
                return block_ + block

        return block

    return []
開發者ID:johncsnyder,項目名稱:SwiftKitten,代碼行數:29,代碼來源:SwiftKitten.py

示例5: get_tokens_unprocessed

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def get_tokens_unprocessed(self, text):
        """Post-process the parent lexer's token stream.

        Reclassifies known extra builtins/keywords and splits runs of
        punctuation into single characters, promoting arithmetic
        characters to operators.
        """
        for pos, kind, chunk in super().get_tokens_unprocessed(text):
            if kind is Token.Name.Variable and chunk in self.EXTRA_BUILTINS:
                yield pos, Token.Name.Builtin, chunk
                continue
            if kind is Token.Name.Variable and chunk in self.EXTRA_KEYWORDS:
                yield pos, Token.Keyword.Type, chunk
                continue
            if kind is Token.Punctuation:
                # emit one token per character so offsets stay accurate
                for ch in chunk:
                    out_kind = Token.Operator if ch in "+-*/%^&" else kind
                    yield pos, out_kind, ch
                    pos += 1
                continue
            yield pos, kind, chunk
開發者ID:wapiflapi,項目名稱:gxf,代碼行數:17,代碼來源:disassembly.py

示例6: filter

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def filter(self, lexer, stream):
        """Normalize and pad runs of hexdump tokens in a lexed stream.

        Detects consecutive ``Token.Comment.Special`` tokens (hexdump
        bytes), then pads the last token of each run so runs line up on
        a common width derived from the 90th-percentile run length.
        Yields the (possibly modified) token list entries.
        """

        # Materialize the stream as mutable [type, value] lists so the
        # values can be edited in place.
        lexed = list(map(list, stream))

        # hexdumps maps the start index of each run -> run length.
        hexdumps = {}
        count = 0
        for i, token in enumerate(lexed):
            if token[0] is Token.Comment.Special:
                # Normalize internal spacing to exactly one trailing space.
                token[1] = token[1].strip() + ' '
                count += 1
            elif count > 0:
                # Run ended just before i; it started at i - count.
                # NOTE(review): a run that reaches the end of the stream is
                # never recorded — confirm whether that is intentional.
                hexdumps[i - count] = count
                count = 0

        if hexdumps:

            # This is not a real median, its 90%.
            median = sorted(hexdumps.values())[int(len(hexdumps) * 0.9)]

            for i, count in hexdumps.items():
                # Last token of this run; pad after it.
                token = lexed[i + count - 1]
                value = token[1]

                # Grow the target width geometrically until it covers
                # this run's length.
                backoff = 2
                padding = median
                while padding < count:
                    padding += backoff
                    backoff *= 2

                # Convert target width into extra columns (3 chars per byte).
                padding = int(padding) - count
                value += padding * 3 * ' '

                # NOTE(review): value already starts with token[1], so this
                # appends the token's own text a second time plus padding —
                # looks suspicious (token[1] = value?); confirm against the
                # rendered disassembly output before changing.
                token[1] += value

        yield from lexed
開發者ID:wapiflapi,項目名稱:gxf,代碼行數:37,代碼來源:disassembly.py

示例7: lex

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def lex(self, code, lex):
        """Return tokenified code.

        Return a list of tuples (scope, word) where word is the word to be
        printed and scope the scope name representing the context.

        :param str code: Code to tokenify.
        :param lex: Lexer to use.
        :return:
        """
        if lex is None:
            if type(code) is not str:
                # if not suitable lexer is found, return decoded code
                code = code.decode("utf-8")
            return (("global", code),)

        token_map = self.token_map
        scopes = []
        for token, text in pygments.lex(code, lex):
            # fall back to the "global" scope for unmapped token types
            scope = token_map[token] if token in token_map else "global"
            scopes.append((scope, text))
        return scopes
開發者ID:richrd,項目名稱:suplemon,代碼行數:30,代碼來源:lexer.py

示例8: run

# 需要導入模塊: import pygments [as 別名]
# 或者: from pygments import token [as 別名]
def run(self, edit):
        """Look up documentation for the word under the cursor and show it.

        Reads the current selection (or the word at the caret), queries
        the configured docset via ``docsetutil``, picks one language's
        doc file, converts its XML to HTML and displays it in a popup.
        Silently returns when there is no selection or no query text.
        """
        view = self.view
        sel = view.sel()

        if len(sel) == 0:
            return

        # Empty selection (a == b) means "use the word at the caret".
        a,b = sel[0]
        query = view.substr(view.word(a)) if a == b else view.substr(sel[0])

        if query == "":
            return

        # run docsetutil command
        docset = SwiftKittenEventListener.get_settings(view, "docset")
        cmd = self.get_docsetutil_cmd(view, docset, query)
        results = check_output(cmd, stderr=STDOUT)
        results = str(results, 'utf-8')

        if len(results) == 0:
            print("No documentation found.")
            return

        lines = results.splitlines()

        # split each line into two paths
        # NOTE(review): assumes docsetutil separates the two paths with
        # exactly three spaces — confirm against docsetutil output.
        pairs = map(lambda line: line.strip().split("   "), lines)

        # First path segment is the language (e.g. "Swift/...").
        get_lang = lambda a: a.split('/')[0]
        get_path = lambda a,b: os.path.join(os.path.dirname(a),
            os.path.basename(b))

        # language -> combined doc-file path (one entry per language)
        docs = {get_lang(a) : get_path(a,b) for a,b in pairs}

        # prefer Swift, Objective-C, C
        # (alphabetically last key: "Swift" > "Objective-C" > "C")
        lang = sorted(docs.keys())[-1]

        # construct path to documentation token
        path = os.path.join(self.get_tokens_path(docset), docs[lang] + ".xml")

        # read documentation file
        with open(path, "rb") as f:
            xml = f.read()

        # convert xml to html
        html = str(self.convert_docs_to_html(xml), "utf-8")

        #
        # TO DO:
        # add on_navigate handler
        #

        # display documentation
        view.show_popup(html, max_width=400, max_height=600)
開發者ID:johncsnyder,項目名稱:SwiftKitten,代碼行數:59,代碼來源:SwiftKitten.py


注:本文中的pygments.token方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。