

Python pygments.token code examples

This article collects typical usage examples of pygments.token in Python. If you are wondering how to use pygments.token, what it does, or what working code with it looks like, the curated examples below may help. You can also explore further usage examples from the pygments package it belongs to.


Below are 8 code examples of pygments.token, sorted by popularity by default.
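As a primer before the examples (a minimal sketch using the standard pygments API, not taken from any project below), this is how pygments.token types show up when lexing a snippet:

from pygments.lexers import PythonLexer
from pygments.token import Token

# Lex a short snippet and inspect the (token, value) pairs.
for token, value in PythonLexer().get_tokens("x = 1  # note"):
    print(token, repr(value))

# Token types form a hierarchy: a subtype tests as "in" its parent.
assert Token.Comment.Single in Token.Comment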

Example 1: get_blocks

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def get_blocks(tokens):
    """Group a reversed token stream into blocks.

    Because the stream is reversed, ")" opens a paren group and "("
    closes it; each completed block is yielded back in source order.
    """
    block = []
    level = 0

    for token, value in tokens:
        block.append((token, value))

        if value == ")":
            level += 1
        elif value == "(":
            level -= 1

        if level == 0:
            yield block[::-1]
            block = []
Author: johncsnyder, Project: SwiftKitten, Lines: 21, Source: SwiftKitten.py
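A quick way to exercise get_blocks (a sketch assuming pygments' built-in Swift lexer; the stream is reversed before being passed in, matching how Example 4 consumes this function):

from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name("swift")
# get_blocks expects tokens last-to-first and yields each
# balanced-paren group back in source order.
tokens = list(lexer.get_tokens("foo(bar)"))[::-1]
for block in get_blocks(tokens):
    print(block)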

Example 2: __init__

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def __init__(self, app):
        self.app = app
        self.token_map = {
            pygments.token.Comment: "comment",
            pygments.token.Comment.Single: "comment",
            pygments.token.Operator: "keyword",
            pygments.token.Name.Function: "entity.name.function",
            pygments.token.Name.Class: "entity.name.class",
            pygments.token.Name.Tag: "entity.name.tag",
            pygments.token.Name.Attribute: "entity.other.attribute-name",
            pygments.token.Name.Variable: "variable",
            pygments.token.Name.Builtin.Pseudo: "constant.language",
            pygments.token.Literal.String: "string",
            pygments.token.Literal.String.Doc: "string",
            pygments.token.Punctuation: "punctuation",
            pygments.token.Literal.Number: "constant.numeric",
            pygments.token.Name: "entity.name",
            pygments.token.Keyword: "keyword",
            pygments.token.Generic.Deleted: "invalid",
        } 
Author: richrd, Project: suplemon, Lines: 22, Source: lexer.py
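Note that token_map is keyed on exact token types, so unlisted subtypes such as pygments.token.Literal.String.Double fall through to the default scope. A hedged sketch of a fallback that walks the token hierarchy (scope_for is a hypothetical helper, not part of suplemon):

import pygments.token

def scope_for(token, token_map, default="global"):
    # Walk up the token hierarchy until a mapped ancestor is found.
    while token is not None:
        if token in token_map:
            return token_map[token]
        token = token.parent
    return default

print(scope_for(pygments.token.Literal.String.Double, token_map))  # "string"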

Example 3: _serialize_token

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def _serialize_token(self, pair):
        """Get string representation of (token, value) pair.
        """
        from pygments.token import Token
        token, value = pair
        # for literals, autocomplete only depends
        # on type of argument, not the value
        if token in [Token.Literal.Number.Float,
                     Token.Literal.Number.Integer,
                     Token.Literal.String]:
            return str(token)
        else:
            return value 
Author: johncsnyder, Project: SwiftKitten, Lines: 15, Source: SwiftKitten.py
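For instance, with an instance of the class above named serializer (a hypothetical name), literals collapse to their type while everything else keeps its text:

from pygments.token import Token

print(serializer._serialize_token((Token.Literal.Number.Float, "3.14")))
# -> "Token.Literal.Number.Float"
print(serializer._serialize_token((Token.Name, "foo")))
# -> "foo"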

Example 4: get_autocomplete_stub

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def get_autocomplete_stub(lexer, text):
    """Extract the trailing expression an autocomplete request
    should be anchored on (e.g. the receiver before a ".").
    """
    from pygments.token import Token

    # tokens to ignore while scanning backwards
    ignored = [Token.Comment, Token.Text, Token.Text.Whitespace, Token.Comment.Single]
    filtered = lambda pair: pair[0] not in ignored  # pair = (token, value)

    tokens = filter(filtered, get_tokens_reversed(lexer, text))
    blocks = get_blocks(tokens)
    block = next(blocks, [])

    if len(block) == 1 and block[0][1] == ".":
        block = next(blocks, [])

        if len(block) > 0 and block[0][1] == "(":
            block_ = next(blocks, [])

            # a single name preceding the call is part of the stub
            if len(block_) == 1 and block_[0][0] is Token.Name:
                return block_ + block

        return block

    return []
Author: johncsnyder, Project: SwiftKitten, Lines: 29, Source: SwiftKitten.py
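The helper get_tokens_reversed is not shown on this page; a hypothetical stand-in consistent with how it is consumed above:

def get_tokens_reversed(lexer, text):
    # Hypothetical helper: lex the text and return tokens last-to-first.
    return list(lexer.get_tokens(text))[::-1]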

Example 5: get_tokens_unprocessed

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def get_tokens_unprocessed(self, text):
        from pygments.token import Token

        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Token.Name.Variable and value in self.EXTRA_BUILTINS:
                yield index, Token.Name.Builtin, value
            elif token is Token.Name.Variable and value in self.EXTRA_KEYWORDS:
                yield index, Token.Keyword.Type, value
            elif token is Token.Punctuation:
                # split punctuation runs so arithmetic characters
                # can be re-tagged as operators
                for c in value:
                    if c in "+-*/%^&":
                        yield index, Token.Operator, c
                    else:
                        yield index, token, c
                    index += 1
            else:
                yield index, token, value
Author: wapiflapi, Project: gxf, Lines: 17, Source: disassembly.py
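The same override pattern drops into any pygments lexer subclass; a minimal sketch (the base lexer and the EXTRA_BUILTINS set here are illustrative assumptions, not from the gxf source):

from pygments.lexers.c_cpp import CLexer
from pygments.token import Token

class ExtendedCLexer(CLexer):
    EXTRA_BUILTINS = {"likely", "unlikely"}

    def get_tokens_unprocessed(self, text):
        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Token.Name and value in self.EXTRA_BUILTINS:
                yield index, Token.Name.Builtin, value
            else:
                yield index, token, value

An ExtendedCLexer instance then behaves like any other lexer, e.g. as an argument to pygments.highlight.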

Example 6: filter

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def filter(self, lexer, stream):
        from pygments.token import Token

        lexed = list(map(list, stream))

        # count runs of hexdump tokens and remember where each run starts
        hexdumps = {}
        count = 0
        for i, token in enumerate(lexed):
            if token[0] is Token.Comment.Special:
                token[1] = token[1].strip() + ' '
                count += 1
            elif count > 0:
                hexdumps[i - count] = count
                count = 0

        if hexdumps:

            # Not a real median: the 90th percentile of run lengths.
            median = sorted(hexdumps.values())[int(len(hexdumps) * 0.9)]

            for i, count in hexdumps.items():
                token = lexed[i + count - 1]
                value = token[1]

                # grow the padding target geometrically until it covers
                # this run
                backoff = 2
                padding = median
                while padding < count:
                    padding += backoff
                    backoff *= 2

                # pad the last token of the run so columns line up
                padding = int(padding) - count
                value += padding * 3 * ' '

                token[1] = value

        yield from lexed
Author: wapiflapi, Project: gxf, Lines: 37, Source: disassembly.py
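This filter method follows pygments' Filter protocol; it is attached to a lexer with add_filter (a sketch, with PaddingFilter as a hypothetical name for the Filter subclass this method belongs to):

from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name("nasm")
lexer.add_filter(PaddingFilter())
for token, value in lexer.get_tokens("mov eax, 1"):
    print(token, repr(value))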

Example 7: lex

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def lex(self, code, lex):
        """Return tokenized code.

        Return a list of tuples (scope, word) where word is the word to be
        printed and scope the scope name representing the context.

        :param str code: Code to tokenize.
        :param lex: Lexer to use.
        :return: List of (scope, word) tuples.
        """
        if lex is None:
            if not isinstance(code, str):
                # if no suitable lexer is found, return the decoded code
                code = code.decode("utf-8")
            return (("global", code),)

        words = pygments.lex(code, lex)

        scopes = []
        for word in words:
            token = word[0]
            scope = "global"

            if token in self.token_map:
                scope = self.token_map[token]

            scopes.append((scope, word[1]))
        return scopes
Author: richrd, Project: suplemon, Lines: 30, Source: lexer.py
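In use (a sketch; highlighter stands in for an instance of the class from Example 2, which defines token_map):

from pygments.lexers import PythonLexer

# each word comes back tagged with a scope from token_map, or "global"
for scope, word in highlighter.lex("x = 1", PythonLexer()):
    print(scope, repr(word))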

Example 8: run

# Required import: import pygments [as alias]
# Or: from pygments import token [as alias]
def run(self, edit):
        """Look up documentation for the current selection via
        docsetutil and show it in a popup.
        """
        import os
        from subprocess import STDOUT, check_output

        view = self.view
        sel = view.sel()

        if len(sel) == 0:
            return

        a, b = sel[0]
        query = view.substr(view.word(a)) if a == b else view.substr(sel[0])

        if query == "":
            return

        # run docsetutil command
        docset = SwiftKittenEventListener.get_settings(view, "docset")
        cmd = self.get_docsetutil_cmd(view, docset, query)
        results = check_output(cmd, stderr=STDOUT)
        results = str(results, 'utf-8')

        if len(results) == 0:
            print("No documentation found.")
            return

        lines = results.splitlines()

        # split each line into two paths
        pairs = map(lambda line: line.strip().split("   "), lines)

        get_lang = lambda a: a.split('/')[0]
        get_path = lambda a, b: os.path.join(os.path.dirname(a),
            os.path.basename(b))

        # map language -> path to its documentation token
        docs = {get_lang(a): get_path(a, b) for a, b in pairs}

        # prefer Swift, Objective-C, C
        lang = sorted(docs.keys())[-1]

        # construct path to documentation token
        path = os.path.join(self.get_tokens_path(docset), docs[lang] + ".xml")

        # read documentation file
        with open(path, "rb") as f:
            xml = f.read()

        # convert xml to html
        html = str(self.convert_docs_to_html(xml), "utf-8")

        #
        # TO DO:
        # add on_navigate handler
        #

        # display documentation
        view.show_popup(html, max_width=400, max_height=600)
Author: johncsnyder, Project: SwiftKitten, Lines: 59, Source: SwiftKitten.py


Note: The pygments.token examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.