

Python Token.Keyword Method Code Examples

This article collects typical usage examples of pygments.token.Token.Keyword in Python. Strictly speaking, Token.Keyword is a token type, an attribute of pygments.token.Token, rather than a method, but the practical questions are the same: what is Token.Keyword, and how is it used in real code? The curated examples below should help, and the other members of pygments.token.Token can be explored in the same way.


The following 15 code examples of Token.Keyword are drawn from open-source projects, sorted by popularity by default.
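
Before the examples, a minimal sketch of the basic pattern, assuming only a stock Pygments install: tokenize some source and match the keyword tokens. Note that "ttype in Token.Keyword" matches Token.Keyword and all of its subtypes (Token.Keyword.Type, Token.Keyword.Declaration, and so on), while "ttype is Token.Keyword" matches only the exact type.

from pygments.lexers import PythonLexer
from pygments.token import Token

# Tokenize a snippet and pick out the keyword tokens.
for ttype, value in PythonLexer().get_tokens("def f(): return 1"):
    if ttype in Token.Keyword:  # subtype-aware membership test
        print(ttype, repr(value))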

Example 1: filter

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def filter(self, lexer, stream):

    # Track whether the previous significant token was a recognized
    # instruction prefix (e.g. lock/rep in disassembly).
    prefix = False
    for ttype, value in stream:

        if prefix and ttype is Token.Name.Variable:
            # The name after a prefix is either another prefix or the
            # actual mnemonic that the base lexer mis-tagged.
            if value in self.prefixes:
                ttype = Token.Keyword.Type
            else:
                ttype = Token.Name.Function

        elif ttype is Token.Name.Function and value in self.prefixes:
            prefix = True
            ttype = Token.Keyword.Type

        elif ttype is not Token.Text:
            prefix = False

        yield ttype, value
Author: wapiflapi, Project: gxf, Lines: 21, Source: disassembly.py
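
For context, the filter method above belongs to a Pygments Filter subclass. A minimal self-contained sketch of how such a filter can be wired up; the class name PrefixFilter, the GasLexer base, and the prefix values are illustrative assumptions, not taken from the gxf source:

from pygments.filter import Filter
from pygments.lexers import GasLexer
from pygments.token import Token

class PrefixFilter(Filter):  # hypothetical name, for illustration
    def __init__(self, **options):
        super().__init__(**options)
        self.prefixes = set(options.get('prefixes', ()))

    def filter(self, lexer, stream):
        # Same logic as Example 1 above.
        prefix = False
        for ttype, value in stream:
            if prefix and ttype is Token.Name.Variable:
                ttype = (Token.Keyword.Type if value in self.prefixes
                         else Token.Name.Function)
            elif ttype is Token.Name.Function and value in self.prefixes:
                prefix = True
                ttype = Token.Keyword.Type
            elif ttype is not Token.Text:
                prefix = False
            yield ttype, value

# Filters attach to any lexer before highlighting:
lexer = GasLexer()
lexer.add_filter(PrefixFilter(prefixes={'lock', 'rep'}))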

Example 2: color_mapping

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def color_mapping(curr_completion, completion, prompt, command, subcommand,
                  param, text, line, example, toolbar):

    return style_from_dict({
        # Completion colors
        Token.Menu.Completions.Completion.Current: curr_completion,
        Token.Menu.Completions.Completion: completion,
        Token.Menu.Completions.ProgressButton: 'bg:#b78991',
        Token.Menu.Completions.ProgressBar: 'bg:#ffc0cb',

        Token.Az: prompt,
        Token.Prompt.Arg: prompt,

        # Pretty Words
        Token.Keyword: command,
        Token.Keyword.Declaration: subcommand,
        Token.Name.Class: param,
        Token.Text: text,

        Token.Line: line,
        Token.Number: example,
        # toolbar
        Token.Operator: toolbar,
        Token.Toolbar: toolbar
    }) 
Author: Azure, Project: azure-cli-shell, Lines: 27, Source: color_styles.py
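
Here style_from_dict is the prompt_toolkit 1.x styling API (from prompt_toolkit.styles) that azure-cli-shell was built on. A hypothetical call showing how the ten parameters map onto style strings; the color values below are made-up placeholders, not the shell's actual defaults:

style = color_mapping(
    curr_completion='bg:#7c2d32 #ffffff',
    completion='bg:#48244c #eeeeee',
    prompt='#7faad2 bold',
    command='#a0ce4e',
    subcommand='#56c8c8',
    param='#94c4bc',
    text='#ffffff',
    line='#e5e5e5',
    example='#8c8c8c',
    toolbar='bg:#8c8c8c #ffffff',
)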

Example 3: test_walrus_operator

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_walrus_operator(lexer3):
    """
    Tests that ':=' is recognized as an Operator
    """
    fragment = u'if (a := 2) > 4:'
    tokens = [
        (Token.Keyword, 'if'),
        (Token.Text, ' '),
        (Token.Punctuation, '('),
        (Token.Name, 'a'),
        (Token.Text, ' '),
        (Token.Operator, ':='),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '2'),
        (Token.Punctuation, ')'),
        (Token.Text, ' '),
        (Token.Operator, '>'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '4'),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
    ]
    assert list(lexer3.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 25, Source: test_python.py
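
The lexer3 argument is a pytest fixture that this page elides. In the pygments test suite such fixtures are simple module-scoped generators; a sketch of what it presumably looks like (the MATLAB, GDScript, and YANG tests below use analogous lexer fixtures built on MatlabLexer, GDScriptLexer, and YangLexer):

import pytest
from pygments.lexers import PythonLexer

@pytest.fixture(scope='module')
def lexer3():
    yield PythonLexer()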

Example 4: test_line_continuation

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_line_continuation(lexer):
    """
    Test that line continuation by ellipses does not produce generic
    output on the second line.
    """
    fragment = "set('T',300,...\n'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Keyword, '...'),
        (Token.Text, '\n'),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 27, Source: test_matlab.py

Example 5: test_comment_after_continuation

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_comment_after_continuation(lexer):
    """
    Test that text after the line continuation ellipses is marked as a comment.
    """
    fragment = "set('T',300,... a comment\n'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Keyword, '...'),
        (Token.Comment, ' a comment'),
        (Token.Text, '\n'),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 27, Source: test_matlab.py

Example 6: test_simple_function

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_simple_function(lexer):
    fragment = "func abc(arg):\n\tprint(\"Hello, World!\")"
    tokens = [
        (Token.Keyword, "func"),
        (Token.Text, " "),
        (Token.Name, "abc"),
        (Token.Punctuation, "("),
        (Token.Name, "arg"),
        (Token.Punctuation, ")"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Name.Builtin, "print"),
        (Token.Punctuation, "("),
        (Token.Literal.String.Double, "\""),
        (Token.Literal.String.Double, "Hello, World!"),
        (Token.Literal.String.Double, "\""),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 23, Source: test_gdscript.py

Example 7: test_signal

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_signal(lexer):
    fragment = "signal sig (arg1, arg2)"
    tokens = [
        (Token.Keyword, "signal"),
        (Token.Text, " "),
        (Token.Name, "sig"),
        (Token.Text, " "),
        (Token.Punctuation, "("),
        (Token.Name, "arg1"),
        (Token.Punctuation, ","),
        (Token.Text, " "),
        (Token.Name, "arg2"),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 18, Source: test_gdscript.py

Example 8: test_export_array

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_export_array(lexer):
    fragment = "export (Array, AudioStream) var streams"
    tokens = [
        (Token.Keyword, "export"),
        (Token.Text, " "),
        (Token.Punctuation, "("),
        (Token.Name.Builtin.Type, "Array"),
        (Token.Punctuation, ","),
        (Token.Text, " "),
        (Token.Name, "AudioStream"),
        (Token.Punctuation, ")"),
        (Token.Text, " "),
        (Token.Keyword, "var"),
        (Token.Text, " "),
        (Token.Name, "streams"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 20, Source: test_gdscript.py

Example 9: test_inner_class

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_inner_class(lexer):
    fragment = "class InnerClass:\n\tvar a = 5"
    tokens = [
        (Token.Keyword, "class"),
        (Token.Text, " "),
        (Token.Name, "InnerClass"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Keyword, "var"),
        (Token.Text, " "),
        (Token.Name, "a"),
        (Token.Text, " "),
        (Token.Operator, "="),
        (Token.Text, " "),
        (Token.Literal.Number.Integer, "5"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 21, Source: test_gdscript.py

Example 10: get_tokens_unprocessed

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def get_tokens_unprocessed(self, text):
    for index, token, value in super().get_tokens_unprocessed(text):
        if token is Token.Name.Variable and value in self.EXTRA_BUILTINS:
            # Promote selected variables to builtins.
            yield index, Token.Name.Builtin, value
        elif token is Token.Name.Variable and value in self.EXTRA_KEYWORDS:
            # Promote selected variables to type keywords.
            yield index, Token.Keyword.Type, value
        elif token is Token.Punctuation:
            # Split punctuation runs so arithmetic characters become
            # individual Operator tokens.
            for c in value:
                if c in "+-*/%^&":
                    yield index, Token.Operator, c
                else:
                    yield index, token, c
                index += 1
        else:
            yield index, token, value
Author: wapiflapi, Project: gxf, Lines: 17, Source: disassembly.py
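
An override like this normally lives on a lexer subclass. A minimal sketch of the surrounding class, assuming GasLexer as the base and placeholder set contents; the real base class and sets live in the gxf source:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import GasLexer
from pygments.token import Token

class DisassemblyLexer(GasLexer):  # hypothetical name
    EXTRA_BUILTINS = {'eip', 'rip', 'pc'}       # placeholder contents
    EXTRA_KEYWORDS = {'BYTE', 'WORD', 'DWORD'}  # placeholder contents

    def get_tokens_unprocessed(self, text):
        # Re-tag selected names on top of the base lexer's output
        # (the punctuation splitting from Example 10 is omitted for brevity).
        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Token.Name.Variable and value in self.EXTRA_BUILTINS:
                yield index, Token.Name.Builtin, value
            elif token is Token.Name.Variable and value in self.EXTRA_KEYWORDS:
                yield index, Token.Keyword.Type, value
            else:
                yield index, token, value

print(highlight('mov rax, QWORD PTR [rip]', DisassemblyLexer(), TerminalFormatter()))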

Example 11: _convert_intel

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def _convert_intel(self, tokens):

    relative = False
    ctokens = []

    for t, v in tokens:

        if v == "[":
            # Open a C-style dereference: "[" becomes "*(void **)(".
            ctokens.append((Token.Operator, "*"))
            ctokens.append((Token.Punctuation, "("))

            # should convert types to cast here.
            ctokens.append((Token.Keyword.Type, "void"))
            ctokens.append((Token.Operator, "*"))

            ctokens.append((Token.Operator, "*"))
            ctokens.append((Token.Punctuation, ")"))
            ctokens.append((Token.Punctuation, "("))

        elif v == "]":
            ctokens.append((Token.Punctuation, ")"))

        elif t in (Token.Name.Variable, Token.Name.Builtin):
            if v in ("eip", "rip", "pc"):
                # The program counter has already advanced past this
                # instruction, so add its length to the register.
                ctokens.append((Token.Punctuation, "("))
                ctokens.append((t, "$%s" % v))
                ctokens.append((Token.Operator, "+"))
                ctokens.append((Token.Literal.Number, "%d" % self.length))
                ctokens.append((Token.Punctuation, ")"))
            else:
                # Registers get the debugger's "$" prefix.
                ctokens.append((t, "$%s" % v))

        else:
            ctokens.append((t, v))

    return ctokens, relative
Author: wapiflapi, Project: gxf, Lines: 39, Source: disassembly.py
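
Concretely, the rewrite turns an Intel-syntax memory operand into a C-style expression that gdb can evaluate, with $-prefixed registers. A hypothetical input for illustration:

from pygments.token import Token

# Hypothetical token stream for the Intel operand "[rax+8]".
operand = [
    (Token.Punctuation, '['),
    (Token.Name.Variable, 'rax'),
    (Token.Operator, '+'),
    (Token.Literal.Number.Integer, '8'),
    (Token.Punctuation, ']'),
]

# Fed through _convert_intel, the values of the resulting tokens
# concatenate to:
#
#     *(void**)($rax+8)
#
# i.e. "[" opens the dereference and cast, "rax" gains the "$"
# prefix, and "]" closes the expression.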

Example 12: get_expression

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def get_expression(self):

    types = []
    tokens = []

    start = self.instidx
    while self.tokens[start][0] is not Token.Text:
        start += 1

    for t, v in self.tokens[start:]:
        if tokens and t is Token.Text:
            break
        if t is Token.Text:
            pass
        elif t is Token.Keyword.Type:
            types.append((t, v))
        else:
            tokens.append((t, v))

    if disassemblyflavor.value == "intel":
        ctokens, relative = self._convert_intel(tokens)
    elif disassemblyflavor.value == "att":
        ctokens, relative = self._convert_att(tokens)
    else:
        assert False, "not intel or att."

    if relative:
        if not len(ctokens) == 1 and ctokens[0][0] is Token.Number.Integer:

            ctokens = [(Token.Name.Variable, "$pc"),
                       (Token.Operator, "+")
                       ] + ctokens

    return gxf.Expression(ctokens)
Author: wapiflapi, Project: gxf, Lines: 36, Source: disassembly.py

Example 13: fmtaddr

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def fmtaddr(self, addr):

    if "x" in self.perms:
        token = Token.Generic.Deleted
    elif "w" in self.perms:
        token = Token.Keyword
    elif "r" in self.perms:
        token = Token.Generic.Inserted
    elif "u" in self.perms:
        token = Token.Comment
    else:
        token = Token.Text

    yield (token, "%#.x" % addr)
Author: wapiflapi, Project: gxf, Lines: 16, Source: memory.py

Example 14: lexers

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def lexers(self):
    # One command name per entry; every entry needs its trailing comma,
    # since a missing one silently concatenates adjacent strings.
    lexerNames = [
        'send_nym',
        'send_get_nym',
        'send_attrib',
        'send_cred_def',
        'send_isr_key',
        'send_node',
        'send_pool_upg',
        'add_genesis',
        'show_file',
        'conn',
        'load_file',
        'show_link',
        'sync_link',
        'ping_target',
        'show_claim',
        'show_claim_req',
        'req_claim',
        'accept_link_invite',
        'set_attr',
        'send_claim',
    ]
    lexers = {n: SimpleLexer(Token.Keyword) for n in lexerNames}
    # Add more lexers to base class lexers
    return {**super().lexers, **lexers}
Author: sovrin-foundation, Project: old-sovrin, Lines: 28, Source: cli.py

Example 15: test_namespace_1

# Module to import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Keyword [as alias]
def test_namespace_1(lexer):
    """
    Namespace `urn:test:std:yang` should not be explicitly highlighted
    """
    fragment = u'namespace urn:test:std:yang;\n'
    tokens = [
        (Token.Keyword, u'namespace'),
        (Token.Text.Whitespace, u' '),
        (Token.Name.Variable, u'urn:test:std:yang'),
        (Token.Punctuation, u';'),
        (Token.Text.Whitespace, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 15, Source: test_yang.py


Note: The pygments.token.Token.Keyword examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use must follow the corresponding project's license. Do not republish without permission.