

Python token.Keyword Code Examples

This article collects typical usage examples of pygments.token.Keyword in Python. If you are wondering what token.Keyword is for and how to use it in practice, the curated examples below may help. (Strictly speaking, Keyword is a token type exposed by pygments.token rather than a method.) You can also explore further usage examples from the pygments.token module.


The following 15 code examples of token.Keyword are shown, ordered by popularity by default.
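Before the collected examples, here is a minimal sketch (illustrative only, not taken from the examples below) of what token.Keyword represents: Pygments lexers emit (token_type, text) pairs, and Keyword is the token type, with subtypes such as Keyword.Constant, assigned to language keywords.

from pygments.lexers import PythonLexer
from pygments.token import Keyword

code = "def f():\n    return None\n"

# get_tokens() yields (token_type, text) pairs; `tok in Keyword` matches
# Keyword itself as well as subtypes such as Keyword.Constant.
keywords = [text for tok, text in PythonLexer().get_tokens(code)
            if tok in Keyword]
print(keywords)  # ['def', 'return', 'None'] ('None' lexes as Keyword.Constant)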

Example 1: _create_toolbar_handler

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def _create_toolbar_handler(self, get_cluster_name, get_namespace, get_user, get_inline_help):
        def get_toolbar_items(_):
            if get_inline_help():
                help_token = Token.Toolbar.On
                help = "ON"
            else:
                help_token = Token.Toolbar.Off
                help = "OFF"

            return [
                (Keyword, ' [F4] Cluster: '),
                (Token.Toolbar, get_cluster_name()),
                (Keyword, ' [F5] Namespace: '),
                (Token.Toolbar, get_namespace()),
                (Keyword, ' User: '),
                (Token.Toolbar, get_user()),
                (Keyword, ' [F9] In-line help: '),
                (help_token, '{0}'.format(help)),
                (Keyword, ' [F10] Exit ')
            ]

        return get_toolbar_items 
Author: cloudnativelabs, Project: kube-shell, Lines: 24, File: toolbar.py
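A note on the design: kube-shell builds its UI with prompt_toolkit (1.x), whose bottom-toolbar callback returns a list of (token, text) tuples; the application's style then maps each token type to colors. Reusing Pygments' Keyword type here is purely a styling convention; no lexing is involved.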

Example 2: test_can_cope_with_destructuring

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_can_cope_with_destructuring(lexer):
    fragment = u'val (a, b) = '
    tokens = [
        (Keyword, u'val'),
        (Text, u' '),
        (Punctuation, u'('),
        (Name.Property, u'a'),
        (Punctuation, u','),
        (Text, u' '),
        (Name.Property, u'b'),
        (Punctuation, u')'),
        (Text, u' '),
        (Punctuation, u'='),
        (Text, u' '),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 19, File: test_kotlin.py
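The lexer argument in Examples 2, 3, and 5 through 8 is a pytest fixture. The pygments test suite conventionally defines it once per test module; a sketch along those lines (the fixture body is assumed from that convention, not shown on this page):

import pytest
from pygments.lexers import KotlinLexer

@pytest.fixture(scope='module')
def lexer():
    yield KotlinLexer()

(test_java.py and test_perllexer.py would use JavaLexer and PerlLexer in the same way.)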

Example 3: test_enhanced_for

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_enhanced_for(lexer):
    fragment = u'label:\nfor(String var2: var1) {}\n'
    tokens = [
        (Name.Label, u'label:'),
        (Text, u'\n'),
        (Keyword, u'for'),
        (Punctuation, u'('),
        (Name, u'String'),
        (Text, u' '),
        (Name, u'var2'),
        (Punctuation, u':'),
        (Text, u' '),
        (Name, u'var1'),
        (Punctuation, u')'),
        (Text, u' '),
        (Punctuation, u'{'),
        (Punctuation, u'}'),
        (Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 22, File: test_java.py

Example 4: analyze

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def analyze(self, text):
        # If the lexer produced any Error token, give up and return (None, None).
        if any(token is Error for token, value in self.get_tokens(text)):
            return None, None
        tokens, args, kwargs = self.get_tokens(text), [], {}
        for token, value in tokens:
            # Coerce the lexed string values to Python types.
            if token is Keyword:
                value = value in ('true', 'True')
            elif token is Number:
                value = int(value)
            if token in (Keyword, Number, String):
                args.append(value)
            if token is Name:
                next(tokens)  # skip the Operator '='
                kwargs.update({value: next(tokens)[1]})
        return args, kwargs 
Author: dhondta, Project: rpl-attacks, Lines: 17, File: lexer.py
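Design note: analyze coerces Keyword values ('true'/'True') to booleans and Number values to ints, appends Keyword/Number/String values to args, and treats each Name as the start of a name=value pair, consuming the '=' Operator and the value that follows from the token stream. So an input lexed as Keyword 'true', Number '42', Name 'out', Operator '=', String 'file' would return ([True, 42], {'out': 'file'}).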

Example 5: test_can_cope_with_backtick_names_in_functions

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_can_cope_with_backtick_names_in_functions(lexer):
    fragment = u'fun `wo bble`'
    tokens = [
        (Keyword, u'fun'),
        (Text, u' '),
        (Name.Function, u'`wo bble`'),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 11, File: test_kotlin.py

Example 6: test_can_cope_with_commas_and_dashes_in_backtick_Names

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_can_cope_with_commas_and_dashes_in_backtick_Names(lexer):
    fragment = u'fun `wo,-bble`'
    tokens = [
        (Keyword, u'fun'),
        (Text, u' '),
        (Name.Function, u'`wo,-bble`'),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 11, File: test_kotlin.py

Example 7: test_can_cope_generics_in_destructuring

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_can_cope_generics_in_destructuring(lexer):
    fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
    tokens = [
        (Keyword, u'val'),
        (Text, u' '),
        (Punctuation, u'('),
        (Name.Property, u'a'),
        (Punctuation, u':'),
        (Text, u' '),
        (Name.Property, u'List'),
        (Punctuation, u'<'),
        (Name, u'Something'),
        (Punctuation, u'>'),
        (Punctuation, u','),
        (Text, u' '),
        (Name.Property, u'b'),
        (Punctuation, u':'),
        (Text, u' '),
        (Name.Property, u'Set'),
        (Punctuation, u'<'),
        (Name, u'Wobble'),
        (Punctuation, u'>'),
        (Punctuation, u')'),
        (Text, u' '),
        (Punctuation, u'='),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 30, File: test_kotlin.py

Example 8: test_can_cope_with_generics

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_can_cope_with_generics(lexer):
    fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
    tokens = [
        (Keyword, u'inline fun'),
        (Text, u' '),
        (Punctuation, u'<'),
        (Keyword, u'reified'),
        (Text, u' '),
        (Name, u'T'),
        (Text, u' '),
        (Punctuation, u':'),
        (Text, u' '),
        (Name, u'ContractState'),
        (Punctuation, u'>'),
        (Text, u' '),
        (Name.Class, u'VaultService'),
        (Punctuation, u'.'),
        (Name.Function, u'queryBy'),
        (Punctuation, u'('),
        (Punctuation, u')'),
        (Punctuation, u':'),
        (Text, u' '),
        (Name, u'Vault'),
        (Punctuation, u'.'),
        (Name, u'Page'),
        (Punctuation, u'<'),
        (Name, u'T'),
        (Punctuation, u'>'),
        (Text, u' '),
        (Punctuation, u'{'),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Author: pygments, Project: pygments, Lines: 35, File: test_kotlin.py

Example 9: test_package_statement

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_package_statement(lexer):
    assert_tokens(lexer, ['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
    assert_tokens(lexer, ['package', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace]) 
Author: pygments, Project: pygments, Lines: 5, File: test_perllexer.py
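assert_tokens is a helper defined in pygments' test_perllexer.py. A plausible reconstruction (hypothetical; the real helper may differ in detail) that matches how Examples 9 through 12 read:

def assert_tokens(lexer, strings, expected_tokens):
    # Lex the concatenated input; get_tokens_unprocessed() yields
    # (index, token, text) triples and appends no trailing newline.
    produced = list(lexer.get_tokens_unprocessed(''.join(strings)))
    assert len(produced) == len(strings) == len(expected_tokens)
    for (_, token, text), expected_token, expected_text in zip(
            produced, expected_tokens, strings):
        assert token is expected_token
        assert text == expected_text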

Example 10: test_use_statement

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_use_statement(lexer):
    assert_tokens(lexer, ['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
    assert_tokens(lexer, ['use', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace]) 
Author: pygments, Project: pygments, Lines: 5, File: test_perllexer.py

Example 11: test_no_statement

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_no_statement(lexer):
    assert_tokens(lexer, ['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
    assert_tokens(lexer, ['no', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace]) 
Author: pygments, Project: pygments, Lines: 5, File: test_perllexer.py

Example 12: test_require_statement

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_require_statement(lexer):
    assert_tokens(lexer, ['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
    assert_tokens(lexer, ['require', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
    assert_tokens(lexer, ['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String]) 
Author: pygments, Project: pygments, Lines: 6, File: test_perllexer.py

Example 13: test_simple

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_simple(self):
        self.assertEqual(self.get_tokens('cd api/v1'), [
            (Keyword, 'cd'),
            (String, 'api/v1')
        ]) 
Author: eliangcs, Project: http-prompt, Lines: 7, File: test_lexer.py

Example 14: test_double_quoted

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_double_quoted(self):
        self.assertEqual(self.get_tokens('cd "api/v 1"'), [
            (Keyword, 'cd'),
            (Text, '"'),
            (String, 'api/v 1'),
            (Text, '"')
        ]) 
Author: eliangcs, Project: http-prompt, Lines: 9, File: test_lexer.py

Example 15: test_single_quoted

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Keyword [as alias]
def test_single_quoted(self):
        self.assertEqual(self.get_tokens("cd 'api/v 1'"), [
            (Keyword, 'cd'),
            (Text, "'"),
            (String, 'api/v 1'),
            (Text, "'")
        ]) 
Author: eliangcs, Project: http-prompt, Lines: 9, File: test_lexer.py
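The self.get_tokens method in Examples 13 through 15 belongs to http-prompt's test harness rather than to a Pygments lexer directly. A plausible sketch (hypothetical; the whitespace filtering is inferred from the expected token lists, which contain no blank tokens):

import unittest

from http_prompt.lexer import HttpPromptLexer

class LexerTestCase(unittest.TestCase):

    def setUp(self):
        self.lexer = HttpPromptLexer()

    def get_tokens(self, text):
        # Keep only tokens with visible text, dropping pure whitespace.
        return [(ttype, value)
                for ttype, value in self.lexer.get_tokens(text)
                if value.strip()]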


Note: The pygments.token.Keyword examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any use or distribution must follow the corresponding project's license. Do not reproduce without permission.