当前位置: 首页>>代码示例>>Python>>正文


Python Token.Name方法代码示例

本文整理汇总了Python中pygments.token.Token.Name方法的典型用法代码示例。如果您正苦于以下问题:Python Token.Name方法的具体用法?Python Token.Name怎么用?Python Token.Name使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pygments.token.Token的用法示例。


在下文中一共展示了Token.Name方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_gcd_expr

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_gcd_expr(lexer):
    """
    Check that an Ezhil arithmetic expression containing an exponent,
    parentheses and a gcd() call lexes into the expected token stream.
    """
    fragment = u'1^3+(5-5)*gcd(a,b)\n'
    tokens = [
        # Token.Number is an alias of Token.Literal.Number in pygments;
        # spell it fully qualified so the list uses one convention only.
        (Token.Literal.Number.Integer, u'1'),
        (Token.Operator, u'^'),
        (Token.Literal.Number.Integer, u'3'),
        (Token.Operator, u'+'),
        (Token.Punctuation, u'('),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Operator, u'-'),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Name, u'gcd'),
        (Token.Punctuation, u'('),
        (Token.Name, u'a'),
        (Token.Operator, u','),
        (Token.Name, u'b'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
开发者ID:pygments,项目名称:pygments,代码行数:24,代码来源:test_ezhil.py

示例2: test_walrus_operator

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_walrus_operator(lexer3):
    """
    Verify that the walrus operator ':=' is lexed as a single Operator
    token rather than being split into ':' and '='.
    """
    fragment = u'if (a := 2) > 4:'
    kinds = [
        Token.Keyword, Token.Text, Token.Punctuation, Token.Name,
        Token.Text, Token.Operator, Token.Text,
        Token.Literal.Number.Integer, Token.Punctuation, Token.Text,
        Token.Operator, Token.Text, Token.Literal.Number.Integer,
        Token.Punctuation, Token.Text,
    ]
    values = ['if', ' ', '(', 'a', ' ', ':=', ' ', '2', ')', ' ',
              '>', ' ', '4', ':', '\n']
    assert list(lexer3.get_tokens(fragment)) == list(zip(kinds, values))
开发者ID:pygments,项目名称:pygments,代码行数:25,代码来源:test_python.py

示例3: test_escaped_bracestring

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_escaped_bracestring(lexer):
    """
    Doubled backslashes inside a %r{...} regex literal must stay inside
    the String.Regex token type and not terminate the literal early.
    """
    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
    regex = Token.Literal.String.Regex
    dquote = Token.Literal.String.Double
    expected = [
        (Token.Name, u'str'),
        (Token.Operator, u'.'),
        (Token.Name, u'gsub'),
        (Token.Punctuation, u'('),
        (regex, u'%r{'),
        (regex, u'\\\\'),
        (regex, u'\\\\'),
        (regex, u'}'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (dquote, u'"'),
        (dquote, u'/'),
        (dquote, u'"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert expected == list(lexer.get_tokens(fragment))
开发者ID:pygments,项目名称:pygments,代码行数:22,代码来源:test_ruby.py

示例4: test_single_line

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_single_line(lexer):
    """
    A one-line MATLAB statement mixing single-quoted strings, a function
    call and integer literals must produce the expected token stream.
    """
    fragment = "set('T',300,'P',101325);\n"
    string_ = Token.Literal.String
    punct = Token.Punctuation
    expected = [(Token.Name, 'set'), (punct, '(')]
    expected += [(string_, "'"), (string_, "T'"), (punct, ',')]
    expected += [(Token.Literal.Number.Integer, '300'), (punct, ',')]
    expected += [(string_, "'"), (string_, "P'"), (punct, ',')]
    expected += [(Token.Literal.Number.Integer, '101325')]
    expected += [(punct, ')'), (punct, ';'), (Token.Text, '\n')]
    assert expected == list(lexer.get_tokens(fragment))
开发者ID:pygments,项目名称:pygments,代码行数:24,代码来源:test_matlab.py

示例5: test_line_continuation

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_line_continuation(lexer):
    """
    An ellipsis ('...') line continuation must not cause the second
    physical line to fall back to generic output.
    """
    fragment = "set('T',300,...\n'P',101325);\n"
    punct = Token.Punctuation
    string_ = Token.Literal.String
    integer = Token.Literal.Number.Integer
    expected = [
        (Token.Name, 'set'),
        (punct, '('),
        (string_, "'"),
        (string_, "T'"),
        (punct, ','),
        (integer, '300'),
        (punct, ','),
        (Token.Keyword, '...'),
        (Token.Text, '\n'),
        (string_, "'"),
        (string_, "P'"),
        (punct, ','),
        (integer, '101325'),
        (punct, ')'),
        (punct, ';'),
        (Token.Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(fragment))
开发者ID:pygments,项目名称:pygments,代码行数:27,代码来源:test_matlab.py

示例6: test_comment_after_continuation

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_comment_after_continuation(lexer):
    """
    Any text following the '...' line-continuation ellipsis must be
    lexed as a Comment token.
    """
    fragment = "set('T',300,... a comment\n'P',101325);\n"
    kinds = [
        Token.Name, Token.Punctuation,
        Token.Literal.String, Token.Literal.String, Token.Punctuation,
        Token.Literal.Number.Integer, Token.Punctuation,
        Token.Keyword, Token.Comment, Token.Text,
        Token.Literal.String, Token.Literal.String, Token.Punctuation,
        Token.Literal.Number.Integer,
        Token.Punctuation, Token.Punctuation, Token.Text,
    ]
    values = ['set', '(', "'", "T'", ',', '300', ',',
              '...', ' a comment', '\n',
              "'", "P'", ',', '101325', ')', ';', '\n']
    assert list(lexer.get_tokens(fragment)) == list(zip(kinds, values))
开发者ID:pygments,项目名称:pygments,代码行数:27,代码来源:test_matlab.py

示例7: test_multiple_spaces_variable_assignment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_multiple_spaces_variable_assignment(lexer):
    """
    Multiple spaces before an '=' sign must remain Text tokens and not
    be misinterpreted as the start of a string.
    """
    fragment = 'x  = 100;\n'
    kinds = [Token.Name, Token.Text, Token.Text, Token.Punctuation,
             Token.Text, Token.Literal.Number.Integer,
             Token.Punctuation, Token.Text]
    values = ['x', ' ', ' ', '=', ' ', '100', ';', '\n']
    assert list(lexer.get_tokens(fragment)) == list(zip(kinds, values))
开发者ID:pygments,项目名称:pygments,代码行数:18,代码来源:test_matlab.py

示例8: test_operator_multiple_space

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_operator_multiple_space(lexer):
    """
    Multiple spaces before a comparison operator must remain Text
    tokens and not be misinterpreted as the start of a string.
    """
    fragment = 'x  > 100;\n'
    kinds = [Token.Name, Token.Text, Token.Text, Token.Operator,
             Token.Text, Token.Literal.Number.Integer,
             Token.Punctuation, Token.Text]
    values = ['x', ' ', ' ', '>', ' ', '100', ';', '\n']
    assert list(lexer.get_tokens(fragment)) == list(zip(kinds, values))
开发者ID:pygments,项目名称:pygments,代码行数:18,代码来源:test_matlab.py

示例9: test_simple_function

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_simple_function(lexer):
    """
    A minimal GDScript function definition calling the builtin print()
    must lex into keyword/name/punctuation tokens plus a double-quoted
    string split into open-quote, body and close-quote.
    """
    fragment = "func abc(arg):\n\tprint(\"Hello, World!\")"
    dquote = Token.Literal.String.Double
    expected = [
        (Token.Keyword, "func"),
        (Token.Text, " "),
        (Token.Name, "abc"),
        (Token.Punctuation, "("),
        (Token.Name, "arg"),
        (Token.Punctuation, ")"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Name.Builtin, "print"),
        (Token.Punctuation, "("),
        (dquote, "\""),
        (dquote, "Hello, World!"),
        (dquote, "\""),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]
    assert expected == list(lexer.get_tokens(fragment))
开发者ID:pygments,项目名称:pygments,代码行数:23,代码来源:test_gdscript.py

示例10: test_export_array

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_export_array(lexer):
    """
    A GDScript 'export (Array, AudioStream) var' declaration must lex
    the builtin type as Name.Builtin.Type and the custom type as Name.
    """
    fragment = "export (Array, AudioStream) var streams"
    kinds = [
        Token.Keyword, Token.Text, Token.Punctuation,
        Token.Name.Builtin.Type, Token.Punctuation, Token.Text,
        Token.Name, Token.Punctuation, Token.Text,
        Token.Keyword, Token.Text, Token.Name, Token.Text,
    ]
    values = ["export", " ", "(", "Array", ",", " ",
              "AudioStream", ")", " ", "var", " ", "streams", "\n"]
    assert list(lexer.get_tokens(fragment)) == list(zip(kinds, values))
开发者ID:pygments,项目名称:pygments,代码行数:20,代码来源:test_gdscript.py

示例11: test_inner_class

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_inner_class(lexer):
    """
    A GDScript inner-class definition with a var assignment in its body
    must produce the expected keyword/name/operator token stream.
    """
    fragment = "class InnerClass:\n\tvar a = 5"
    text = Token.Text
    expected = [
        (Token.Keyword, "class"),
        (text, " "),
        (Token.Name, "InnerClass"),
        (Token.Punctuation, ":"),
        (text, "\n"),
        (text, "\t"),
        (Token.Keyword, "var"),
        (text, " "),
        (Token.Name, "a"),
        (text, " "),
        (Token.Operator, "="),
        (text, " "),
        (Token.Literal.Number.Integer, "5"),
        (text, "\n"),
    ]
    assert expected == list(lexer.get_tokens(fragment))
开发者ID:pygments,项目名称:pygments,代码行数:21,代码来源:test_gdscript.py

示例12: get_autocomplete_stub

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def get_autocomplete_stub(lexer, text):
    """
    Return the token blocks forming the expression stub that precedes an
    autocomplete trigger (a trailing '.') in ``text``.

    Tokens are read from the end of the text backwards via
    ``get_tokens_reversed``, comment/whitespace tokens are dropped, and
    the remainder is grouped into blocks by ``get_blocks``.  When the
    text ends with '.', the block before the dot is returned; for a call
    expression ``name(...)`` the single-Name callee block is prepended.
    Returns [] when no stub is found.
    """
    from pygments.token import Token

    # Token categories that carry no structure for completion purposes.
    ignored = [Token.Comment, Token.Text, Token.Text.Whitespace, Token.Comment.Single]

    def significant(pair):  # pair = (token, value)
        return pair[0] not in ignored

    tokens = filter(significant, get_tokens_reversed(lexer, text))
    blocks = get_blocks(tokens)
    block = next(blocks, [])

    if len(block) == 1 and block[0][1] == ".":
        # The text ends with '.': the stub is whatever precedes the dot.
        block = next(blocks, [])

        if len(block) > 0 and block[0][1] == "(":
            # Call expression: fetch the callee block before the '('.
            block_ = next(blocks, [])

            # Bug fix: the original checked block[0][0], which is the
            # '(' Punctuation token guaranteed by the guard above, so
            # the callee name was never prepended.  The single-token
            # callee block itself (block_) must be a Name.
            if len(block_) == 1 and block_[0][0] is Token.Name:
                return block_ + block

        return block

    return []
开发者ID:johncsnyder,项目名称:SwiftKitten,代码行数:29,代码来源:SwiftKitten.py

示例13: parse_func_from_str

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def parse_func_from_str(text):
    """
    Lex C source ``text`` and return ``(name, args)`` for the first
    pattern that looks like a function call/definition — a Name token
    immediately followed by '(' with a matching close paren — or None
    when no such pattern is found.
    """
    tks = strip_tokens(lex(text,lan='C',process=True))
    idx = 0
    while idx < len(tks) - 1:
        cur, nxt = tks[idx], tks[idx + 1]
        if cur[0] in Token.Name and nxt[1] == '(':
            close_idx = find_close(tks, idx + 1)
            # Unbalanced parens: keep scanning past this candidate.
            if close_idx is not None:
                args = _parse_func_args(tks[idx + 2:close_idx])
                return (cur[1], args)
        idx += 1
    return None
开发者ID:fiberx,项目名称:fiber,代码行数:17,代码来源:src_parser.py

示例14: _l_cls_call

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def _l_cls_call(line,tks):
    """
    Heuristic: return True when the stripped token list contains a '('
    directly preceded by a Name token (i.e. it looks like a call).
    ``line`` is not inspected; it is kept for interface compatibility.
    """
    tks = strip_tokens(tks)
    if not tks or len(tks) < 3:
        return False
    return any(
        pos >= 1 and tks[pos][1] == '(' and tks[pos - 1][0] in Token.Name
        for pos in range(len(tks))
    )
开发者ID:fiberx,项目名称:fiber,代码行数:11,代码来源:src_parser.py

示例15: test_cls_builtin

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Name [as 别名]
def test_cls_builtin(lexer2):
    """
    Verify that 'cls' in a @classmethod signature is lexed as
    Token.Name.Builtin.Pseudo.
    """
    fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
    punct = Token.Punctuation
    text = Token.Text
    expected = [
        (Token.Keyword, 'class'),
        (text, ' '),
        (Token.Name.Class, 'TestClass'),
        (punct, '('),
        (punct, ')'),
        (punct, ':'),
        (text, '\n'),
        (text, '    '),
        (Token.Name.Decorator, '@classmethod'),
        (text, '\n'),
        (text, '    '),
        (Token.Keyword, 'def'),
        (text, ' '),
        (Token.Name.Function, 'hello'),
        (punct, '('),
        (Token.Name.Builtin.Pseudo, 'cls'),
        (punct, ')'),
        (punct, ':'),
        (text, '\n'),
        (text, '        '),
        (Token.Keyword, 'pass'),
        (text, '\n'),
    ]
    assert expected == list(lexer2.get_tokens(fragment))
开发者ID:pygments,项目名称:pygments,代码行数:32,代码来源:test_python.py


注:本文中的pygments.token.Token.Name方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。