This article collects typical usage examples of the Token.Name token type from Python's pygments.token module. If you have been wondering what Token.Name is for, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of its containing class, pygments.token.Token.
The following shows 15 code examples of Token.Name, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
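Before the examples, here is a minimal sketch of how Token.Name shows up in practice. It is not taken from any of the examples below; it simply runs a Pygments lexer over a short fragment and keeps only the tokens whose type falls under Token.Name (PythonLexer is used purely for illustration).

from pygments.lexers import PythonLexer
from pygments.token import Token

lexer = PythonLexer()
fragment = "gcd(a, b)\n"

# get_tokens() yields (token_type, value) pairs; 'ttype in Token.Name'
# is Pygments' subtype test, so Name.Function etc. would also match
names = [value for ttype, value in lexer.get_tokens(fragment) if ttype in Token.Name]
print(names)  # expected: ['gcd', 'a', 'b']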
Example 1: test_gcd_expr
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_gcd_expr(lexer):
    fragment = u'1^3+(5-5)*gcd(a,b)\n'
    tokens = [
        (Token.Number.Integer, u'1'),
        (Token.Operator, u'^'),
        (Token.Literal.Number.Integer, u'3'),
        (Token.Operator, u'+'),
        (Token.Punctuation, u'('),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Operator, u'-'),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Name, u'gcd'),
        (Token.Punctuation, u'('),
        (Token.Name, u'a'),
        (Token.Operator, u','),
        (Token.Name, u'b'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 2: test_walrus_operator
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_walrus_operator(lexer3):
    """
    Tests that ':=' is recognized as an Operator.
    """
    fragment = u'if (a := 2) > 4:'
    tokens = [
        (Token.Keyword, 'if'),
        (Token.Text, ' '),
        (Token.Punctuation, '('),
        (Token.Name, 'a'),
        (Token.Text, ' '),
        (Token.Operator, ':='),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '2'),
        (Token.Punctuation, ')'),
        (Token.Text, ' '),
        (Token.Operator, '>'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '4'),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
    ]
    assert list(lexer3.get_tokens(fragment)) == tokens
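The lexer, lexer2 and lexer3 parameters in these tests are pytest fixtures defined elsewhere in each test module; they are not part of the excerpts. As a rough sketch of what such a fixture could look like, assuming the walrus-operator test above targets Pygments' PythonLexer (the other examples, which appear to exercise Ruby, MATLAB-style and GDScript lexers, would define analogous fixtures of their own):

import pytest
from pygments.lexers import PythonLexer

@pytest.fixture(scope='module')
def lexer3():
    # lexer under test for the Python 3 fragments; other test modules
    # would yield their own lexer class here instead
    yield PythonLexer()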
Example 3: test_escaped_bracestring
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_escaped_bracestring(lexer):
    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
    tokens = [
        (Token.Name, u'str'),
        (Token.Operator, u'.'),
        (Token.Name, u'gsub'),
        (Token.Punctuation, u'('),
        (Token.Literal.String.Regex, u'%r{'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'}'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (Token.Literal.String.Double, u'"'),
        (Token.Literal.String.Double, u'/'),
        (Token.Literal.String.Double, u'"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 4: test_single_line
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_single_line(lexer):
    """
    Test that a single line with strings, a method, and numbers is parsed correctly.
    """
    fragment = "set('T',300,'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 5: test_line_continuation
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_line_continuation(lexer):
    """
    Test that line continuation by ellipses does not produce generic
    output on the second line.
    """
    fragment = "set('T',300,...\n'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Keyword, '...'),
        (Token.Text, '\n'),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 6: test_comment_after_continuation
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_comment_after_continuation(lexer):
    """
    Test that text after the line continuation ellipses is marked as a comment.
    """
    fragment = "set('T',300,... a comment\n'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Keyword, '...'),
        (Token.Comment, ' a comment'),
        (Token.Text, '\n'),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 7: test_multiple_spaces_variable_assignment
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_multiple_spaces_variable_assignment(lexer):
    """
    Test that multiple spaces before an equals sign do not get lexed as a string.
    """
    fragment = 'x  = 100;\n'  # note the two spaces before '='
    tokens = [
        (Token.Name, 'x'),
        (Token.Text, ' '),
        (Token.Text, ' '),
        (Token.Punctuation, '='),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '100'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 8: test_operator_multiple_space
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_operator_multiple_space(lexer):
    """
    Test that multiple spaces before an operator do not get lexed as a string.
    """
    fragment = 'x  > 100;\n'  # note the two spaces before '>'
    tokens = [
        (Token.Name, 'x'),
        (Token.Text, ' '),
        (Token.Text, ' '),
        (Token.Operator, '>'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '100'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 9: test_simple_function
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_simple_function(lexer):
    fragment = "func abc(arg):\n\tprint(\"Hello, World!\")"
    tokens = [
        (Token.Keyword, "func"),
        (Token.Text, " "),
        (Token.Name, "abc"),
        (Token.Punctuation, "("),
        (Token.Name, "arg"),
        (Token.Punctuation, ")"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Name.Builtin, "print"),
        (Token.Punctuation, "("),
        (Token.Literal.String.Double, "\""),
        (Token.Literal.String.Double, "Hello, World!"),
        (Token.Literal.String.Double, "\""),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 10: test_export_array
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_export_array(lexer):
    fragment = "export (Array, AudioStream) var streams"
    tokens = [
        (Token.Keyword, "export"),
        (Token.Text, " "),
        (Token.Punctuation, "("),
        (Token.Name.Builtin.Type, "Array"),
        (Token.Punctuation, ","),
        (Token.Text, " "),
        (Token.Name, "AudioStream"),
        (Token.Punctuation, ")"),
        (Token.Text, " "),
        (Token.Keyword, "var"),
        (Token.Text, " "),
        (Token.Name, "streams"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 11: test_inner_class
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_inner_class(lexer):
    fragment = "class InnerClass:\n\tvar a = 5"
    tokens = [
        (Token.Keyword, "class"),
        (Token.Text, " "),
        (Token.Name, "InnerClass"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Keyword, "var"),
        (Token.Text, " "),
        (Token.Name, "a"),
        (Token.Text, " "),
        (Token.Operator, "="),
        (Token.Text, " "),
        (Token.Literal.Number.Integer, "5"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 12: get_autocomplete_stub
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def get_autocomplete_stub(lexer, text):
    """
    Return the trailing token blocks of *text* that form the stub to autocomplete.
    """
    entity = []
    from pygments.token import Token
    # ignored tokens
    ignored = [Token.Comment, Token.Text, Token.Text.Whitespace, Token.Comment.Single]
    filtered = lambda pair: pair[0] not in ignored  # pair = (token, value)
    tokens = filter(filtered, get_tokens_reversed(lexer, text))
    blocks = get_blocks(tokens)
    block = next(blocks, [])
    if len(block) == 1 and block[0][1] == ".":
        block = next(blocks, [])
    if len(block) > 0 and block[0][1] == "(":
        block_ = next(blocks, [])
        # the single-token block preceding '(' should be the callee's name
        if len(block_) == 1 and block_[0][0] is Token.Name:
            return block_ + block
        return block
    return []
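get_autocomplete_stub relies on two helpers that are not shown in the excerpt, get_tokens_reversed and get_blocks. Their real implementations are unknown; the sketch below is only a guess at their shape, so the example above can be read in context. It assumes get_tokens_reversed walks the lexer's (token_type, value) pairs from the end of the text backwards, and get_blocks groups that reversed stream into small blocks, with '.' and punctuation emitted as single-element blocks.

from pygments.token import Token

def get_tokens_reversed(lexer, text):
    # hypothetical helper: lex the text, then iterate the tokens backwards
    return reversed(list(lexer.get_tokens(text)))

def get_blocks(tokens):
    # hypothetical helper: split the reversed token stream into blocks,
    # yielding '.' and punctuation such as '(' as blocks of their own
    block = []
    for ttype, value in tokens:
        if ttype in Token.Punctuation or value == '.':
            if block:
                yield block
            yield [(ttype, value)]
            block = []
        else:
            block.append((ttype, value))
    if block:
        yield block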
Example 13: parse_func_from_str
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def parse_func_from_str(text):
    tks = lex(text, lan='C', process=True)  # external helper, not shown in this excerpt
    tks = strip_tokens(tks)                 # external helper, not shown in this excerpt
    i = 0
    while i < len(tks) - 1:
        # a Name token immediately followed by '(' marks a call or definition
        if tks[i][0] in Token.Name and tks[i+1][1] == '(':
            name = tks[i][1]
            e_i = find_close(tks, i+1)
            if e_i is None:
                i += 1
                continue
            args = _parse_func_args(tks[i+2:e_i])
            return (name, args)
        i += 1
    return None
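parse_func_from_str leans on several helpers that are not part of the excerpt (lex, strip_tokens, find_close and _parse_func_args), so it is not runnable on its own. As an illustration of what one of them presumably does, here is a hedged sketch of find_close, assumed to return the index of the ')' matching the '(' at the given index in a list of (token_type, value) pairs:

def find_close(tks, open_i):
    """Return the index of the ')' matching the '(' at open_i, or None."""
    depth = 0
    for i in range(open_i, len(tks)):
        if tks[i][1] == '(':
            depth += 1
        elif tks[i][1] == ')':
            depth -= 1
            if depth == 0:
                return i
    return None  # unbalanced parentheses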
Example 14: _l_cls_call
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def _l_cls_call(line, tks):
    tks = strip_tokens(tks)
    if not tks or len(tks) < 3:
        return False
    for i in range(len(tks)):
        if tks[i][1] == '(':
            # a '(' preceded by a Name token looks like a call
            if i >= 1 and tks[i-1][0] in Token.Name:
                return True
    return False
Example 15: test_cls_builtin
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Name [as alias]
def test_cls_builtin(lexer2):
    """
    Tests that a cls token gets interpreted as Token.Name.Builtin.Pseudo.
    """
    fragment = 'class TestClass():\n @classmethod\n def hello(cls):\n pass\n'
    tokens = [
        (Token.Keyword, 'class'),
        (Token.Text, ' '),
        (Token.Name.Class, 'TestClass'),
        (Token.Punctuation, '('),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
        (Token.Text, ' '),
        (Token.Name.Decorator, '@classmethod'),
        (Token.Text, '\n'),
        (Token.Text, ' '),
        (Token.Keyword, 'def'),
        (Token.Text, ' '),
        (Token.Name.Function, 'hello'),
        (Token.Punctuation, '('),
        (Token.Name.Builtin.Pseudo, 'cls'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
        (Token.Text, ' '),
        (Token.Keyword, 'pass'),
        (Token.Text, '\n'),
    ]
    assert list(lexer2.get_tokens(fragment)) == tokens