This page collects typical usage examples of the pygments.token.Token.Punctuation token type in Python. If you are unsure what Token.Punctuation is or how to use it, the curated code examples below may help. For more background, see its containing class, pygments.token.Token.
The following 15 code examples of Token.Punctuation are shown, sorted by popularity by default.
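Before the examples, here is a minimal sketch (not taken from the examples below, assuming only a standard Pygments installation) of how Token.Punctuation shows up when tokenizing source code with a lexer; the snippet and variable names are illustrative only.

from pygments.lexers import PythonLexer
from pygments.token import Token

# Tokenize a small snippet and print only the punctuation tokens.
code = 'print("hi", 1)\n'
for token_type, value in PythonLexer().get_tokens(code):
    if token_type is Token.Punctuation:
        print(repr(value))  # prints '(', ',' and ')'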
Example 1: test_curly_no_escape_and_quotes
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_curly_no_escape_and_quotes(lexer_bash):
    fragment = u'echo "${a//["b"]/}"\n'
    tokens = [
        (Token.Name.Builtin, u'echo'),
        (Token.Text, u' '),
        (Token.Literal.String.Double, u'"'),
        (Token.String.Interpol, u'${'),
        (Token.Name.Variable, u'a'),
        (Token.Punctuation, u'//['),
        (Token.Literal.String.Double, u'"b"'),
        (Token.Punctuation, u']/'),
        (Token.String.Interpol, u'}'),
        (Token.Literal.String.Double, u'"'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_bash.get_tokens(fragment)) == tokens
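These tests rely on pytest fixtures (lexer_bash, lexer, lexer3, lexer_peg) that are not shown on this page. As a hedged sketch, assuming the conventions of the Pygments test suite, the lexer_bash fixture used above would typically be a module-scoped fixture yielding a BashLexer instance; the other fixtures follow the same pattern with the lexer class under test.

import pytest
from pygments.lexers import BashLexer

@pytest.fixture(scope='module')
def lexer_bash():
    # Yield one BashLexer instance shared by the whole test module.
    yield BashLexer()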
Example 2: test_gcd_expr
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_gcd_expr(lexer):
    fragment = u'1^3+(5-5)*gcd(a,b)\n'
    tokens = [
        (Token.Number.Integer, u'1'),
        (Token.Operator, u'^'),
        (Token.Literal.Number.Integer, u'3'),
        (Token.Operator, u'+'),
        (Token.Punctuation, u'('),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Operator, u'-'),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Name, u'gcd'),
        (Token.Punctuation, u'('),
        (Token.Name, u'a'),
        (Token.Operator, u','),
        (Token.Name, u'b'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 3: test_walrus_operator
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_walrus_operator(lexer3):
    """
    Tests that ':=' is recognized as an Operator
    """
    fragment = u'if (a := 2) > 4:'
    tokens = [
        (Token.Keyword, 'if'),
        (Token.Text, ' '),
        (Token.Punctuation, '('),
        (Token.Name, 'a'),
        (Token.Text, ' '),
        (Token.Operator, ':='),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '2'),
        (Token.Punctuation, ')'),
        (Token.Text, ' '),
        (Token.Operator, '>'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '4'),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
    ]
    assert list(lexer3.get_tokens(fragment)) == tokens
Example 4: test_escaped_bracestring
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_escaped_bracestring(lexer):
    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
    tokens = [
        (Token.Name, u'str'),
        (Token.Operator, u'.'),
        (Token.Name, u'gsub'),
        (Token.Punctuation, u'('),
        (Token.Literal.String.Regex, u'%r{'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'}'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (Token.Literal.String.Double, u'"'),
        (Token.Literal.String.Double, u'/'),
        (Token.Literal.String.Double, u'"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 5: test_single_line
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_single_line(lexer):
    """
    Test that a single line with strings, a method, and numbers is parsed correctly.
    """
    fragment = "set('T',300,'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 6: test_line_continuation
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_line_continuation(lexer):
    """
    Test that line continuation by ellipses does not produce generic
    output on the second line.
    """
    fragment = "set('T',300,...\n'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Keyword, '...'),
        (Token.Text, '\n'),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 7: test_multiple_spaces_variable_assignment
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_multiple_spaces_variable_assignment(lexer):
    """
    Test that multiple spaces with an equal sign doesn't get formatted to a string.
    """
    fragment = 'x  = 100;\n'
    tokens = [
        (Token.Name, 'x'),
        (Token.Text, ' '),
        (Token.Text, ' '),
        (Token.Punctuation, '='),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '100'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 8: test_operator_multiple_space
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_operator_multiple_space(lexer):
    """
    Test that multiple spaces with an operator doesn't get formatted to a string.
    """
    fragment = 'x  > 100;\n'
    tokens = [
        (Token.Name, 'x'),
        (Token.Text, ' '),
        (Token.Text, ' '),
        (Token.Operator, '>'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '100'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 9: test_string_escaping_run
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_string_escaping_run(lexer):
    fragment = '<?php $x="{\\""; ?>\n'
    tokens = [
        (Token.Comment.Preproc, '<?php'),
        (Token.Text, ' '),
        (Token.Name.Variable, '$x'),
        (Token.Operator, '='),
        (Token.Literal.String.Double, '"'),
        (Token.Literal.String.Double, '{'),
        (Token.Literal.String.Escape, '\\"'),
        (Token.Literal.String.Double, '"'),
        (Token.Punctuation, ';'),
        (Token.Text, ' '),
        (Token.Comment.Preproc, '?>'),
        (Token.Other, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 10: test_peg_modified_strings
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_peg_modified_strings(lexer_peg):
    # see for example:
    # - http://textx.github.io/Arpeggio/
    # - https://nim-lang.org/docs/pegs.html
    # - https://github.com/erikrose/parsimonious
    fragment = u'~"regex" i"insensitive" "multimod"ilx ("not modified")\n'
    tokens = [
        # can't handle parsimonious-style regex while ~ is a cut operator
        (Token.Operator, u'~'),
        (Token.String.Double, u'"regex"'),
        (Token.Text, u' '),
        (Token.String.Double, u'i"insensitive"'),
        (Token.Text, u' '),
        (Token.String.Double, u'"multimod"ilx'),
        (Token.Text, u' '),
        (Token.Punctuation, u'('),
        (Token.String.Double, u'"not modified"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens
Example 11: test_keywords_with_dollar
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_keywords_with_dollar(lexer):
    fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
    expected = [
        (Token.Keyword.Declaration, u'DIM'),
        (Token.Text.Whitespace, u' '),
        (Token.Name.Variable.Global, u'x'),
        (Token.Text, u'\n'),
        (Token.Name.Variable.Global, u'x'),
        (Token.Text.Whitespace, u' '),
        (Token.Operator, u'='),
        (Token.Text.Whitespace, u' '),
        (Token.Keyword.Reserved, u'RIGHT$'),
        (Token.Punctuation, u'('),
        (Token.Literal.String.Double, u'"abc"'),
        (Token.Punctuation, u','),
        (Token.Text.Whitespace, u' '),
        (Token.Literal.Number.Integer.Long, u'1'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == expected
Example 12: test_literal_number_nested_expression
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_literal_number_nested_expression(lexer):
    fragment = u'@(1+(2+3));\n'
    expected = [
        (Token.Literal, u'@('),
        (Token.Literal.Number.Integer, u'1'),
        (Token.Operator, u'+'),
        (Token.Punctuation, u'('),
        (Token.Literal.Number.Integer, u'2'),
        (Token.Operator, u'+'),
        (Token.Literal.Number.Integer, u'3'),
        (Token.Punctuation, u')'),
        (Token.Literal, u')'),
        (Token.Punctuation, u';'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == expected
Example 13: test_function_call
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_function_call(lexer):
    fragment = u'selected("Sound", i+(a*b))\n'
    tokens = [
        (Token.Name.Function, u'selected'),
        (Token.Punctuation, u'('),
        (Token.Literal.String, u'"'),
        (Token.Literal.String, u'Sound'),
        (Token.Literal.String, u'"'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (Token.Text, u'i'),
        (Token.Operator, u'+'),
        (Token.Text, u'('),
        (Token.Text, u'a'),
        (Token.Operator, u'*'),
        (Token.Text, u'b'),
        (Token.Text, u')'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 14: test_simple_function
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_simple_function(lexer):
    fragment = "func abc(arg):\n\tprint(\"Hello, World!\")"
    tokens = [
        (Token.Keyword, "func"),
        (Token.Text, " "),
        (Token.Name, "abc"),
        (Token.Punctuation, "("),
        (Token.Name, "arg"),
        (Token.Punctuation, ")"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Name.Builtin, "print"),
        (Token.Punctuation, "("),
        (Token.Literal.String.Double, "\""),
        (Token.Literal.String.Double, "Hello, World!"),
        (Token.Literal.String.Double, "\""),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 15: test_signal
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Punctuation [as alias]
def test_signal(lexer):
    fragment = "signal sig (arg1, arg2)"
    tokens = [
        (Token.Keyword, "signal"),
        (Token.Text, " "),
        (Token.Name, "sig"),
        (Token.Text, " "),
        (Token.Punctuation, "("),
        (Token.Name, "arg1"),
        (Token.Punctuation, ","),
        (Token.Text, " "),
        (Token.Name, "arg2"),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens