當前位置: 首頁>>代碼示例>>Python>>正文


Python token.Punctuation方法代碼示例

本文整理匯總了Python中pygments.token.Punctuation方法的典型用法代碼示例。如果您正苦於以下問題:Python token.Punctuation方法的具體用法?Python token.Punctuation怎麽用?Python token.Punctuation使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在pygments.token的用法示例。


在下文中一共展示了token.Punctuation方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_can_cope_with_destructuring

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_can_cope_with_destructuring(lexer):
    """Kotlin lexer: a destructuring declaration ``val (a, b) = `` splits into
    keyword, property names, and punctuation tokens (trailing newline is
    appended by ``get_tokens``)."""
    code = 'val (a, b) = '
    expected = [
        (Keyword, 'val'),
        (Text, ' '),
        (Punctuation, '('),
        (Name.Property, 'a'),
        (Punctuation, ','),
        (Text, ' '),
        (Name.Property, 'b'),
        (Punctuation, ')'),
        (Text, ' '),
        (Punctuation, '='),
        (Text, ' '),
        (Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(code))
開發者ID:pygments,項目名稱:pygments,代碼行數:19,代碼來源:test_kotlin.py

示例2: test_can_cope_generics_in_destructuring

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_can_cope_generics_in_destructuring(lexer):
    """Kotlin lexer: generic type annotations inside a destructuring
    declaration (``List<Something>``, ``Set<Wobble>``) tokenize as
    property/name/punctuation rather than confusing the lexer."""
    code = 'val (a: List<Something>, b: Set<Wobble>) ='
    expected = [
        (Keyword, 'val'),
        (Text, ' '),
        (Punctuation, '('),
        (Name.Property, 'a'),
        (Punctuation, ':'),
        (Text, ' '),
        (Name.Property, 'List'),
        (Punctuation, '<'),
        (Name, 'Something'),
        (Punctuation, '>'),
        (Punctuation, ','),
        (Text, ' '),
        (Name.Property, 'b'),
        (Punctuation, ':'),
        (Text, ' '),
        (Name.Property, 'Set'),
        (Punctuation, '<'),
        (Name, 'Wobble'),
        (Punctuation, '>'),
        (Punctuation, ')'),
        (Text, ' '),
        (Punctuation, '='),
        (Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(code))
開發者ID:pygments,項目名稱:pygments,代碼行數:30,代碼來源:test_kotlin.py

示例3: test_can_cope_with_generics

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_can_cope_with_generics(lexer):
    """Kotlin lexer: a generic extension-function header with a reified type
    parameter and a qualified generic return type produces the expected
    keyword / name / punctuation token stream."""
    code = 'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
    expected = [
        (Keyword, 'inline fun'),
        (Text, ' '),
        (Punctuation, '<'),
        (Keyword, 'reified'),
        (Text, ' '),
        (Name, 'T'),
        (Text, ' '),
        (Punctuation, ':'),
        (Text, ' '),
        (Name, 'ContractState'),
        (Punctuation, '>'),
        (Text, ' '),
        (Name.Class, 'VaultService'),
        (Punctuation, '.'),
        (Name.Function, 'queryBy'),
        (Punctuation, '('),
        (Punctuation, ')'),
        (Punctuation, ':'),
        (Text, ' '),
        (Name, 'Vault'),
        (Punctuation, '.'),
        (Name, 'Page'),
        (Punctuation, '<'),
        (Name, 'T'),
        (Punctuation, '>'),
        (Text, ' '),
        (Punctuation, '{'),
        (Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(code))
開發者ID:pygments,項目名稱:pygments,代碼行數:35,代碼來源:test_kotlin.py

示例4: test_can_reject_almost_float

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_can_reject_almost_float(lexer):
    """A leading dot followed by an identifier (``.e1``) must NOT lex as a
    float literal — it should split into punctuation plus a name."""
    expected = ((Punctuation, '.'), (Name, 'e1'))
    _assert_tokens_match(lexer, '.e1', expected)
開發者ID:pygments,項目名稱:pygments,代碼行數:4,代碼來源:test_sql.py

示例5: test_can_reject_almost_float

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_can_reject_almost_float(lexer):
    """``.e1`` looks like the start of a float but is not one: expect a
    punctuation dot followed by the name ``e1``."""
    expected = ((Punctuation, '.'), (Name, 'e1'))
    assert_tokens_match(lexer, '.e1', expected)
開發者ID:pygments,項目名稱:pygments,代碼行數:4,代碼來源:test_basic.py

示例6: test_call

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_call(lexer):
    """R lexer: a simple call ``f(1, a)`` yields a function name, the argument
    tokens, and the surrounding punctuation."""
    code = 'f(1, a)\n'
    expected = [
        (Name.Function, 'f'),
        (Punctuation, '('),
        (Token.Literal.Number, '1'),
        (Punctuation, ','),
        (Token.Text, ' '),
        (Token.Name, 'a'),
        (Punctuation, ')'),
        (Token.Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(code))
開發者ID:pygments,項目名稱:pygments,代碼行數:15,代碼來源:test_r.py

示例7: test_indexing

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_indexing(lexer):
    """R lexer: bracket indexing ``a[1]`` tokenizes as name, punctuation
    brackets, and a numeric literal."""
    code = 'a[1]'
    expected = [
        (Token.Name, 'a'),
        (Token.Punctuation, '['),
        (Token.Literal.Number, '1'),
        (Token.Punctuation, ']'),
        (Token.Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(code))
開發者ID:pygments,項目名稱:pygments,代碼行數:12,代碼來源:test_r.py

示例8: test_dot_indexing

# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import Punctuation [as 別名]
def test_dot_indexing(lexer):
    """R lexer: ``.`` is a valid identifier in R, so ``.[1]`` lexes as a name
    followed by bracket indexing of a numeric literal."""
    code = '.[1]'
    expected = [
        (Token.Name, '.'),
        (Token.Punctuation, '['),
        (Token.Literal.Number, '1'),
        (Token.Punctuation, ']'),
        (Token.Text, '\n'),
    ]
    assert expected == list(lexer.get_tokens(code))
開發者ID:pygments,項目名稱:pygments,代碼行數:12,代碼來源:test_r.py


注:本文中的pygments.token.Punctuation方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。