

Python Token.String Code Examples

This article collects typical usage examples of the pygments.token.Token.String token type, gathered from open-source Python projects. If you are wondering what Token.String is, how it is used, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its parent type, pygments.token.Token.


Seven code examples of Token.String are shown below, ordered by popularity.
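Before the examples, a minimal sketch (not taken from any of the projects below) of what Token.String is: Pygments token types form a hierarchy, so a subtype such as Token.String.Double tests as contained in Token.String, which is how the examples below match any kind of string token.

from pygments.lexers import PythonLexer
from pygments.token import Token

# Token types are hierarchical: a subtype is "in" its parent type.
assert Token.String.Double in Token.String
assert Token.String not in Token.Comment

# Collect every string token a lexer produces.
code = 's = "hello"\n'
strings = [v for t, v in PythonLexer().get_tokens(code) if t in Token.String]
print(strings)  # ['"', 'hello', '"'] -- exact splitting may vary by Pygments version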

Example 1: get_tokens_unprocessed

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
def get_tokens_unprocessed(self, text):
    buf = ''
    idx = 0
    for i, t, v in self.lang.get_tokens_unprocessed(text):
        if t in Token.Comment or t in Token.String:
            if buf:
                for x in self.get_tokens_aux(idx, buf):
                    yield x
                buf = ''
            yield i, t, v
        else:
            if not buf:
                idx = i
            buf += v
    if buf:
        for x in self.get_tokens_aux(idx, buf):
            yield x
Developer: joxeankoret, Project: pigaios, Lines: 19, Source file: latex.py
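Example 1 passes Token.Comment and Token.String tokens through unchanged while buffering everything else, so that get_tokens_aux can re-process each contiguous non-comment, non-string run in one piece. The method assumes it lives on a lexer class that provides self.lang (an inner lexer) and get_tokens_aux; the hypothetical sketch below shows one such class (the class name and the inner TexLexer are assumptions, not taken from pigaios), with the Example 1 method meant to be defined on it.

from pygments.lexer import Lexer
from pygments.lexers import TexLexer
from pygments.token import Token

class CommentStringFilterLexer(Lexer):
    # Hypothetical host class for the get_tokens_unprocessed shown above.
    def __init__(self, **options):
        super().__init__(**options)
        self.lang = TexLexer(**options)  # inner lexer; TexLexer is an assumption

    def get_tokens_aux(self, idx, text):
        # Trivial stand-in: re-emit the buffered run as plain text at offset idx.
        yield idx, Token.Text, text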

Example 2: test_peg_basic

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
def test_peg_basic(lexer_peg):
    fragment = u'rule<-("terminal"/nonterminal/[cls])*\n'
    tokens = [
        (Token.Name.Class, u'rule'),
        (Token.Operator, u'<-'),
        (Token.Punctuation, u'('),
        (Token.String.Double, u'"terminal"'),
        (Token.Operator, u'/'),
        (Token.Name.Class, u'nonterminal'),
        (Token.Operator, u'/'),
        (Token.Punctuation, u'['),
        (Token.String, u'cls'),
        (Token.Punctuation, u']'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 20, Source file: test_grammar_notation.py
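This test (and Examples 3 and 4 below) relies on a lexer_peg pytest fixture that is not shown on this page. A minimal sketch of what that fixture presumably looks like, assuming Pygments' PegLexer:

import pytest
from pygments.lexers.grammar_notation import PegLexer

@pytest.fixture(scope='module')
def lexer_peg():
    yield PegLexer()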

Example 3: test_peg_modified_strings

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
def test_peg_modified_strings(lexer_peg):
    # see for example:
    # - http://textx.github.io/Arpeggio/
    # - https://nim-lang.org/docs/pegs.html
    # - https://github.com/erikrose/parsimonious
    fragment = u'~"regex" i"insensitive" "multimod"ilx ("not modified")\n'
    tokens = [
        # can't handle parsimonious-style regex while ~ is a cut operator
        (Token.Operator, u'~'),
        (Token.String.Double, u'"regex"'),
        (Token.Text, u' '),
        (Token.String.Double, u'i"insensitive"'),
        (Token.Text, u' '),
        (Token.String.Double, u'"multimod"ilx'),
        (Token.Text, u' '),
        (Token.Punctuation, u'('),
        (Token.String.Double, u'"not modified"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 23, Source file: test_grammar_notation.py

Example 4: test_peg_operators

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
def test_peg_operators(lexer_peg):
    # see for example:
    # - https://github.com/gvanrossum/pegen
    # - https://nim-lang.org/docs/pegs.html
    fragment = u"rule = 'a' | 'b'\n"
    tokens = [
        (Token.Name.Class, u'rule'),
        (Token.Text, u' '),
        (Token.Operator, u'='),
        (Token.Text, u' '),
        (Token.String.Single, u"'a'"),
        (Token.Text, u' '),
        (Token.Operator, u'|'),
        (Token.Text, u' '),
        (Token.String.Single, u"'b'"),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens
    fragment = u"rule: 'a' ~ 'b'\n"
    tokens = [
        (Token.Name.Class, u'rule'),
        (Token.Operator, u':'),
        (Token.Text, u' '),
        (Token.String.Single, u"'a'"),
        (Token.Text, u' '),
        (Token.Operator, u'~'),
        (Token.Text, u' '),
        (Token.String.Single, u"'b'"),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 33, Source file: test_grammar_notation.py

Example 5: test_preamble

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
def test_preamble(lexer):
    data = u'@PREAMBLE{"% some LaTeX code here"}'
    tokens = [
        (Token.Name.Class, u'@PREAMBLE'),
        (Token.Punctuation, u'{'),
        (Token.String, u'"'),
        (Token.String, u'% some LaTeX code here'),
        (Token.String, u'"'),
        (Token.Punctuation, u'}'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(data)) == tokens 
Developer: pygments, Project: pygments, Lines: 14, Source file: test_bibtex.py

Example 6: test_mismatched_brace

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
def test_mismatched_brace(lexer):
    data = '@PREAMBLE(""}'
    tokens = [
        (Token.Name.Class, u'@PREAMBLE'),
        (Token.Punctuation, u'('),
        (Token.String, u'"'),
        (Token.String, u'"'),
        (Token.Error, u'}'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(data)) == tokens 
Developer: pygments, Project: pygments, Lines: 13, Source file: test_bibtex.py
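Examples 5 and 6 likewise assume a lexer pytest fixture that is not shown here. A minimal sketch, assuming Pygments' BibTeXLexer:

import pytest
from pygments.lexers.bibtex import BibTeXLexer

@pytest.fixture(scope='module')
def lexer():
    yield BibTeXLexer()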

Example 7: test_256esc_seq

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token import String [as alias]
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import Python3Lexer

# MyStyle is a custom Style defined elsewhere in the test module (see the sketch below).
def test_256esc_seq():
    """
    Test that a few escape sequences are actually used when using ansi<> color
    codes.
    """
    def termtest(x):
        return highlight(x, Python3Lexer(),
                         Terminal256Formatter(style=MyStyle))

    assert '32;101' in termtest('0x123')
    assert '92;42' in termtest('123')
    assert '90' in termtest('#comment')
    assert '94;41' in termtest('"String"') 
Developer: pygments, Project: pygments, Lines: 15, Source file: test_terminal_formatter.py
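Example 7 depends on a MyStyle class defined elsewhere in test_terminal_formatter.py and not shown on this page. The sketch below is a plausible reconstruction: the style strings are inferred from the asserted escape codes, so treat the exact color names as assumptions.

from pygments.style import Style
from pygments.token import Token

class MyStyle(Style):
    # Inferred mapping; each entry is chosen to match one asserted escape sequence.
    styles = {
        Token.Comment:    'ansibrightblack',               # '90'
        Token.String:     'ansibrightblue bg:ansired',     # '94;41'
        Token.Number:     'ansibrightgreen bg:ansigreen',  # '92;42'
        Token.Number.Hex: 'ansigreen bg:ansibrightred',    # '32;101'
    }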


Note: The pygments.token.Token.String examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.