

Python token.Text Code Examples

This article collects typical usage examples of pygments.token.Text (a token type) from real Python code. If you are wondering what token.Text is, how it is used, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples from the pygments.token module.


Fifteen code examples of token.Text are shown below, ordered by popularity.
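Before the examples, here is a minimal sketch (assuming only that pygments is installed) of what Text represents: it is the catch-all token type that lexers emit for plain text and whitespace between more specific tokens.

from pygments.lexers import PythonLexer
from pygments.token import Text

# Print only the Text-like tokens produced for a tiny snippet.
for toktype, value in PythonLexer().get_tokens('x = 1\n'):
    if toktype in Text:  # 'in' also matches subtypes such as Text.Whitespace
        print(toktype, repr(value))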

Example 1: content_callback

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def content_callback(self, match):
        content_type = getattr(self, "content_type", None)
        content = match.group()
        offset = match.start()
        if content_type:
            from pygments.lexers import get_lexer_for_mimetype

            try:
                lexer = get_lexer_for_mimetype(content_type)
            except ClassNotFound:
                pass
            else:
                for idx, token, value in lexer.get_tokens_unprocessed(content):
                    yield offset + idx, token, value
                return
        yield offset, Text, content 
Developer: apache | Project: couchdb-documentation | Lines: 18 | Source: httpdomain.py
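For context, a hedged sketch of the delegation used above: get_lexer_for_mimetype() resolves a MIME type to a lexer, and ClassNotFound (importable from pygments.util) is raised for unknown types, which is why the snippet falls back to yielding a single Text token.

from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

try:
    lexer = get_lexer_for_mimetype('application/json')
    print(lexer.name)  # the JSON lexer handles this MIME type
except ClassNotFound:
    print('no lexer registered for this MIME type')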

Example 2: get_background_style_defs

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def get_background_style_defs(self, arg=None):
        prefix = self.get_css_prefix(arg)
        bg_color = self.style.background_color
        hl_color = self.style.highlight_color

        lines = []

        if arg and not self.nobackground and bg_color is not None:
            text_style = ''
            if Text in self.ttype2class:
                text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
            lines.insert(
                0, '%s{ background: %s;%s }' % (
                    prefix(''), bg_color, text_style
                )
            )
        if hl_color is not None:
            lines.insert(
                0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
            )

        return lines 
Developer: pygments | Project: pygments | Lines: 24 | Source: html.py
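A brief usage sketch: these background rules end up in the CSS produced by HtmlFormatter.get_style_defs(); the '.highlight' selector below is an illustrative argument, not a requirement.

from pygments.formatters import HtmlFormatter

fmt = HtmlFormatter(style='default')
css = fmt.get_style_defs('.highlight')
# The generated CSS includes background rules derived from the style's
# background_color and highlight_color, as built by the method above.
print('\n'.join(line for line in css.splitlines() if 'background' in line))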

Example 3: test_formatter_encodings

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_formatter_encodings():
    from pygments.formatters import HtmlFormatter

    # unicode output
    fmt = HtmlFormatter()
    tokens = [(Text, u"ä")]
    out = format(tokens, fmt)
    assert type(out) is str
    assert u"ä" in out

    # encoding option
    fmt = HtmlFormatter(encoding="latin1")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("latin1") in format(tokens, fmt)

    # encoding and outencoding option
    fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("utf8") in format(tokens, fmt) 
Developer: pygments | Project: pygments | Lines: 21 | Source: test_basic_api.py

Example 4: test_can_cope_with_destructuring

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_can_cope_with_destructuring(lexer):
    fragment = u'val (a, b) = '
    tokens = [
        (Keyword, u'val'),
        (Text, u' '),
        (Punctuation, u'('),
        (Name.Property, u'a'),
        (Punctuation, u','),
        (Text, u' '),
        (Name.Property, u'b'),
        (Punctuation, u')'),
        (Text, u' '),
        (Punctuation, u'='),
        (Text, u' '),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments | Project: pygments | Lines: 19 | Source: test_kotlin.py
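A side note on the trailing (Text, u'\n') in token lists like the one above: get_tokens() guarantees the output ends with a newline even when the input does not, so lexer tests conventionally expect it. A minimal check, assuming any bundled lexer:

from pygments.lexers import PythonLexer
from pygments.token import Text

tokens = list(PythonLexer().get_tokens('x = 1'))  # input lacks a trailing newline
assert tokens[-1][1] == '\n'   # get_tokens() appends one
assert tokens[-1][0] in Text   # its type is Text or a subtype such as Whitespace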

Example 5: test_gcd_expr

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_gcd_expr(lexer):
    fragment = u'1^3+(5-5)*gcd(a,b)\n'
    tokens = [
(Token.Number.Integer, u'1'),  # Number is an alias of Literal.Number; both spellings appear below
        (Token.Operator, u'^'),
        (Token.Literal.Number.Integer, u'3'),
        (Token.Operator, u'+'),
        (Token.Punctuation, u'('),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Operator, u'-'),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Name, u'gcd'),
        (Token.Punctuation, u'('),
        (Token.Name, u'a'),
        (Token.Operator, u','),
        (Token.Name, u'b'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments | Project: pygments | Lines: 24 | Source: test_ezhil.py

Example 6: test_escaped_bracestring

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_escaped_bracestring(lexer):
    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
    tokens = [
        (Token.Name, u'str'),
        (Token.Operator, u'.'),
        (Token.Name, u'gsub'),
        (Token.Punctuation, u'('),
        (Token.Literal.String.Regex, u'%r{'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'}'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (Token.Literal.String.Double, u'"'),
        (Token.Literal.String.Double, u'/'),
        (Token.Literal.String.Double, u'"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments | Project: pygments | Lines: 22 | Source: test_ruby.py

Example 7: test_enhanced_for

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_enhanced_for(lexer):
    fragment = u'label:\nfor(String var2: var1) {}\n'
    tokens = [
        (Name.Label, u'label:'),
        (Text, u'\n'),
        (Keyword, u'for'),
        (Punctuation, u'('),
        (Name, u'String'),
        (Text, u' '),
        (Name, u'var2'),
        (Punctuation, u':'),
        (Text, u' '),
        (Name, u'var1'),
        (Punctuation, u')'),
        (Text, u' '),
        (Punctuation, u'{'),
        (Punctuation, u'}'),
        (Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments | Project: pygments | Lines: 22 | Source: test_java.py

Example 8: test_unquoted_querystring

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_unquoted_querystring(self):
        self.assertEqual(self.get_tokens('`echo name`==john'), [
            (Text, '`'),
            (Name.Builtin, 'echo'),
            (Text, 'name'),
            (Text, '`'),
            (Operator, '=='),
            (String, 'john')
        ])
        self.assertEqual(self.get_tokens('name==`echo john`'), [
            (Name, 'name'),
            (Operator, '=='),
            (Text, '`'),
            (Name.Builtin, 'echo'),
            (Text, 'john'),
            (Text, '`')
        ]) 
Developer: eliangcs | Project: http-prompt | Lines: 19 | Source: test_lexer.py

Example 9: test_unquoted_bodystring

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_unquoted_bodystring(self):
        self.assertEqual(self.get_tokens('`echo name`=john'), [
            (Text, '`'),
            (Name.Builtin, 'echo'),
            (Text, 'name'),
            (Text, '`'),
            (Operator, '='),
            (String, 'john')
        ])
        self.assertEqual(self.get_tokens('name=`echo john`'), [
            (Name, 'name'),
            (Operator, '='),
            (Text, '`'),
            (Name.Builtin, 'echo'),
            (Text, 'john'),
            (Text, '`')
        ]) 
Developer: eliangcs | Project: http-prompt | Lines: 19 | Source: test_lexer.py

Example 10: doctree_read (note: this Sphinx extension uses docutils' nodes.Text, not pygments.token.Text)

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def doctree_read(app, doctree):
    env = app.builder.env
    for node in doctree.traverse(addnodes.productionlist):
        for production in node:
            if not isinstance(production, addnodes.production):
                continue
            if not isinstance(production[-1], nodes.Text):
                continue
            parts = comment_re.split(production.pop().astext())
            new_nodes = []
            for s in parts:
                if comment_re.match(s):
                    new_nodes.append(nodes.emphasis(s, s))
                elif s:
                    new_nodes.append(nodes.Text(s))
            production += new_nodes 
Developer: landlab | Project: landlab | Lines: 18 | Source: landlab_ext.py

Example 11: test_skip_tokens_text

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def test_skip_tokens_text(self):
        tokens = [(token.Text, "\n"), (token.Name, "foo")]
        result, skipped = self.tokenizer.skip_tokens(tokens)
        self.assertEqual(result, tokens[1:])
        self.assertEqual(skipped, 1) 
Developer: danhper | Project: bigcode-tools | Lines: 7 | Source: java_tokenizer_test.py

Example 12: _should_skip

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def _should_skip(self, tok: RawToken) -> bool:
        if self.skip_text and tok[0] in token.Text:
            return True
        if self.skip_comments and tok[0] in token.Comment:
            return True
        return False 
Developer: danhper | Project: bigcode-tools | Lines: 8 | Source: tokenizer.py
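The membership tests above (tok[0] in token.Text) rely on pygments token subsumption: the in operator on token types matches the type itself and any of its subtypes. A small illustration:

from pygments.token import Token, Text, Comment

assert Token.Text.Whitespace in Text   # a subtype matches its parent
assert Comment.Single in Comment
assert Text not in Comment             # unrelated types do not match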

Example 13: header_callback

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def header_callback(self, match):
        if match.group(1).lower() == 'content-type':
            content_type = match.group(5).strip()
            if ';' in content_type:
                content_type = content_type[:content_type.find(';')].strip()
            self.content_type = content_type
        yield match.start(1), Name.Attribute, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator, match.group(3)
        yield match.start(4), Text, match.group(4)
        yield match.start(5), Literal, match.group(5)
        yield match.start(6), Text, match.group(6) 
Developer: preems | Project: nltk-server | Lines: 14 | Source: httpdomain.py
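For context, a hedged sketch (hypothetical minimal lexer, using pygments' documented RegexLexer callback API) of how a callback like header_callback is wired up: a rule may map a regex to a callable instead of a token type, and the callable yields (position, token, value) triples.

from pygments.lexer import RegexLexer
from pygments.token import Name, Operator, Text

class MiniHeaderLexer(RegexLexer):
    name = 'MiniHeader'

    def header_callback(self, match):
        # Emit one token per capture group, anchored at its offset.
        yield match.start(1), Name.Attribute, match.group(1)
        yield match.start(2), Operator, match.group(2)
        yield match.start(3), Text, match.group(3)

    tokens = {
        'root': [
            (r'([^:\n]+)(:)([^\n]*\n)', header_callback),
        ],
    }

# Usage: list(MiniHeaderLexer().get_tokens_unprocessed('Host: example.com\n'))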

Example 14: continuous_header_callback

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def continuous_header_callback(self, match):
        yield match.start(1), Text, match.group(1)
        yield match.start(2), Literal, match.group(2)
        yield match.start(3), Text, match.group(3) 
Developer: preems | Project: nltk-server | Lines: 6 | Source: httpdomain.py

Example 15: content_callback (essentially the same snippet as Example 1, vendored in a different project)

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Text [as alias]
def content_callback(self, match):
        content_type = getattr(self, 'content_type', None)
        content = match.group()
        offset = match.start()
        if content_type:
            from pygments.lexers import get_lexer_for_mimetype
            try:
                lexer = get_lexer_for_mimetype(content_type)
            except ClassNotFound:
                pass
            else:
                for idx, token, value in lexer.get_tokens_unprocessed(content):
                    yield offset + idx, token, value
                return
        yield offset, Text, content 
Developer: preems | Project: nltk-server | Lines: 17 | Source: httpdomain.py


Note: the pygments.token.Text examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.