当前位置: 首页>>代码示例>>Python>>正文


Python Token.Text方法代码示例

本文整理汇总了Python中pygments.token.Token.Text方法的典型用法代码示例。如果您正苦于以下问题:Python Token.Text方法的具体用法?Python Token.Text怎么用?Python Token.Text使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pygments.token.Token的用法示例。


在下文中一共展示了Token.Text方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: get_colors

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def get_colors(stylename):
    """Construct the keys to be used building the base stylesheet
    from a template.

    Returns a dict with ``bgcolor``, ``select`` and ``fgcolor`` entries
    taken from the named pygments style.
    """
    style = get_style_by_name(stylename)
    fgcolor = style.style_for_token(Token.Text)['color'] or ''
    if len(fgcolor) in (3, 6):
        # could be 'abcdef' or 'ace' hex, which needs '#' prefix
        try:
            int(fgcolor, 16)
        except ValueError:
            # BUG FIX: int() raises ValueError (not TypeError) for a
            # non-hex string; the old "except TypeError" never fired and
            # a 3/6-char non-hex color name crashed this function.
            pass
        else:
            fgcolor = "#" + fgcolor

    return dict(
        bgcolor=style.background_color,
        select=style.highlight_color,
        fgcolor=fgcolor
    )
开发者ID:ktraunmueller,项目名称:Computable,代码行数:21,代码来源:styles.py

示例2: filter

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def filter(self, lexer, stream):
    """Retype tokens following a known prefix word.

    A ``Name.Function`` token whose value is in ``self.prefixes`` is
    retyped as ``Keyword.Type`` and arms a one-shot flag; while armed,
    the next ``Name.Variable`` becomes either ``Keyword.Type`` (if it is
    itself a prefix) or ``Name.Function``.  Any other non-``Text`` token
    disarms the flag.  ``Text`` tokens (whitespace) are passed through
    without affecting the state.
    """
    armed = False
    for kind, text in stream:
        if armed and kind is Token.Name.Variable:
            kind = (Token.Keyword.Type if text in self.prefixes
                    else Token.Name.Function)
        elif kind is Token.Name.Function and text in self.prefixes:
            armed = True
            kind = Token.Keyword.Type
        elif kind is not Token.Text:
            armed = False
        yield kind, text
开发者ID:wapiflapi,项目名称:gxf,代码行数:21,代码来源:disassembly.py

示例3: _get_number_styles

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
    """Return (background, number, highlighted-number) styles for the gutter.

    On terminals without 256/true color support the two number styles
    collapse to an empty ``Style`` so line numbers render unstyled.
    """
    background_style = Style(bgcolor=self._pygments_style_class.background_color)
    if console.color_system not in ("256", "truecolor"):
        plain = Style()
        return background_style, plain, plain
    # Base the gutter colors on the theme's Token.Text style so they
    # blend with the highlighted code.
    base_text = self._get_theme_style(Token.Text)
    number_style = Style.chain(
        background_style,
        base_text,
        Style(color=self._get_line_numbers_color()),
    )
    highlight_number_style = Style.chain(
        background_style,
        base_text,
        Style(bold=True, color=self._get_line_numbers_color(0.9)),
    )
    return background_style, number_style, highlight_number_style
开发者ID:willmcgugan,项目名称:rich,代码行数:19,代码来源:syntax.py

示例4: test_ignored_multiline_after_inline_comment_after_star_comment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_ignored_multiline_after_inline_comment_after_star_comment(self):
    """
    ```stata
    * // /* Ignored due to inline comment
    disp "Printed 1"
    ```
    """
    # The whole first line is a single-line comment; '/*' does not open
    # a multiline comment because it follows '//'.
    single = Token.Comment.Single
    want = [(single, s) for s in ('*', ' ', '//', ' ', '/', '*', ' ', 'a')]
    want += [(Token.Text, s) for s in ('\n', 'a', '\n')]
    assert CodeManager('* // /* a\na').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:24,代码来源:test_stata_lexer.py

示例5: test_ignored_multiline_after_inline_comment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_ignored_multiline_after_inline_comment(self):
    """
    ```stata
    // /* Also ignored due to inline comment
    disp "Printed 2"
    ```
    """
    # '/*' after '//' stays part of the inline comment; the next line
    # is ordinary text.
    single = Token.Comment.Single
    want = [(single, s) for s in ('//', ' ', '/', '*', ' ', 'a')]
    want += [(Token.Text, s) for s in ('\n', 'a', '\n')]
    assert CodeManager('// /* a\na').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:22,代码来源:test_stata_lexer.py

示例6: test_line_continuation_comment_after_star_comment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_line_continuation_comment_after_star_comment(self):
    """
    ```stata
    * ///
    disp "Not printed. Line continuation applies"
    ```
    """
    # '///' inside a star comment still continues the comment onto the
    # next line; only the line after that is plain text.
    want = [(Token.Comment.Single, s) for s in ('*', ' ')]
    want += [(Token.Comment.Special, s) for s in ('///\n', 'a', '\n')]
    want += [(Token.Text, s) for s in ('a', '\n')]
    assert CodeManager('* ///\na\na').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:20,代码来源:test_stata_lexer.py

示例7: test_line_continuation_ignored_after_inline_comment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_line_continuation_ignored_after_inline_comment(self):
    """
    ```stata
    // /// Line continuation ignored due to inline comment
    disp "Printed 3"
    ```
    """
    # '///' after '//' is just three slashes of comment text, not a
    # line continuation.
    single = Token.Comment.Single
    want = [(single, s) for s in ('//', ' ', '/', '/', '/', ' ', 'a')]
    want += [(Token.Text, s) for s in ('\n', 'a', '\n')]
    assert CodeManager('// /// a\na').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:23,代码来源:test_stata_lexer.py

示例8: test_multiline_comment_across_empty_whitespace_lines

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_multiline_comment_across_empty_whitespace_lines(self):
    """
    ```stata
    di /*

    */ "hi"
    ```
    """
    # A '/* ... */' block spanning a blank line is one multiline
    # comment; surrounding code on both sides stays Text.
    want = [(Token.Text, s) for s in ('a', ' ')]
    want += [(Token.Comment.Multiline, s) for s in ('/*', '\n', '\n', '*/')]
    want += [(Token.Text, s) for s in (' ', 'a', '\n')]
    assert CodeManager('a /*\n\n*/ a').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:23,代码来源:test_stata_lexer.py

示例9: test4

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test4(self):
    # A '///' line continuation followed by a line starting with '///'
    # keeps everything after the first '///' inside the special comment,
    # including the trailing slashes.
    want = [(Token.Text, s) for s in ('a', ' ')]
    want += [(Token.Comment.Special, s)
             for s in ('///', '\n', '///', ' ', 'a', ' ', '/', '/', '/', '\n')]
    assert CodeManager('a ///\n/// a ///').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:19,代码来源:test_stata_lexer.py

示例10: test5

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test5(self):
    # After a '///' continuation, a line starting with '//' becomes a
    # single-line comment; the trailing newline is plain Text again.
    want = [(Token.Text, s) for s in ('a', ' ')]
    want += [(Token.Comment.Special, s) for s in ('///', '\n')]
    want += [(Token.Comment.Single, s)
             for s in ('//', ' ', 'a', ' ', '/', '/', '/')]
    want.append((Token.Text, '\n'))
    assert CodeManager('a ///\n// a ///').tokens_fp_all == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:19,代码来源:test_stata_lexer.py

示例11: test_cap_chunk

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_cap_chunk(self):
    # 'cap { ... }' groups the braced region into TextBlock tokens.
    want = [(Token.Text, ch) for ch in 'cap ']
    want += [(Token.TextBlock, ch) for ch in '{\n a\n}']
    want.append((Token.Text, '\n'))
    assert CodeManager('cap {\n a\n}').tokens_final == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:18,代码来源:test_stata_lexer.py

示例12: test_cap_chunk_recursive

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_cap_chunk_recursive(self):
    # Nested braces inside a 'cap { ... }' block are all part of the
    # same TextBlock region.
    want = [(Token.Text, ch) for ch in 'cap ']
    want += [(Token.TextBlock, ch) for ch in '{\n{\n a\n}\n}']
    want.append((Token.Text, '\n'))
    assert CodeManager('cap {\n{\n a\n}\n}').tokens_final == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:22,代码来源:test_stata_lexer.py

示例13: test_cap_chunk_with_inner_line_comment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_cap_chunk_with_inner_line_comment(self):
    # A '*' line comment inside the block is dropped from the final
    # tokens, so its '{' does not open a nested block.
    want = [(Token.Text, ch) for ch in 'cap ']
    want += [(Token.TextBlock, ch) for ch in '{\n\n a\n}']
    want.append((Token.Text, '\n'))
    assert CodeManager('cap {\n*{\n a\n}').tokens_final == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:19,代码来源:test_stata_lexer.py

示例14: test_cap_chunk_with_inner_multiline_comment

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_cap_chunk_with_inner_multiline_comment(self):
    # A '/*{*/' comment inside the block is stripped, leaving the same
    # final token stream as an empty comment line would.
    want = [(Token.Text, ch) for ch in 'cap ']
    want += [(Token.TextBlock, ch) for ch in '{\n\n a\n}']
    want.append((Token.Text, '\n'))
    assert CodeManager('cap {\n/*{*/\n a\n}').tokens_final == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:19,代码来源:test_stata_lexer.py

示例15: test_if_block_with_preceding_string

# 需要导入模块: from pygments.token import Token [as 别名]
# 或者: from pygments.token.Token import Text [as 别名]
def test_if_block_with_preceding_string(self):
    """ GH issue 139 """
    # Double-quoted strings before '{' must not confuse block
    # detection: everything up to the brace is plain Text.
    want = [(Token.Text, ch) for ch in 'if "0" == "1" ']
    want += [(Token.TextBlock, ch) for ch in '{\n']
    assert CodeManager('if "0" == "1" {').tokens_final == want
开发者ID:kylebarron,项目名称:stata_kernel,代码行数:24,代码来源:test_stata_lexer.py


注:本文中的pygments.token.Token.Text方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。