

Python token.Whitespace usage examples

This article collects typical usage examples of Whitespace from the Python pygments.token module (strictly speaking, Whitespace is a token type, an alias for Token.Text.Whitespace, rather than a method). If you are wondering what pygments.token.Whitespace is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the pygments.token module.


The following presents six code examples of token.Whitespace, sorted by popularity by default.
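
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how the Whitespace token type is typically consumed: a lexer's get_tokens() yields (token_type, value) pairs, and Whitespace is compared against or filtered out. Note that, depending on the lexer and the Pygments version, blanks may be reported as Token.Text instead of Token.Whitespace.

# Minimal sketch (not from the examples below): filter Whitespace tokens out
# of a lexer's output. Depending on the lexer and Pygments version, blanks
# may be tagged Token.Text rather than Token.Whitespace.
from pygments.lexers import PythonLexer
from pygments.token import Whitespace

lexer = PythonLexer()
for token_type, value in lexer.get_tokens("x = 1\n"):
    if token_type in Whitespace:    # "in" also matches subtypes of Whitespace
        continue                    # skip blanks, keep only meaningful tokens
    print(token_type, repr(value))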

Example 1: _assert_are_tokens_of_type

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Whitespace [as alias]
def _assert_are_tokens_of_type(lexer, examples, expected_token_type):
    for test_number, example in enumerate(examples.split(), 1):
        token_count = 0
        for token_type, token_value in lexer.get_tokens(example):
            if token_type != Whitespace:
                token_count += 1
                assert token_type == expected_token_type, \
                    'token_type #%d for %s is %s but must be %s' % \
                    (test_number, token_value, token_type, expected_token_type)
        assert token_count == 1, \
            '%s must yield exactly 1 token instead of %d' % \
            (example, token_count) 
Developer ID: pygments, Project: pygments, Lines of code: 14, Source file: test_sql.py
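
A hypothetical way to call the helper above (this call is not part of test_sql.py; it assumes a Pygments version in which MySqlLexer tags blanks as Whitespace): each whitespace-separated example must lex to exactly one token of the expected type.

# Hypothetical usage, not from test_sql.py: every example in the string must
# produce exactly one non-whitespace token, and that token must be a Keyword.
from pygments.lexers.sql import MySqlLexer
from pygments.token import Keyword

_assert_are_tokens_of_type(MySqlLexer(), 'SELECT INSERT DELETE', Keyword)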

Example 2: _assert_tokens_match

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Whitespace [as alias]
def _assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
    actual_tokens = tuple(lexer.get_tokens(text))
    if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
        actual_tokens = tuple(actual_tokens[:-1])
    assert expected_tokens_without_trailing_newline == actual_tokens, \
        'text must yield expected tokens: %s' % text 
Developer ID: pygments, Project: pygments, Lines of code: 8, Source file: test_sql.py
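
A hypothetical call to this helper (again not part of test_sql.py; it assumes MySqlLexer reports a plain integer literal as Number.Integer): the expected token tuple is written without the trailing newline that get_tokens() appends, since the helper strips it.

# Hypothetical usage, not from test_sql.py: the trailing (Whitespace, '\n')
# appended by get_tokens() is stripped by the helper before the comparison.
from pygments.lexers.sql import MySqlLexer
from pygments.token import Number

_assert_tokens_match(MySqlLexer(), '123', ((Number.Integer, '123'),))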

Example 3: assert_are_tokens_of_type

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Whitespace [as alias]
def assert_are_tokens_of_type(lexer, examples, expected_token_type):
    for test_number, example in enumerate(examples.split(), 1):
        token_count = 0
        for token_type, token_value in lexer.get_tokens(example):
            if token_type != Whitespace:
                token_count += 1
                assert token_type == expected_token_type, \
                    'token_type #%d for %s is %s but must be %s' % \
                    (test_number, token_value, token_type, expected_token_type)
        assert token_count == 1, \
            '%s must yield exactly 1 token instead of %d' % (example, token_count) 
Developer ID: pygments, Project: pygments, Lines of code: 13, Source file: test_basic.py

Example 4: assert_tokens_match

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Whitespace [as alias]
def assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
    actual_tokens = tuple(lexer.get_tokens(text))
    if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
        actual_tokens = tuple(actual_tokens[:-1])
    assert expected_tokens_without_trailing_newline == actual_tokens, \
        'text must yield expected tokens: %s' % text 
Developer ID: pygments, Project: pygments, Lines of code: 8, Source file: test_basic.py

Example 5: _get_format

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Whitespace [as alias]
def _get_format(self, token):
        """ Returns a QTextCharFormat for token or None.
        """
        if token == Whitespace:
            return self.editor.whitespaces_foreground

        if token in self._formats:
            return self._formats[token]

        result = self._get_format_from_style(token, self._style)

        self._formats[token] = result
        return result 
Developer ID: TuringApp, Project: Turing, Lines of code: 15, Source file: pygments_sh.py
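
Example 5 caches one QTextCharFormat per token type and builds missing entries via _get_format_from_style (not shown here), which presumably reads the attributes of the active Pygments style. The underlying style lookup can be sketched without any Qt dependency using Pygments' public API:

# Qt-free sketch of the style lookup that a _get_format_from_style-like
# helper would perform (illustrative only, not the Turing project's code).
from pygments.styles import get_style_by_name
from pygments.token import Whitespace

style = get_style_by_name('default')
attrs = style.style_for_token(Whitespace)  # dict with 'color', 'bold', 'italic', ...
print(attrs['color'], attrs['bold'])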

Example 6: highlight_block

# Required import: from pygments import token [as alias]
# Or: from pygments.token import Whitespace [as alias]
def highlight_block(self, text, block):
        """
        Highlights the block using a pygments lexer.

        :param text: text of the block to highlight
        :param block: block to highlight
        """
        if self.color_scheme.name != self._pygments_style:
            self._pygments_style = self.color_scheme.name
            self._update_style()
        original_text = text
        if self.editor and self._lexer and self.enabled:
            if block.blockNumber():
                prev_data = self._prev_block.userData()
                if prev_data:
                    if hasattr(prev_data, "syntax_stack"):
                        self._lexer._saved_state_stack = prev_data.syntax_stack
                    elif hasattr(self._lexer, '_saved_state_stack'):
                        del self._lexer._saved_state_stack

            # Lex the text using Pygments
            index = 0
            usd = block.userData()
            if usd is None:
                usd = TextBlockUserData()
                block.setUserData(usd)
            tokens = list(self._lexer.get_tokens(text))
            for token, text in tokens:
                length = len(text)
                fmt = self._get_format(token)
                if token in [Token.Literal.String, Token.Literal.String.Doc,
                             Token.Comment]:
                    fmt.setObjectType(fmt.UserObject)
                self.setFormat(index, length, fmt)
                index += length

            if hasattr(self._lexer, '_saved_state_stack'):
                setattr(usd, "syntax_stack", self._lexer._saved_state_stack)
                # Clean up for the next go-round.
                del self._lexer._saved_state_stack

            # spaces
            text = original_text
            expression = QRegExp(r'\s+')
            index = expression.indexIn(text, 0)
            while index >= 0:
                index = expression.pos(0)
                length = len(expression.cap(0))
                self.setFormat(index, length, self._get_format(Whitespace))
                index = expression.indexIn(text, index + length)

            self._prev_block = block 
Developer ID: TuringApp, Project: Turing, Lines of code: 54, Source file: pygments_sh.py
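
Stripped of the Qt specifics, the core of highlight_block is simple bookkeeping: walk the lexer's (token, text) pairs while accumulating a character offset, so that each span can be formatted by its start position and length. A minimal, Qt-free sketch of that loop (illustrative, not the project's code):

# Qt-free sketch of the offset bookkeeping used in highlight_block above:
# each token's format would be applied to the span [start, start + length).
from pygments.lexers import PythonLexer
from pygments.token import Whitespace

def token_spans(lexer, line):
    """Yield (start, length, token_type) for every token in one line of text."""
    index = 0
    for token_type, value in lexer.get_tokens(line):
        length = len(value)
        if token_type not in Whitespace:   # whitespace gets its own pass above
            yield index, length, token_type
        index += length

for span in token_spans(PythonLexer(), "x = 1"):
    print(span)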


Note: The pygments.token.Whitespace examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their original authors; copyright of the source code remains with those authors, and distribution or reuse should follow each project's license. Please do not republish without permission.