

Python token._TokenType Code Examples

This article collects typical usage examples of pygments.token._TokenType found in open-source Python code. Strictly speaking, _TokenType is a class rather than a method: it is the tuple subclass from which every Pygments token type is built. If you are wondering what _TokenType is for or how it is used in practice, the hand-picked examples below should help; you can also explore the rest of the pygments.token module for more context.


Seven code examples of token._TokenType are shown below, sorted by popularity by default.
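
Before the examples, a quick orientation may help: _TokenType instances form the Pygments token hierarchy, so standard types such as Keyword are instances of it, and containment tests express the subtype relation. A minimal sketch using only public behavior of pygments.token:

from pygments.token import Token, Keyword, _TokenType

assert isinstance(Keyword, _TokenType)  # standard token types are _TokenType instances
assert Keyword.Constant in Keyword      # attribute access yields subtypes
assert Keyword in Token                 # Token is the root of the hierarchy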

Example 1: bygroups

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
# Also needed standalone: from pygments.lexer import _PseudoMatch
def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer, _PseudoMatch(match.start(i + 1),
                                       data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback 
Developer: joxeankoret, Project: pigaios, Lines: 26, Source: lexer.py
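
As a usage note, bygroups is normally passed as the action in a RegexLexer rule so that each regex group gets its own token type. A minimal sketch with a hypothetical lexer (MiniLexer and its rules are illustrative, not taken from the projects above):

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Keyword, Name, Text, Whitespace

class MiniLexer(RegexLexer):
    # Hypothetical lexer: one rule tagging 'def <name>' definitions.
    tokens = {
        'root': [
            (r'(def)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Function)),
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
    }

print(list(MiniLexer().get_tokens('def foo')))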

Example 2: bygroups

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
# Also needed standalone: from pygments.lexer import _PseudoMatch
def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback 
Developer: luckystarufo, Project: pySINDy, Lines: 26, Source: lexer.py

Example 3: test_random_input

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
# (cls and test_content are supplied by the surrounding pytest suite)
def test_random_input(cls):
    inst = cls()
    try:
        tokens = list(inst.get_tokens(test_content))
    except KeyboardInterrupt:
        raise KeyboardInterrupt(
            'interrupted %s.get_tokens(): test_content=%r' %
            (cls.__name__, test_content))
    txt = ""
    for token in tokens:
        assert isinstance(token, tuple)
        assert isinstance(token[0], _TokenType)
        assert isinstance(token[1], str)
        txt += token[1]
    assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
        (cls.name, test_content, txt) 
Developer: pygments, Project: pygments, Lines: 18, Source: test_basic_api.py
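
The same roundtrip property is easy to verify against a concrete lexer; a minimal sketch (the input is chosen to already end in a newline, since get_tokens appends one by default):

from pygments.lexers import PythonLexer
from pygments.token import _TokenType

source = "x = 1\n"
tokens = list(PythonLexer().get_tokens(source))
assert all(isinstance(toktype, _TokenType) for toktype, _ in tokens)
assert "".join(text for _, text in tokens) == source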

Example 4: _process_token

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token 
Developer: joxeankoret, Project: pigaios, Lines: 7, Source: lexer.py
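
A quick illustration of what that assertion admits: plain token types and callable actions such as bygroups(...) pass, while anything else raises. The standalone wrapper below is hypothetical, lifted out of the class for illustration only:

from pygments.token import Name, _TokenType
from pygments.lexer import bygroups

def process_token(token):
    # Same check as in the method above.
    assert type(token) is _TokenType or callable(token), \
        'token type must be simple type or callable, not %r' % (token,)
    return token

process_token(Name.Function)         # a simple token type: passes
process_token(bygroups(Name, Name))  # a callable action: passes
# process_token('Name.Function')     # a plain string would raise AssertionError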

Example 5: get_tokens_unprocessed

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
# Also needed standalone: from pygments.token import Text, Error
def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            for item in action(self, m):
                                yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Text, u'\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break 
Developer: joxeankoret, Project: pigaios, Lines: 55, Source: lexer.py
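
For contrast with get_tokens, get_tokens_unprocessed yields (index, tokentype, value) triples with absolute character positions into the input and does no trailing-newline normalization. A minimal sketch with a stock lexer:

from pygments.lexers import PythonLexer

for index, toktype, value in PythonLexer().get_tokens_unprocessed("x = 1"):
    print(index, toktype, repr(value))
# e.g. 0 Token.Name 'x', 2 Token.Operator '=', 4 Token.Literal.Number.Integer '1'
# (exact token type names vary slightly across Pygments versions)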

Example 6: get_tokens_unprocessed

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
# Also needed standalone: from pygments.token import Text, Error
def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            for item in action(self, m):
                                yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Text, u'\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break 
Developer: luckystarufo, Project: pySINDy, Lines: 57, Source: lexer.py

Example 7: get_tokens_unprocessed

# Required import: from pygments import token [as alias]
# Or: from pygments.token import _TokenType [as alias]
# Also needed standalone: from pygments.token import Text, Error
def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            for item in action(self, m):
                                yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Text, u'\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break 
Developer: pygments, Project: pygments, Lines: 63, Source: lexer.py
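
The extra guards in this version keep a runaway '#pop' from emptying the state stack. A minimal sketch with a hypothetical lexer (PopHappyLexer is invented for illustration; against an unguarded implementation like Example 5 a rule like this could underflow the stack):

from pygments.lexer import RegexLexer
from pygments.token import Text

class PopHappyLexer(RegexLexer):
    # Hypothetical lexer whose root state pops itself on every 'x'.
    tokens = {
        'root': [
            (r'x', Text, '#pop'),
            (r'.', Text),
        ],
    }

# With the guarded implementation above, this lexes without raising:
print(list(PopHappyLexer().get_tokens('xxy')))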


Note: The pygments.token._TokenType examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from community-contributed open-source projects, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Please do not republish without permission.