

Python lexer.do_insertions Function Code Examples

This article collects typical usage examples of the Python function pygments.lexer.do_insertions. If you are wondering what do_insertions does, how to call it, or what real-world uses look like, the curated examples below should help.


The following presents 15 code examples of the do_insertions function, ordered by popularity by default.
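Before diving into the examples, a quick orientation: do_insertions(insertions, tokens) takes a list of (index, [(pos, token, value), ...]) pairs and splices those tokens into the stream produced by an inner lexer, shifting positions so the merged stream stays consistent. This is how the console lexers below weave prompt tokens back into code that was lexed with the prompts stripped out. Here is a minimal, self-contained sketch; the session text and offsets are invented for illustration and do not come from any of the projects below:

from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

# Code with the '>>> ' prompts stripped out; each prompt is recorded as an
# insertion at the offset in `code` where it was removed.
code = 'x = 1\nprint(x)\n'
insertions = [
    (0, [(0, Generic.Prompt, '>>> ')]),  # before 'x = 1'
    (6, [(0, Generic.Prompt, '>>> ')]),  # before 'print(x)'; 6 == len('x = 1\n')
]

for index, token, value in do_insertions(
        insertions, PythonLexer().get_tokens_unprocessed(code)):
    print(index, token, repr(value))

The prompt tokens come out interleaved at the correct positions, which is exactly the pattern that every get_tokens_unprocessed implementation below relies on.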

Example 1: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        exlexer = ElixirLexer(**self.options)

        curcode = ''
        in_error = False
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(u'** '):
                in_error = True
                # Insert the error text without its trailing newline; the
                # newline goes into curcode so that offsets stay aligned.
                insertions.append((len(curcode),
                                   [(0, Generic.Error, line[:-1])]))
                curcode += line[-1:]
            else:
                m = self._prompt_re.match(line)
                if m is not None:
                    in_error = False
                    end = m.end()
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, line[:end])]))
                    curcode += line[end:]
                else:
                    if curcode:
                        for item in do_insertions(
                                insertions, exlexer.get_tokens_unprocessed(curcode)):
                            yield item
                        curcode = ''
                        insertions = []
                    token = Generic.Error if in_error else Generic.Output
                    yield match.start(), token, line
        if curcode:
            for item in do_insertions(
                    insertions, exlexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: 2015E8014661092, Project: jinjaysnow.github.io, Lines: 34, Source file: erlang.py

Example 2: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        erlexer = ErlangLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                    erlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('*'):
                    yield match.start(), Generic.Traceback, line
                else:
                    yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      erlexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: Mekyi, Project: crunchy, Lines: 28, Source file: functional.py

Example 3: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        langlexer = self.LangLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()

            # prompt
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
                continue
            elif curcode:
                for item in do_insertions(insertions,
                                          langlexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []

            # comments
            m = self._comment_re.match(line)
            if m is not None:
                yield match.start(), Comment.Single, line
                continue

            yield match.start(), Generic.Output, line

        if curcode:
            for item in do_insertions(insertions,
                                      langlexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: hyone, Project: pygments-myextra, Lines: 35, Source file: lexer.py

Example 4: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        slexer = SLexer(**self.options)

        current_code_block = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>') or line.startswith('+'):
                # Colorize the prompt as such,
                # then put rest of line into current_code_block
                insertions.append((len(current_code_block),
                                   [(0, Generic.Prompt, line[:2])]))
                current_code_block += line[2:]
            else:
                # We have reached a non-prompt line!
                # If we have stored prompt lines, need to process them first.
                if current_code_block:
                    # Weave together the prompts and highlight code.
                    for item in do_insertions(insertions,
                          slexer.get_tokens_unprocessed(current_code_block)):
                        yield item
                    # Reset vars for next code block.
                    current_code_block = ''
                    insertions = []
                # Now process the actual line itself, this is output from R.
                yield match.start(), Generic.Output, line

        # If we happen to end on a code block with nothing after it, need to
        # process the last code block. This is neither elegant nor DRY so
        # should be changed.
        if current_code_block:
            for item in do_insertions(insertions,
                    slexer.get_tokens_unprocessed(current_code_block)):
                yield item
Developer: leeight, Project: feutils, Lines: 35, Source file: math.py

Example 5: get_tokens_unprocessed

    def get_tokens_unprocessed(self, data):
        sql = SqlLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(data):
            line = match.group()
            if line.startswith('sqlite> ') or line.startswith('   ...> '):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:8])]))
                curcode += line[8:]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                              sql.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('SQL error: '):
                    yield (match.start(), Generic.Traceback, line)
                else:
                    yield (match.start(), Generic.Output, line)
        if curcode:
            for item in do_insertions(insertions,
                                      sql.get_tokens_unprocessed(curcode)):
                yield item
Developer: alfonsodiecko, Project: PYTHON_DIST, Lines: 26, Source file: sql.py

Example 6: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        bashlexer = BashLexer(**self.options)

        pos = 0
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            m = re.match(r'^((?:\[?\w+@[^$#%]+)[$#%])(.*\n?)', line)
            if m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
            else:
                if insertions:
                    toks = bashlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         bashlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v
Developer: MobileWebApps, Project: backend-python-rest-gae, Lines: 32, Source file: shell.py

Example 7: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        mlexer = MatlabLexer(**self.options)

        curcode = ""
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith(">>"):
                insertions.append((len(curcode), [(0, Generic.Prompt, line[:3])]))
                curcode += line[3:]

            elif line.startswith("???"):

                idx = len(curcode)

                # Prepend a newline so the error is not rendered on the
                # same line as the preceding output.
                line = "\n" + line
                token = (0, Generic.Traceback, line)
                insertions.append((idx, [token]))

            else:
                if curcode:
                    for item in do_insertions(insertions, mlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ""
                    insertions = []

                yield match.start(), Generic.Output, line

        if curcode:  # or item:
            for item in do_insertions(insertions, mlexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: apetcho, Project: wxPython-2, Lines: 34, Source file: math.py

Example 8: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        if self.python3:
            pylexer = Python3Lexer(**self.options)
            tblexer = Python3TracebackLexer(**self.options)
        else:
            pylexer = PythonLexer(**self.options)
            tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        curtb = ''
        tbindex = 0
        tb = 0
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(u'>>> ') or line.startswith(u'... '):
                tb = 0
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:4])]))
                curcode += line[4:]
            elif line.rstrip() == u'...' and not tb:
                # only a new >>> prompt can end an exception block
                # otherwise an ellipsis in place of the traceback frames
                # will be mishandled
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, u'...')]))
                curcode += line[3:]
            else:
                if curcode:
                    for item in do_insertions(
                            insertions, pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if (line.startswith(u'Traceback (most recent call last):') or
                        re.match(u'  File "[^"]+", line \\d+\\n$', line)):
                    tb = 1
                    curtb = line
                    tbindex = match.start()
                elif line == 'KeyboardInterrupt\n':
                    yield match.start(), Name.Class, line
                elif tb:
                    curtb += line
                    if not (line.startswith(' ') or line.strip() == u'...'):
                        tb = 0
                        for i, t, v in tblexer.get_tokens_unprocessed(curtb):
                            yield tbindex+i, t, v
                        curtb = ''
                else:
                    yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
        if curtb:
            for i, t, v in tblexer.get_tokens_unprocessed(curtb):
                yield tbindex+i, t, v
Developer: 1901, Project: sublime_sync, Lines: 57, Source file: python.py

Example 9: _handle_codeblock

    def _handle_codeblock(self, match):
        """
        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)
        yield match.start(3), Text, match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(2).strip())
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer found for this language: emit the code as plain String tokens
        if lexer is None:
            yield match.start(4), String, code
        else:
            for item in do_insertions([], lexer.get_tokens_unprocessed(code)):
                yield item

        yield match.start(5), String        , match.group(5)
Developer: PKpacheco, Project: monitor-dollar-value-galicia, Lines: 28, Source file: markup.py

Example 10: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        pylexer = Python3Lexer(**self.options)
        tblexer = Python3TracebackLexer(**self.options)
        if ">>>" not in text:
            if (text.startswith('Traceback (most recent call last):') or
                        re.match(r'^  File "[^"]+", line \d+\n$', text)):
                yield from tblexer.get_tokens_unprocessed(text)
            else:
                yield from pylexer.get_tokens_unprocessed(text)
            return

        curcode = ''
        insertions = []
        curtb = ''
        tbindex = 0
        tb = 0

        def do_current_code():
            nonlocal curcode
            nonlocal insertions
            if curcode:
                for item in do_insertions(insertions,
                                          pylexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []

        section = ""
        code = list(line_re.finditer(text))
        while code:
            match = code.pop(0)
            line = match.group()
            if line.startswith(">>>"):
                insertions = []
                insertions.append((0,
                                   [(0, Generic.Prompt, line[:4])]))
                section = line[4:]
                secindex = match.start()
                while code and code[0].group().startswith("   "):
                    line = code.pop(0).group()
                    if not line.strip():
                        break
                    insertions.append((len(section),
                                        [(0, Generic.Prompt, line[:4])]))
                    section += line[4:]
                for i, t, v in do_insertions(insertions,
                                            pylexer.get_tokens_unprocessed(section)):
                    yield secindex+i, t, v
            elif (line.startswith('Traceback (most recent call last):') or
                  re.match(r' *File "[^"]+", line \d+\n$', line)):
                tb = line
                tbindex = match.start()
                while code and not code[0].group().startswith(">>>"):
                    tb += code.pop(0).group()
                for i, t, v in tblexer.get_tokens_unprocessed(tb):
                    yield tbindex+i, t, v
            else:
                yield match.start(), Generic.Output, line
Developer: dwjoyce, Project: pythonbook, Lines: 57, Source file: lexer.py

Example 11: do_current_code

    def do_current_code():
        nonlocal curcode
        nonlocal insertions
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
            curcode = ''
            insertions = []
Developer: dwjoyce, Project: pythonbook, Lines: 9, Source file: lexer.py

Example 12: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        curtb = ''
        tbindex = 0
        tb = 0
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>>> ') or line.startswith('... '):
                tb = 0
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:4])]))
                curcode += line[4:]
            elif line.rstrip() == '...':
                tb = 0
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, '...')]))
                curcode += line[3:]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                    pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('Traceback (most recent call last):'):
                    tb = 1
                    curtb = line
                    tbindex = match.start()
                elif tb:
                    curtb += line
                    if not (line.startswith(' ') or line.strip() == '...'):
                        tb = 0
                        for i, t, v in tblexer.get_tokens_unprocessed(curtb):
                            yield tbindex+i, t, v
                else:
                    yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: achernet, Project: wxPython, Lines: 44, Source file: agile.py

Example 13: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode),
                                   [(0, Comment, line)]))
            elif line.startswith("<warning>"):
                insertions.append((len(curcode),
                                   [(0, Generic.Error, line[9:])]))
            elif input_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typically in a bright color like
                # red, so it works fine for our output prompts.
                insertions.append((len(curcode),
                                   [(0, Generic.Error, output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                              pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: cmft, Project: sardana, Lines: 44, Source file: spock_console_highlighting.py

Example 14: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        innerlexer = self._innerLexerCls(**self.options)

        pos = 0
        curcode = ''
        insertions = []
        backslash_continuation = False

        for match in line_re.finditer(text):
            line = match.group()
            m = re.match(self._ps1rgx, line)
            if backslash_continuation:
                curcode += line
                backslash_continuation = curcode.endswith('\\\n')
            elif m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
                backslash_continuation = curcode.endswith('\\\n')
            elif line.startswith(self._ps2):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:len(self._ps2)])]))
                curcode += line[len(self._ps2):]
                backslash_continuation = curcode.endswith('\\\n')
            else:
                if insertions:
                    toks = innerlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         innerlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v
Developer: avtobiff, Project: pygments.rb, Lines: 42, Source file: shell.py

Example 15: get_tokens_unprocessed

    def get_tokens_unprocessed(self, text):
        jllexer = JuliaLexer(**self.options)
        start = 0
        curcode = ''
        insertions = []
        output = False
        error = False

        for line in text.splitlines(True):
            if line.startswith('julia>'):
                insertions.append((len(curcode), [(0, Generic.Prompt, line[:6])]))
                curcode += line[6:]
                output = False
                error = False
            elif line.startswith('help?>') or line.startswith('shell>'):
                yield start, Generic.Prompt, line[:6]
                yield start + 6, Text, line[6:]
                output = False
                error = False
            elif line.startswith('      ') and not output:
                insertions.append((len(curcode), [(0, Text, line[:6])]))
                curcode += line[6:]
            else:
                if curcode:
                    for item in do_insertions(
                            insertions, jllexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('ERROR: ') or error:
                    yield start, Generic.Error, line
                    error = True
                else:
                    yield start, Generic.Output, line
                output = True
            start += len(line)

        if curcode:
            for item in do_insertions(
                    insertions, jllexer.get_tokens_unprocessed(curcode)):
                yield item
Developer: avtobiff, Project: pygments.rb, Lines: 41, Source file: julia.py


Note: the pygments.lexer.do_insertions examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors, and copyright remains with the original authors; consult the corresponding project's license before distributing or reusing the code. Do not reproduce without permission.