Python PythonLexer.get_tokens_unprocessed Method Code Examples

This article collects typical usage examples of the Python method pygments.lexers.agile.PythonLexer.get_tokens_unprocessed. If you are unsure what PythonLexer.get_tokens_unprocessed does or how to call it, the curated examples below should help; you can also explore other usage examples of pygments.lexers.agile.PythonLexer.


The sections below present 9 code examples of PythonLexer.get_tokens_unprocessed, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
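
Before diving in, a quick orientation: unlike get_tokens, get_tokens_unprocessed yields (index, tokentype, value) triples, where index is the absolute character offset of the token in the input string. A minimal sketch of a direct call (the sample input is illustrative):

from pygments.lexers.agile import PythonLexer

lexer = PythonLexer()
# Each item is (index, tokentype, value); index is the character
# offset of the token within the input string.
for index, token, value in lexer.get_tokens_unprocessed("x = 1\n"):
    print(index, token, repr(value))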

Example 1: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self.EXTRA_KEYWORDS:
            # Re-tag names from EXTRA_KEYWORDS as pseudo-keywords
            yield index, Keyword.Pseudo, value
        else:
            yield index, token, value
Author: leeight · Project: feutils · Lines: 9 · Source: math.py
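
This method is meant to override get_tokens_unprocessed in a PythonLexer subclass. A minimal, self-contained sketch of that pattern (the class name and the EXTRA_KEYWORDS contents are illustrative assumptions, not taken from the feutils project):

from pygments.lexers.agile import PythonLexer
from pygments.token import Keyword, Name

class MathPythonLexer(PythonLexer):
    # Hypothetical extra vocabulary; the real project defines its own set.
    EXTRA_KEYWORDS = {'sin', 'cos', 'pi'}

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value

# Names listed in EXTRA_KEYWORDS now come back tagged Keyword.Pseudo.
for index, token, value in MathPythonLexer().get_tokens_unprocessed("y = sin(pi)\n"):
    print(index, token, repr(value))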

Example 2: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)  # created but unused here

    for line in text.splitlines():
        lstrip = line.lstrip()
        if lstrip.startswith('Out'):
            # Left-align output lines so the output prompt can match
            line = lstrip + '\n'
        else:
            line = line + '\n'
        input_prompt = self.input_prompt.match(line)
        output_prompt = self.output_prompt.match(line)

        if input_prompt is not None:
            yield (0, Generic.Prompt, input_prompt.group())
            code = line[input_prompt.end():]
            for item in pylexer.get_tokens_unprocessed(code):
                yield item
        elif output_prompt is not None:
            # Use the 'error' token for output.  We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            yield (0, Generic.Error, output_prompt.group())
            index = output_prompt.end()
            yield index, Generic.Output, line[index:]
        else:
            yield 0, Generic.Output, line
Author: barettog1 · Project: ProDy · Lines: 32 · Source: ipython_console_highlighting.py
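
This lexer assumes input_prompt and output_prompt are compiled regexes defined on the class, matching IPython-style prompts. A plausible sketch of those attributes (the exact patterns used in ProDy may differ):

import re

# Hypothetical IPython-style prompt patterns; the original file defines
# its own versions as class attributes on the console lexer.
input_prompt = re.compile(r'(In \[[0-9]+\]: )|(   \.\.\.+:)')
output_prompt = re.compile(r'(Out\[[0-9]+\]: )|(   \.\.\.+:)')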

Example 3: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    offset = 0
    if re.search(r'^----\s*$', text, re.MULTILINE):
        py, _, text = text.partition('----')

        lexer = PythonLexer(**self.options)
        for i, token, value in lexer.get_tokens_unprocessed(py):
            yield i, token, value

        # Skip past the Python section and the '----' separator so the
        # template tokens keep absolute offsets into the original text.
        offset = len(py)
        yield offset, Text, u'----'
        offset += len('----')

    lexer = HtmlDjangoLexer(**self.options)
    for i, token, value in lexer.get_tokens_unprocessed(text):
        yield offset + i, token, value
Author: dcrosta · Project: keystone · Lines: 18 · Source: keystonelexer.py
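
To see why the final loop adds offset to every index, the splitting can be exercised directly (the sample two-part source is illustrative):

from pygments.lexers.agile import PythonLexer
from pygments.lexers.templates import HtmlDjangoLexer

source = "title = 'Hello'\n----\n<h1>{{ title }}</h1>\n"
py, _, rest = source.partition('----')

# Python tokens keep their own offsets; template tokens are shifted by
# len(py) + len('----') so every index points into the combined string.
shift = len(py) + len('----')
for i, token, value in PythonLexer().get_tokens_unprocessed(py):
    print(i, token, repr(value))
for i, token, value in HtmlDjangoLexer().get_tokens_unprocessed(rest):
    print(shift + i, token, repr(value))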

Example 4: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self.lpy_modules:
            # Colourize previously detected modules
            yield index, Keyword, value
        else:
            yield index, token, value
Author: VirtualPlants · Project: plantlab · Lines: 9 · Source: lpy_lexer.py

Example 5: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)  # created but unused here

    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif line.startswith("<warning>"):
            insertions.append((len(curcode),
                               [(0, Generic.Error, line[9:])]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            # Use the 'error' token for output.  We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            insertions.append((len(curcode),
                               [(0, Generic.Error, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            if curcode:
                for item in do_insertions(insertions,
                                          pylexer.get_tokens_unprocessed(curcode)):
                    yield item
                # Reset only after the buffered block has been flushed
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(insertions,
                                  pylexer.get_tokens_unprocessed(curcode)):
            yield item
Author: cmft · Project: sardana · Lines: 46 · Source: spock_console_highlighting.py
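
The buffering above relies on pygments.lexer.do_insertions, which splices pre-lexed token groups (here, the prompts) into the token stream of the buffered code at the recorded positions. A minimal sketch of that call in isolation (the sample prompt and code are illustrative):

from pygments.lexer import do_insertions
from pygments.lexers.agile import PythonLexer
from pygments.token import Generic

curcode = "x = 1\n"
# Splice a prompt token group in at position 0 of the buffered code.
insertions = [(0, [(0, Generic.Prompt, ">>> ")])]
for index, token, value in do_insertions(
        insertions, PythonLexer().get_tokens_unprocessed(curcode)):
    print(index, token, repr(value))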

Example 6: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)  # created but unused here

    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Output, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            if curcode:
                for item in do_insertions(insertions,
                                          pylexer.get_tokens_unprocessed(curcode)):
                    yield item
                # Reset only after the buffered block has been flushed
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(insertions,
                                  pylexer.get_tokens_unprocessed(curcode)):
            yield item
Author: ktc312 · Project: numpy-tutorial · Lines: 40 · Source: ipython_console_highlighting.py

Example 7: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):

        if value.startswith("###"):
            # Hide '###'-prefixed comments from the highlighted output
            continue

        if token == Token.Error and value == "$":
            yield index, Token.Keyword, value

        elif token in (Name, Operator.Word) and value in KEYWORDS:
            yield index, Token.Keyword, value

        elif token in Name and value in PROPERTIES:
            yield index, Name.Attribute, value

        else:
            yield index, token, value
Author: maniacs-games · Project: renpy · Lines: 19 · Source: renpydoc.py
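
The method references module-level KEYWORDS and PROPERTIES collections defined elsewhere in renpydoc.py. A hypothetical stand-in, just to make the pattern concrete (the real vocabularies are generated from Ren'Py's own statement and property definitions):

# Hypothetical vocabularies; renpydoc.py builds the real ones itself.
KEYWORDS = {'label', 'menu', 'scene', 'show'}
PROPERTIES = {'xpos', 'ypos', 'xalign', 'yalign'}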

Example 8: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self._extra_commands:
            # Promote registered script commands to builtins
            yield index, Name.Builtin, value
        else:
            yield index, token, value
Author: OSUPychron · Project: pychron · Lines: 8 · Source: pyscript_lexer.py

Example 9: get_tokens_unprocessed

# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    for pos, type_, value in pylexer.get_tokens_unprocessed(text):
        if type_ == Token.Error and value == '$':
            # '$' is not valid Python, so it arrives as Token.Error;
            # re-tag it so styles render it as a preprocessor directive
            type_ = Comment.Preproc
        yield pos, type_, value
Author: Mekyi · Project: crunchy · Lines: 8 · Source: templates.py
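
A lexer built around this method can be fed to pygments.highlight like any other. A minimal sketch (the TemplateLexer class name is an illustrative assumption; Crunchy's actual class lives in templates.py):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.agile import PythonLexer
from pygments.token import Comment, Token

class TemplateLexer(PythonLexer):
    # Hypothetical wrapper class around the method shown above.
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value

# '$' markers now highlight as Comment.Preproc instead of errors.
print(highlight("$title = 'home'\n", TemplateLexer(), HtmlFormatter()))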


Note: The pygments.lexers.agile.PythonLexer.get_tokens_unprocessed examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from projects contributed by open-source developers, and copyright in the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this article without permission.