当前位置: 首页>>代码示例>>Python>>正文


Python RegexLexer.get_tokens_unprocessed方法代码示例

本文整理汇总了Python中pygments.lexer.RegexLexer.get_tokens_unprocessed方法的典型用法代码示例。如果您正苦于以下问题:Python RegexLexer.get_tokens_unprocessed方法的具体用法?Python RegexLexer.get_tokens_unprocessed怎么用?Python RegexLexer.get_tokens_unprocessed使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pygments.lexer.RegexLexer的用法示例。


在下文中一共展示了RegexLexer.get_tokens_unprocessed方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Yield tokens from the base lexer, disambiguating 'in'.

     After an 'in' that follows a variable name, tokens are buffered
     until two more significant (non-comment, non-whitespace) tokens
     have been seen.  If the second of those is ')', the 'in' was the
     objectloop keyword and is retagged as Keyword; otherwise the
     buffered tokens are emitted unchanged (operator case).
     """
     # 'in' is either a keyword or an operator.
     # If the token two tokens after 'in' is ')', 'in' is a keyword:
     #   objectloop(a in b)
     # Otherwise, it is an operator:
     #   objectloop(a in b && true)
     objectloop_queue = []
     # Number of significant tokens still to buffer; -1 means we are
     # not currently tracking an 'in'.
     objectloop_token_count = -1
     previous_token = None
     for index, token, value in RegexLexer.get_tokens_unprocessed(self,
                                                                  text):
         if previous_token is Name.Variable and value == 'in':
             # Start buffering with the 'in' token itself, stored as a
             # mutable list so its token type can be rewritten later.
             objectloop_queue = [[index, token, value]]
             objectloop_token_count = 2
         elif objectloop_token_count > 0:
             # Comments and whitespace are buffered but do not count
             # toward the two-token lookahead.
             if token not in Comment and token not in Text:
                 objectloop_token_count -= 1
             objectloop_queue.append((index, token, value))
         else:
             if objectloop_token_count == 0:
                 # Decision point: the last buffered token is the second
                 # significant one after 'in'; ')' means keyword.
                 if objectloop_queue[-1][2] == ')':
                     objectloop_queue[0][1] = Keyword
                 while objectloop_queue:
                     yield objectloop_queue.pop(0)
                 objectloop_token_count = -1
             yield index, token, value
         if token not in Comment and token not in Text:
             previous_token = token
     # Flush anything still buffered when input ends mid-lookahead.
     while objectloop_queue:
         yield objectloop_queue.pop(0)
开发者ID:spencerlyon2,项目名称:pygments,代码行数:32,代码来源:int_fiction.py

示例2: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Post-process the base lexer stream, retagging all-uppercase
     plain names as constants (per the file's convention)."""
     for pos, tok, text_val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok is Name and text_val.isupper():
             yield pos, Name.Constant, text_val
         else:
             yield pos, tok, text_val
开发者ID:testmana2,项目名称:eric,代码行数:9,代码来源:hdl.py

示例3: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Retag variable names that belong to a known builtin category.

     Categories are checked in priority order; the first collection
     containing the name decides the replacement token type.
     """
     categories = (
         (self.builtin_function, Name.Builtin),
         (self.special_forms, Keyword),
         (self.macros, Name.Builtin),
         (self.lambda_list_keywords, Keyword),
         (self.declarations, Keyword),
         (self.builtin_types, Keyword.Type),
         (self.builtin_classes, Name.Class),
     )
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text, ['root']):
         if tok is Name.Variable:
             for names, replacement in categories:
                 if val in names:
                     tok = replacement
                     break
         yield pos, tok, val
开发者ID:Mekyi,项目名称:crunchy,代码行数:28,代码来源:functional.py

示例4: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Tokenize only the code section of the input.

     Everything from the first '@#$#@#$#@' delimiter onward (the
     interface and information tab data) is discarded before lexing.
     """
     code = text.partition('@#$#@#$#@')[0]
     for triple in RegexLexer.get_tokens_unprocessed(self, code, ['root']):
         yield triple
开发者ID:cdzhang,项目名称:netlogo_latex_minted,代码行数:9,代码来源:netlogo.py

示例5: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Retag the function names 'init' and 'del' as pseudo-keywords;
     all other tokens pass through unchanged."""
     pseudo_names = ('init', 'del')
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok is Name.Function and val in pseudo_names:
             yield pos, Keyword.Pseudo, val
         else:
             yield pos, tok, val
开发者ID:DawidvC,项目名称:Cbang,代码行数:9,代码来源:pygments_cbang.py

示例6: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Retag plain names listed in EXTRA_KEYWORDS as builtins."""
     extras = self.EXTRA_KEYWORDS
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text, ['root']):
         retag = tok is Name and val in extras
         yield pos, (Name.Builtin if retag else tok), val
开发者ID:spencerlyon2,项目名称:pygments,代码行数:10,代码来源:haskell.py

示例7: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Retag plain names that appear in the builtin function or
     builtin operation tables."""
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok is not Name:
             yield pos, tok, val
         elif val in builtins.BUILTIN_FUNCTIONS:
             yield pos, Name.Builtin, val
         elif val in builtins.BUILTIN_OPERATIONS:
             yield pos, Name.Builtin.Pseudo, val
         else:
             yield pos, tok, val
开发者ID:yamad,项目名称:igor-pygments-lexer,代码行数:10,代码来源:igorlexer.py

示例8: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
    def get_tokens_unprocessed(self, text, stack=('root', )):
        """Tokenize *text*, running every token through the Mathematica
        annotation passes.

        Fix: the *stack* parameter was accepted but silently ignored;
        it is now forwarded to RegexLexer.get_tokens_unprocessed so a
        caller-supplied starting state takes effect.  The default
        ('root',) preserves the previous behavior.
        """
        ma = MathematicaAnnotations()
        # Each annotation takes and returns an (index, token, value)
        # triple, so the passes can be chained in order.
        annotations = (ma.builtins, ma.unicode, ma.lexical_scope)
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
            result = (index, token, value)
            for func in annotations:
                result = func(*result)

            yield result
开发者ID:andandandand,项目名称:pygments-mathematica,代码行数:11,代码来源:lexer.py

示例9: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Wrap Generic token values in inline-math delimiters and
     downgrade XMath tokens to Generic; all others pass through."""
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok == Generic:
             yield (pos, tok, "\\(" + val + "\\)")
         elif tok == XMath:
             yield (pos, Generic, val)
         else:
             yield (pos, tok, val)
开发者ID:felix-lang,项目名称:felix,代码行数:11,代码来源:xfelix.py

示例10: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Retag plain names found in the Asymptote builtin tables as
     functions or variables respectively."""
     from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok is Name and val in ASYFUNCNAME:
             yield pos, Name.Function, val
         elif tok is Name and val in ASYVARNAME:
             yield pos, Name.Variable, val
         else:
             yield pos, tok, val
开发者ID:axil,项目名称:blog,代码行数:11,代码来源:graphics.py

示例11: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
    def get_tokens_unprocessed(self, text):
        """Retag plain and class names that match a known Cocoa
        interface, protocol, or primitive as builtin pseudo names."""
        from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, COCOA_PROTOCOLS, COCOA_PRIMITIVES

        cocoa_tables = (COCOA_INTERFACES, COCOA_PROTOCOLS, COCOA_PRIMITIVES)
        for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
            is_name = tok is Name or tok is Name.Class
            if is_name and any(val in table for table in cocoa_tables):
                yield pos, Name.Builtin.Pseudo, val
            else:
                yield pos, tok, val
开发者ID:AlexStef,项目名称:stef-sublime-conf,代码行数:11,代码来源:objective.py

示例12: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Retag plain names that are reserved words or pervasives;
     reserved words take precedence."""
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok is Name:
             if val in self.reserved_words:
                 yield pos, Keyword.Reserved, val
                 continue
             if val in self.pervasives:
                 yield pos, Keyword.Pervasive, val
                 continue
         yield pos, tok, val
开发者ID:2015E8014661092,项目名称:jinjaysnow.github.io,代码行数:12,代码来源:pascal.py

示例13: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """When the sm-highlighting option is enabled, retag plain names
     found in SM_TYPES as types and in _functions as builtins."""
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
         if tok is Name and self.smhighlighting:
             if val in self.SM_TYPES:
                 tok = Keyword.Type
             elif val in self._functions:
                 tok = Name.Builtin
         yield pos, tok, val
开发者ID:spencerlyon2,项目名称:pygments,代码行数:12,代码来源:pawn.py

示例14: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Tokenize, starting in the 'php' state when startinline is set,
     and retag known function names as builtins."""
     states = ["root"] + (["php"] if self.startinline else [])
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text, states):
         if tok is Name.Other and val in self._functions:
             yield pos, Name.Builtin, val
         else:
             yield pos, tok, val
开发者ID:dlutcat,项目名称:pastebin,代码行数:12,代码来源:web.py

示例15: get_tokens_unprocessed

# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
 def get_tokens_unprocessed(self, text):
     """Tokenize, starting in the 'php' state when startinline is set,
     and retag known function names as Name.Function."""
     states = ['root', 'php'] if self.startinline else ['root']
     for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text, states):
         retag = tok is Name.Other and val in self._functions
         yield pos, (Name.Function if retag else tok), val
开发者ID:BobPyron,项目名称:SideBySideDiff,代码行数:13,代码来源:web.py


注:本文中的pygments.lexer.RegexLexer.get_tokens_unprocessed方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。