本文整理汇总了Python中pygments.lexer.RegexLexer.get_tokens_unprocessed方法的典型用法代码示例。如果您正苦于以下问题:Python RegexLexer.get_tokens_unprocessed方法的具体用法?Python RegexLexer.get_tokens_unprocessed怎么用?Python RegexLexer.get_tokens_unprocessed使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pygments.lexer.RegexLexer
的用法示例。
在下文中一共展示了RegexLexer.get_tokens_unprocessed方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Disambiguate the word ``in`` after lexing.

    ``in`` is either a keyword or an operator.  If the token two
    (significant) tokens after ``in`` is ``)``, ``in`` is a keyword:
        objectloop(a in b)
    Otherwise, it is an operator:
        objectloop(a in b && true)

    Tokens following a candidate ``in`` are buffered in
    ``objectloop_queue`` until enough context has been seen to decide,
    then flushed with the corrected token type.
    """
    objectloop_queue = []
    # Countdown of significant (non-Comment, non-Text) tokens still
    # needed before a decision can be made; -1 means "not buffering".
    objectloop_token_count = -1
    previous_token = None
    for index, token, value in RegexLexer.get_tokens_unprocessed(self,
                                                                 text):
        if previous_token is Name.Variable and value == 'in':
            # Start buffering: keep 'in' as a mutable list so its
            # token type can be rewritten to Keyword later.
            objectloop_queue = [[index, token, value]]
            objectloop_token_count = 2
        elif objectloop_token_count > 0:
            # Still collecting the two significant tokens after 'in'.
            if token not in Comment and token not in Text:
                objectloop_token_count -= 1
            objectloop_queue.append((index, token, value))
        else:
            if objectloop_token_count == 0:
                # Decision point: a trailing ')' marks the keyword form.
                if objectloop_queue[-1][2] == ')':
                    objectloop_queue[0][1] = Keyword
                while objectloop_queue:
                    yield objectloop_queue.pop(0)
                objectloop_token_count = -1
            yield index, token, value
            if token not in Comment and token not in Text:
                previous_token = token
    # Flush anything still buffered when the input ends mid-decision.
    while objectloop_queue:
        yield objectloop_queue.pop(0)
示例2: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Re-tag fully upper-case names as constants (by convention)."""
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok is Name and val.isupper():
            tok = Name.Constant
        yield pos, tok, val
示例3: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Re-classify variable names that match known builtin categories.

    Each ``Name.Variable`` token is checked against the lexer's name
    sets in a fixed priority order; the first match wins.  Unmatched
    tokens pass through unchanged.
    """
    # (name set, replacement token type), highest priority first.
    categories = (
        (self.builtin_function, Name.Builtin),
        (self.special_forms, Keyword),
        (self.macros, Name.Builtin),
        (self.lambda_list_keywords, Keyword),
        (self.declarations, Keyword),
        (self.builtin_types, Keyword.Type),
        (self.builtin_classes, Name.Class),
    )
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(
            self, text, ['root']):
        if tok is Name.Variable:
            for names, replacement in categories:
                if val in names:
                    tok = replacement
                    break
        yield pos, tok, val
示例4: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Lex only the code section, dropping the interface/info tabs.

    Everything from the ``@#$#@#$#@`` delimiter onward is discarded
    before normal lexing.
    """
    code, _delim, _rest = text.partition('@#$#@#$#@')
    yield from RegexLexer.get_tokens_unprocessed(self, code, ['root'])
示例5: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Mark the special method names ``init`` and ``del`` as pseudo-keywords."""
    special = ('init', 'del')
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok is Name.Function and val in special:
            yield pos, Keyword.Pseudo, val
        else:
            yield pos, tok, val
示例6: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Promote names found in ``EXTRA_KEYWORDS`` to builtins."""
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(
            self, text, ['root']):
        is_extra = tok is Name and val in self.EXTRA_KEYWORDS
        yield pos, (Name.Builtin if is_extra else tok), val
示例7: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Tag builtin functions and operations from the ``builtins`` tables.

    Functions take precedence over operations when a name appears in both.
    """
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok is Name:
            if val in builtins.BUILTIN_FUNCTIONS:
                tok = Name.Builtin
            elif val in builtins.BUILTIN_OPERATIONS:
                tok = Name.Builtin.Pseudo
        yield pos, tok, val
示例8: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text, stack=('root', )):
    """Run each lexed token through the Mathematica annotation passes.

    NOTE(review): ``stack`` is accepted for interface compatibility but
    is never forwarded to ``RegexLexer.get_tokens_unprocessed`` — confirm
    whether that is intentional before relying on it.
    """
    annotator = MathematicaAnnotations()
    passes = (annotator.builtins, annotator.unicode, annotator.lexical_scope)
    for triple in RegexLexer.get_tokens_unprocessed(self, text):
        # Each pass receives and returns an (index, token, value) triple.
        for apply_pass in passes:
            triple = apply_pass(*triple)
        yield triple
示例9: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Rewrite math tokens: wrap Generic in eqn delimiters, demote XMath.

    ``Generic`` values are wrapped in ``\\(`` ... ``\\)`` markers;
    ``XMath`` tokens are re-emitted as ``Generic`` unchanged.
    """
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok == Generic:
            yield pos, tok, "\\(" + val + "\\)"
        elif tok == XMath:
            yield pos, Generic, val
        else:
            yield pos, tok, val
示例10: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Classify Asymptote builtin function and variable names."""
    # Imported lazily, as in the original, to avoid paying the cost
    # of the large builtin tables unless this lexer is actually used.
    from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok is Name:
            if val in ASYFUNCNAME:
                tok = Name.Function
            elif val in ASYVARNAME:
                tok = Name.Variable
        yield pos, tok, val
示例11: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Mark Cocoa interfaces, protocols and primitives as builtins."""
    from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, COCOA_PROTOCOLS, COCOA_PRIMITIVES
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        # Identity checks on the token type, as token types are singletons.
        if (tok is Name or tok is Name.Class) and (
                val in COCOA_INTERFACES
                or val in COCOA_PROTOCOLS
                or val in COCOA_PRIMITIVES):
            tok = Name.Builtin.Pseudo
        yield pos, tok, val
示例12: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Re-tag reserved words and pervasive identifiers as keywords."""
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok is Name:
            # Reserved words take precedence over pervasives.
            if val in self.reserved_words:
                yield pos, Keyword.Reserved, val
                continue
            if val in self.pervasives:
                yield pos, Keyword.Pervasive, val
                continue
        yield pos, tok, val
示例13: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Highlight SourceMod types and builtin functions when enabled.

    Only active when ``self.smhighlighting`` is set; otherwise names
    pass through unchanged.
    """
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(self, text):
        if tok is Name and self.smhighlighting:
            if val in self.SM_TYPES:
                tok = Keyword.Type
            elif val in self._functions:
                tok = Name.Builtin
        yield pos, tok, val
示例14: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Lex PHP, optionally starting inside a ``<?php`` block.

    Known PHP function names are re-emitted as ``Name.Builtin``.
    """
    # Begin inside PHP when startinline is set, otherwise in HTML mode.
    state_stack = ["root", "php"] if self.startinline else ["root"]
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(
            self, text, state_stack):
        if tok is Name.Other and val in self._functions:
            tok = Name.Builtin
        yield pos, tok, val
示例15: get_tokens_unprocessed
# 需要导入模块: from pygments.lexer import RegexLexer [as 别名]
# 或者: from pygments.lexer.RegexLexer import get_tokens_unprocessed [as 别名]
def get_tokens_unprocessed(self, text):
    """Lex PHP, optionally starting inside a ``<?php`` block.

    Known PHP function names are re-emitted as ``Name.Function``.
    """
    # Begin inside PHP when startinline is set, otherwise in HTML mode.
    state_stack = ['root', 'php'] if self.startinline else ['root']
    for pos, tok, val in RegexLexer.get_tokens_unprocessed(
            self, text, state_stack):
        if tok is Name.Other and val in self._functions:
            tok = Name.Function
        yield pos, tok, val