This article collects typical usage examples of the Python method pygments.lexer.RegexLexer.get_tokens_unprocessed. If you have been wondering what RegexLexer.get_tokens_unprocessed does and how to use it in practice, the selected code examples below may help. You can also look further into the containing class, pygments.lexer.RegexLexer.
The following shows four code examples of RegexLexer.get_tokens_unprocessed, taken from real lexer implementations.
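All four examples follow the same basic pattern: subclass RegexLexer, delegate to the base class's get_tokens_unprocessed, and remap selected token types in the stream it yields. Below is a minimal, self-contained sketch of that pattern; the lexer name, grammar, and keyword set are invented for illustration and are not from the examples themselves.

from pygments.lexer import RegexLexer
from pygments.token import Keyword, Name, Text

class DemoKeywordLexer(RegexLexer):
    """Hypothetical lexer: tag bare words as Name, then promote known keywords."""
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'\w+', Name),
        ],
    }

    KEYWORDS = {'select', 'from', 'where'}  # assumed keyword set

    def get_tokens_unprocessed(self, text):
        # Post-process the base lexer's stream: remap Name tokens that
        # match the keyword set, pass everything else through unchanged.
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name and value.lower() in self.KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value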
Example 1: get_tokens_unprocessed
# Required import: from pygments.lexer import RegexLexer
# Token types used below: from pygments.token import Name, Keyword, Operator
def get_tokens_unprocessed(self, text):
    # Remap generic Name tokens to more specific types based on the
    # lexer's word lists; everything else passes through unchanged.
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            lowercase_value = value.lower()
            if lowercase_value in self.builtins:
                yield index, Name.Builtin, value
                continue
            if lowercase_value in self.keywords:
                yield index, Keyword, value
                continue
            if lowercase_value in self.functions:
                yield index, Name.Builtin, value
                continue
            if lowercase_value in self.operators:
                yield index, Operator, value
                continue
        yield index, token, value
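For reference, get_tokens_unprocessed can also be called directly on an instantiated lexer; it yields (index, tokentype, value) triples, where index is the character offset of the token in the input text. A short usage sketch with one of Pygments' stock lexers:

from pygments.lexers import PythonLexer

source = "print('hello')"
for index, tokentype, value in PythonLexer().get_tokens_unprocessed(source):
    print(index, tokentype, repr(value))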
Example 2: get_tokens_unprocessed
# Required import: from pygments.lexer import RegexLexer
# Token types used below: from pygments.token import Name, Keyword, Text
def get_tokens_unprocessed(self, text):
    # TODO: builtins are only subsequent tokens on lines
    # and 'keywords' only happen at the beginning except
    # for :au ones
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name.Other:
            # Reclassify generic names as commands, options/autocommand
            # events, or plain text.
            if self.is_in(value, self._cmd):
                yield index, Keyword, value
            elif self.is_in(value, self._opt) or self.is_in(value, self._aut):
                yield index, Name.Builtin, value
            else:
                yield index, Text, value
        else:
            yield index, token, value
Example 3: get_tokens_unprocessed
# Required import: from pygments.lexer import RegexLexer
# Token types used below: from pygments.token import Name, Keyword
def get_tokens_unprocessed(self, text):
    stack = ['root']
    # Promote known Emacs Lisp symbols to more specific token types.
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name.Variable:
            if value in EmacsLispLexer.builtin_function:
                yield index, Name.Function, value
                continue
            if value in EmacsLispLexer.special_forms:
                yield index, Keyword, value
                continue
            if value in EmacsLispLexer.error_keywords:
                yield index, Name.Exception, value
                continue
            if value in EmacsLispLexer.builtin_function_highlighted:
                yield index, Name.Builtin, value
                continue
            if value in EmacsLispLexer.macros:
                yield index, Name.Builtin, value
                continue
            if value in EmacsLispLexer.lambda_list_keywords:
                yield index, Keyword.Pseudo, value
                continue
        yield index, token, value
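Unlike the first two examples, Example 3 passes an explicit state stack to RegexLexer.get_tokens_unprocessed. The base method accepts an optional stack argument (defaulting to ('root',)), which lets the caller start lexing in a state other than 'root'. A brief sketch with an invented two-state lexer:

from pygments.lexer import RegexLexer
from pygments.token import String, Text

class TwoStateLexer(RegexLexer):
    """Hypothetical lexer with a separate 'string' state."""
    tokens = {
        'root': [
            (r'"', String, 'string'),
            (r'[^"]+', Text),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'[^"]+', String),
        ],
    }

lexer = TwoStateLexer()
# Start lexing as if already inside the 'string' state.
tokens = list(lexer.get_tokens_unprocessed('still inside', stack=['string']))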
Example 4: content_callback
# Required import: from pygments.lexer import RegexLexer (this callback is
# defined on a RegexLexer subclass). Also used below: import re;
# from pygments.token import Text; from pygments.util import ClassNotFound
def content_callback(self, match):
    content_type = getattr(self, 'content_type', None)
    content = match.group()
    offset = match.start()
    if content_type:
        from pygments.lexers import get_lexer_for_mimetype
        possible_lexer_mimetypes = [content_type]
        if '+' in content_type:
            # application/calendar+xml can be treated as application/xml
            # if there's not a better match.
            general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2',
                                  content_type)
            possible_lexer_mimetypes.append(general_type)

        for i in possible_lexer_mimetypes:
            try:
                lexer = get_lexer_for_mimetype(i)
            except ClassNotFound:
                pass
            else:
                # Delegate the body to the matched lexer, shifting token
                # offsets so they stay relative to the full input.
                for idx, token, value in lexer.get_tokens_unprocessed(content):
                    yield offset + idx, token, value
                return

    # Fallback: no usable content type, so emit the body as plain text.
    yield offset, Text, content
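Example 4 differs from the others in that content_callback is not an override of get_tokens_unprocessed but a rule callback: in a RegexLexer's tokens table, a rule's action may be a callable that receives the lexer and the match object and itself yields (index, token, value) triples, which is how part of the input can be delegated to another lexer chosen at runtime (here via get_lexer_for_mimetype). Below is a minimal sketch of wiring such a callback into a rule; the lexer and its grammar are invented for illustration.

from pygments.lexer import RegexLexer
from pygments.token import Comment, Text

def comment_callback(lexer, match):
    # A callback yields (index, token, value) triples for the matched
    # span; indices are absolute offsets into the input text.
    yield match.start(), Comment, match.group()

class CallbackDemoLexer(RegexLexer):
    """Hypothetical lexer: everything after '#' on a line is a comment."""
    tokens = {
        'root': [
            (r'#[^\n]*', comment_callback),  # rule action is a callback
            (r'[^#]+', Text),
        ],
    }

for index, token, value in CallbackDemoLexer().get_tokens_unprocessed("x = 1  # note"):
    print(index, token, repr(value))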