This article collects typical usage examples of the pygments.lexer.RegexLexer class in Python. If you have been wondering what RegexLexer is for, how to use it, or what real-world code built on it looks like, the curated examples below should help.
15 code examples of the RegexLexer class are shown, ordered by popularity.
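Before the collected examples, a minimal, self-contained sketch may help show what a RegexLexer subclass looks like end to end. The lexer name IniLikeLexer and its rules are illustrative inventions, not taken from the examples below; the API pieces (the tokens state table, get_tokens_unprocessed(), the '#pop' state transition) are standard Pygments:

import re

from pygments.lexer import RegexLexer
from pygments.token import Comment, Keyword, Name, Operator, Text

class IniLikeLexer(RegexLexer):
    """Hypothetical lexer for a small INI-style language."""
    name = 'IniLike'
    aliases = ['inilike']
    flags = re.MULTILINE

    tokens = {
        'root': [
            (r'\s+', Text),
            (r';.*?$', Comment.Single),       # comment lines
            (r'\[[^\]]+\]', Keyword),         # [section] headers
            (r'[^=\s\[;]+', Name.Attribute),  # keys
            (r'=', Operator, 'value'),        # '=' switches to the value state
        ],
        'value': [
            (r'[^\n]+', Text),
            (r'\n', Text, '#pop'),            # end of line: back to root
        ],
    }

# The lexer yields a stream of (index, token, value) triples.
for index, token, value in IniLikeLexer().get_tokens_unprocessed('[core]\nuser = ada\n'):
    print(index, token, repr(value))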
Example 1: __init__

def __init__(self, **options):
    # Load the Vim builtin command/option/autocommand tables lazily,
    # then let RegexLexer handle the regular setup.
    from pygments.lexers._vimbuiltins import command, option, auto
    self._cmd = command
    self._opt = option
    self._aut = auto
    RegexLexer.__init__(self, **options)
Example 2: __init__

def __init__(self, **options):
    # Choose the token table for the requested 'i6t' level; reuse the
    # cached compiled table if this level has been seen before.
    level = options.get('i6t', '+i6t-not-inline')
    if level not in self._all_tokens:
        self._tokens = self.__class__.process_tokendef(level)
    else:
        self._tokens = self._all_tokens[level]
    RegexLexer.__init__(self, **options)
Example 3: __init__

def __init__(self, **options):
    level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
    if level not in self._all_tokens:
        # compile the regexes now
        self._tokens = self.__class__.process_tokendef(level)
    else:
        self._tokens = self._all_tokens[level]
    RegexLexer.__init__(self, **options)
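Examples 2 and 3 share one pattern: the class keeps several alternative token tables keyed by a level name, compiles the chosen table on first use with process_tokendef(), and looks it up in the _all_tokens cache afterwards. Below is a minimal sketch of that pattern, under one assumption worth flagging: the class sets Pygments' token_variants flag so that the metaclass does not try to compile the level-keyed tokens dict at class-creation time. LeveledLexer, its 'level' option, and its rules are illustrative:

from pygments.lexer import RegexLexer
from pygments.token import Keyword, Name, Text
from pygments.util import get_choice_opt

class LeveledLexer(RegexLexer):
    # Assumption: token_variants tells RegexLexerMeta that `tokens` maps
    # level names to whole state tables rather than states to rules.
    token_variants = True

    tokens = {
        'basic': {
            'root': [
                (r'[A-Za-z_]\w*', Name),
                (r'\s+', Text),
            ],
        },
        'full': {
            'root': [
                (r'(?:if|else|while)\b', Keyword),
                (r'[A-Za-z_]\w*', Name),
                (r'\s+', Text),
            ],
        },
    }

    def __init__(self, **options):
        level = get_choice_opt(options, 'level', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # First use of this level: compile its regexes now.
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)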
Example 4: __init__

def __init__(self, **options):
    self.smhighlighting = get_bool_opt(options, 'sourcemod', True)
    self._functions = set()
    if self.smhighlighting:
        from pygments.lexers._sourcemodbuiltins import FUNCTIONS
        self._functions.update(FUNCTIONS)
    RegexLexer.__init__(self, **options)
Example 5: __init__

def __init__(self, **options):
    self.func_name_highlighting = get_bool_opt(
        options, 'func_name_highlighting', True)
    self.disabled_modules = get_list_opt(options, 'disabled_modules', [])
    self._functions = set()
    if self.func_name_highlighting:
        from pygments.lexers._lua_builtins import MODULES
        # The original (Python 2) code called MODULES.iteritems();
        # items() is the Python 3 equivalent.
        for mod, func in MODULES.items():
            if mod not in self.disabled_modules:
                self._functions.update(func)
    RegexLexer.__init__(self, **options)
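The pygments.lexers._lua_builtins import suggests Example 5 is the constructor of Pygments' LuaLexer, whose func_name_highlighting and disabled_modules options are documented lexer options. Either way, such keyword options are simply passed at construction time:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import LuaLexer

# Keep builtin highlighting, but not for names from the io and os modules.
lexer = LuaLexer(disabled_modules=['io', 'os'])
print(highlight('print(os.time())', lexer, TerminalFormatter()))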
Example 6: __init__

def __init__(self, **options):
    from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \
        SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
        BUILTIN_TYPES, BUILTIN_CLASSES
    self.builtin_function = BUILTIN_FUNCTIONS
    self.special_forms = SPECIAL_FORMS
    self.macros = MACROS
    self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
    self.declarations = DECLARATIONS
    self.builtin_types = BUILTIN_TYPES
    self.builtin_classes = BUILTIN_CLASSES
    RegexLexer.__init__(self, **options)
Example 7: __init__

def __init__(self, **options):
    self.builtinshighlighting = get_bool_opt(options, "builtinshighlighting", True)
    self.requiredelimiters = get_bool_opt(options, "requiredelimiters", False)
    self._builtins = set()
    self._members = set()
    if self.builtinshighlighting:
        from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS
        # The original looped with iteritems() (a Python 2 compatibility
        # helper from pygments.util); items() is the modern equivalent.
        for key, value in BUILTINS.items():
            self._builtins.update(value)
        for key, value in MEMBERS.items():
            self._members.update(value)
    RegexLexer.__init__(self, **options)
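Examples 4, 5 and 7 are all the same recipe: the constructor gathers builtin names into one or more sets, and a companion get_tokens_unprocessed() override (compare Examples 13 and 14) consults those sets to retag plain Name tokens. A condensed sketch of that consuming half, assuming a self._functions set built as above and Name imported from pygments.token:

def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        # Retag identifiers that the constructor registered as builtins.
        if token is Name and value in self._functions:
            token = Name.Builtin
        yield index, token, value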
Example 8: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    # cut at the beginning of the interface and information tab stuff
    substrings = text.partition('@#$#@#$#@')
    text = substrings[0]
    stack = ['root']
    for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
        yield item
Example 9: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    stack = ['root']
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name.Variable:
            if value in self.builtin_function:
                yield index, Name.Builtin, value
                continue
            if value in self.special_forms:
                yield index, Keyword, value
                continue
            if value in self.macros:
                yield index, Name.Builtin, value
                continue
            if value in self.lambda_list_keywords:
                yield index, Keyword, value
                continue
            if value in self.declarations:
                yield index, Keyword, value
                continue
            if value in self.builtin_types:
                yield index, Keyword.Type, value
                continue
            if value in self.builtin_classes:
                yield index, Name.Class, value
                continue
        yield index, token, value
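The if/continue chain in Example 9 is really a priority-ordered lookup: the first set containing the value decides the replacement token, and anything unmatched passes through unchanged. The same dispatch can be written as data; this behavior-preserving sketch uses the attribute names assigned by the constructor in Example 6:

def get_tokens_unprocessed(self, text):
    # (name set, replacement token) pairs, checked in priority order.
    remaps = [
        (self.builtin_function, Name.Builtin),
        (self.special_forms, Keyword),
        (self.macros, Name.Builtin),
        (self.lambda_list_keywords, Keyword),
        (self.declarations, Keyword),
        (self.builtin_types, Keyword.Type),
        (self.builtin_classes, Name.Class),
    ]
    stack = ['root']
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name.Variable:
            for names, replacement in remaps:
                if value in names:
                    token = replacement
                    break
        yield index, token, value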
Example 10: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name.Function and value in ('init', 'del'):
            token = Keyword.Pseudo
        yield index, token, value
Example 11: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    # 'in' is either a keyword or an operator.
    # If the token two tokens after 'in' is ')', 'in' is a keyword:
    #   objectloop(a in b)
    # Otherwise, it is an operator:
    #   objectloop(a in b && true)
    objectloop_queue = []
    objectloop_token_count = -1
    previous_token = None
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if previous_token is Name.Variable and value == 'in':
            objectloop_queue = [[index, token, value]]
            objectloop_token_count = 2
        elif objectloop_token_count > 0:
            if token not in Comment and token not in Text:
                objectloop_token_count -= 1
            objectloop_queue.append((index, token, value))
        else:
            if objectloop_token_count == 0:
                if objectloop_queue[-1][2] == ')':
                    objectloop_queue[0][1] = Keyword
                while objectloop_queue:
                    yield objectloop_queue.pop(0)
                objectloop_token_count = -1
            yield index, token, value
            if token not in Comment and token not in Text:
                previous_token = token
    while objectloop_queue:
        yield objectloop_queue.pop(0)
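Stripped of the Inform-specific details, Example 11 is the general buffered-lookahead trick: hold tokens back in a queue until enough context has arrived to classify the first one, then flush. A skeleton of that pattern, with illustrative trigger and decision logic:

def get_tokens_unprocessed(self, text):
    queue = []       # tokens held back while waiting for context
    pending = 0      # significant tokens still needed before deciding
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if not queue and value == 'in':          # illustrative trigger
            queue.append([index, token, value])  # list: token slot is mutable
            pending = 2
        elif pending > 0:
            if token not in Comment and token not in Text:
                pending -= 1
            queue.append((index, token, value))
            if pending == 0:
                if queue[-1][2] == ')':          # enough context: decide
                    queue[0][1] = Keyword
                yield from queue
                queue = []
        else:
            yield index, token, value
    yield from queue                             # flush leftovers at EOF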
Example 12: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        # Convention: mark all upper case names as constants
        if token is Name and value.isupper():
            token = Name.Constant
        yield index, token, value
Example 13: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    # `builtins` is a module-level import of the language's builtin tables.
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in builtins.BUILTIN_FUNCTIONS:
                token = Name.Builtin
            elif value in builtins.BUILTIN_OPERATIONS:
                token = Name.Builtin.Pseudo
        yield index, token, value
Example 14: get_tokens_unprocessed

def get_tokens_unprocessed(self, text):
    stack = ['root']
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name and value in self.EXTRA_KEYWORDS:
            yield index, Name.Builtin, value
        else:
            yield index, token, value
Example 15: get_tokens_unprocessed

def get_tokens_unprocessed(self, text, stack=('root',)):
    ma = MathematicaAnnotations()
    annotations = (ma.builtins, ma.unicode, ma.lexical_scope)
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        result = (index, token, value)
        # Each annotation pass may rewrite the (index, token, value) triple.
        for func in annotations:
            result = func(*result)
        yield result
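Whatever else the override does, every variant above ends up yielding the same stream shape: get_tokens_unprocessed() produces (index, token, value) triples, where index is the character offset of value in the source text. This is easy to observe with any stock lexer:

from pygments.lexers import PythonLexer

for index, token, value in PythonLexer().get_tokens_unprocessed('x = 1\n'):
    print(index, token, repr(value))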