This page collects typical usage examples of the Python method pygments.lexers.agile.PythonLexer.get_tokens_unprocessed. If you have been wondering how PythonLexer.get_tokens_unprocessed is used in practice, the curated examples below may help. You can also read further about the containing class, pygments.lexers.agile.PythonLexer.
Below are 9 code examples of PythonLexer.get_tokens_unprocessed, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
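Before the examples, a quick orientation: get_tokens_unprocessed(text) yields (index, tokentype, value) tuples, where index is the character offset of value within the input text. A minimal sketch of calling it directly on the stock lexer:

from pygments.lexers.agile import PythonLexer

for index, token, value in PythonLexer().get_tokens_unprocessed(u'x = 1\n'):
    print(index, token, repr(value))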
Example 1: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Token types used below: from pygments.token import Keyword, Name
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self.EXTRA_KEYWORDS:
            yield index, Keyword.Pseudo, value
        else:
            yield index, token, value
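A self-contained sketch of the pattern in Example 1, with a hypothetical EXTRA_KEYWORDS set (the real lexer defines its own list of names to promote to pseudo-keywords):

from pygments.lexers.agile import PythonLexer
from pygments.token import Keyword, Name

class ExtraKeywordsLexer(PythonLexer):
    # Hypothetical set; the original lexer supplies its own names.
    EXTRA_KEYWORDS = set(['frobnicate', 'quux'])

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value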
Example 2: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Also needed: from pygments.lexers.agile import PythonTracebackLexer; from pygments.token import Generic
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)  # unused in this excerpt
    for line in text.splitlines():
        lstrip = line.lstrip()
        if lstrip.startswith('Out'):
            line = lstrip + '\n'
        else:
            # Continuation ('...') and ordinary lines keep their indentation.
            line = line + '\n'
        input_prompt = self.input_prompt.match(line)
        output_prompt = self.output_prompt.match(line)
        if input_prompt is not None:
            yield (0, Generic.Prompt, input_prompt.group())
            code = line[input_prompt.end():]
            for item in pylexer.get_tokens_unprocessed(code):
                yield item
        elif output_prompt is not None:
            # Use the 'error' token for output. We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            yield (0, Generic.Error, output_prompt.group())
            index = output_prompt.end()
            yield index, Generic.Output, line[index:]
        else:
            yield 0, Generic.Output, line
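Example 2 assumes that input_prompt and output_prompt are compiled regular expressions stored on the lexer class. A plausible scaffold, modeled on IPython-style prompts (the exact patterns are an assumption, not the original project's definitions):

import re
from pygments.lexer import Lexer

class ConsoleLexerScaffold(Lexer):
    # Assumed prompt patterns; the real lexer defines its own.
    input_prompt = re.compile(r'In \[[0-9]+\]: ')
    output_prompt = re.compile(r'Out\[[0-9]+\]: ')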
Example 3: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Also needed: import re; from pygments.token import Text; from pygments.lexers.templates import HtmlDjangoLexer
def get_tokens_unprocessed(self, text):
    offset = 0
    if re.search(r'^----\s*$', text, re.MULTILINE):
        py, _, text = text.partition('----')
        lexer = PythonLexer(**self.options)
        for i, token, value in lexer.get_tokens_unprocessed(py):
            yield i, token, value
        offset = len(py)       # start of the '----' separator
        yield offset, Text, u'----'
        offset += len('----')  # shift later offsets past the separator
    lexer = HtmlDjangoLexer(**self.options)
    for i, token, value in lexer.get_tokens_unprocessed(text):
        yield offset + i, token, value
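A usage sketch for Example 3, assuming the method lives on a hypothetical PythonHtmlDjangoLexer: everything before the first '----' line is lexed as Python, the rest as HTML+Django, and the later token offsets are shifted so they still point into the combined text.

text = u'''title = "hello"
----
<h1>{{ title }}</h1>
'''
for index, token, value in PythonHtmlDjangoLexer().get_tokens_unprocessed(text):
    print(index, token, repr(value))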
Example 4: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Token types used below: from pygments.token import Keyword, Name
def get_tokens_unprocessed(self, text):
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self.lpy_modules:
            # Colourize previously detected modules
            yield index, Keyword, value
        else:
            yield index, token, value
Example 5: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Also needed: from pygments.lexers.agile import PythonTracebackLexer; from pygments.lexer import do_insertions; from pygments.token import Comment, Generic
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)  # unused in this excerpt
    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif line.startswith("<warning>"):
            insertions.append((len(curcode),
                               [(0, Generic.Error, line[9:])]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            # Use the 'error' token for output. We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            insertions.append((len(curcode),
                               [(0, Generic.Error, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            if curcode:
                for item in do_insertions(
                        insertions, pylexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(
                insertions, pylexer.get_tokens_unprocessed(curcode)):
            yield item
Example 6: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Also needed: from pygments.lexers.agile import PythonTracebackLexer; from pygments.lexer import do_insertions; from pygments.token import Comment, Generic
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)  # unused in this excerpt
    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Output, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            if curcode:
                for item in do_insertions(
                        insertions, pylexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(
                insertions, pylexer.get_tokens_unprocessed(curcode)):
            yield item
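Examples 5 and 6 lean on two module-level helpers: do_insertions, a real Pygments utility (pygments.lexer.do_insertions) that splices the buffered prompt and comment tokens back into the token stream produced for the accumulated code, and a line_re pattern for iterating over the input line by line. A sketch of the supporting definitions, assuming the conventional line_re used throughout Pygments:

import re
from pygments.lexer import do_insertions

# Matches one line at a time, including its trailing newline.
line_re = re.compile('.*?\n')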
Example 7: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Token types used below: from pygments.token import Name, Operator, Token
def get_tokens_unprocessed(self, text):
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        # Tokens beginning with '###' are dropped entirely.
        if value.startswith("###"):
            continue
        # KEYWORDS and PROPERTIES are module-level sets defined by the
        # surrounding project.
        if token == Token.Error and value == "$":
            yield index, Token.Keyword, value
        elif token in (Name, Operator.Word) and value in KEYWORDS:
            yield index, Token.Keyword, value
        elif token in Name and value in PROPERTIES:
            yield index, Name.Attribute, value
        else:
            yield index, token, value
Example 8: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Token types used below: from pygments.token import Name
def get_tokens_unprocessed(self, text):
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self._extra_commands:
            yield index, Name.Builtin, value
        else:
            yield index, token, value
Example 9: get_tokens_unprocessed
# Required import: from pygments.lexers.agile import PythonLexer [as alias]
# Or: from pygments.lexers.agile.PythonLexer import get_tokens_unprocessed [as alias]
# Also needed: from pygments.token import Comment, Token
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    for pos, type_, value in pylexer.get_tokens_unprocessed(text):
        # A bare '$' is an error for the stock Python lexer; re-label it.
        if type_ == Token.Error and value == '$':
            type_ = Comment.Preproc
        yield pos, type_, value
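A usage sketch for Example 9, assuming the method is wrapped in a hypothetical DollarTemplateLexer: the stock Python lexer reports a bare '$' as Token.Error, and the override re-labels it as Comment.Preproc so template placeholders render as preprocessor-style markup.

for pos, type_, value in DollarTemplateLexer().get_tokens_unprocessed(u'$x = 1\n'):
    print(pos, type_, repr(value))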