本文整理汇总了Python中pygments.lexers.agile.PythonLexer类的典型用法代码示例。如果您正苦于以下问题:Python PythonLexer类的具体用法?Python PythonLexer怎么用?Python PythonLexer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PythonLexer类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Tokenize an IPython-style console session line by line.

    Lines matching ``self.input_prompt`` yield a prompt token followed by
    the trailing code lexed as Python; lines matching
    ``self.output_prompt`` yield an error-colored prompt followed by plain
    output; all other lines are emitted as generic output.
    """
    pylexer = PythonLexer(**self.options)
    # print '\nTEXT > \n', text, '\n TEXT'
    for line in text.splitlines():
        lstrip = line.lstrip()
        if lstrip.startswith('Out'):
            # Output prompts are matched without their leading indentation.
            line = lstrip + '\n'
        else:
            # Continuation ('...') and all other lines keep their indent.
            line = line + '\n'
        input_prompt = self.input_prompt.match(line)
        output_prompt = self.output_prompt.match(line)
        if input_prompt is not None:
            yield (0, Generic.Prompt, input_prompt.group())
            # Hand everything after the prompt to the Python lexer.
            code = line[input_prompt.end():]
            for item in pylexer.get_tokens_unprocessed(code):
                yield item
        elif output_prompt is not None:
            # Use the 'error' token for output. We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            yield (0, Generic.Error, output_prompt.group())
            index = output_prompt.end()
            yield index, Generic.Output, line[index:]
        else:
            yield 0, Generic.Output, line
示例2: highlight_code
def highlight_code(codebox):
    """Color the text of *codebox* using Pygments' 'default' style.

    Iterates the tokens a PythonLexer produces for the box's string and
    paints each token's span through a UNO text cursor.
    """
    style = styles.get_style_by_name('default')
    cursor = codebox.createTextCursor()
    cursor.gotoStart(False)
    for token_type, token_text in PythonLexer().get_tokens(codebox.String):
        # Select exactly the token's characters, color them, then collapse
        # the selection so the cursor sits at the start of the next token.
        cursor.goRight(len(token_text), True)
        cursor.CharColor = to_rgbint(style.style_for_token(token_type)['color'])
        cursor.goRight(0, False)
示例3: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Re-tag names listed in EXTRA_KEYWORDS as pseudo-keywords.

    All other tokens from the base Python lexer pass through unchanged.
    """
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        is_extra = token is Name and value in self.EXTRA_KEYWORDS
        yield index, (Keyword.Pseudo if is_extra else token), value
示例4: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Lex an optional Python header, a '----' divider, then HTML+Django.

    If *text* contains a line of '----' (plus optional trailing
    whitespace), everything before the first '----' is lexed as Python,
    the divider is emitted as plain text, and the remainder is handed to
    HtmlDjangoLexer with its offsets shifted past the first part.
    """
    offset = 0
    if re.search(r'^----\s*$', text, re.MULTILINE):
        py_part, _, text = text.partition('----')
        py_lexer = PythonLexer(**self.options)
        for pos, token, value in py_lexer.get_tokens_unprocessed(py_part):
            yield pos, token, value
            # Track where the Python part ends so later offsets line up.
            offset = pos + 1
        yield offset, Text, u'----'
        offset += 1
    html_lexer = HtmlDjangoLexer(**self.options)
    for pos, token, value in html_lexer.get_tokens_unprocessed(text):
        yield offset + pos, token, value
示例5: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Colourize previously detected L-Py module names as keywords.

    Tokens whose value is in ``self.lpy_modules`` and that the Python
    lexer tagged as plain names are promoted to Keyword; everything else
    passes through untouched.
    """
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        is_module = token is Name and value in self.lpy_modules
        yield index, (Keyword if is_module else token), value
示例6: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Tokenize a console transcript with prompts, comments and warnings.

    Input/continuation-prompt lines are accumulated into one code buffer
    so the Python lexer sees whole statements; prompt, comment and
    warning markers are spliced back in with ``do_insertions``.  Any
    other line is plain output and flushes the pending buffer first.
    """
    pylexer = PythonLexer(**self.options)
    curcode = ''
    insertions = []

    def _flush(code, marks):
        # Lex the buffered code and re-insert the collected marker tokens.
        return do_insertions(marks, pylexer.get_tokens_unprocessed(code))

    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif line.startswith("<warning>"):
            # Drop the 9-char "<warning>" marker; show the rest as an error.
            insertions.append((len(curcode),
                               [(0, Generic.Error, line[9:])]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            # Use the 'error' token for output. We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            insertions.append((len(curcode),
                               [(0, Generic.Error, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            # Plain output: flush buffered code first so token order holds.
            if curcode:
                for item in _flush(curcode, insertions):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in _flush(curcode, insertions):
            yield item
示例7: setLexerFromFilename
def setLexerFromFilename(self, filename):
    """Pick the lexer from *filename*'s extension, defaulting to Python.

    :param filename: Filename or bare extension understood by Pygments
    """
    try:
        lexer = get_lexer_for_filename(filename)
    except ClassNotFound:
        # Unknown extension: fall back to Python highlighting.
        lexer = PythonLexer()
    self._lexer = lexer
示例8: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Tokenize a console transcript with prompts and comment lines.

    Input/continuation-prompt lines are accumulated into one code buffer
    so the Python lexer sees whole statements; prompt and comment markers
    are spliced back in with ``do_insertions``.  Any other line is plain
    output and flushes the pending buffer first.
    """
    pylexer = PythonLexer(**self.options)
    curcode = ''
    insertions = []

    def _flush(code, marks):
        # Lex the buffered code and re-insert the collected marker tokens.
        return do_insertions(marks, pylexer.get_tokens_unprocessed(code))

    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Output, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            # Plain output: flush buffered code first so token order holds.
            if curcode:
                for item in _flush(curcode, insertions):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in _flush(curcode, insertions):
            yield item
示例9: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Overlay extra keywords and properties on the Python token stream.

    Tokens whose value starts with '###' are dropped; a lone '$' (which
    the Python lexer marks as an error) and values listed in KEYWORDS
    become keywords; Name-subtype tokens listed in PROPERTIES become
    attributes; everything else passes through unchanged.
    """
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        if value.startswith("###"):
            # Swallow '###' comment markers entirely.
            continue
        if token == Token.Error and value == "$":
            token = Token.Keyword
        elif value in KEYWORDS and token in (Name, Operator.Word):
            token = Token.Keyword
        elif token in Name and value in PROPERTIES:
            token = Name.Attribute
        yield index, token, value
示例10: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Highlight registered extra commands as builtin names.

    Plain Name tokens whose value appears in ``self._extra_commands``
    are re-tagged Name.Builtin; all other tokens pass through.
    """
    for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self._extra_commands:
            token = Name.Builtin
        yield index, token, value
示例11: get_tokens_unprocessed
def get_tokens_unprocessed(self, text):
    """Lex as Python, re-tagging a stray '$' as a preprocessor comment."""
    base = PythonLexer(**self.options)
    for position, token_type, value in base.get_tokens_unprocessed(text):
        # '$' is not valid Python, so the base lexer marks it Token.Error;
        # present it as Comment.Preproc instead.
        if token_type == Token.Error and value == '$':
            token_type = Comment.Preproc
        yield position, token_type, value
示例12: QPygmentsHighlighter
# NOTE(review): the source page states part of this class was omitted
# ("部分代码省略"), and the scrape flattened all indentation — code kept
# byte-identical; only comments added.  Uses Python 2 ('unicode').
class QPygmentsHighlighter(QSyntaxHighlighter):
""" Syntax highlighter that uses Pygments for parsing. """
# Emitted after highlightBlock finishes so listeners can post-process.
hilighlightingBlock = Signal(unicode, QSyntaxHighlighter)
#---------------------------------------------------------------------------
# 'QSyntaxHighlighter' interface
#---------------------------------------------------------------------------
def __init__(self, parent, lexer=None):
super(QPygmentsHighlighter, self).__init__(parent)
# Private document + HTML formatter used for stylesheet-based rendering.
self._document = QtGui.QTextDocument()
self._formatter = HtmlFormatter(nowrap=True)
# Default to Python highlighting when no lexer is supplied.
self._lexer = lexer if lexer else PythonLexer()
self.style = styles.getStyle("Default").pygmentsStyle
self.enabled = True
def setLexerFromFilename(self, filename):
"""
Change the lexer based on the filename (actually only the extension is needed)
:param filename: Filename or extension
"""
try:
self._lexer = get_lexer_for_filename(filename)
except ClassNotFound:
# Unknown extension: fall back to Python.
self._lexer = PythonLexer()
def highlightBlock(self, text):
""" Highlight a block of text """
if self.enabled is False:
return
text = unicode(text)
original_text = text
# Restore the lexer state saved on the previous block so multi-line
# constructs (triple-quoted strings, etc.) continue highlighting.
prev_data = self.currentBlock().previous().userData()
if prev_data is not None:
self._lexer._saved_state_stack = prev_data.syntax_stack
elif hasattr(self._lexer, '_saved_state_stack'):
del self._lexer._saved_state_stack
# Lex the text using Pygments
index = 0
for token, text in self._lexer.get_tokens(text):
length = len(text)
self.setFormat(index, length, self._get_format(token))
index += length
# Persist this block's lexer state for the next block, then drop it.
if hasattr(self._lexer, '_saved_state_stack'):
data = PygmentsBlockUserData(
syntax_stack=self._lexer._saved_state_stack)
self.currentBlock().setUserData(data)
# Clean up for the next go-round.
del self._lexer._saved_state_stack
#Spaces
# Re-format all whitespace runs so they get the Whitespace style.
expression = QRegExp('\s+')
index = expression.indexIn(original_text, 0)
while index >= 0:
index = expression.pos(0)
length = len(expression.cap(0))
self.setFormat(index, length, self._get_format(Whitespace))
index = expression.indexIn(original_text, index + length)
self.hilighlightingBlock.emit(original_text, self)
# expression = QRegExp('\s+')
# index = expression.indexIn(original_text, 0)
# while index >= 0:
# index = expression.pos(0)
# length = len(expression.cap(0))
# self.setFormat(index, length, self._get_format(Whitespace))
# index = expression.indexIn(original_text, index + length)
#---------------------------------------------------------------------------
# 'PygmentsHighlighter' interface
#---------------------------------------------------------------------------
def __set_style(self, style):
""" Sets the style to the specified Pygments style.
"""
# Accept a style name (str/unicode) or a style class.
if (isinstance(style, str) or
isinstance(style, unicode)):
style = get_style_by_name(style)
self._style = style
self._clear_caches()
def set_style_sheet(self, stylesheet):
""" Sets a CSS stylesheet. The classes in the stylesheet should
correspond to those generated by:
pygmentize -S <style> -f html
Note that 'set_style' and 'set_style_sheet' completely override each
other, i.e. they cannot be used in conjunction.
"""
self._document.setDefaultStyleSheet(stylesheet)
self._style = None
self._clear_caches()
#.........这里部分代码省略.........
示例13: NameHighlightFilter
from pygments import highlight
from pygments.lexers.agile import PythonLexer
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.formatters.terminal import TerminalFormatter
from pygments.styles import emacs
from pygments.filters import NameHighlightFilter
from pygments.token import Other
from talkstyle import TalkStyle
# Filter that re-tags occurrences of the name 'video_cap' with the generic
# Other token so TalkStyle can style them distinctly.
fltr = NameHighlightFilter(names=['video_cap'],
tokentype=Other)
from asciipoint import *
# Read the demo script and highlight it for a 256-color terminal using the
# custom TalkStyle.
slide = open('webcam.py').read()
lex = PythonLexer()
lex.add_filter(fltr)
slide = highlight(slide, lex,
Terminal256Formatter(style=TalkStyle)
#TerminalFormatter(bg='dark')
)
# Regex/effect pairs for emphasising specific lines during the talk.
# NOTE(review): 'inv' presumably comes from the star import of asciipoint
# above — confirm it is the inverse-video effect.
hl = [(('(import ecto)', inv),),
((r'(from ecto_opencv.*)', inv),),
((r'(plasm =.*)', inv),),
((r'(VideoCapture)', inv),),
((r'(highgui\.VideoCapture)', inv),),
((r'(imshow)', inv),),
]