This article collects typical usage examples of the Python method pygments.token.Token.Comment. If you are unsure how Token.Comment is used in practice, the curated code examples below may help; you can also explore further uses of the class it belongs to, pygments.token.Token.
The following presents 15 code examples of the Token.Comment method, sorted by popularity by default.
Example 1: in_prompt_tokens
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def in_prompt_tokens(self, cli=None):
    session = self.shell.user_module.session
    style = session.GetParameter("highlighting_style")
    old_style = self.shell.highlighting_style
    if style != old_style:
        try:
            self.shell.highlighting_style = style
        except Exception:
            self.shell.highlighting_style = old_style
            session.logging.error(
                "Style %s not valid. Valid styles are %s" %
                (style, list(styles.get_all_styles())))
    return [
        (Token.Prompt, "["),
        (Token.Name.Variable, str(session.session_id)),
        (Token.Prompt, "] "),
        (Token.Name.Class, str(session.session_name)),
        (Token.Prompt, " "),
        (Token.Comment, time.strftime("%H:%M:%S")),
        (Token.Prompt, "> "),
    ]
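Classes that define in_prompt_tokens follow IPython's prompt-customization API (IPython.terminal.prompts.Prompts). As a minimal sketch assuming only that public API (the TimestampPrompts name and the reduced token list are illustrative, not taken from the project above), a prompt that timestamps itself with Token.Comment could look like this:

import time
from IPython.terminal.prompts import Prompts
from pygments.token import Token

class TimestampPrompts(Prompts):
    # Illustrative subclass: render the input prompt with a timestamp
    # styled as Token.Comment, as in the example above.
    def in_prompt_tokens(self):
        return [
            (Token.Prompt, "["),
            (Token.Comment, time.strftime("%H:%M:%S")),
            (Token.Prompt, "]> "),
        ]

# Inside an IPython session:
#   ip = get_ipython()
#   ip.prompts = TimestampPrompts(ip)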
Example 2: get_tokens_unprocessed
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def get_tokens_unprocessed(self, text):
    buf = ''
    idx = 0
    for i, t, v in self.lang.get_tokens_unprocessed(text):
        if t in Token.Comment or t in Token.String:
            if buf:
                for x in self.get_tokens_aux(idx, buf):
                    yield x
                buf = ''
            yield i, t, v
        else:
            if not buf:
                idx = i
            buf += v
    if buf:
        for x in self.get_tokens_aux(idx, buf):
            yield x
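The checks t in Token.Comment and t in Token.String work because pygments token types implement containment over the token hierarchy, so subtypes such as Token.Comment.Single also match. A self-contained illustration (the sample source string is made up):

from pygments.lexers import PythonLexer
from pygments.token import Token

code = 's = "text"  # trailing comment\n'
for idx, ttype, value in PythonLexer().get_tokens_unprocessed(code):
    # get_tokens_unprocessed yields (start offset, token type, text).
    kind = 'literal' if (ttype in Token.Comment or ttype in Token.String) else 'code'
    print(idx, kind, ttype, repr(value))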
Example 3: test_comment_after_continuation
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def test_comment_after_continuation(lexer):
    """
    Test that text after the line continuation ellipsis is marked as a comment.
    """
    fragment = "set('T',300,... a comment\n'P',101325);\n"
    tokens = [
        (Token.Name, 'set'),
        (Token.Punctuation, '('),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "T'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '300'),
        (Token.Punctuation, ','),
        (Token.Keyword, '...'),
        (Token.Comment, ' a comment'),
        (Token.Text, '\n'),
        (Token.Literal.String, "'"),
        (Token.Literal.String, "P'"),
        (Token.Punctuation, ','),
        (Token.Literal.Number.Integer, '101325'),
        (Token.Punctuation, ')'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
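The lexer argument is a pytest fixture that the excerpt does not show; since the fragment is MATLAB source, it is presumably defined along these lines (a sketch in the style of the pygments test suite):

import pytest
from pygments.lexers import MatlabLexer

@pytest.fixture(scope='module')
def lexer():
    yield MatlabLexer()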
Example 4: out_prompt_tokens
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def out_prompt_tokens(self):
    return [
        (Token.OutPrompt, 'Out<'),
        (Token.Comment, time.strftime("%H:%M:%S")),
        (Token.OutPrompt, '> '),
    ]
Example 5: get_autocomplete_stub
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def get_autocomplete_stub(lexer, text):
    """Return the token block preceding the cursor, for autocompletion."""
    entity = []
    from pygments.token import Token
    # ignored tokens
    ignored = [Token.Comment, Token.Text, Token.Text.Whitespace,
               Token.Comment.Single]
    filtered = lambda pair: pair[0] not in ignored  # pair = (token, value)
    tokens = filter(filtered, get_tokens_reversed(lexer, text))
    blocks = get_blocks(tokens)
    block = next(blocks, [])
    if len(block) == 1 and block[0][1] == ".":
        block = next(blocks, [])
        if len(block) > 0 and block[0][1] == "(":
            block_ = next(blocks, [])
            # block_ should hold the callable's name token.
            if len(block_) == 1 and block_[0][0] is Token.Name:
                return block_ + block
        return block
    return []
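Note that ignored is a plain list, so pair[0] not in ignored is decided by tuple equality, not by the hierarchy-aware containment of token types; that is presumably why Token.Comment.Single must be listed in addition to Token.Comment. The difference in a short snippet:

from pygments.token import Token

ignored = [Token.Comment, Token.Text]
print(Token.Comment.Single in ignored)        # False: list membership is equality
print(Token.Comment.Single in Token.Comment)  # True: token types match subtypes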
Example 6: filter
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def filter(self, lexer, stream):
    lexed = list(map(list, stream))
    hexdumps = {}
    count = 0
    for i, token in enumerate(lexed):
        if token[0] is Token.Comment.Special:
            token[1] = token[1].strip() + ' '
            count += 1
        elif count > 0:
            hexdumps[i - count] = count
            count = 0
    if hexdumps:
        # Not a true median: this picks the 90th percentile.
        median = sorted(hexdumps.values())[int(len(hexdumps) * 0.9)]
        for i, count in hexdumps.items():
            token = lexed[i + count - 1]
            value = token[1]
            backoff = 2
            padding = median
            while padding < count:
                padding += backoff
                backoff *= 2
            padding = int(padding) - count
            # Pad the last hexdump token of the run so columns line up.
            value += padding * 3 * ' '
            token[1] = value
    yield from lexed
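This is a pygments stream filter: filter(self, lexer, stream) receives (token type, value) pairs and yields a rewritten stream. A much smaller filter of the same shape, using only the public pygments.filter.Filter API (the ShoutingComments class is a toy example):

from pygments.filter import Filter
from pygments.lexers import PythonLexer
from pygments.token import Token

class ShoutingComments(Filter):
    # Toy filter: upper-case the text of every comment token.
    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Token.Comment:
                value = value.upper()
            yield ttype, value

lexer = PythonLexer()
lexer.add_filter(ShoutingComments())
print(list(lexer.get_tokens('x = 1  # important\n')))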
Example 7: fmttokens
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def fmttokens(self, hexdump=False, offset=0,
              skipleading=False, style=True):
    # TODO: We need a way to remove indentation.
    # Something like "skip spaces if more than one" should do
    # the trick. It might be worth checking if this is doable
    # at the Formatable level?
    if style is True:
        gstyle = Token.Generic.Heading if self.current else None
        style = self.formatting.get(self.itype)
    else:
        gstyle = style
    for ttype, value in self.tokens[offset:]:
        if skipleading and value.isspace():
            continue
        skipleading = False
        if not hexdump and ttype is Token.Comment.Special:
            continue
        # If bytecode should be an indicator of the instruction
        # type, we should add Comment.Special in the following:
        if style and ttype in (Token.Comment, ):
            ttype = style
        if gstyle:
            ttype = gstyle
        yield ttype, value
Example 8: fmtinsttokens
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def fmtinsttokens(self, hexdump=False):
    if self.instidx is not None:
        yield from self.fmttokens(hexdump=hexdump, offset=self.instidx)
    else:
        yield (Token.Comment, "(bad)")
Example 9: fmtaddr
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def fmtaddr(self, addr):
    if "x" in self.perms:
        token = Token.Generic.Deleted
    elif "w" in self.perms:
        token = Token.Keyword
    elif "r" in self.perms:
        token = Token.Generic.Inserted
    elif "u" in self.perms:
        token = Token.Comment
    else:
        token = Token.Text
    yield (token, "%#.x" % addr)
Example 10: test_comments
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def test_comments(lexer):
    """
    Ensure lines led by either # or ! are recognized as comments.
    """
    fragment = '! a comment\n# also a comment\n'
    tokens = [
        (Token.Comment, '! a comment'),
        (Token.Text, '\n'),
        (Token.Comment, '# also a comment'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 11: test_leading_whitespace_comments
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def test_leading_whitespace_comments(lexer):
    fragment = ' # comment\n'
    tokens = [
        (Token.Text, ' '),
        (Token.Comment, '# comment'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
Example 12: test_comment
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def test_comment(lexer):
    data = '@COMMENT{test}'
    tokens = [
        (Token.Comment, u'@COMMENT'),
        (Token.Comment, u'{test}'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(data)) == tokens
Example 13: _get_format_from_style
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def _get_format_from_style(self, token, style):
    """ Returns a QTextCharFormat for token by reading a Pygments style.
    """
    result = QtGui.QTextCharFormat()
    items = list(style.style_for_token(token).items())
    for key, value in items:
        if value is None and key == 'color':
            # make sure to use a default visible color for the foreground
            # brush
            value = drift_color(self.background, 1000).name()
        if value:
            if key == 'color':
                result.setForeground(self._get_brush(value))
            elif key == 'bgcolor':
                result.setBackground(self._get_brush(value))
            elif key == 'bold':
                result.setFontWeight(QtGui.QFont.Bold)
            elif key == 'italic':
                result.setFontItalic(value)
            elif key == 'underline':
                result.setUnderlineStyle(
                    QtGui.QTextCharFormat.SingleUnderline)
            elif key == 'sans':
                result.setFontStyleHint(QtGui.QFont.SansSerif)
            elif key == 'roman':
                result.setFontStyleHint(QtGui.QFont.Times)
            elif key == 'mono':
                result.setFontStyleHint(QtGui.QFont.TypeWriter)
    if token in [Token.Literal.String, Token.Literal.String.Doc,
                 Token.Comment]:
        # mark strings, comments and docstrings regions for further queries
        result.setObjectType(result.UserObject)
    return result
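style.style_for_token(token) is the standard pygments API this method reads: it returns a dict of style attributes ('color', 'bgcolor', 'bold', 'italic', 'underline' and so on) with inheritance along the token hierarchy already resolved. It can be inspected without any Qt involved; the exact values depend on the pygments version and style:

from pygments.styles import get_style_by_name
from pygments.token import Token

style = get_style_by_name('default')
print(style.style_for_token(Token.Comment))
# e.g. {'color': '3D7B7B', 'bold': False, 'italic': True, ...}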
Example 14: color_line
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def color_line(self, line):
    """Colorize a line of C code using IDA color tags."""
    lexer = CLexer()
    tokens = list(lexer.get_tokens(line))
    new_line = ""
    for t in tokens:
        ttype = t[0]
        ttext = str(t[1])
        if ttype == Token.Text:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_INSN)
        elif ttype == Token.Text.Whitespace:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_INSN)
        elif ttype == Token.Error:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_ERROR)
        elif ttype == Token.Other:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_DSTR)
        elif ttype == Token.Keyword:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_KEYWORD)
        elif ttype == Token.Name:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_LIBNAME)
        elif ttype == Token.Literal:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_LOCNAME)
        elif ttype == Token.Literal.String:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_STRING)
        elif ttype == Token.Literal.Number:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_DNUM)
        elif ttype == Token.Operator:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_ALTOP)
        elif ttype == Token.Punctuation:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_SYMBOL)
        elif ttype == Token.Comment:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_REGCMT)
        elif ttype == Token.Comment.Single:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_REGCMT)
        elif ttype == Token.Generic:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_CREFTAIL)
        else:
            new_line += idaapi.COLSTR(ttext, idaapi.SCOLOR_CREFTAIL)
    return new_line
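The idaapi.COLSTR calls only work inside IDA, but the token stream that drives the dispatch is plain pygments and can be inspected anywhere:

from pygments.lexers import CLexer

for ttype, ttext in CLexer().get_tokens('int x = 0; // init\n'):
    print(str(ttype).ljust(30), repr(ttext))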
Example 15: add_comment
# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Comment [as alias]
def add_comment(self):
    """
    Add a comment to the selected line.
    """
    print("GhIDA:: [DEBUG] add_comment called")
    colored_line = self.GetCurrentLine(notags=1)
    if not colored_line:
        idaapi.warning("Select a line")
        return False
    # Use pygments to parse the line to check if there are comments
    line = idaapi.tag_remove(colored_line)
    lexer = CLexer()
    tokens = list(lexer.get_tokens(line))
    text = ""
    text_comment = ""
    for t in tokens:
        ttype = t[0]
        ttext = str(t[1])
        if ttype == Token.Comment.Single:
            text_comment = ttext.replace('//', '').strip()
        else:
            text += ttext
    # Get the new comment
    comment = gl.display_comment_form(text_comment)
    if not comment or len(comment) == 0:
        return False
    comment = comment.replace("//", "").replace("\n", " ")
    comment = comment.strip()
    # Create the new text
    full_comment = "\t// %s" % comment
    text = text.rstrip()
    new_text = text + full_comment
    text_colored = self.color_line(new_text)
    num_line = self.GetLineNo()
    self.EditLine(num_line, text_colored)
    self.RefreshCurrent()
    # Add comment to cache
    COMMENTS_CACHE.add_comment_to_cache(self.__ea, num_line, full_comment)
    print("GhIDA:: [DEBUG] Added comment to #line: %d (%s)" %
          (num_line, new_text))
    return