本文整理汇总了Python中pygments.token.Token方法的典型用法代码示例。如果您正苦于以下问题:Python token.Token方法的具体用法?Python token.Token怎么用?Python token.Token使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块pygments.token
的用法示例。
在下文中一共展示了token.Token方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: serialize_code
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def serialize_code(code, lexer='does not matter'):
    """Wrap *code* in a ``code_el.Code`` element holding a single raw token.

    The *lexer* argument is accepted for interface compatibility only and
    is ignored.
    """
    raw_token = code_el.Token(val=code)
    return code_el.Code(tokens=[raw_token])
示例2: get_code_class
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def get_code_class(token_type):
    """Resolve a Pygments token type to its mapped code-element class.

    Looks up *token_type* in ``_TOKEN_MAP``; on a miss it retries with the
    shortened (parent) token type, and falls back to the generic
    ``code_el.Token`` when that lookup also misses.
    """
    mapped = _TOKEN_MAP.get(token_type)
    while mapped is None:
        token_type = token_type[:-1]
        mapped = _TOKEN_MAP.get(token_type)
        if mapped is None:
            # No mapping even for the shortened type: use the generic class.
            mapped = code_el.Token
    return mapped
示例3: test_tokentype
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def test_tokentype():
    """A token type splits into its ancestry chain and is a _TokenType."""
    string_type = token.String
    ancestry = [token.Token, token.Literal, token.String]
    assert string_type.split() == ancestry
    assert string_type.__class__ is token._TokenType
示例4: test_functions
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def test_functions():
    """Check subtype relations and string-to-token resolution helpers."""
    # A token type is a subtype of itself and its ancestors, but an
    # ancestor is not a subtype of its child.
    subtype_cases = [
        (token.String, token.String, True),
        (token.String, token.Literal, True),
        (token.Literal, token.String, False),
    ]
    for child, parent, expected in subtype_cases:
        assert bool(token.is_token_subtype(child, parent)) == expected

    # string_to_tokentype passes token types through unchanged, resolves
    # dotted names, and maps the empty string to the root Token.
    resolution_cases = [
        (token.String, token.String),
        ('', token.Token),
        ('String', token.String),
    ]
    for spec, expected_type in resolution_cases:
        assert token.string_to_tokentype(spec) is expected_type
示例5: test_sanity_check
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def test_sanity_check():
    """Every standard token type must have a unique shorthand class name."""
    shorthand_map = token.STANDARD_TYPES.copy()
    shorthand_map[token.Token] = '---'  # Token and Text do conflict, that is okay

    # Group token types by their shorthand to detect collisions.
    by_shorthand = {}
    for ttype, shorthand in shorthand_map.items():
        by_shorthand.setdefault(shorthand, []).append(ttype)

    if len(by_shorthand) == len(shorthand_map):
        return  # Okay
    for shorthand, ttypes in by_shorthand.items():
        if len(ttypes) > 1:
            pytest.fail("%r has more than one key: %r" % (shorthand, ttypes))
示例6: test_copying
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def test_copying():
    # Token instances are supposed to be singletons, so copying or even
    # deepcopying should return themselves
    original = token.String
    for duplicate in (copy.copy(original), copy.deepcopy(original)):
        assert duplicate is original
示例7: style_from_pygments_dict
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def style_from_pygments_dict(pygments_dict: Dict["Token", str]) -> Style:
    """
    Create a :class:`.Style` instance from a Pygments style dictionary.
    (One that maps Token objects to style strings.)
    """
    rules = [
        (pygments_token_to_classname(tok), style_str)
        for tok, style_str in pygments_dict.items()
    ]
    return Style(rules)
示例8: pygments_token_to_classname
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def pygments_token_to_classname(token: "Token") -> str:
    """
    Turn e.g. `Token.Name.Exception` into `'pygments.name.exception'`.
    (Our Pygments lexer will also turn the tokens that pygments produces in a
    prompt_toolkit list of fragments that match these styling rules.)
    """
    # A Token is a tuple subclass, so prepending "pygments" and joining
    # with dots yields the dotted class name; lowercase it afterwards.
    dotted = ".".join(("pygments",) + token)
    return dotted.lower()
示例9: __init__
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def __init__(self, token_list: List[Tuple["Token", str]]) -> None:
    """Store a list of ``(Token, text)`` tuples for later rendering."""
    self.token_list = token_list
示例10: _handle_exception
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def _handle_exception(self, e: Exception) -> None:
    """Format the current exception's traceback and write it to the output.

    Frames above the user's ``"<stdin>"`` frame are dropped so framework
    internals are not shown; the result is optionally syntax-highlighted
    before being printed, followed by the exception message itself.
    """
    output = self.app.output
    # Instead of just calling ``traceback.format_exc``, we take the
    # traceback and skip the bottom calls of this framework.
    t, v, tb = sys.exc_info()
    # Required for pdb.post_mortem() to work.
    sys.last_type, sys.last_value, sys.last_traceback = t, v, tb
    tblist = list(traceback.extract_tb(tb))
    for line_nr, tb_tuple in enumerate(tblist):
        if tb_tuple[0] == "<stdin>":
            # Keep only the frames from the user's "<stdin>" code onward.
            tblist = tblist[line_nr:]
            break
    l = traceback.format_list(tblist)
    if l:
        l.insert(0, "Traceback (most recent call last):\n")
    l.extend(traceback.format_exception_only(t, v))
    tb_str = "".join(l)
    # Format exception and write to output.
    # (We use the default style. Most other styles result
    # in unreadable colors for the traceback.)
    if self.enable_syntax_highlighting:
        tokens = list(_lex_python_traceback(tb_str))
    else:
        # No highlighting requested: emit the traceback as one plain Token.
        tokens = [(Token, tb_str)]
    print_formatted_text(
        PygmentsTokens(tokens),
        style=self._current_style,
        style_transformation=self.style_transformation,
        include_default_pygments_style=False,
        output=output,
    )
    output.write("%s\n" % e)
    output.flush()
示例11: __new__
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Token [as 别名]
def __new__(mcs, name, bases, dct):
    """Build the style class, then expand its ``styles`` mapping.

    Every standard token type gets an entry in ``styles``; the parsed
    per-token definitions are precomputed into ``_styles``, where each
    value is a list of the form
    ``[color, bold, italic, underline, bgcolor, border, roman, sans, mono]``.
    """
    obj = type.__new__(mcs, name, bases, dct)
    for token in STANDARD_TYPES:
        if token not in obj.styles:
            obj.styles[token] = ''

    def colorformat(text):
        # Normalize a '#rgb'/'#rrggbb' color to a 6-digit hex string;
        # the empty string stays empty; anything else is rejected.
        if text[0:1] == '#':
            col = text[1:]
            if len(col) == 6:
                return col
            elif len(col) == 3:
                # Expand shorthand '#abc' to 'aabbcc'.
                return col[0]*2 + col[1]*2 + col[2]*2
        elif text == '':
            return ''
        assert False, "wrong color format %r" % text

    _styles = obj._styles = {}

    for ttype in obj.styles:
        # split() yields the token's ancestry from root to itself, so a
        # parent's parsed definition is always available before its child.
        for token in ttype.split():
            if token in _styles:
                continue
            # Inherit the parent's already-parsed definition, if any.
            ndef = _styles.get(token.parent, None)
            styledefs = obj.styles.get(token, '').split()
            if not ndef or token is None:
                ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
            elif 'noinherit' in styledefs and token is not Token:
                # 'noinherit' resets this token to the root Token style.
                ndef = _styles[Token][:]
            else:
                ndef = ndef[:]  # copy so the parent entry is not mutated
            _styles[token] = ndef
            for styledef in obj.styles.get(token, '').split():
                if styledef == 'noinherit':
                    pass  # handled above when choosing the base definition
                elif styledef == 'bold':
                    ndef[1] = 1
                elif styledef == 'nobold':
                    ndef[1] = 0
                elif styledef == 'italic':
                    ndef[2] = 1
                elif styledef == 'noitalic':
                    ndef[2] = 0
                elif styledef == 'underline':
                    ndef[3] = 1
                elif styledef == 'nounderline':
                    ndef[3] = 0
                elif styledef[:3] == 'bg:':
                    ndef[4] = colorformat(styledef[3:])
                elif styledef[:7] == 'border:':
                    ndef[5] = colorformat(styledef[7:])
                elif styledef == 'roman':
                    ndef[6] = 1
                elif styledef == 'sans':
                    ndef[7] = 1
                elif styledef == 'mono':
                    ndef[8] = 1
                else:
                    # Any other word is taken as the foreground color.
                    ndef[0] = colorformat(styledef)
    return obj