This article collects typical usage examples of the String token type from Python's pygments.token module. If you have been wondering what pygments.token.String is for, how to call it, or what real code that uses it looks like, the curated examples below should help; they also illustrate the surrounding pygments.token module.
The following 7 code examples of token.String are shown, sorted by popularity by default.
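For orientation, here is a minimal standalone snippet showing what pygments.token.String actually is: a singleton token type inside pygments' token hierarchy, usually imported so that lexer output can be matched against it.

from pygments.token import Token, String

# String is an alias for Token.Literal.String, a node in the token tree.
print(String)                          # Token.Literal.String
print(String is Token.Literal.String)  # True: token types are singletons
# Subtypes are created lazily on attribute access and are singletons too;
# the `in` operator tests the subtype relation.
print(String.Double in String)         # True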
Example 1: visit_ls
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def visit_ls(self, node, children):
    path = urlparse(self.context_override.url).path
    path = filter(None, path.split('/'))
    nodes = self.context.root.ls(*path)
    if self.output.isatty():
        # Interactive terminal: colorize entries, using the String token
        # type for directories and Name for everything else.
        names = []
        for node in nodes:
            token_type = String if node.data.get('type') == 'dir' else Name
            name = self._colorize(node.name, token_type)
            names.append(name)
        lines = list(colformat(list(names)))
    else:
        lines = [n.name for n in nodes]
    if lines:
        self.output.write('\n'.join(lines))
    return node
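The _colorize helper used above belongs to the surrounding class and is not shown here. A minimal sketch of how such a helper could be built on pygments (hypothetical; it assumes a 256-color terminal and may differ from the project's real implementation):

from io import StringIO

from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.token import Name, String


def colorize(text, token_type):
    # Render a single (token_type, text) pair as ANSI-escaped text.
    out = StringIO()
    Terminal256Formatter().format([(token_type, text)], out)
    return out.getvalue()


print(colorize('some_dir', String))  # styled like a string literal
print(colorize('a_file', Name))      # styled like a name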
Example 2: test_should_cope_with_multiline_comments
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def test_should_cope_with_multiline_comments(lexer):
    fragment = u'"""\nthis\nis\na\ncomment"""'
    tokens = [
        (String, u'"""\nthis\nis\na\ncomment"""'),
        (Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
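To see where String tokens come from in practice, you can run any stock lexer over a similar fragment. The exact subtypes (e.g. String.Doc) and how the text is split depend on the lexer, but every string piece is a subtype of String; a quick check:

from pygments.lexers import PythonLexer
from pygments.token import String

fragment = '"""\nthis\nis\na\ncomment"""'
for token_type, value in PythonLexer().get_tokens(fragment):
    print(token_type in String, token_type, repr(value))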
Example 3: test_tokentype
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def test_tokentype():
    t = token.String
    assert t.split() == [token.Token, token.Literal, token.String]
    assert t.__class__ is token._TokenType
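A short follow-up on the hierarchy that split() walks: attribute access on a token type creates (and caches) a child type, and split() lists the path from the root down to that type.

from pygments import token

double = token.String.Double
print(double)                              # Token.Literal.String.Double
print(double.split()[-2] is token.String)  # True: its parent is String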
Example 4: test_functions
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def test_functions():
    assert token.is_token_subtype(token.String, token.String)
    assert token.is_token_subtype(token.String, token.Literal)
    assert not token.is_token_subtype(token.Literal, token.String)
    assert token.string_to_tokentype(token.String) is token.String
    assert token.string_to_tokentype('') is token.Token
    assert token.string_to_tokentype('String') is token.String
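string_to_tokentype also resolves dotted names through the hierarchy, which is handy when token types come from configuration files. A small sketch building on the aliases the test above already relies on:

from pygments.token import String, string_to_tokentype

assert string_to_tokentype('String.Double') is String.Double
assert string_to_tokentype('Literal.String') is String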
Example 5: test_copying
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def test_copying():
    # Token instances are supposed to be singletons, so copying or even
    # deepcopying should return themselves
    t = token.String
    assert t is copy.copy(t)
    assert t is copy.deepcopy(t)
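One practical consequence of the singleton guarantee: token types survive copying with their identity intact, so lookups keyed on a token type keep working even across a deepcopy. A tiny illustration (the 'ansigreen' style string is just an example value):

import copy

from pygments import token

styles = {token.String: 'ansigreen'}
t = copy.deepcopy(token.String)
print(styles[t])  # 'ansigreen' -- deepcopy handed back the same object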
Example 6: test_basic
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def test_basic():
    expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
                (String, '"'), (Text, 'e\n')]
    assert list(MyLexer().get_tokens('a"bcd"e')) == expected
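MyLexer is defined elsewhere in the test module. A minimal RegexLexer that would produce exactly the expected token stream might look like this (an illustrative reconstruction, not the original definition):

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, String, Keyword


class MyLexer(RegexLexer):
    tokens = {
        'root': [
            # Quoted span: the quotes as String, the body as Keyword.
            (r'(")(.+?)(")', bygroups(String, Keyword, String)),
            (r'[^"]+', Text),
        ],
    }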
Example 7: string_rules
# Required module import: from pygments import token [as alias]
# Or: from pygments.token import String [as alias]
def string_rules(state):
    return [
        (r'(")((?:[^\r\n"\\]|(?:\\.))+)(")',
         bygroups(Text, String, Text), state),
        (r'(")((?:[^\r\n"\\]|(?:\\.))+)', bygroups(Text, String), state),
        (r"(')((?:[^\r\n'\\]|(?:\\.))+)(')",
         bygroups(Text, String, Text), state),
        (r"(')((?:[^\r\n'\\]|(?:\\.))+)", bygroups(Text, String), state),
        (r'([^\s\'\\]|(\\.))+', String, state)
    ]
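A factory like this keeps a lexer's token table compact: the same quoting rules can be reused in several states, each transitioning to whatever state is passed in. A hedged sketch of one way to wire it into a RegexLexer, reusing string_rules from above (the state layout here is hypothetical):

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, String


class ValueLexer(RegexLexer):
    tokens = {
        'root': [
            (r'[^\s=]+', Text),
            (r'=', Text, 'value'),  # an '=' introduces a value
            (r'\s+', Text),
        ],
        # '#pop' returns to 'root' once a single value has been consumed.
        'value': string_rules('#pop'),
    }


print(list(ValueLexer().get_tokens('name="va lue"')))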