本文整理匯總了Python中pygments.token.String方法的典型用法代碼示例。如果您正苦於以下問題:Python token.String方法的具體用法?Python token.String怎麽用?Python token.String使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類pygments.token
的用法示例。
在下文中一共展示了token.String方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: visit_ls
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def visit_ls(self, node, children):
    """Handle an `ls` command: list the children of the current context path.

    Writes one name per line (column-formatted and colorized when stdout is
    a TTY) to ``self.output`` and returns the visited parse *node* unchanged,
    as a parse-tree visitor should.

    Fix: the original loop reused the name ``node`` for each listed entry,
    shadowing the parameter, so ``return node`` leaked the last child entry
    instead of the parse node being visited.
    """
    path = urlparse(self.context_override.url).path
    path = filter(None, path.split('/'))
    nodes = self.context.root.ls(*path)
    if self.output.isatty():
        names = []
        for entry in nodes:
            # Directories are colorized as String, everything else as Name.
            token_type = String if entry.data.get('type') == 'dir' else Name
            names.append(self._colorize(entry.name, token_type))
        lines = list(colformat(list(names)))
    else:
        # Non-interactive output: plain names, no color, no columns.
        lines = [n.name for n in nodes]
    if lines:
        self.output.write('\n'.join(lines))
    return node
示例2: test_should_cope_with_multiline_comments
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def test_should_cope_with_multiline_comments(lexer):
    """A triple-quoted span must come back as one single String token."""
    code = u'"""\nthis\nis\na\ncomment"""'
    expected = [
        (String, u'"""\nthis\nis\na\ncomment"""'),
        (Text, u'\n'),
    ]
    assert list(lexer.get_tokens(code)) == expected
示例3: test_tokentype
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def test_tokentype():
    """String splits into its full ancestry and is a _TokenType instance."""
    string_type = token.String
    ancestry = string_type.split()
    assert ancestry == [token.Token, token.Literal, token.String]
    assert type(string_type) is token._TokenType
示例4: test_functions
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def test_functions():
    """Check is_token_subtype and string_to_tokentype behavior."""
    # A token type is a subtype of itself and of its parent, but a
    # parent is never a subtype of its child.
    subtype_cases = [
        (token.String, token.String, True),
        (token.String, token.Literal, True),
        (token.Literal, token.String, False),
    ]
    for child, parent, expected in subtype_cases:
        assert token.is_token_subtype(child, parent) == expected
    # string_to_tokentype is the identity on token types, maps the
    # empty string to the root Token, and resolves dotted-name strings.
    assert token.string_to_tokentype(token.String) is token.String
    assert token.string_to_tokentype('') is token.Token
    assert token.string_to_tokentype('String') is token.String
示例5: test_copying
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def test_copying():
    """Token types are singletons: copying must hand back the same object."""
    original = token.String
    for copier in (copy.copy, copy.deepcopy):
        assert copier(original) is original
示例6: test_basic
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def test_basic():
    """MyLexer marks the quotes as String and the quoted word as Keyword."""
    expected = [
        (Text, 'a'),
        (String, '"'),
        (Keyword, 'bcd'),
        (String, '"'),
        (Text, 'e\n'),
    ]
    produced = list(MyLexer().get_tokens('a"bcd"e'))
    assert produced == expected
示例7: string_rules
# 需要導入模塊: from pygments import token [as 別名]
# 或者: from pygments.token import String [as 別名]
def string_rules(state):
    """Build lexer rules for quoted strings, transitioning to *state*.

    For each quote character this emits a rule for a properly terminated
    string and one for a string missing its closing quote; a final rule
    catches bare (unquoted) string runs.
    """
    rules = []
    for quote in ('"', "'"):
        open_close = '(' + quote + ')'
        # One or more chars that are not a newline, the quote, or a
        # backslash — or any backslash-escaped character.
        body = r'((?:[^\r\n' + quote + r'\\]|(?:\\.))+)'
        # Terminated string: quote, body, quote.
        rules.append((open_close + body + open_close,
                      bygroups(Text, String, Text), state))
        # Unterminated string: quote and body, no closing quote.
        rules.append((open_close + body, bygroups(Text, String), state))
    # Bare run: anything that is not whitespace, a single quote, or a
    # backslash — or an escaped character.
    rules.append((r'([^\s\'\\]|(\\.))+', String, state))
    return rules