本文整理汇总了Python中pygments.token.Error方法的典型用法代码示例。如果您正苦于以下问题:Python token.Error方法的具体用法?Python token.Error怎么用?Python token.Error使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pygments.token的用法示例。
在下文中一共展示了token.Error方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: analyze
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def analyze(self, text):
    """Parse ``text`` into positional and keyword arguments.

    Returns ``(args, kwargs)`` where ``args`` collects the values of
    Keyword/Number/String tokens (Keyword values converted to bool,
    Number values converted to int) and ``kwargs`` maps each Name token's
    value to the value that follows its '=' operator.  Returns
    ``(None, None)`` when lexing ``text`` produced any Error token.
    """
    # Bail out early if the lexer flagged any part of the input as an error.
    if any(token is Error for token, value in self.get_tokens(text)):
        return None, None
    tokens, args, kwargs = self.get_tokens(text), [], {}
    for token, value in tokens:
        # BUGFIX: the conversions previously assigned to ``token`` instead
        # of ``value`` — that made the membership test below fail for
        # Keyword tokens (silently dropping them) and made ``int()`` raise
        # TypeError on the Number token type itself.
        if token is Keyword:
            value = value in ('true', 'True')
        elif token is Number:
            value = int(value)
        if token in (Keyword, Number, String):
            args.append(value)
        if token is Name:
            next(tokens)  # skip the Operator '=' between name and value
            kwargs[value] = next(tokens)[1]
    return args, kwargs
示例2: test_can_recover_after_unterminated_string
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def test_can_recover_after_unterminated_string(lexer):
    # The newline inside the unterminated double-quoted string comes out
    # as an Error token, and lexing resumes normally on the next line.
    expected = (
        (String.Double, '"'),
        (String.Double, 'x'),
        (Error, '\n'),
        (Name, 'x'),
    )
    assert_tokens_match(lexer, '"x\nx', expected)
示例3: test_can_recover_from_invalid_character
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def test_can_recover_from_invalid_character(lexer):
    # Everything from the invalid ';' through the newline is emitted as a
    # single Error token; the lexer picks up again with the next name.
    expected = ((Name, 'a'), (Error, ';bc\n'), (Name, 'd'))
    assert_tokens_match(lexer, 'a;bc\nd', expected)
示例4: test_second_path
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def test_second_path(self):
    # 'cd' takes one String argument; the trailing 'v1' is emitted as
    # per-character Error tokens.
    expected = [
        (Keyword, 'cd'),
        (String, 'api'),
        (Error, 'v'),
        (Error, '1'),
    ]
    self.assertEqual(self.get_tokens(r"cd api v1"), expected)
示例5: test_invalid_type
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def test_invalid_type(self):
    # '-a foo' is not accepted after 'rm': every remaining character is
    # emitted as an individual Error token.
    expected = [
        (Keyword, 'rm'),
        (Error, '-'),
        (Error, 'a'),
        (Error, 'f'),
        (Error, 'o'),
        (Error, 'o'),
    ]
    self.assertEqual(self.get_tokens('rm -a foo'), expected)
示例6: get_tokens_unprocessed
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def get_tokens_unprocessed(self, text, stack=('root',)):
    """
    Split ``text`` into ``(index, tokentype, value)`` tuples.

    ``stack`` is the initial state stack (default: ``('root',)``).
    Lexing resets to the ``root`` state at every newline, and any
    character no rule matches is emitted as a single ``Error`` token.
    """
    pos = 0
    tokendefs = self._tokens
    # Mutable copy of the state stack; the top of the stack selects the
    # rule set tried on each iteration.
    statestack = list(stack)
    statetokens = tokendefs[statestack[-1]]
    while 1:
        for rexmatch, action, new_state in statetokens:
            m = rexmatch(text, pos)
            if m:
                if action is not None:
                    if type(action) is _TokenType:
                        # Plain token type: emit the matched text directly.
                        yield pos, action, m.group()
                    else:
                        # Callback action: it yields the tuples itself.
                        for item in action(self, m):
                            yield item
                pos = m.end()
                if new_state is not None:
                    # state transition
                    if isinstance(new_state, tuple):
                        # A tuple applies several transitions in order.
                        for state in new_state:
                            if state == '#pop':
                                # NOTE(review): no guard here — '#pop' on a
                                # one-element stack would empty it; assumes
                                # well-formed token definitions.
                                statestack.pop()
                            elif state == '#push':
                                statestack.append(statestack[-1])
                            else:
                                statestack.append(state)
                    elif isinstance(new_state, int):
                        # pop: a negative int truncates the stack, dropping
                        # that many states (e.g. -1 pops one state).
                        del statestack[new_state:]
                    elif new_state == '#push':
                        statestack.append(statestack[-1])
                    else:
                        assert False, "wrong state def: %r" % new_state
                    statetokens = tokendefs[statestack[-1]]
                break
        else:
            # No rule in the current state matched at ``pos``.
            try:
                if text[pos] == '\n':
                    # at EOL, reset state to "root"
                    statestack = ['root']
                    statetokens = tokendefs['root']
                    yield pos, Text, u'\n'
                    pos += 1
                    continue
                yield pos, Error, text[pos]
                pos += 1
            except IndexError:
                # End of input reached.
                break
示例7: get_tokens_unprocessed
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def get_tokens_unprocessed(self, text, stack=('root',)):
    """
    Split ``text`` into ``(index, tokentype, value)`` tuples.

    ``stack`` is the initial state stack (default: ``('root',)``).
    Lexing resets to the ``root`` state at every newline; characters no
    rule matches are emitted one by one as ``Error`` tokens.
    """
    pos = 0
    tokendefs = self._tokens
    # Working copy of the state stack; its top picks the active rule set.
    statestack = list(stack)
    statetokens = tokendefs[statestack[-1]]
    while 1:
        for rexmatch, action, new_state in statetokens:
            m = rexmatch(text, pos)
            if m:
                if action is not None:
                    if type(action) is _TokenType:
                        # A bare token type is emitted with the match.
                        yield pos, action, m.group()
                    else:
                        # Otherwise ``action`` is a callback producing
                        # the tuples itself.
                        for item in action(self, m):
                            yield item
                pos = m.end()
                if new_state is not None:
                    # state transition
                    if isinstance(new_state, tuple):
                        # Apply each transition of the tuple in order.
                        for state in new_state:
                            if state == '#pop':
                                # NOTE(review): unguarded pop — assumes the
                                # token definitions never pop 'root'.
                                statestack.pop()
                            elif state == '#push':
                                statestack.append(statestack[-1])
                            else:
                                statestack.append(state)
                    elif isinstance(new_state, int):
                        # pop: truncate the stack by the (negative) count.
                        del statestack[new_state:]
                    elif new_state == '#push':
                        statestack.append(statestack[-1])
                    else:
                        assert False, "wrong state def: %r" % new_state
                    statetokens = tokendefs[statestack[-1]]
                break
        else:
            # We are here only if all state tokens have been considered
            # and there was not a match on any of them.
            try:
                if text[pos] == '\n':
                    # at EOL, reset state to "root"
                    statestack = ['root']
                    statetokens = tokendefs['root']
                    yield pos, Text, u'\n'
                    pos += 1
                    continue
                yield pos, Error, text[pos]
                pos += 1
            except IndexError:
                # Ran past the end of ``text`` — we're done.
                break
示例8: get_tokens_unprocessed
# 需要导入模块: from pygments import token [as 别名]
# 或者: from pygments.token import Error [as 别名]
def get_tokens_unprocessed(self, text, stack=('root',)):
    """
    Split ``text`` into ``(index, tokentype, value)`` tuples.

    ``stack`` is the initial state stack (default: ``('root',)``).
    This variant hardens the stack operations: '#pop' and integer pops
    can never empty the stack, so malformed input cannot crash the lexer.
    Lexing resets to ``root`` at every newline; unmatched characters are
    emitted as ``Error`` tokens.
    """
    pos = 0
    tokendefs = self._tokens
    # Working copy of the state stack; its top selects the active rules.
    statestack = list(stack)
    statetokens = tokendefs[statestack[-1]]
    while 1:
        for rexmatch, action, new_state in statetokens:
            m = rexmatch(text, pos)
            if m:
                if action is not None:
                    if type(action) is _TokenType:
                        # Bare token type: emit the matched text directly.
                        yield pos, action, m.group()
                    else:
                        # Callback action: it yields the tuples itself.
                        for item in action(self, m):
                            yield item
                pos = m.end()
                if new_state is not None:
                    # state transition
                    if isinstance(new_state, tuple):
                        # Apply each transition of the tuple in order.
                        for state in new_state:
                            if state == '#pop':
                                # Guarded pop: never remove the last
                                # (bottom) state from the stack.
                                if len(statestack) > 1:
                                    statestack.pop()
                            elif state == '#push':
                                statestack.append(statestack[-1])
                            else:
                                statestack.append(state)
                    elif isinstance(new_state, int):
                        # pop, but keep at least one state on the stack
                        # (random code leading to unexpected pops should
                        # not allow exceptions)
                        if abs(new_state) >= len(statestack):
                            # Would pop everything: keep only the bottom state.
                            del statestack[1:]
                        else:
                            del statestack[new_state:]
                    elif new_state == '#push':
                        statestack.append(statestack[-1])
                    else:
                        assert False, "wrong state def: %r" % new_state
                    statetokens = tokendefs[statestack[-1]]
                break
        else:
            # We are here only if all state tokens have been considered
            # and there was not a match on any of them.
            try:
                if text[pos] == '\n':
                    # at EOL, reset state to "root"
                    statestack = ['root']
                    statetokens = tokendefs['root']
                    yield pos, Text, u'\n'
                    pos += 1
                    continue
                yield pos, Error, text[pos]
                pos += 1
            except IndexError:
                # Ran past the end of ``text`` — we're done.
                break