本文整理汇总了Python中pylint.testutils._tokenize_str函数的典型用法代码示例。如果您正苦于以下问题:Python _tokenize_str函数的具体用法?Python _tokenize_str怎么用?Python _tokenize_str使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了_tokenize_str函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testTrailingCommaGood
def testTrailingCommaGood(self):
    """A trailing comma before the closing paren must not be flagged,
    with or without a space, and regardless of the no-space-check config."""
    for sample in ('(a, )\n', '(a,)\n'):
        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str(sample))
    # Clearing every no-space exemption must not change the verdict.
    self.checker.config.no_space_check = []
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('(a,)\n'))
示例2: testCheckKeywordParensHandlesUnnecessaryParens
def testCheckKeywordParensHandlesUnnecessaryParens(self):
    """Redundant parentheses after a keyword raise superfluous-parens.

    Each case is (keyword reported in the message, source line, token
    offset at which the checker starts scanning).
    """
    self.checker._keywords_with_parens = set()
    cases = [
        ('if', 'if (foo):', 0),
        ('if', 'if ((foo, bar)):', 0),
        ('if', 'if (foo(bar)):', 0),
        ('return', 'return ((x for x in x))', 0),
        ('not', 'not (foo)', 0),
        ('not', 'if not (foo):', 1),
        ('if', 'if (not (foo)):', 0),
        ('not', 'if (not (foo)):', 2),
        ('for', 'for (x) in (1, 2, 3):', 0),
        ('if', 'if (1) in (1, 2, 3):', 0),
    ]
    for keyword, code, offset in cases:
        expected = Message('superfluous-parens', line=1, args=keyword)
        with self.assertAddsMessages(expected):
            self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
示例3: test_non_ascii_bytes_literal
def test_non_ascii_bytes_literal(self):
    """Only bytes literals containing non-ASCII text are reported."""
    self._test_token_message('b"测试"', 'non-ascii-bytes-literal')
    # Unicode strings and escape-only bytes must stay silent.
    for clean in ("测试", u"测试", u'abcdef', b'\x80'):
        with self.assertNoMessages():
            self.checker.process_tokens(testutils._tokenize_str(clean))
示例4: test_other_present_codetag
def test_other_present_codetag(self):
    """A configured extra codetag (CODETAG) is reported under 'fixme'."""
    snippet = """a = 1
# CODETAG
# FIXME
"""
    expected = Message(msg_id="fixme", line=2, args="CODETAG")
    with self.assertAddsMessages(expected):
        self.checker.process_tokens(_tokenize_str(snippet))
示例5: testValidTypingAnnotationEllipses
def testValidTypingAnnotationEllipses(self):
    """An ellipsis inside a typing annotation must not produce a
    false-positive bad-whitespace message."""
    annotated = "def foo(t: Tuple[str, ...] = None):\n"
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str(annotated))
示例6: test_check_bad_coment_custom_suggestion_count
def test_check_bad_coment_custom_suggestion_count(self):
    """The spelling message honours a custom suggestion count."""
    suggestions = self._get_msg_suggestions('coment', count=2)
    expected = Message(
        'wrong-spelling-in-comment',
        line=1,
        args=('coment', '# bad coment', ' ^^^^^^', suggestions),
    )
    with self.assertAddsMessages(expected):
        self.checker.process_tokens(_tokenize_str("# bad coment"))
示例7: testCheckKeywordParensHandlesUnnecessaryParens
def testCheckKeywordParensHandlesUnnecessaryParens(self):
    """Each keyword wrapped in redundant parens yields exactly one
    superfluous-parens message naming that keyword."""
    self.checker._keywords_with_parens = set()
    # (keyword in the expected message, source, starting token offset)
    cases = (
        ("if", "if (foo):", 0),
        ("if", "if ((foo, bar)):", 0),
        ("if", "if (foo(bar)):", 0),
        ("return", "return ((x for x in x))", 0),
        ("not", "not (foo)", 0),
        ("not", "if not (foo):", 1),
        ("if", "if (not (foo)):", 0),
        ("not", "if (not (foo)):", 2),
        ("for", "for (x) in (1, 2, 3):", 0),
        ("if", "if (1) in (1, 2, 3):", 0),
    )
    for keyword, source, offset in cases:
        with self.assertAddsMessages(
            Message("superfluous-parens", line=1, args=keyword)
        ):
            self.checker._check_keyword_parentheses(
                _tokenize_str(source), offset
            )
示例8: testCheckIfArgsAreNotUnicode
def testCheckIfArgsAreNotUnicode(self):
    """Message args must be native str even when the source is unicode."""
    self.checker._keywords_with_parens = set()
    for source, offset in [(u'if (foo):', 0), (u'assert (1 == 1)', 0)]:
        self.checker._check_keyword_parentheses(_tokenize_str(source), offset)
        emitted = self.linter.release_messages()
        assert isinstance(emitted[-1].args, str)
示例9: test_absent_codetag
def test_absent_codetag(self):
    """Comments shaped like these must not trigger any codetag message."""
    snippet = """a = 1
# FIXME # FIXME
# TODO # TODO
# XXX # XXX
"""
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str(snippet))
示例10: test_fixme_with_message
def test_fixme_with_message(self):
    """The text following FIXME is carried in the message args."""
    snippet = """a = 1
# FIXME message
"""
    expected = Message(msg_id="fixme", line=2, args="FIXME message")
    with self.assertAddsMessages(expected):
        self.checker.process_tokens(_tokenize_str(snippet))
示例11: testFuturePrintStatementWithoutParensWarning
def testFuturePrintStatementWithoutParensWarning(self):
    """print() is clean when print_function is imported from __future__."""
    source = """from __future__ import print_function
print('Hello world!')
"""
    module = astroid.parse(source)
    with self.assertNoMessages():
        self.checker.process_module(module)
        self.checker.process_tokens(_tokenize_str(source))
示例12: testEmptyLines
def testEmptyLines(self):
    """Whitespace-only lines are trailing-whitespace unless the
    'empty-line' exemption is configured."""
    self.checker.config.no_space_check = []
    # Space, tab and vertical tab on an otherwise empty line are flagged...
    for filler in (' ', '\t', '\v'):
        with self.assertAddsMessages(Message('trailing-whitespace', line=2)):
            self.checker.process_tokens(
                _tokenize_str('a = 1\n' + filler + '\nb = 2\n'))
    # ...while a form feed is explicitly tolerated.
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('a = 1\n\f\nb = 2\n'))
    # With 'empty-line' exempted, the same inputs all pass.
    self.checker.config.no_space_check = ['empty-line']
    for filler in (' ', '\t', '\v'):
        with self.assertNoMessages():
            self.checker.process_tokens(
                _tokenize_str('a = 1\n' + filler + '\nb = 2\n'))
示例13: testOperatorSpacingGood
def testOperatorSpacingGood(self):
    """Correctly spaced operators must not raise any message.

    Bug fix: the original list had no commas between its three string
    literals, so implicit string concatenation merged the three intended
    cases into a single string and only one tokenization was exercised.
    The commas restore three independent cases; each still produces no
    messages, so the asserted outcome is unchanged.
    """
    good_cases = [
        'a = b\n',
        'a < b\n',
        'a\n< b\n',
    ]
    with self.assertNoMessages():
        for code in good_cases:
            self.checker.process_tokens(_tokenize_str(code))
示例14: test_old_octal_literal
def test_old_octal_literal(self):
    """Leading-zero literals with octal digits raise old-octal-literal."""
    for octal in ("045", "055", "075", "077", "076543"):
        self._test_token_message(octal, "old-octal-literal")
    # Literals without a leading zero, or with non-octal digits, are clean.
    for candidate in ("45", "00", "085", "08", "1"):
        with self.assertNoMessages():
            self.checker.process_tokens(testutils._tokenize_str(candidate))
示例15: testComma
def testComma(self):
    """A space before a comma raises bad-whitespace with a caret marker."""
    expected = Message(
        "bad-whitespace",
        line=1,
        args=("No", "allowed", "before", "comma", "(a , b)\n ^"),
    )
    with self.assertAddsMessages(expected):
        self.checker.process_tokens(_tokenize_str("(a , b)\n"))