This article collects typical usage examples of the captured_output function from the Python module test_utils. If you are wondering what captured_output does and how to use it in practice, the examples below may help. A total of 15 code examples of the captured_output function are shown, sorted by popularity by default.
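The examples never show the helper itself. As a rough point of reference only, a captured_output context manager of this kind is usually a thin wrapper that temporarily swaps sys.stdout and sys.stderr for StringIO buffers. The following is a minimal sketch of that pattern under this assumption, not the actual test_utils implementation:

# Minimal sketch (assumption): the real test_utils helper may differ in details.
from contextlib import contextmanager
from io import StringIO
import sys

@contextmanager
def captured_output():
    """Temporarily redirect sys.stdout and sys.stderr into StringIO buffers."""
    new_out, new_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield new_out, new_err  # unpacked as (out, err) in the tests below
    finally:
        sys.stdout, sys.stderr = old_out, old_err

Every example below follows the same pattern: run the code under test inside "with captured_output() as (out, err):" and then assert on out.getvalue() or err.getvalue().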
Example 1: test_log_results
def test_log_results(self):
    """Create log directory if it doesn't exist and write the log file."""

    def assert_log_written(out, log_file, content):
        self.assertEquals(out.getvalue(),
                          'Logging results to: ' + log_file + '\n')
        with open(log_file, 'rU') as f:
            text = f.read()
        self.assertEquals(text, "formatted output")

    try:
        import tempfile  # setUp
        temp_dir = tempfile.mkdtemp()
        log_dir = os.path.join(temp_dir, 'sub-dir/')
        driver = BenchmarkDriver(Stub(), tests=[''])

        self.assertFalse(os.path.exists(log_dir))
        content = "formatted output"
        log_file = os.path.join(log_dir, '1.log')
        with captured_output() as (out, _):
            driver.log_results(content, log_file=log_file)
        assert_log_written(out, log_file, content)

        self.assertTrue(os.path.exists(log_dir))
        log_file = os.path.join(log_dir, '2.log')
        with captured_output() as (out, _):
            driver.log_results(content, log_file=log_file)
        assert_log_written(out, log_file, content)
    finally:
        import shutil  # tearDown
        shutil.rmtree(temp_dir)
Example 2: test_run_and_log
def test_run_and_log(self):
    def mock_run(test):
        self.assertEquals(test, 'b1')
        return PerformanceTestResult(
            '3,b1,5,101,1,1,1,1,888'.split(','),
            quantiles=True, delta=True, memory=True)
    driver = BenchmarkDriver(tests=['b1'], args=Stub(output_dir=None))
    driver.run_independent_samples = mock_run  # patching

    with captured_output() as (out, _):
        log = driver.run_and_log()

    header = '#,TEST,SAMPLES,MIN(μs),Q1(μs),MEDIAN(μs),Q3(μs),MAX(μs),' + \
        'MAX_RSS(B)\n'
    csv_log = '3,b1,5,101,102,103,104,105,888\n'
    self.assertEquals(log, None)
    self.assertEquals(
        out.getvalue(),
        header +
        csv_log +
        '\n' +
        'Total performance tests executed: 1\n')

    with captured_output() as (out, _):
        log = driver.run_and_log(csv_console=False)

    self.assertEquals(log, header + csv_log)
    self.assertEquals(
        out.getvalue(),
        ' # TEST SAMPLES MIN(μs) Q1(μs)' +
        ' MEDIAN(μs) Q3(μs) MAX(μs) MAX_RSS(B)\n' +
        ' 3 b1 5 101 102' +
        ' 103 104 105 888\n' +
        '\n' +
        'Total performance tests executed: 1\n')
Example 3: test_benchmark_name_matches_naming_conventions
def test_benchmark_name_matches_naming_conventions(self):
    driver = BenchmarkDriverMock(tests=[
        'BenchmarkName', 'CapitalWordsConvention', 'ABBRName',
        'TooManyCamelCaseHumps',
        'Existential.Array.method.1x.Val4',
        'Flatten.Array.Array.Str.for-in.reserved',
        'Flatten.Array.String?.as!.NSArray',
        'wrongCase', 'Wrong_convention', 'Illegal._$%[]<>{}@^()'])
    with captured_output() as (out, _):
        doctor = BenchmarkDoctor(self.args, driver)
        doctor.check()
    output = out.getvalue()

    self.assertIn('naming: ', output)
    self.assertNotIn('BenchmarkName', output)
    self.assertNotIn('CapitalWordsConvention', output)
    self.assertNotIn('ABBRName', output)
    self.assertNotIn('Existential.Array.method.1x.Val4', output)
    self.assertNotIn('Flatten.Array.Array.Str.for-in.reserved', output)
    self.assertNotIn('Flatten.Array.String?.as!.NSArray', output)
    err_msg = " name doesn't conform to benchmark naming convention."
    self.assert_contains(
        ["'wrongCase'" + err_msg, "'Wrong_convention'" + err_msg,
         "'Illegal._$%[]<>{}@^()'" + err_msg], self.logs['error'])
    self.assert_contains(
        ["'TooManyCamelCaseHumps' name is composed of 5 words."],
        self.logs['warning'])
    self.assert_contains(
        ['See http://bit.ly/BenchmarkNaming'], self.logs['info'])
    self.assert_contains(
        ["Split 'TooManyCamelCaseHumps' name into dot-separated groups "
         "and variants. See http://bit.ly/BenchmarkNaming"],
        self.logs['info'])
Example 4: test_measure_10_independent_1s_benchmark_series
def test_measure_10_independent_1s_benchmark_series(self):
    """Measurement strategy takes 5 i2 and 5 i1 series.

    Num-samples for Benchmark Driver are calibrated to be powers of two
    that take measurements for approximately 1s, based on short initial
    runtime sampling. Capped at 200 samples.
    """
    driver = BenchmarkDriverMock(tests=['B1'], responses=([
        # calibration run, returns a stand-in for PerformanceTestResult
        (_run('B1', num_samples=3, num_iters=1,
              verbose=True), _PTR(min=300))] +
        # 5x i1 series; with a 300 μs runtime it's possible to take 4096
        # adjusted samples/s, but num-samples should be capped at 200
        ([(_run('B1', num_samples=200, num_iters=1,
                verbose=True, measure_memory=True), _PTR(min=300))] * 5) +
        # 5x i2 series
        ([(_run('B1', num_samples=200, num_iters=2,
                verbose=True, measure_memory=True), _PTR(min=300))] * 5)
    ))
    doctor = BenchmarkDoctor(self.args, driver)
    with captured_output() as (out, _):
        measurements = doctor.measure('B1')

    driver.assert_called_all_expected()
    self.assert_contains(
        ['name',
         'B1 O i1a', 'B1 O i1b', 'B1 O i1c', 'B1 O i1d', 'B1 O i1e',
         'B1 O i2a', 'B1 O i2b', 'B1 O i2c', 'B1 O i2d', 'B1 O i2e'],
        measurements.keys())
    self.assertEquals(measurements['name'], 'B1')
    self.assert_contains(
        ['Calibrating num-samples for B1:',
         'Runtime 300 μs yields 4096 adjusted samples per second.',
         'Measuring B1, 5 x i1 (200 samples), 5 x i2 (200 samples)'],
        self.logs['debug'])
Example 5: test_benchmark_has_no_significant_setup_overhead
def test_benchmark_has_no_significant_setup_overhead(self):
    with captured_output() as (out, _):
        doctor = BenchmarkDoctor(self.args, BenchmarkDriverMock([]))
        doctor.analyze({
            'name': 'NoOverhead',  # not 'significant' enough
            # Based on DropFirstArray a10/e10: overhead 3.7% (6 μs)
            'NoOverhead O i1a': _PTR(min=162),
            'NoOverhead O i2a': _PTR(min=159)})
        doctor.analyze({
            'name': 'SO',  # Setup Overhead
            # Based on SuffixArrayLazy a10/e10: overhead 5.8% (4 μs)
            'SO O i1a': _PTR(min=69), 'SO O i1b': _PTR(min=70),
            'SO O i2a': _PTR(min=67), 'SO O i2b': _PTR(min=68)})
        doctor.analyze({'name': 'Zero', 'Zero O i1a': _PTR(min=0),
                        'Zero O i2a': _PTR(min=0)})
    output = out.getvalue()

    self.assertIn('runtime: ', output)
    self.assertNotIn('NoOverhead', output)
    self.assertNotIn('ZeroRuntime', output)
    self.assert_contains(
        ["'SO' has setup overhead of 4 μs (5.8%)."],
        self.logs['error'])
    self.assert_contains(
        ["Move initialization of benchmark data to the `setUpFunction` "
         "registered in `BenchmarkInfo`."], self.logs['info'])
Example 6: test_supports_verbose_output
def test_supports_verbose_output(self):
    driver = BenchmarkDriverMock(tests=['B1', 'B2'])
    driver.verbose = True
    self.args.verbose = True
    with captured_output() as (out, _):
        BenchmarkDoctor(self.args, driver)
    self.assert_contains(['Checking tests: B1, B2'], out.getvalue())
Example 7: test_benchmark_runtime_range
def test_benchmark_runtime_range(self):
    """Optimized benchmark should have runtime between 20 μs and 1000 μs.

    Even on a calm machine, a benchmark with a runtime of 2500 μs has a 1:4
    chance of being interrupted in the middle of a measurement due to the
    elapsed 10 ms quantum used by the macOS scheduler. The Linux scheduler's
    quantum is 6 ms. The driver yielding the process before the 10 ms quantum
    elapses helped a lot, but benchmarks with runtimes under 1 ms usually
    exhibit a strong mode which is best for accurate performance
    characterization. To minimize the number of involuntary context switches
    that corrupt our measurements, we should strive to stay in the
    microbenchmark range.

    Warn about longer runtimes. Runtimes over 10 ms are an error.
    """
    def measurements(name, runtime):
        return {'name': name,
                name + ' O i1a': _PTR(min=runtime + 2),
                name + ' O i2a': _PTR(min=runtime)}

    with captured_output() as (out, _):
        doctor = BenchmarkDoctor(self.args, BenchmarkDriverMock([]))
        doctor.analyze(measurements('Sylph', 0))
        doctor.analyze(measurements('Unicorn', 3))
        doctor.analyze(measurements('Cheetah', 200))
        doctor.analyze(measurements('Hare', 1001))
        doctor.analyze(measurements('Tortoise', 500000))
        doctor.analyze({'name': 'OverheadTurtle',
                        'OverheadTurtle O i1a': _PTR(min=800000),
                        'OverheadTurtle O i2a': _PTR(min=700000)})
    output = out.getvalue()

    self.assertIn('runtime: ', output)
    self.assertNotIn('Cheetah', output)

    self.assert_contains(["'Sylph' execution took 0 μs."],
                         self.logs['error'])
    self.assert_contains(
        ["Ensure the workload of 'Sylph' has a properly measurable size"
         " (runtime > 20 μs) and is not eliminated by the compiler (use "
         "`blackHole` function if necessary)."],
        self.logs['info'])
    self.assert_contains(["'Unicorn' execution took 3 μs."],
                         self.logs['warning'])
    self.assert_contains(
        ["Increase the workload of 'Unicorn' to be more than 20 μs."],
        self.logs['info'])
    self.assert_contains(["'Hare' execution took at least 1001 μs."],
                         self.logs['warning'])
    self.assert_contains(
        ["Decrease the workload of 'Hare' by a factor of 2 (10), "
         "to be less than 1000 μs."], self.logs['info'])
    self.assert_contains(
        ["'Tortoise' execution took at least 500000 μs."],
        self.logs['error'])
    self.assert_contains(
        ["Decrease the workload of 'Tortoise' by a factor of 512 (1000), "
         "to be less than 1000 μs."], self.logs['info'])
    self.assert_contains(
        ["'OverheadTurtle' execution took at least 600000 μs"
         " (excluding the setup overhead)."],
        self.logs['error'])
Example 8: test_uses_optional_markdown_report_formatter
def test_uses_optional_markdown_report_formatter(self):
    self.args.markdown = True
    with captured_output() as (_, _):
        doc = BenchmarkDoctor(self.args, BenchmarkDriverMock(tests=['B1']))
    self.assertTrue(doc)
    console_handler = logging.getLogger('BenchmarkDoctor').handlers[1]
    self.assertTrue(isinstance(console_handler, MarkdownReportHandler))
Example 9: test_required_input_arguments
def test_required_input_arguments(self):
    with captured_output() as (_, err):
        self.assertRaises(SystemExit, parse_args, [])
    self.assertIn('usage: compare_perf_tests.py', err.getvalue())

    args = parse_args(self.required)
    self.assertEquals(args.old_file, 'old.log')
    self.assertEquals(args.new_file, 'new.log')
Example 10: test_check_flags_are_mutually_exclusive
def test_check_flags_are_mutually_exclusive(self):
    with captured_output() as (out, err):
        self.assertRaises(SystemExit,
                          parse_args, ['check', '-md', '-v'])
    self.assert_contains(
        ['error:', 'argument -v/--verbose: ' +
         'not allowed with argument -md/--markdown'],
        err.getvalue())
Example 11: test_run_benchmarks_and_filters_are_exclusive
def test_run_benchmarks_and_filters_are_exclusive(self):
    with captured_output() as (_, err):
        self.assertRaises(SystemExit,
                          parse_args, 'run -f Filter1 Benchmark1'.split())
    self.assert_contains(
        ['error',
         'argument BENCHMARK: not allowed with argument -f/--filter'],
        err.getvalue())
Example 12: test_iterations
def test_iterations(self):
    self.assertEquals(parse_args(['run']).iterations, 1)
    self.assertEquals(parse_args(['run', '-i', '3']).iterations, 3)

    with captured_output() as (out, err):
        self.assertRaises(SystemExit,
                          parse_args, ['run', '-i', '-3'])
    self.assert_contains(
        ['error:',
         "argument -i/--iterations: invalid positive_int value: '-3'"],
        err.getvalue())
Example 13: test_benchmark_has_constant_memory_use
def test_benchmark_has_constant_memory_use(self):
    """Benchmark's memory footprint must not vary with num-iters."""
    with captured_output() as (out, _):
        doctor = BenchmarkDoctor(self.args, BenchmarkDriverMock([]))
        doctor.analyze({
            # The threshold of 15 pages was estimated from previous
            # measurements. The normal range should probably be approximated
            # by a function instead of a simple constant.
            # TODO: re-evaluate normal range from whole SBS
            'name': 'ConstantMemory',
            'ConstantMemory O i1a': _PTR(mem_pages=1460),
            'ConstantMemory O i2a': _PTR(mem_pages=(1460 + 15))})
        doctor.analyze({
            'name': 'VariableMemory',  # ObserverForwardStruct
            'VariableMemory O i1a': _PTR(mem_pages=1460),
            'VariableMemory O i1b': _PTR(mem_pages=1472),
            # i2 series start at 290 pages higher
            'VariableMemory O i2a': _PTR(mem_pages=1750),
            'VariableMemory O i2b': _PTR(mem_pages=1752)})
        measurements = dict([
            ('HighVariance O i{0}{1}'.format(num_iters, suffix),
             _PTR(mem_pages=num_pages))
            for num_iters, pages in [
                (1, [6200, 5943, 4818, 5612, 5469]),
                (2, [6244, 5832, 4674, 5176, 5490])]
            for num_pages, suffix in zip(pages, list('abcde'))])
        measurements['name'] = 'HighVariance'  # Array2D
        doctor.analyze(measurements)
    output = out.getvalue()

    self.assertIn('memory: ', output)
    self.assertNotIn('ConstantMemory', output)
    self.assert_contains(
        ["'VariableMemory' varies the memory footprint of the base "
         "workload depending on the `num-iters`."],
        self.logs['error'])
    self.assert_contains(
        ["'VariableMemory' "
         "mem_pages [i1, i2]: min=[1460, 1750] 𝚫=290 R=[12, 2]"],
        self.logs['info'])
    self.assert_contains(
        ["'HighVariance' has very wide range of memory used between "
         "independent, repeated measurements."],
        self.logs['warning'])
    self.assert_contains(
        ["'HighVariance' "
         "mem_pages [i1, i2]: min=[4818, 4674] 𝚫=144 R=[1382, 1570]"],
        self.logs['info'])
Example 14: test_format_argument
def test_format_argument(self):
    self.assertEquals(parse_args(self.required).format, 'markdown')
    self.assertEquals(
        parse_args(self.required + ['--format', 'markdown']).format,
        'markdown')
    self.assertEquals(
        parse_args(self.required + ['--format', 'git']).format, 'git')
    self.assertEquals(
        parse_args(self.required + ['--format', 'html']).format, 'html')

    with captured_output() as (_, err):
        self.assertRaises(SystemExit, parse_args,
                          self.required + ['--format', 'bogus'])
    self.assertIn("error: argument --format: invalid choice: 'bogus' "
                  "(choose from 'markdown', 'git', 'html')",
                  err.getvalue())
Example 15: test_benchmark_name_is_at_most_40_chars_long
def test_benchmark_name_is_at_most_40_chars_long(self):
    driver = BenchmarkDriverMock(tests=[
        'BenchmarkName',
        'ThisTestNameIsTooLongAndCausesOverflowsInReports'])
    with captured_output() as (out, _):
        doctor = BenchmarkDoctor(self.args, driver)
        doctor.check()
    output = out.getvalue()

    self.assertIn('naming: ', output)
    self.assertNotIn('BenchmarkName', output)
    self.assert_contains(
        ["'ThisTestNameIsTooLongAndCausesOverflowsInReports' name is "
         "48 characters long."], self.logs['error'])
    self.assert_contains(
        ["Benchmark name should not be longer than 40 characters."],
        self.logs['info'])