This article collects typical usage examples of the Python class webkitpy.performance_tests.perftest.PerfTest. If you have been wondering what PerfTest does and how to use it, the curated examples below should help.
The following shows 14 code examples of the PerfTest class, sorted by popularity by default.
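Most of the examples share one pattern: construct a PerfTest, feed it a DriverOutput, and inspect the parsed metrics. As a minimal sketch of that pattern (assuming a webkitpy checkout on the path; MockPort and DriverOutput are the test doubles used throughout the examples below):

from webkitpy.performance_tests.perftest import PerfTest

def parse_perf_output(port, driver_output):
    # Construct a PerfTest from a port, a test name, and the path to the test.
    test = PerfTest(port, 'some-test', '/path/some-dir/some-test')
    # Drop ignorable stdout/stderr lines, then parse; parse_output returns a
    # metrics dict on success or None on failure (see Examples 1 and 2).
    test._filter_output(driver_output)
    return test.parse_output(driver_output)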
Example 1: test_parse_output_with_failing_line
def test_parse_output_with_failing_line(self):
    output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)
some-unrecognizable-line
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test._filter_output(output)
        self.assertEqual(test.parse_output(output), None)
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example 2: test_parse_output
def test_parse_output(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'Ignoring warm-up run (1115)',
        '',
        'Time:',
        'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
        'avg 1100 ms',
        'median 1101 ms',
        'stdev 11 ms',
        'min 1080 ms',
        'max 1120 ms']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output),
            {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                'values': [i for i in range(1, 20)]}})
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
Example 3: test_parse_output_with_subtests
def test_parse_output_with_subtests(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'some test: [1, 2, 3, 4, 5]',
        'other test = else: [6, 7, 8, 9, 10]',
        '',
        'Time:',
        'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
        'avg 1100 ms',
        'median 1101 ms',
        'stdev 11 ms',
        'min 1080 ms',
        'max 1120 ms']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test._filter_output(output)
        self.assertEqual(test.parse_output(output),
            {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                'values': [i for i in range(1, 20)]}})
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, '')
Example 4: test_parse_output_with_description
def test_parse_output_with_description(self):
    output = DriverOutput("""
Description: this is a test description.
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    self._assert_results_are_correct(test, output)
    self.assertEqual(test.description(), 'this is a test description.')
Example 5: test_parse_output_with_subtests
def test_parse_output_with_subtests(self):
    output = DriverOutput("""
Description: this is a test description.
some test:Time -> [1, 2, 3, 4, 5] ms
some other test = else:Time -> [6, 7, 8, 9, 10] ms
some other test = else:Malloc -> [11, 12, 13, 14, 15] bytes
Array Construction, []:Time -> [11, 12, 13, 14, 15] ms
Concat String:Time -> [15163, 15304, 15386, 15608, 15622] ms
jQuery - addClass:Time -> [2785, 2815, 2826, 2841, 2861] ms
Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms
Dojo - div:nth-child(2n+1):Time -> [3620, 3623, 3633, 3641, 3658] ms
Dojo - div > div:Time -> [10158, 10172, 10180, 10183, 10231] ms
Dojo - div ~ div:Time -> [6673, 6675, 6714, 6848, 6902] ms
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertTrue(test.run(10))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

    subtests = test._metrics
    self.assertEqual(map(lambda test: test['name'], subtests), ['some test', 'some other test = else',
        'Array Construction, []', 'Concat String', 'jQuery - addClass', 'Dojo - div:only-child',
        'Dojo - div:nth-child(2n+1)', 'Dojo - div > div', 'Dojo - div ~ div', None])

    some_test_metrics = subtests[0]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'some test'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

    some_other_test_metrics = subtests[1]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), some_other_test_metrics), ['Time', 'Malloc'])
    self.assertEqual(some_other_test_metrics[0].path(), ['some-dir', 'some-test', 'some other test = else'])
    self.assertEqual(some_other_test_metrics[0].flattened_iteration_values(), [6, 7, 8, 9, 10] * 4)
    self.assertEqual(some_other_test_metrics[1].path(), ['some-dir', 'some-test', 'some other test = else'])
    self.assertEqual(some_other_test_metrics[1].flattened_iteration_values(), [11, 12, 13, 14, 15] * 4)

    main_metrics = subtests[len(subtests) - 1]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
    self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
    self.assertEqual(main_metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)

    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, """DESCRIPTION: this is a test description.
RESULT some-dir: some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
Example 6: _assert_failed_on_line
def _assert_failed_on_line(self, output_text, expected_log):
    output = DriverOutput(output_text, image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertFalse(test._run_with_driver(None, None))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, expected_log)
Example 7: test_parse_output_with_description
def test_parse_output_with_description(self):
    output = DriverOutput('\n'.join([
        'Description: this is a test description.',
        'Time:',
        'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
        'avg 1100 ms',
        'median 1101 ms',
        'stdev 11 ms',
        'min 1080 ms',
        'max 1120 ms']), image=None, image_hash=None, audio=None)
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    self.assertTrue(test.parse_output(output))
    self.assertEqual(test.description(), 'this is a test description.')
Example 8: test_ignored_stderr_lines
def test_ignored_stderr_lines(self):
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
Unknown option: --foo-bar
Should not be ignored
[WARNING:proxy_service.cc] bad moon a-rising
[WARNING:chrome.cc] Something went wrong
[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
[ERROR:main.cc] The sky has fallen""")
    test._filter_output(output_with_lines_to_ignore)
    self.assertEqual(output_with_lines_to_ignore.error,
        "Should not be ignored\n"
        "[WARNING:chrome.cc] Something went wrong\n"
        "[ERROR:main.cc] The sky has fallen")
Example 9: test_parse_output_with_subtests_and_total
def test_parse_output_with_subtests_and_total(self):
    output = DriverOutput("""
:Time:Total -> [2324, 2328, 2345, 2314, 2312] ms
EmberJS-TodoMVC:Time:Total -> [1462, 1473, 1490, 1465, 1458] ms
EmberJS-TodoMVC/a:Time -> [1, 2, 3, 4, 5] ms
BackboneJS-TodoMVC:Time -> [862, 855, 855, 849, 854] ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertTrue(test.run(10))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

    subtests = test._metrics
    self.assertEqual(map(lambda test: test['name'], subtests), [None, 'EmberJS-TodoMVC', 'EmberJS-TodoMVC/a', 'BackboneJS-TodoMVC'])

    main_metrics = subtests[0]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
    self.assertEqual(main_metrics[0].aggregator(), 'Total')
    self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
    self.assertEqual(main_metrics[0].flattened_iteration_values(), [2324, 2328, 2345, 2314, 2312] * 4)

    some_test_metrics = subtests[1]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
    self.assertEqual(some_test_metrics[0].aggregator(), 'Total')
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1462, 1473, 1490, 1465, 1458] * 4)

    some_test_metrics = subtests[2]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
    self.assertEqual(some_test_metrics[0].aggregator(), None)
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC', 'a'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

    some_test_metrics = subtests[3]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
    self.assertEqual(some_test_metrics[0].aggregator(), None)
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'BackboneJS-TodoMVC'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [862, 855, 855, 849, 854] * 4)

    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, """RESULT some-dir: some-test: Time= 2324.6 ms
median= 2324.0 ms, stdev= 12.1326007105 ms, min= 2312.0 ms, max= 2345.0 ms
""")
Example 10: test_ignored_stderr_lines
def test_ignored_stderr_lines(self):
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    ignored_lines = [
        "Unknown option: --foo-bar",
        "[WARNING:proxy_service.cc] bad moon a-rising",
        "[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/",
    ]
    for line in ignored_lines:
        self.assertTrue(test._should_ignore_line_in_stderr(line))

    non_ignored_lines = [
        "Should not be ignored",
        "[WARNING:chrome.cc] Something went wrong",
        "[ERROR:main.cc] The sky has fallen",
    ]
    for line in non_ignored_lines:
        self.assertFalse(test._should_ignore_line_in_stderr(line))
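The predicate under test is not shown on this page; a hypothetical implementation consistent with the assertions above (the pattern list is a guess, not webkitpy's actual code) could look like:

import re

# Hypothetical stderr filter matching the ignored/non-ignored lines above;
# webkitpy's real implementation may use different patterns and structure.
_IGNORED_STDERR_PATTERNS = [
    re.compile(r'^Unknown option:'),              # unrecognized driver flags
    re.compile(r'^\[WARNING:proxy_service\.cc'),  # known-noisy network warning
    re.compile(r'^\[INFO:'),                      # informational log spam
]

def should_ignore_line_in_stderr(line):
    return any(pattern.match(line) for pattern in _IGNORED_STDERR_PATTERNS)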
Example 11: test_compute_statistics
def test_compute_statistics(self):
    def compute_statistics(values):
        statistics = PerfTest.compute_statistics(map(lambda x: float(x), values))
        return json.loads(json.dumps(statistics))

    statistics = compute_statistics([10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11])
    self.assertEqual(sorted(statistics.keys()), ['avg', 'max', 'median', 'min', 'stdev'])
    self.assertEqual(statistics['avg'], 10.5)
    self.assertEqual(statistics['min'], 1)
    self.assertEqual(statistics['max'], 20)
    self.assertEqual(statistics['median'], 10.5)

    self.assertEqual(compute_statistics([8, 9, 10, 11, 12])['avg'], 10)
    self.assertEqual(compute_statistics([8, 9, 10, 11, 12] * 4)['avg'], 10)
    self.assertEqual(compute_statistics([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])['avg'], 10)

    self.assertEqual(PerfTest.compute_statistics([1, 5, 2, 8, 7])['median'], 5)
    self.assertEqual(PerfTest.compute_statistics([1, 6, 2, 8, 7, 2])['median'], 4)

    self.assertAlmostEqual(statistics['stdev'], math.sqrt(35))
    self.assertAlmostEqual(compute_statistics([1, 2, 3, 4, 5, 6])['stdev'], math.sqrt(3.5))
    self.assertAlmostEqual(compute_statistics([4, 2, 5, 8, 6])['stdev'], math.sqrt(5))
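The expected values pin down the definition of stdev: for the values 1..20 the sum of squared deviations from the mean 10.5 is 665, and 665 / 19 = 35 matches math.sqrt(35), so compute_statistics uses the sample standard deviation (n - 1 denominator). A standalone sketch that reproduces these numbers (not webkitpy's implementation):

import math

def compute_statistics_sketch(values):
    # Sample statistics matching the expectations above; assumes len(values) >= 2.
    values = sorted(float(value) for value in values)
    n = len(values)
    mean = sum(values) / n
    middle = n // 2
    median = values[middle] if n % 2 else (values[middle - 1] + values[middle]) / 2
    variance = sum((value - mean) ** 2 for value in values) / (n - 1)
    return {'avg': mean, 'median': median, 'min': values[0],
            'max': values[-1], 'stdev': math.sqrt(variance)}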
Example 12: test_parse_output (from an older PerfTest revision; note the two-argument constructor and the missing 'values' key)
def test_parse_output(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'Ignoring warm-up run (1115)',
        '',
        'avg 1100',
        'median 1101',
        'stdev 11',
        'min 1080',
        'max 1120']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest('some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output),
            {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
Example 13: test_parse_output_with_failing_line (same older revision as Example 12)
def test_parse_output_with_failing_line(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'Ignoring warm-up run (1115)',
        '',
        'some-unrecognizable-line',
        '',
        'avg 1100',
        'median 1101',
        'stdev 11',
        'min 1080',
        'max 1120']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest('some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output), None)
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
Example 14: compute_statistics
def compute_statistics(values):
    statistics = PerfTest.compute_statistics(map(lambda x: float(x), values))
    return json.loads(json.dumps(statistics))
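This helper (also used inline in Example 11) round-trips the statistics through JSON before comparison, which both verifies that the result is JSON-serializable and normalizes container types for assertEqual, as in:

import json

# json.dumps/json.loads coerces containers to plain JSON types,
# e.g. tuples become lists, so comparisons see canonical values.
assert json.loads(json.dumps({'values': (1, 2, 3)})) == {'values': [1, 2, 3]}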