本文整理汇总了Python中webkitpy.layout_tests.views.metered_stream.MeteredStream.cleanup方法的典型用法代码示例。如果您正苦于以下问题:Python MeteredStream.cleanup方法的具体用法?Python MeteredStream.cleanup怎么用?Python MeteredStream.cleanup使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类webkitpy.layout_tests.views.metered_stream.MeteredStream
的用法示例。
在下文中一共展示了MeteredStream.cleanup方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Printer
# 需要导入模块: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as 别名]
# 或者: from webkitpy.layout_tests.views.metered_stream.MeteredStream import cleanup [as 别名]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
    """Create a printer for run-webkit-tests output.

    Args:
        port: interface to port-specific routines.
        options: parsed command-line options object.
        regular_output: stream for human-readable output.
        logger: optional logger whose records are interleaved into the
            metered stream.
    """
    # Progress counters, updated as tests complete.
    self.num_completed = 0
    self.num_tests = 0
    self._port = port
    self._options = options
    # number_of_columns comes from the current terminal width so the
    # metered (in-place) updates fit the screen.
    self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
                                number_of_columns=self._port.host.platform.terminal_width())
    # Bookkeeping for tests currently in flight vs. already finished.
    self._running_tests = []
    self._completed_tests = []
def cleanup(self):
    # Delegate teardown to the underlying MeteredStream.
    self._meter.cleanup()
def __del__(self):
    # Best-effort cleanup if the caller forgot to call cleanup() explicitly.
    # NOTE(review): relying on __del__ is fragile (it may run late during
    # interpreter shutdown, or not at all) -- callers should still invoke
    # cleanup() themselves.
    self.cleanup()
def print_config(self, results_directory):
    """Log the test-run configuration: port, results location, baselines,
    build flavor, timeouts, and the driver command line."""
    emit = self._print_default
    port = self._port
    options = self._options
    emit("Using port '%s'" % port.name())
    emit("Test configuration: %s" % port.test_configuration())
    emit("View the test results at file://%s/results.html" % results_directory)
    emit("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
    # FIXME: should these options be in printing_options?
    if options.new_baseline:
        emit("Placing new baselines in %s" % port.baseline_path())
    filesystem = port.host.filesystem
    fallback_names = [filesystem.split(entry)[1] for entry in port.baseline_search_path()]
    emit("Baseline search path: %s -> generic" % " -> ".join(fallback_names))
    emit("Using %s build" % options.configuration)
    emit("Pixel tests enabled" if options.pixel_tests else "Pixel tests disabled")
    emit("Regular timeout: %s, slow test timeout: %s"
         % (options.time_out_ms, options.slow_time_out_ms))
    emit('Command line: ' + ' '.join(port.driver_cmd_line()))
    emit('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
    """Log how many tests were found, how many will run, and how many
    are skipped, including any repeat/iteration multipliers."""
    total_runs = repeat_each * iterations
    message = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
    if total_runs > 1:
        message += ' (%d times each: --repeat-each=%d --iterations=%d)' % (total_runs, repeat_each, iterations)
    message += ', skipping %d' % (num_all_test_files - num_to_run)
    self._print_default(message + '.')
def print_expected(self, run_results, tests_with_result_type_callback):
    """Log the expected passes/failures/flaky counts for this run."""
    # One aligned line per result class, in a fixed presentation order.
    for result_type, label in ((test_expectations.PASS, "passes"),
                               (test_expectations.FAIL, "failures"),
                               (test_expectations.FLAKY, "flaky")):
        self._print_expected_results_of_type(run_results, result_type, label, tests_with_result_type_callback)
    self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Log the worker/shard configuration for this run (shard detail at
    debug level)."""
    driver = self._port.driver_name()
    if num_workers != 1:
        self._print_default("Running %d %ss in parallel." % (num_workers, driver))
        self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
    else:
        self._print_default("Running 1 %s." % driver)
        self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
    self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
    """Log one aligned 'Expect:' line for a single result class."""
    tests = tests_with_result_type_callback(result_type)
    now = run_results.tests_by_timeline[test_expectations.NOW]
    wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
    # Size the "now"/"wontfix" columns so successive lines stay aligned
    # in a table.
    now_width = self._num_digits(now)
    wontfix_width = self._num_digits(wontfix)
    template = "Expect: %5d %-8s (%{0}d now, %{1}d wontfix)".format(now_width, wontfix_width)
    self._print_debug(template % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
    """Return the number of decimal digits needed to print len(num),
    with a minimum of 1 for an empty collection."""
    count = len(num)
    return len(str(count)) if count else 1
def print_results(self, run_time, run_results, summarized_results):
    # End-of-run trailer: timing statistics (debug level) then the
    # one-line summary.
    # NOTE(review): summarized_results is accepted but never used here --
    # presumably kept for signature compatibility; confirm with callers.
    self._print_timing_statistics(run_time, run_results)
    self._print_one_line_summary(run_time, run_results)
def _print_timing_statistics(self, total_time, run_results):
    # Debug-level timing report: overall wall-clock time, then per-worker,
    # aggregate, per-test, and per-directory breakdowns (helper bodies are
    # omitted from this excerpt).
    self._print_debug("Test timing:")
    self._print_debug("  %6.2f total testing time" % total_time)
    self._print_debug("")
    self._print_worker_statistics(run_results, int(self._options.child_processes))
    self._print_aggregate_test_statistics(run_results)
    self._print_individual_test_times(run_results)
    self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
#.........这里部分代码省略.........
示例2: Printer
# 需要导入模块: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as 别名]
# 或者: from webkitpy.layout_tests.views.metered_stream.MeteredStream import cleanup [as 别名]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests.
Printing from run-webkit-tests falls into two buckets: general or
regular output that is read only by humans and can be changed at any
time, and output that is parsed by buildbots (and humans) and hence
must be changed more carefully and in coordination with the buildbot
parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
log_parser/webkit_test_command.py script).
By default the buildbot-parsed code gets logged to stdout, and regular
output gets logged to stderr."""
def __init__(self, port, options, regular_output, buildbot_output, logger=None):
    """Create a printer that splits human-readable and buildbot-parsed output.

    Args:
        port: interface to port-specific routines.
        options: parsed command-line options object.
        regular_output: stream for human-only output (wrapped by the meter).
        buildbot_output: stream for buildbot-parsed output.
        logger: optional logger interleaved into the metered stream.
    """
    # Progress counters, updated as tests complete.
    self.num_completed = 0
    self.num_tests = 0
    self._port = port
    self._options = options
    self._buildbot_stream = buildbot_output
    self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger)
    # Bookkeeping for tests currently in flight vs. already finished.
    self._running_tests = []
    self._completed_tests = []
def cleanup(self):
    # Delegate teardown to the underlying MeteredStream.
    self._meter.cleanup()
def __del__(self):
    # Best-effort cleanup if the caller forgot to call cleanup() explicitly.
    # NOTE(review): __del__ may run late during interpreter shutdown, or
    # not at all -- callers should still invoke cleanup() themselves.
    self.cleanup()
def print_config(self):
    """Log the test-run configuration: port, results directory, baselines,
    build flavor, timeouts, and the driver command line."""
    emit = self._print_default
    port = self._port
    options = self._options
    emit("Using port '%s'" % port.name())
    emit("Test configuration: %s" % port.test_configuration())
    emit("Placing test results in %s" % options.results_directory)
    # FIXME: should these options be in printing_options?
    if options.new_baseline:
        emit("Placing new baselines in %s" % port.baseline_path())
    filesystem = port.host.filesystem
    fallback_names = [filesystem.split(entry)[1] for entry in port.baseline_search_path()]
    emit("Baseline search path: %s -> generic" % " -> ".join(fallback_names))
    emit("Using %s build" % options.configuration)
    emit("Pixel tests enabled" if options.pixel_tests else "Pixel tests disabled")
    emit("Regular timeout: %s, slow test timeout: %s" % (options.time_out_ms, options.slow_time_out_ms))
    emit("Command line: " + " ".join(port.driver_cmd_line()))
    emit("")
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
    """Log how many tests were found, how many unique tests will run, and
    how many are skipped.

    Args:
        num_all_test_files: total number of test files discovered.
        num_to_run: number of test invocations, including every repeat
            and iteration.
        repeat_each, iterations: repetition multipliers from the command line.
    """
    # num_to_run counts every repetition; divide the multipliers back out
    # to report unique tests.  Use floor division explicitly: this code
    # predates Python 3, where a bare "/" would yield a float here and the
    # skip count below would become a float as well.
    num_unique_tests = num_to_run // (repeat_each * iterations)
    found_str = "Found %s; running %d" % (grammar.pluralize("test", num_all_test_files), num_unique_tests)
    if repeat_each * iterations > 1:
        found_str += " (%d times each: --repeat-each=%d --iterations=%d)" % (
            repeat_each * iterations,
            repeat_each,
            iterations,
        )
    found_str += ", skipping %d" % (num_all_test_files - num_unique_tests)
    self._print_default(found_str + ".")
def print_expected(self, result_summary, tests_with_result_type_callback):
    """Log the expected passes/failures/flaky counts for this run."""
    # One aligned line per result class, in a fixed presentation order.
    for result_type, label in (
        (test_expectations.PASS, "passes"),
        (test_expectations.FAIL, "failures"),
        (test_expectations.FLAKY, "flaky"),
    ):
        self._print_expected_results_of_type(result_summary, result_type, label, tests_with_result_type_callback)
    self._print_debug("")
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Log the worker/shard configuration for this run."""
    driver = self._port.driver_name()
    if num_workers == 1:
        message = "Running 1 %s over %s." % (driver, grammar.pluralize("shard", num_shards))
    else:
        message = ("Running %d %ss in parallel over %d shards (%d locked)."
                   % (num_workers, driver, num_shards, num_locked_shards))
    self._print_default(message)
    self._print_default("")
def _print_expected_results_of_type(
self, result_summary, result_type, result_type_str, tests_with_result_type_callback
):
tests = tests_with_result_type_callback(result_type)
now = result_summary.tests_by_timeline[test_expectations.NOW]
wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = "Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" % (self._num_digits(now), self._num_digits(wontfix))
#.........这里部分代码省略.........
示例3: RegularTest
# 需要导入模块: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as 别名]
# 或者: from webkitpy.layout_tests.views.metered_stream.MeteredStream import cleanup [as 别名]
class RegularTest(unittest.TestCase):
    """Exercises MeteredStream writing to a regular (non-tty) stream
    without --verbose.

    Subclasses can flip ``verbose``/``isatty`` to cover the other
    MeteredStream modes.
    """
    verbose = False
    isatty = False

    def setUp(self):
        # NOTE(review): this is Python 2 code -- StringIO.StringIO exposes
        # .buflist (the list of chunks written so far), which io.StringIO
        # does not.  The assertions below compare against that list directly.
        self.stream = StringIO.StringIO()
        self.buflist = self.stream.buflist
        self.stream.isatty = lambda: self.isatty
        # configure a logger to test that log calls do normally get included.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False
        # add a dummy time counter for a default behavior.
        self.times = range(10)
        # NOTE(review): 8675 appears to be an arbitrary column-width
        # argument for MeteredStream -- confirm against its signature.
        self.meter = MeteredStream(self.stream, self.verbose, self.logger, self.time_fn, 8675)

    def tearDown(self):
        # Release the meter so state it installed (e.g. on the logger)
        # does not leak between tests.
        if self.meter:
            self.meter.cleanup()
            self.meter = None

    def time_fn(self):
        # Deterministic fake clock: each call consumes the next queued
        # timestamp.  (Relies on Python 2's range() returning a list;
        # pop(0) would fail on a Python 3 range object.)
        return self.times.pop(0)

    def test_logging_not_included(self):
        # This tests that if we don't hand a logger to the MeteredStream,
        # nothing is logged.
        logging_stream = StringIO.StringIO()
        handler = logging.StreamHandler(logging_stream)
        root_logger = logging.getLogger()
        orig_level = root_logger.level
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.DEBUG)
        try:
            self.meter = MeteredStream(self.stream, self.verbose, None, self.time_fn, 8675)
            self.meter.write_throttled_update('foo')
            self.meter.write_update('bar')
            self.meter.write('baz')
            self.assertEqual(logging_stream.buflist, [])
        finally:
            # Always restore the root logger we mutated above.
            root_logger.removeHandler(handler)
            root_logger.setLevel(orig_level)

    def _basic(self, times):
        # Drive a canonical write sequence against a caller-supplied fake
        # clock and return everything that reached the stream.
        self.times = times
        self.meter.write_update('foo')
        self.meter.write_update('bar')
        self.meter.write_throttled_update('baz')
        self.meter.write_throttled_update('baz 2')
        self.meter.writeln('done')
        # Every queued timestamp must have been consumed by the meter.
        self.assertEqual(self.times, [])
        return self.buflist

    def test_basic(self):
        buflist = self._basic([0, 1, 2, 13, 14])
        # Per the expected output, the first throttled update ('baz') never
        # reaches the stream while 'baz 2' does.
        self.assertEqual(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])

    def _log_after_update(self):
        # A log record emitted after an update should land in the stream.
        self.meter.write_update('foo')
        self.logger.info('bar')
        return self.buflist

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertEqual(buflist, ['foo\n', 'bar\n'])

    def test_log_args(self):
        # %-style lazy formatting args must be applied before writing.
        self.logger.info('foo %s %d', 'bar', 2)
        self.assertEqual(self.buflist, ['foo bar 2\n'])
示例4: Printer
# 需要导入模块: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as 别名]
# 或者: from webkitpy.layout_tests.views.metered_stream.MeteredStream import cleanup [as 别名]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests.
Printing from run-webkit-tests falls into two buckets: general or
regular output that is read only by humans and can be changed at any
time, and output that is parsed by buildbots (and humans) and hence
must be changed more carefully and in coordination with the buildbot
parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
log_parser/webkit_test_command.py script).
By default the buildbot-parsed code gets logged to stdout, and regular
output gets logged to stderr."""
def __init__(self, port, options, regular_output, buildbot_output, logger=None):
    """
    Args
        port            interface to port-specific routines
        options         OptionParser object with command line settings
        regular_output  stream to which output intended only for humans
                        should be written
        buildbot_output stream to which output intended to be read by
                        the buildbots (and humans) should be written
        logger          optional logger to integrate into the stream.
    """
    self._port = port
    self._options = options
    self._buildbot_stream = buildbot_output
    # MeteredStream wraps regular_output (with the optional logger attached).
    self._meter = MeteredStream(regular_output, options.verbose, logger=logger)
    # Active --print switches; consulted by enabled()/disabled() below.
    self.switches = parse_print_options(options.print_options, options.verbose)
def cleanup(self):
    # Delegate teardown to the underlying MeteredStream.
    self._meter.cleanup()
def __del__(self):
    # Best-effort cleanup if the caller forgot to call cleanup() explicitly.
    # NOTE(review): __del__ may run late during interpreter shutdown, or
    # not at all -- callers should still invoke cleanup() themselves.
    self.cleanup()
# These two routines just hide the implementation of the switches.
def disabled(self, option):
    """Return True when *option* is not among the active print switches."""
    # PEP 8 idiom: "x not in y" rather than "not x in y" (same result,
    # clearer reading).
    return option not in self.switches
def enabled(self, option):
    # True when *option* was selected via the print switches.
    return option in self.switches
def help_printing(self):
    # Dump the --help-printing text through the raw writer.
    # NOTE(review): HELP_PRINTING is presumably a module-level constant
    # defined outside this excerpt -- confirm.
    self._write(HELP_PRINTING)
def print_actual(self, msg):
    """Write a buildbot-parsed 'actual results' line, if that category
    of output is enabled."""
    if self.enabled('actual'):
        self._buildbot_stream.write("%s\n" % msg)
def print_config(self, msg):
    # Route config-category output through the switch-aware writer.
    # NOTE(review): self.write is defined in code omitted from this
    # excerpt (help_printing uses self._write) -- confirm both exist.
    self.write(msg, 'config')
def print_expected(self, msg):
    # Route expected-results output through the switch-aware writer
    # (self.write is defined in code omitted from this excerpt).
    self.write(msg, 'expected')
def print_timing(self, msg):
    # Route timing output through the switch-aware writer
    # (self.write is defined in code omitted from this excerpt).
    self.write(msg, 'timing')
def print_one_line_summary(self, total, expected, unexpected):
    """Print a one-line summary of the test run to stdout.

    Args:
        total: total number of tests run
        expected: number of expected results
        unexpected: number of unexpected results
    """
    if self.disabled('one-line-summary'):
        return
    incomplete = total - expected - unexpected
    incomplete_str = ""
    expected_str = "All %d" % expected
    if incomplete:
        # Some tests never ran; set off the summary with a blank line and
        # report the raw expected count alongside the didn't-run count.
        self._write("")
        incomplete_str = " (%d didn't run)" % incomplete
        expected_str = str(expected)
    if unexpected == 0:
        summary = "%s tests ran as expected%s." % (expected_str, incomplete_str)
    elif expected == 1:
        summary = "1 test ran as expected, %d didn't%s:" % (unexpected, incomplete_str)
    else:
        summary = "%d tests ran as expected, %d didn't%s:" % (expected, unexpected, incomplete_str)
    self._write(summary)
    self._write("")
def print_test_result(self, result, expected, exp_str, got_str):
"""Print the result of the test as determined by --print.
This routine is used to print the details of each test as it completes.
Args:
result - The actual TestResult object
expected - Whether the result we got was an expected result
exp_str - What we expected to get (used for tracing)
got_str - What we actually got (used for tracing)
#.........这里部分代码省略.........
示例5: Printer
# 需要导入模块: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as 别名]
# 或者: from webkitpy.layout_tests.views.metered_stream.MeteredStream import cleanup [as 别名]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests.
Printing from run-webkit-tests falls into two buckets: general or
regular output that is read only by humans and can be changed at any
time, and output that is parsed by buildbots (and humans) and hence
must be changed more carefully and in coordination with the buildbot
parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
log_parser/webkit_test_command.py script).
By default the buildbot-parsed code gets logged to stdout, and regular
output gets logged to stderr."""
def __init__(self, port, options, regular_output, buildbot_output, logger=None):
    """
    Args
        port            interface to port-specific routines
        options         OptionParser object with command line settings
        regular_output  stream to which output intended only for humans
                        should be written
        buildbot_output stream to which output intended to be read by
                        the buildbots (and humans) should be written
        logger          optional logger to integrate into the stream.
    """
    self._port = port
    self._options = options
    self._buildbot_stream = buildbot_output
    # MeteredStream wraps regular_output (with the optional logger attached).
    self._meter = MeteredStream(regular_output, options.verbose, logger=logger)
    # Active --print switches; consulted by enabled()/disabled() below.
    self.switches = parse_print_options(options.print_options, options.verbose)
def cleanup(self):
    # Delegate teardown to the underlying MeteredStream.
    self._meter.cleanup()
def __del__(self):
    # Best-effort cleanup if the caller forgot to call cleanup() explicitly.
    # NOTE(review): __del__ may run late during interpreter shutdown, or
    # not at all -- callers should still invoke cleanup() themselves.
    self.cleanup()
# These two routines just hide the implementation of the switches.
def disabled(self, option):
    """Return True when *option* is not among the active print switches."""
    # PEP 8 idiom: "x not in y" rather than "not x in y" (same result,
    # clearer reading).
    return option not in self.switches
def enabled(self, option):
    # True when *option* was selected via the print switches.
    return option in self.switches
def help_printing(self):
    # Dump the --help-printing text through the raw writer.
    # NOTE(review): HELP_PRINTING is presumably a module-level constant
    # defined outside this excerpt -- confirm.
    self._write(HELP_PRINTING)
def print_config(self):
    """Prints the configuration for the test run."""
    emit = self._print_config
    port = self._port
    options = self._options
    emit("Using port '%s'" % port.name())
    emit("Test configuration: %s" % port.test_configuration())
    emit("Placing test results in %s" % options.results_directory)
    # FIXME: should these options be in printing_options?
    if options.new_baseline:
        emit("Placing new baselines in %s" % port.baseline_path())
    filesystem = port.host.filesystem
    fallback_names = [filesystem.split(entry)[1] for entry in port.baseline_search_path()]
    emit("Baseline search path: %s -> generic" % " -> ".join(fallback_names))
    emit("Using %s build" % options.configuration)
    emit("Pixel tests enabled" if options.pixel_tests else "Pixel tests disabled")
    emit("Regular timeout: %s, slow test timeout: %s"
         % (options.time_out_ms, options.slow_time_out_ms))
    emit('Command line: ' + ' '.join(port.driver_cmd_line()))
    emit('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
    """Log how many tests were found, how many will run, and how many are
    skipped (routed through the 'expected' print category)."""
    total_runs = repeat_each * iterations
    message = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
    if total_runs > 1:
        message += ' (%d times each: --repeat-each=%d --iterations=%d)' % (total_runs, repeat_each, iterations)
    message += ', skipping %d' % (num_all_test_files - num_to_run)
    self._print_expected(message + '.')
def print_expected(self, result_summary, tests_with_result_type_callback):
    """Log the expected passes/failures/flaky counts for this run."""
    # One aligned line per result class, in a fixed presentation order.
    for result_type, label in ((test_expectations.PASS, "passes"),
                               (test_expectations.FAIL, "failures"),
                               (test_expectations.FLAKY, "flaky")):
        self._print_expected_results_of_type(result_summary, result_type, label, tests_with_result_type_callback)
    self._print_expected('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Log the worker/shard configuration (config print category)."""
    driver = self._port.driver_name()
    if num_workers == 1:
        message = "Running 1 %s over %s." % (driver, grammar.pluralize('shard', num_shards))
    else:
        message = ("Running %d %ss in parallel over %d shards (%d locked)."
                   % (num_workers, driver, num_shards, num_locked_shards))
    self._print_config(message)
    self._print_config('')
def _print_expected_results_of_type(self, result_summary,
result_type, result_type_str, tests_with_result_type_callback):
"""Print the number of the tests in a given result class.
Args:
result_summary - the object containing all the results to report on
#.........这里部分代码省略.........