

Python MeteredStream.writeln method code examples

This article collects typical usage examples of the Python method webkitpy.layout_tests.views.metered_stream.MeteredStream.writeln. If you are unsure what MeteredStream.writeln does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore the enclosing class, webkitpy.layout_tests.views.metered_stream.MeteredStream, for more context on how the method fits in.


Six code examples of MeteredStream.writeln are shown below, sorted by popularity by default.
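
Before diving into the examples, here is a minimal, hypothetical usage sketch of MeteredStream.writeln. The constructor arguments vary between webkitpy revisions (compare Examples 2, 3, and 6 below), so the two-argument positional form and the parameter meanings shown here are assumptions based on those examples rather than a definitive API reference.

import sys

from webkitpy.layout_tests.views.metered_stream import MeteredStream

# Wrap stdout in a meter. The second positional argument is the verbose
# flag, mirroring the MeteredStream(self.stream, (options.verbose == 2))
# call in Example 6 below.
meter = MeteredStream(sys.stdout, False)

meter.write_update("running tests...")          # transient status line
meter.write_throttled_update("still running")   # rate-limited status line
meter.writeln("All 6 tests ran as expected.")   # permanent line plus a newline
meter.cleanup()                                 # release the stream (see Example 3)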

Example 1: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import writeln [as alias]

#......... part of the code omitted here .........

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_one_line_summary(self, total_time, run_results):
        if self._options.timing:
            parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())

            # There is serial overhead in layout_test_runner.run() that we can't easily account for when
            # really running in parallel, but taking the min() ensures that in the worst case
            # (if parallel time is less than run_time) we do account for it.
            serial_time = total_time - min(run_results.run_time, parallel_time)

            speedup = (parallel_time + serial_time) / total_time
            timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
        else:
            timing_summary = ''

        total = run_results.total - run_results.expected_skips
        expected = run_results.expected - run_results.expected_skips
        unexpected = run_results.unexpected
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        expected_summary_str = ''
        if run_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
                else:
                    summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
            else:
                summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
        else:
            summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
Author: dreifachstein, project: chromium-src, lines: 70, source file: printing.py
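
Example 1 begins after an elided block, so the computation of the median, mean, and percentiles it prints is not shown. The following is a hypothetical helper, not taken from printing.py, that derives those statistics from a non-empty list of per-test run times; the name summarize_times and the nearest-rank percentile choice are assumptions made purely for illustration.

import math

def summarize_times(times):
    # Hypothetical helper: compute the values printed by the summary above
    # from a non-empty list of per-test run times (in seconds).
    times = sorted(times)
    num_tests = len(times)
    mean = sum(times) / float(num_tests)
    median = times[num_tests // 2]
    percentile90 = times[int(num_tests * 0.90)]
    percentile99 = times[int(num_tests * 0.99)]
    sum_of_deviations = sum((t - mean) ** 2 for t in times)
    std_deviation = math.sqrt(sum_of_deviations / num_tests)
    return median, mean, percentile90, percentile99, std_deviation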

Example 2: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import writeln [as alias]
class Printer(object):
    def __init__(self, stream, options=None):
        self.stream = stream
        self.meter = None
        self.options = options
        self.num_tests = 0
        self.num_started = 0
        self.num_errors = 0
        self.num_failures = 0
        self.running_tests = []
        self.completed_tests = []
        if options:
            self.configure(options)

    def configure(self, options):
        self.options = options

        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)

        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG

        self.meter = MeteredStream(
            self.stream, (options.verbose == 2), number_of_columns=SystemHost().platform.terminal_width()
        )

        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)

        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            #        using syntax like webkitpy.test.__name__.  We want to be
            #        sure not to import any non-Python 2.4 code, though, until
            #        after the version-checking code has executed.
            if record.name.startswith("webkitpy.common.system.autoinstall") or record.name.startswith("webkitpy.test"):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True

        testing_filter = logging.Filter()
        testing_filter.filter = filter_records

        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info("Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)

        if self.options.pass_through:
            # FIXME: Can't import at top of file, as outputcapture needs unittest2
            from webkitpy.common.system import outputcapture

            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream

    def write_update(self, msg):
        self.meter.write_update(msg)

    def print_started_test(self, source, test_name):
        self.running_tests.append(test_name)
        if len(self.running_tests) > 1:
            suffix = " (+%d)" % (len(self.running_tests) - 1)
        else:
            suffix = ""

        if self.options.verbose:
            write = self.meter.write_update
        else:
            write = self.meter.write_throttled_update

        write(self._test_line(self.running_tests[0], suffix))

    def print_finished_test(self, source, test_name, test_time, failures, errors):
        write = self.meter.writeln
        if failures:
            lines = failures[0].splitlines() + [""]
            suffix = " failed:"
#......... part of the code omitted here .........
Author: (not listed), project: (not listed), lines: 103, source file: (not listed)

Example 3: RegularTest

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import writeln [as alias]
class RegularTest(unittest.TestCase):
    verbose = False
    isatty = False

    def setUp(self):
        self.stream = StringIO.StringIO()
        self.buflist = self.stream.buflist
        self.stream.isatty = lambda: self.isatty

        # configure a logger to test that log calls do normally get included.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False

        # add a dummy time counter for a default behavior.
        self.times = range(10)

        self.meter = MeteredStream(self.stream, self.verbose, self.logger, self.time_fn, 8675)

    def tearDown(self):
        if self.meter:
            self.meter.cleanup()
            self.meter = None

    def time_fn(self):
        return self.times.pop(0)

    def test_logging_not_included(self):
        # This tests that if we don't hand a logger to the MeteredStream,
        # nothing is logged.
        logging_stream = StringIO.StringIO()
        handler = logging.StreamHandler(logging_stream)
        root_logger = logging.getLogger()
        orig_level = root_logger.level
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.DEBUG)
        try:
            self.meter = MeteredStream(self.stream, self.verbose, None, self.time_fn, 8675)
            self.meter.write_throttled_update('foo')
            self.meter.write_update('bar')
            self.meter.write('baz')
            self.assertEqual(logging_stream.buflist, [])
        finally:
            root_logger.removeHandler(handler)
            root_logger.setLevel(orig_level)

    def _basic(self, times):
        self.times = times
        self.meter.write_update('foo')
        self.meter.write_update('bar')
        self.meter.write_throttled_update('baz')
        self.meter.write_throttled_update('baz 2')
        self.meter.writeln('done')
        self.assertEqual(self.times, [])
        return self.buflist

    def test_basic(self):
        buflist = self._basic([0, 1, 2, 13, 14])
        self.assertEqual(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])

    def _log_after_update(self):
        self.meter.write_update('foo')
        self.logger.info('bar')
        return self.buflist

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertEqual(buflist, ['foo\n', 'bar\n'])

    def test_log_args(self):
        self.logger.info('foo %s %d', 'bar', 2)
        self.assertEqual(self.buflist, ['foo bar 2\n'])
Author: Drakey83, project: steamlink-sdk, lines: 74, source file: metered_stream_unittest.py
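
The fake clock values in test_basic above ([0, 1, 2, 13, 14]) show what "throttled" means: write_throttled_update('baz') arrives only one second after the previous output and is dropped, while 'baz 2' arrives eleven seconds later and is written. Below is a standalone, illustrative stand-in for that behavior with an assumed ten-second interval; MeteredStream's actual throttling constant and internals are not shown in the excerpt.

import time

class ThrottledWriter(object):
    # Illustrative stand-in, not part of webkitpy: emit at most one
    # throttled update per interval, echoing the behavior the test above
    # asserts for MeteredStream.write_throttled_update.
    def __init__(self, stream, interval=10.0, time_fn=time.time):
        self._stream = stream
        self._interval = interval
        self._time_fn = time_fn
        self._last_write = None

    def write(self, msg):
        self._last_write = self._time_fn()
        self._stream.write(msg + '\n')

    def write_throttled_update(self, msg):
        now = self._time_fn()
        if self._last_write is None or now - self._last_write >= self._interval:
            self.write(msg)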

Example 4: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import writeln [as alias]

#......... part of the code omitted here .........

        self._meter.write_throttled_update("%s (%d%%): %d ran as expected, %d didn't, %d left" %
            (action, percent_complete, result_summary.expected,
             result_summary.unexpected, result_summary.remaining))

    def print_unexpected_results(self, unexpected_results):
        """Prints a list of the unexpected results to the buildbot stream."""
        if self.disabled('unexpected-results'):
            return

        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed',
                                         test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                          test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed',
                                         test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._buildbot_stream.write("\n")

        if len(passes):
            for key, tests in passes.iteritems():
                self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s\n" % test)
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
                    % (descriptions[result][1], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._buildbot_stream.write("  %s = %s\n" %
                        (test, " ".join(new_expectations_list)))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write(
                    "Regressions: Unexpected %s : (%d)\n" % (
                    descriptions[result][1], len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s = %s\n" % (test, key))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(unexpected_results['tests']) and self._options.verbose:
            self._buildbot_stream.write("%s\n" % ("-" * 78))

    def print_update(self, msg):
        if self.disabled('updates'):
            return
        self._meter.write_update(msg)

    def write(self, msg, option="misc"):
        if self.disabled(option):
            return
        self._write(msg)

    def _write(self, msg):
        self._meter.writeln(msg)
Author: Moondee, project: Artemis, lines: 104, source file: printing.py

Example 5: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import writeln [as alias]

#......... part of the code omitted here .........
        """Prints a list of the unexpected results to the buildbot stream."""
        if self.disabled('unexpected-results'):
            return

        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed',
                                         test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                          test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed',
                                         test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._buildbot_stream.write("\n")

        if len(passes):
            for key, tests in passes.iteritems():
                self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s\n" % test)
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
                    % (descriptions[result][1], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._buildbot_stream.write("  %s = %s\n" %
                        (test, " ".join(new_expectations_list)))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write(
                    "Regressions: Unexpected %s : (%d)\n" % (
                    descriptions[result][1], len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s = %s\n" % (test, key))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(unexpected_results['tests']) and self._options.verbose:
            self._buildbot_stream.write("%s\n" % ("-" * 78))

    def write_update(self, msg):
        if self.disabled('updates'):
            return
        self._meter.write_update(msg)

    def write(self, msg, option="misc"):
        if self.disabled(option):
            return
        self._write(msg)

    def writeln(self, *args, **kwargs):
        self._meter.writeln(*args, **kwargs)

    def _write(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
Author: kseo, project: webkit, lines: 104, source file: printing.py

Example 6: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import writeln [as alias]
class Printer(object):
    def __init__(self, stream, options=None):
        self.stream = stream
        self.meter = None
        self.options = options
        self.num_tests = 0
        self.num_completed = 0
        self.num_errors = 0
        self.num_failures = 0
        self.running_tests = []
        self.completed_tests = []
        if options:
            self.configure(options)

    def configure(self, options):
        self.options = options

        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)

        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG

        self.meter = MeteredStream(self.stream, (options.verbose == 2))

        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)

        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            #        using syntax like webkitpy.test.__name__.  We want to be
            #        sure not to import any non-Python 2.4 code, though, until
            #        after the version-checking code has executed.
            if (record.name.startswith("webkitpy.common.system.autoinstall") or
                record.name.startswith("webkitpy.test")):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True

        testing_filter = logging.Filter()
        testing_filter.filter = filter_records

        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info("Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)

        if self.options.pass_through:
            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream

    def write_update(self, msg):
        self.meter.write_update(msg)

    def print_started_test(self, source, test_name):
        self.running_tests.append(test_name)
        if len(self.running_tests) > 1:
            suffix = ' (+%d)' % (len(self.running_tests) - 1)
        else:
            suffix = ''

        if self.options.verbose:
            write = self.meter.write_update
        else:
            write = self.meter.write_throttled_update

        write(self._test_line(self.running_tests[0], suffix))

    def print_finished_test(self, source, test_name, test_time, failures, errors):
        write = self.meter.writeln
        if failures:
            lines = failures[0].splitlines() + ['']
            suffix = ' failed:'
            self.num_failures += 1
        elif errors:
            lines = errors[0].splitlines() + ['']
            suffix = ' erred:'
#......... part of the code omitted here .........
Author: EQ4, project: h5vcc, lines: 103, source file: printer.py


Note: The webkitpy.layout_tests.views.metered_stream.MeteredStream.writeln examples in this article were collected from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code, and do not republish without permission.