

Python MeteredStream.flush Method Code Examples

This article collects typical usage examples of the Python method webkitpy.layout_tests.views.metered_stream.MeteredStream.flush. If you have been wondering what MeteredStream.flush does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples from the enclosing class, webkitpy.layout_tests.views.metered_stream.MeteredStream.


Three code examples of the MeteredStream.flush method are shown below, ordered by popularity by default.
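In all three examples, Printer.flush is a thin wrapper that delegates to MeteredStream.flush, forcing any pending output through to the wrapped stream. The following minimal sketch shows the typical call pattern in isolation; the constructor arguments are an assumption here, since their names and defaults differ between WebKit and Chromium revisions of metered_stream.py.

import sys
from webkitpy.layout_tests.views.metered_stream import MeteredStream

# Assumed constructor invocation -- see the note above.
meter = MeteredStream(stream=sys.stdout, verbose=False)

meter.write_update('running fast/css/a.html')            # transient status line
meter.write_throttled_update('running fast/css/b.html')  # rate-limited transient update
meter.writeln('fast/css/a.html passed')                  # permanent line
meter.flush()                                            # push pending output to sys.stdout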

Example 1: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import flush [as alias]

#......... part of the code omitted here .........

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures, expected,
                                              self._options.timing, result.test_run_time)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif self._options.verbose or not expected:
            self.writeln(self._test_status_line(test_name, result_message))
        elif self.num_completed == self.num_tests:
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, timing, test_run_time):
        exp_string = ' unexpectedly' if not expected else ''
        timing_string = ' %.4fs' % test_run_time if timing else ''
        if result_type == test_expectations.PASS:
            return ' passed%s%s' % (exp_string, timing_string)
        else:
            return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = ' '.join(self._port.lookup_virtual_test_args(test_name))
            reference_args = ' '.join(self._port.lookup_virtual_reference_args(test_name))
            self._print_default(' base: %s' % base)
            self._print_default(' args: %s' % args)
            self._print_default(' reference_args: %s' % reference_args)

        references = self._port.reference_files(test_name)
        if references:
            for _, filename in references:
                self._print_default('  ref: %s' % self._port.relative_test_filename(filename))
        else:
            for extension in ('.txt', '.png', '.wav'):
                self._print_baseline(test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_throttled_update(self, msg):
        self._meter.write_throttled_update(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
Author: dreifachstein, Project: chromium-src, Lines: 104, Source: printing.py
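The Printer above touches only four methods of its meter: write_update, write_throttled_update, writeln, and flush. A hypothetical stand-in like the one below (not part of webkitpy) makes that contract explicit and could back the Printer in a unit test; the throttle interval and the carriage-return trick are illustrative assumptions, not the real MeteredStream implementation.

import sys
import time

class FakeMeteredStream(object):
    """Hypothetical minimal stand-in for MeteredStream (illustration only)."""

    def __init__(self, stream=sys.stdout, throttle_secs=0.066):
        self._stream = stream
        self._throttle_secs = throttle_secs
        self._last_update_time = 0

    def write_update(self, msg):
        # The real class erases the previous transient line; this sketch
        # just rewinds with a carriage return.
        self._stream.write(msg + '\r')

    def write_throttled_update(self, msg):
        # Drop updates that arrive faster than the throttle interval.
        now = time.time()
        if now - self._last_update_time >= self._throttle_secs:
            self._last_update_time = now
            self.write_update(msg)

    def writeln(self, msg):
        self._stream.write(msg + '\n')

    def flush(self):
        self._stream.flush()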

Example 2: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import flush [as alias]

#......... part of the code omitted here .........
        self._print_default("")

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = "<none>"
        self._print_default("  %s: %s" % (extension[1:], relpath))

    def _print_unexpected_results(self, unexpected_results):
        # Prints to the buildbot stream
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results["actual"].split(" ")
            expected = results["expected"].split(" ")
            if actual == ["PASS"]:
                if "CRASH" in expected:
                    add_to_dict_of_lists(passes, "Expected to crash, but passed", test)
                elif "TIMEOUT" in expected:
                    add_to_dict_of_lists(passes, "Expected to timeout, but passed", test)
                else:
                    add_to_dict_of_lists(passes, "Expected to fail, but passed", test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results["actual"], test)

        resultsjsonparser.for_each_test(unexpected_results["tests"], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print_for_bot("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print_for_bot("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s" % test)
                self._print_for_bot("")
            self._print_for_bot("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][0], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results["tests"], test)
                    actual = result["actual"].split(" ")
                    expected = result["expected"].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._print_for_bot("  %s = %s" % (test, " ".join(new_expectations_list)))
                self._print_for_bot("")
            self._print_for_bot("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][0], len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s = %s" % (test, key))
                self._print_for_bot("")

        if len(unexpected_results["tests"]) and self._options.debug_rwt_logging:
            self._print_for_bot("%s" % ("-" * 78))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def _print_for_bot(self, msg):
        self._buildbot_stream.write(msg + "\n")

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
Author:, Project:, Lines: 104, Source:
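The add_result helper in Example 2 expects each test entry to carry space-separated 'actual' and 'expected' fields, nested directory-by-directory in the style of full_results.json. The hand-built input below is invented for illustration (the nesting and field names are assumptions based on the code above), with the grouping that add_result would produce noted in comments.

# Invented sample input; resultsjsonparser.for_each_test() walks the nested
# dict and calls add_result(test_name, leaf) for every leaf carrying results.
unexpected_results = {
    'tests': {
        'fast': {
            'css': {
                'a.html': {'actual': 'PASS', 'expected': 'CRASH'},
                'b.html': {'actual': 'TEXT PASS', 'expected': 'PASS'},
                'c.html': {'actual': 'TIMEOUT', 'expected': 'PASS'},
            },
        },
    },
}

# Expected grouping after add_result has seen all three leaves:
#   passes      -> {'Expected to crash, but passed': ['fast/css/a.html']}
#   flaky       -> {'TEXT': ['fast/css/b.html']}    (keyed by the first actual result)
#   regressions -> {'TIMEOUT': ['fast/css/c.html']}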

Example 3: Printer

# Required import: from webkitpy.layout_tests.views.metered_stream import MeteredStream [as alias]
# Or: from webkitpy.layout_tests.views.metered_stream.MeteredStream import flush [as alias]

#......... part of the code omitted here .........
        """Prints a list of the unexpected results to the buildbot stream."""
        if self.disabled('unexpected-results'):
            return

        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed',
                                         test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                         test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed',
                                         test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._buildbot_stream.write("\n")

        if len(passes):
            for key, tests in passes.iteritems():
                self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s\n" % test)
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
                    % (descriptions[result][1], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._buildbot_stream.write("  %s = %s\n" %
                        (test, " ".join(new_expectations_list)))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write(
                    "Regressions: Unexpected %s : (%d)\n" % (
                    descriptions[result][1], len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s = %s\n" % (test, key))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(unexpected_results['tests']) and self._options.verbose:
            self._buildbot_stream.write("%s\n" % ("-" * 78))

    def write_update(self, msg):
        if self.disabled('updates'):
            return
        self._meter.write_update(msg)

    def write(self, msg, option="misc"):
        if self.disabled(option):
            return
        self._write(msg)

    def writeln(self, *args, **kwargs):
        self._meter.writeln(*args, **kwargs)

    def _write(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
Author: kseo, Project: webkit, Lines: 104, Source: printing.py
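Example 3 differs from the first two mainly in that every write is gated on self.disabled(option), so callers can silence whole output categories ('updates', 'unexpected-results', 'misc'). The method body falls in the omitted part of the class, so the sketch below is only a plausible reconstruction under the assumption that enabled categories are tracked in a set; the attribute name _enabled_options is hypothetical.

    def disabled(self, option):
        # Hypothetical sketch: an output category is suppressed unless it was
        # selected via the printing options (e.g. a --print command-line flag).
        return option not in self._enabled_options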


Note: The webkitpy.layout_tests.views.metered_stream.MeteredStream.flush examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from community open-source projects, and copyright in the source code remains with the original authors; consult the corresponding project's License before redistributing or reusing it. Do not republish without permission.