This article collects typical usage examples of the Python method webkitpy.layout_tests.models.test_expectations.TestExpectations.result_was_expected. If you are unsure what TestExpectations.result_was_expected does or how to use it, the curated examples below may help; you can also explore the containing class, webkitpy.layout_tests.models.test_expectations.TestExpectations.
Three code examples of TestExpectations.result_was_expected are shown below, ordered by popularity.
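Judging from the calls in the examples below, result_was_expected is used as a static method: it takes an actual result token, a collection of expected result tokens, and a test_needs_rebaselining flag, and returns whether the actual result is covered by the expectations. A minimal calling sketch, assuming the module-level result tokens PASS and TEXT from test_expectations (the expectation set itself is made up for illustration):

from webkitpy.layout_tests.models.test_expectations import TestExpectations, PASS, TEXT

# Hypothetical single-test scenario: we expected only a PASS,
# but the run produced a text failure.
expected_results = set([PASS])
actual_result = TEXT

# result_was_expected returns True only when the actual result
# is covered by the given expectations.
if not TestExpectations.result_was_expected(actual_result, expected_results,
                                            test_needs_rebaselining=False):
    print('unexpected result')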
Example 1: did_run_as_expected
# Required import: from webkitpy.layout_tests.models.test_expectations import TestExpectations [as alias]
# Or: from webkitpy.layout_tests.models.test_expectations.TestExpectations import result_was_expected [as alias]
def did_run_as_expected(self):
    actual_results = self._actual_as_tokens()
    expected_results = self._expected_as_tokens()
    # FIXME: We should only call remove_pixel_failures when this JSONResult
    # came from a test run without pixel tests!
    if not TestExpectations.has_pixel_failures(actual_results):
        expected_results = TestExpectations.remove_pixel_failures(expected_results)
    for actual_result in actual_results:
        if not TestExpectations.result_was_expected(actual_result, expected_results, False):
            return False
    return True
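The check above is all-or-nothing: a single unexpected token makes the whole run unexpected. The same logic as a standalone sketch, with plain strings standing in for webkitpy's integer result tokens (the token names are illustrative):

# Standalone sketch of the all-results-must-be-expected loop above.
def did_run_as_expected(actual_results, expected_results):
    # One unexpected result is enough to flag the whole run.
    return all(result in expected_results for result in actual_results)

print(did_run_as_expected(['TEXT', 'PASS'], {'PASS'}))          # False
print(did_run_as_expected(['TEXT', 'PASS'], {'PASS', 'TEXT'}))  # True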
Example 2: _flaky_types_in_results
# Required import: from webkitpy.layout_tests.models.test_expectations import TestExpectations [as alias]
# Or: from webkitpy.layout_tests.models.test_expectations.TestExpectations import result_was_expected [as alias]
def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
    flaky_results = set()
    # Always include pass as an expected result. Passes will never turn the bot red.
    # This fixes cases where the expectations have an implicit Pass, e.g. [ Slow ].
    latest_expectations = [PASS]
    if self.results_json.EXPECTATIONS_KEY in results_entry:
        expectations_list = results_entry[self.results_json.EXPECTATIONS_KEY].split(' ')
        latest_expectations += [self._result_to_enum(expectation) for expectation in expectations_list]

    for result_item in results_entry[self.results_json.RESULTS_KEY]:
        _, result_types_str = self.results_json.occurances_and_type_from_result_item(result_item)

        result_types = []
        for result_type in result_types_str:
            # TODO(ojan): Remove this if-statement once crbug.com/514378 is fixed.
            if result_type not in self.NON_RESULT_TYPES:
                result_types.append(self.results_json.expectation_for_type(result_type))

        # It didn't flake if it didn't retry.
        if len(result_types) <= 1:
            continue

        # If the test ran as expected after only one retry, it's not very flaky.
        # It's only very flaky if it failed the first run and the first retry
        # and then ran as expected in one of the subsequent retries.
        # If there are only two entries, then that means it failed on the first
        # try and ran as expected on the second, because otherwise we'd have
        # a third entry from the next try.
        if only_ignore_very_flaky and len(result_types) == 2:
            continue

        has_unexpected_results = False
        for result_type in result_types:
            result_enum = self._result_to_enum(result_type)
            # TODO(ojan): We really should be grabbing the expected results from the time
            # of the run instead of looking at the latest expected results. That's a lot
            # more complicated though. So far we've been looking at the aggregated
            # results_small.json off test_results.appspot, which has all the information
            # for the last 100 runs. In order to do this, we'd need to look at the
            # individual runs' full_results.json, which would be slow and more complicated.
            # The only thing we lose by not fixing this is that a test that was flaky
            # and got fixed will still get printed out until 100 runs have passed.
            if not TestExpectations.result_was_expected(result_enum, latest_expectations, test_needs_rebaselining=False):
                has_unexpected_results = True
                break
        if has_unexpected_results:
            flaky_results = flaky_results.union(set(result_types))

    return flaky_results
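The retry bookkeeping reduces to a small decision rule: zero or one distinct result types means the test never flaked, and exactly two types means it recovered on the first retry, which the only_ignore_very_flaky mode treats as not flaky enough to report. A standalone sketch of that rule (the function name and inputs are illustrative, not webkitpy API):

# Standalone sketch of the retry-based flakiness rule used above.
def is_flaky_enough_to_report(result_types, only_ignore_very_flaky):
    # A single recorded result type means the test never retried.
    if len(result_types) <= 1:
        return False
    # Exactly two types: failed once, then ran as expected on the first
    # retry. The very-flaky mode ignores this mild case.
    if only_ignore_very_flaky and len(result_types) == 2:
        return False
    return True

print(is_flaky_enough_to_report(['PASS'], True))                  # False
print(is_flaky_enough_to_report(['TEXT', 'PASS'], True))          # False
print(is_flaky_enough_to_report(['TEXT', 'TEXT', 'PASS'], True))  # True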
Example 3: unexpected_results_by_path
# Required import: from webkitpy.layout_tests.models.test_expectations import TestExpectations [as alias]
# Or: from webkitpy.layout_tests.models.test_expectations.TestExpectations import result_was_expected [as alias]
def unexpected_results_by_path(self):
    """For tests with unexpected results, returns original expectations + results."""
    def exp_to_string(exp):
        return (TestExpectations.EXPECTATIONS_TO_STRING.get(exp, None) or
                TestExpectations.MODIFIERS_TO_STRING.get(exp, None)).upper()

    def string_to_exp(string):
        # Needs a bit more logic than the method above,
        # since a PASS is 0 and evaluates to False.
        result = TestExpectations.EXPECTATIONS.get(string.lower(), None)
        if result is not None:
            return result
        result = TestExpectations.MODIFIERS.get(string.lower(), None)
        if result is not None:
            return result
        raise ValueError(string)

    unexpected_results_by_path = {}
    for test_path, entry in self.results_json.walk_results():
        # Expectations for this test. No expectation defaults to PASS.
        exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, u'PASS')
        # All run-length-encoded results for this test.
        results_dict = entry.get(self.results_json.RESULTS_KEY, {})
        # Set of expectations for this test.
        expectations = set(map(string_to_exp, exp_string.split(' ')))
        # Set of distinct results for this test.
        result_types = self._flaky_types_in_results(results_dict)
        # Distinct results as non-encoded strings.
        result_strings = map(self.results_json.expectation_for_type, result_types)
        # Distinct resulting expectations.
        result_exp = map(string_to_exp, result_strings)

        expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)
        additional_expectations = set(e for e in result_exp if not expected(e))

        # Test did not have unexpected results.
        if not additional_expectations:
            continue

        expectations.update(additional_expectations)
        unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))

    return unexpected_results_by_path
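For reference, the return value maps each test path with unexpected results to the sorted union of its original and observed expectations, as uppercase strings. A hypothetical result (paths and expectations made up) might look like:

{
    'fast/css/example-test.html': ['CRASH', 'PASS'],
    'fast/dom/another-test.html': ['PASS', 'TEXT', 'TIMEOUT'],
}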