Changeset 161171 in webkit
- Timestamp: Dec 30, 2013, 10:32:59 PM
- Location: trunk/Tools
- Files: 4 edited
trunk/Tools/ChangeLog
(r161156 → r161171)

+2013-12-30  Alexey Proskuryakov  <ap@apple.com>
+
+        full_results.json should distinguish unexpected failures from expected ones
+        https://bugs.webkit.org/show_bug.cgi?id=126300
+
+        Reviewed by Timothy Hatcher.
+
+        * Scripts/webkitpy/layout_tests/models/test_run_results.py:
+        (summarize_results): Add "report" element to JSON, which tells the consumer how
+        this result was counted for summary.
+
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+        Updated results to include the new element.
+
+        * Scripts/webkitpy/layout_tests/views/buildbot_results.py:
+        (print_unexpected_results): Added a comment pointing to another place that
+        summarizes results, and should stay in sync.
+
 2013-12-30  Ryuan Choi  <ryuan.choi@samsung.com>
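The new "report" key tells a consumer of full_results.json whether an unexpected result was counted as a REGRESSION, MISSING, or FLAKY. As an illustration only (not part of this changeset), the sketch below walks the nested "tests" dictionary and collects the regressions; the result path and the nested layout are assumptions taken from the tokens asserted in run_webkit_tests_integrationtest.py.

```python
# A minimal consumer sketch (not webkitpy code): list the tests whose unexpected
# result was counted as a regression, using the new "report" key.
import json

def unexpected_regressions(path='/tmp/layout-test-results/full_results.json'):
    with open(path) as f:
        results = json.load(f)

    regressions = []

    def walk(prefix, node):
        if 'actual' in node:                      # leaf entry for a single test
            # 'report' is only written for results that were unexpected.
            if node.get('report') == 'REGRESSION':
                regressions.append(prefix)
            return
        for name, child in node.items():          # directory node: recurse
            if isinstance(child, dict):
                walk(prefix + '/' + name if prefix else name, child)

    walk('', results.get('tests', {}))
    return regressions
```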
trunk/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
(r160271 → r161171)

     return test_dict


+# These results must match ones in print_unexpected_results() in views/buildbot_results.py.
 def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=False, include_time_and_modifiers=False):
     """Returns a dictionary containing a summary of the test runs, with the following fields:
…
             if test_name in initial_results.unexpected_results_by_name:
                 num_regressions += 1
+                test_dict['report'] = 'REGRESSION'
         elif result_type == test_expectations.MISSING:
             if test_name in initial_results.unexpected_results_by_name:
                 num_missing += 1
+                test_dict['report'] = 'MISSING'
         elif test_name in initial_results.unexpected_results_by_name:
             if retry_results and test_name not in retry_results.unexpected_results_by_name:
                 actual.extend(expectations.model().get_expectations_string(test_name).split(" "))
                 num_flaky += 1
+                test_dict['report'] = 'FLAKY'
             elif retry_results:
                 retry_result_type = retry_results.unexpected_results_by_name[test_name].type
…
                     if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
                         num_regressions += 1
+                        test_dict['report'] = 'REGRESSION'
                     else:
                         num_flaky += 1
+                        test_dict['report'] = 'FLAKY'
                         actual.append(keywords[retry_result_type])
                 else:
                     num_regressions += 1
+                    test_dict['report'] = 'REGRESSION'
             else:
                 num_regressions += 1
+                test_dict['report'] = 'REGRESSION'

         test_dict['expected'] = expected
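For orientation, the decision tree above reduces roughly to the sketch below. The helper name is hypothetical, and the simplification ignores MISSING results and the case where the retry fails in a different way, which the real code can count as FLAKY.

```python
# A deliberately simplified restatement of the branching above (assumed helper,
# not webkitpy code): how an unexpected result maps to a 'report' value.
def classify(unexpected_initially, retried, still_unexpected_on_retry):
    if not unexpected_initially:
        return None          # expected result: no 'report' key is written
    if retried and not still_unexpected_on_retry:
        return 'FLAKY'       # failed at first, but the retry matched expectations
    return 'REGRESSION'      # failed, and the retry (if any) failed the same way
```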
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
(r159675 → r161171)

         file_list = host.filesystem.written_files.keys()
         self.assertEqual(details.exit_code, 1)
-        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
+        expected_token = '"unexpected":{"text-image-checksum.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         self.assertTrue(json_string.find(expected_token) != -1)
…
         self.assertEqual(details.exit_code, 1)
-        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
+        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         self.assertTrue(json_string.find(expected_token) != -1)
…
         host = MockHost()
         _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
-        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
+        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"report":"REGRESSION","expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

     def test_no_image_failure_with_image_diff(self):
…
         json = parse_full_results(json_string)
         self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
-            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1})
+            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
         self.assertFalse(json["pixel_tests_enabled"])
         self.assertEqual(details.enabled_pixel_tests_in_retry, True)
…
         _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
-        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
+        self.assertTrue(json_string.find('"unlistedtest.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
         self.assertTrue(json_string.find('"num_regressions":4') != -1)
         self.assertTrue(json_string.find('"num_flaky":0') != -1)
…
         self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
         self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
-            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1})
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "report": "REGRESSION"})
         self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
-            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="]})
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "report": "REGRESSION"})
         self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
-            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="]})
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "report": "REGRESSION"})
trunk/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
(r146817 → r161171)

         self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))

+    # These results must match ones in summarize_results() in models/test_run_results.py.
     def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
         passes = {}
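The cross-reference comments added here and in summarize_results() encode an invariant: both places must bucket unexpected results the same way. A hedged sketch of that invariant (assumed helper, not webkitpy code; num_missing is assumed to sit in the summary alongside the num_regressions and num_flaky keys the tests assert on):

```python
# Illustrative check of the invariant the new comments describe: the per-test
# "report" values in a summarized results dictionary should agree with its
# num_* counters.
from collections import Counter

def check_report_counters(summary):
    counts = Counter()

    def walk(node):
        if 'actual' in node:               # leaf: a single test result
            if 'report' in node:           # only unexpected results carry 'report'
                counts[node['report']] += 1
            return
        for child in node.values():        # directory node: recurse
            if isinstance(child, dict):
                walk(child)

    walk(summary.get('tests', {}))
    assert counts['REGRESSION'] == summary.get('num_regressions', 0)
    assert counts['FLAKY'] == summary.get('num_flaky', 0)
    assert counts['MISSING'] == summary.get('num_missing', 0)
```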