Changeset 101845 in webkit
- Timestamp: Dec 2, 2011, 1:42:32 PM (12 years ago)
- Location: trunk/Tools
- Files: 10 edited
Legend:
- Unmodified
- Added
- Removed
trunk/Tools/ChangeLog
r101840 r101845 1 2011-12-01 Ryosuke Niwa <rniwa@webkit.org> 2 3 [NRWT] reftest should support having multiple references per test 4 https://bugs.webkit.org/show_bug.cgi?id=73613 5 6 Reviewed by Dirk Pranke. 7 8 Add a support for having multiple reference files for a single test. 9 10 Because a reftest succeeds when it matches at least one of expected matches and fails when it matches 11 at least one of expected mismatches, we compare expected mismatches first in order to minimize 12 the number of reference files to open on DRT. 13 14 * Scripts/webkitpy/layout_tests/controllers/manager.py: 15 (interpret_test_failures): Remove checks no longer applicable. 16 * Scripts/webkitpy/layout_tests/controllers/manager_unittest.py: 17 (ResultSummaryTest.test_interpret_test_failures): Ditto. 18 * Scripts/webkitpy/layout_tests/controllers/single_test_runner.py: 19 (SingleTestRunner.__init__): Remove a bunch of code and just call port.reference_files. 20 (SingleTestRunner._driver_input): 21 (SingleTestRunner.run): 22 (SingleTestRunner._run_reftest): Compare the output of the test to each reference file. 23 * Scripts/webkitpy/layout_tests/models/test_input.py: 24 (TestInput.__init__): Remove ref_file and is_mismatch_reftest because they are no longer used. 25 * Scripts/webkitpy/layout_tests/port/base.py: 26 (Port.reference_files): Renamed from _reference_file_for. Returns a list of expectation, filename pairs. 27 (_parse_reftest_list): Now supports parsing multiple entries for a single test. 28 * Scripts/webkitpy/layout_tests/port/base_unittest.py: 29 (PortTest.test_parse_reftest_list): 30 * Scripts/webkitpy/layout_tests/port/test.py: 31 * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py: 32 (MainTest.test_unexpected_failures): 33 (MainTest.test_reftest_skipped_if_unlisted): Renamed from test_missing_and_unexpected_results. 34 (EndToEndTest.test_end_to_end): 35 (EndToEndTest.test_reftest_with_two_notrefs): Added. 
36 1 37 2011-12-02 Gustavo Noronha Silva <gns@gnome.org> 2 38 -
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
r101667 r101845 90 90 elif isinstance(failure, test_failures.FailureReftestMismatch): 91 91 test_dict['is_reftest'] = True 92 if failure.reference_filename != port.reftest_expected_filename(test_name): 93 test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename) 92 test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename) 94 93 elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur): 95 94 test_dict['is_mismatch_reftest'] = True 96 if failure.reference_filename != port.reftest_expected_mismatch_filename(test_name): 97 test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename) 95 test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename) 98 96 99 97 if test_failures.FailureMissingResult in failure_types: -
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
r101534 r101845 317 317 self.assertTrue('is_reftest' in test_dict) 318 318 self.assertFalse('is_mismatch_reftest' in test_dict) 319 self.assertFalse('ref_file' in test_dict)320 319 321 320 test_dict = interpret_test_failures(self.port, 'foo/reftest.html', … … 329 328 self.assertFalse('is_reftest' in test_dict) 330 329 self.assertTrue(test_dict['is_mismatch_reftest']) 331 self.assertFalse('ref_file' in test_dict)332 330 333 331 test_dict = interpret_test_failures(self.port, 'foo/reftest.html', -
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
r101727 r101845 58 58 59 59 self._is_reftest = False 60 self._is_mismatch_reftest = False 61 self._reference_filename = None 62 63 fs = port._filesystem 64 if test_input.ref_file: 65 self._is_reftest = True 66 self._reference_filename = fs.join(self._port.layout_tests_dir(), test_input.ref_file) 67 self._is_mismatch_reftest = test_input.is_mismatch_reftest 68 return 69 70 reftest_expected_filename = port.reftest_expected_filename(self._test_name) 71 if reftest_expected_filename and fs.exists(reftest_expected_filename): 72 self._is_reftest = True 73 self._reference_filename = reftest_expected_filename 74 75 reftest_expected_mismatch_filename = port.reftest_expected_mismatch_filename(self._test_name) 76 if reftest_expected_mismatch_filename and fs.exists(reftest_expected_mismatch_filename): 77 if self._is_reftest: 78 _log.error('One test file cannot have both match and mismatch references. Please remove either %s or %s', 79 reftest_expected_filename, reftest_expected_mismatch_filename) 80 else: 81 self._is_reftest = True 82 self._is_mismatch_reftest = True 83 self._reference_filename = reftest_expected_mismatch_filename 84 85 if self._is_reftest: 60 self._reference_files = port.reference_files(self._test_name) 61 62 if self._reference_files: 86 63 # Detect and report a test which has a wrong combination of expectation files. 
87 64 # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and … … 90 67 for suffix in ('.txt', '.png', '.wav'): 91 68 expected_filename = self._port.expected_filename(self._test_name, suffix) 92 if fs.exists(expected_filename):93 _log.error(' The reftest (%s) can not have an expectation file (%s).'94 ' Please remove that file.',self._test_name, expected_filename)69 if port.host.filesystem.exists(expected_filename): 70 _log.error('%s is both a reftest and has an expected output file %s.', 71 self._test_name, expected_filename) 95 72 96 73 def _expected_driver_output(self): … … 112 89 if self._should_fetch_expected_checksum(): 113 90 image_hash = self._port.expected_checksum(self._test_name) 114 return DriverInput(self._test_name, self._timeout, image_hash, self._is_reftest)91 return DriverInput(self._test_name, self._timeout, image_hash, bool(self._reference_files)) 115 92 116 93 def run(self): 117 if self._ is_reftest:94 if self._reference_files: 118 95 if self._port.get_option('no_ref_tests') or self._options.new_baseline or self._options.reset_results: 119 96 result = TestResult(self._test_name) … … 283 260 284 261 def _run_reftest(self): 285 driver_output1 = self._driver.run_test(self._driver_input()) 286 reference_test_name = self._port.relative_test_filename(self._reference_filename) 287 driver_output2 = self._driver.run_test(DriverInput(reference_test_name, self._timeout, driver_output1.image_hash, self._is_reftest)) 288 test_result = self._compare_output_with_reference(driver_output1, driver_output2) 289 290 test_result_writer.write_test_result(self._port, self._test_name, driver_output1, driver_output2, test_result.failures) 291 return test_result 292 293 def _compare_output_with_reference(self, driver_output1, driver_output2): 262 test_output = self._driver.run_test(self._driver_input()) 263 total_test_time = 0 264 reference_output = None 265 test_result = None 266 267 # A reftest can have multiple match references and multiple 
mismatch references; 268 # the test fails if any mismatch matches and all of the matches don't match. 269 # To minimize the number of references we have to check, we run all of the mismatches first, 270 # then the matches, and short-circuit out as soon as we can. 271 # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do. 272 273 putAllMismatchBeforeMatch = sorted 274 for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files): 275 reference_test_name = self._port.relative_test_filename(reference_filename) 276 reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, is_reftest=True)) 277 test_result = self._compare_output_with_reference(test_output, reference_output, reference_filename, expectation == '!=') 278 279 if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures): 280 break 281 total_test_time += test_result.test_run_time 282 283 assert(reference_output) 284 test_result_writer.write_test_result(self._port, self._test_name, test_output, reference_output, test_result.failures) 285 return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr) 286 287 def _compare_output_with_reference(self, driver_output1, driver_output2, reference_filename, mismatch): 294 288 total_test_time = driver_output1.test_time + driver_output2.test_time 295 289 has_stderr = driver_output1.has_stderr() or driver_output2.has_stderr() … … 299 293 # Don't continue any more if we already have crash or timeout. 
300 294 return TestResult(self._test_name, failures, total_test_time, has_stderr) 301 failures.extend(self._handle_error(driver_output2, reference_filename= self._reference_filename))295 failures.extend(self._handle_error(driver_output2, reference_filename=reference_filename)) 302 296 if failures: 303 297 return TestResult(self._test_name, failures, total_test_time, has_stderr) … … 305 299 assert(driver_output1.image_hash or driver_output2.image_hash) 306 300 307 if self._is_mismatch_reftest:301 if mismatch: 308 302 if driver_output1.image_hash == driver_output2.image_hash: 309 failures.append(test_failures.FailureReftestMismatchDidNotOccur( self._reference_filename))303 failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename)) 310 304 elif driver_output1.image_hash != driver_output2.image_hash: 311 failures.append(test_failures.FailureReftestMismatch( self._reference_filename))305 failures.append(test_failures.FailureReftestMismatch(reference_filename)) 312 306 return TestResult(self._test_name, failures, total_test_time, has_stderr) -
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
r101739 r101845 47 47 fs = port._filesystem 48 48 test_name = 'failures/unexpected/reftest.html' 49 test_reference_file = fs.join(port.layout_tests_dir(), port.reftest_expected_filename(test_name))49 test_reference_file = fs.join(port.layout_tests_dir(), 'failures/unexpected/reftest-expected.html') 50 50 driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1') 51 51 driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2') -
trunk/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
r99651 r101845 37 37 is_mismatch_reftest = None 38 38 39 def __init__(self, test_name, timeout , ref_file=None, is_mismatch_reftest=False):39 def __init__(self, test_name, timeout): 40 40 """Holds the input parameters for a test. 41 41 Args: … … 47 47 self.test_name = test_name 48 48 self.timeout = timeout 49 if ref_file:50 self.ref_file = ref_file51 self.is_mismatch_reftest = is_mismatch_reftest52 49 53 50 def __repr__(self): -
trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py
r101819 r101845 440 440 reftest_list_file = filesystem.read_text_file(reftest_list_path) 441 441 442 parsed_list = dict()442 parsed_list = {} 443 443 for line in reftest_list_file.split('\n'): 444 444 line = re.sub('#.+$', '', line) … … 447 447 continue 448 448 expectation_type, test_file, ref_file = split_line 449 parsed_list [filesystem.join(test_dirpath, test_file)] = (expectation_type, filesystem.join(test_dirpath, ref_file))449 parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file))) 450 450 return parsed_list 451 451 452 def _reference_file_for(self, test_name, expectation): 452 def reference_files(self, test_name): 453 """Return a list of expectation (== or !=) and filename pairs""" 454 453 455 reftest_list = self._get_reftest_list(test_name) 454 456 if not reftest_list: 455 if expectation == '==': 456 return self.expected_filename(test_name, '.html') 457 else: 458 return self.expected_filename(test_name, '-mismatch.html') 459 460 filename = self._filesystem.join(self.layout_tests_dir(), test_name) 461 if filename not in reftest_list or reftest_list[filename][0] != expectation: 462 return None 463 return reftest_list[filename][1] 457 expected_filenames = [('==', self.expected_filename(test_name, '.html')), ('!=', self.expected_filename(test_name, '-mismatch.html'))] 458 return [(expectation, filename) for expectation, filename in expected_filenames if self._filesystem.exists(filename)] 459 460 return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) 464 461 465 462 def is_reftest(self, test_name): … … 470 467 filename = self._filesystem.join(self.layout_tests_dir(), test_name) 471 468 return filename in reftest_list 472 473 def reftest_expected_filename(self, test_name):474 """Return the filename of reference we expect the test matches."""475 return self._reference_file_for(test_name, '==')476 477 def reftest_expected_mismatch_filename(self, 
test_name):478 """Return the filename of reference we don't expect the test matches."""479 return self._reference_file_for(test_name, '!=')480 469 481 470 def test_to_uri(self, test_name): -
trunk/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
r101819 r101845 329 329 "# some comment", 330 330 "!= test-2.html test-notref.html # more comments", 331 "== test-3.html test-ref.html"]) 331 "== test-3.html test-ref.html", 332 "== test-3.html test-ref2.html", 333 "!= test-3.html test-notref.html"]) 332 334 333 335 reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar') 334 self.assertEqual(reftest_list, {'bar/test.html': ('==', 'bar/test-ref.html'),335 'bar/test-2.html': ('!=', 'bar/test-notref.html'),336 'bar/test-3.html': ('==', 'bar/test-ref.html')})336 self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')], 337 'bar/test-2.html': [('!=', 'bar/test-notref.html')], 338 'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]}) 337 339 338 340 -
trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py
r101739 r101845 191 191 tests.add('reftests/foo/test-ref.html') 192 192 193 tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc') 194 tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc') 195 tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc') 196 tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc') 197 tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc') 198 tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc') 199 200 tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc') 201 tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def') 202 tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi') 203 193 204 # The following files shouldn't be treated as reftests 194 205 tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True) … … 267 278 add_file(files, 'reftests/foo/reftest.list', """ 268 279 == test.html test-ref.html 280 281 == multiple-match-success.html mismatching-ref.html 282 == multiple-match-success.html matching-ref.html 283 == multiple-match-failure.html mismatching-ref.html 284 == multiple-match-failure.html second-mismatching-ref.html 285 != multiple-mismatch-success.html mismatching-ref.html 286 != multiple-mismatch-success.html second-mismatching-ref.html 287 != multiple-mismatch-failure.html mismatching-ref.html 288 != multiple-mismatch-failure.html matching-ref.html 289 == multiple-both-success.html matching-ref.html 290 == multiple-both-success.html mismatching-ref.html 291 != multiple-both-success.html second-mismatching-ref.html 292 == multiple-both-failure.html matching-ref.html 293 != multiple-both-failure.html 
second-mismatching-ref.html 294 != multiple-both-failure.html matching-ref.html 269 295 """) 270 296 -
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
r101757 r101845 186 186 187 187 188 # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test 189 # FIXME: It's nice to have a routine in port/test.py that returns this number. 190 unexpected_tests_count = 11 191 192 188 193 class MainTest(unittest.TestCase): 189 194 def test_accelerated_compositing(self): … … 426 431 self._url_opened = None 427 432 res, out, err, user = logging_run(tests_included=True) 428 429 # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test430 # FIXME: It's nice to have a routine in port/test.py that returns this number.431 unexpected_tests_count = 8432 433 433 434 self.assertEqual(res, unexpected_tests_count) … … 455 456 self.assertTrue(json_string.find('"num_missing":1') != -1) 456 457 457 def test_missing_and_unexpected_results(self):458 # Test that we update expectations in place. If the expectation459 # is missing, update the expected generic location.460 fs = unit_test_filesystem()461 res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, filesystem=fs, record_results=True)462 file_list = fs.written_files.keys()463 file_list.remove('/tmp/layout-test-results/tests_run0.txt')464 self.assertEquals(res, 1)465 json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')466 self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)467 self.assertTrue(json_string.find('"num_regressions":1') != -1)468 self.assertTrue(json_string.find('"num_flaky":0') != -1)469 self.assertTrue(json_string.find('"num_missing":1') != -1)470 471 458 def test_missing_and_unexpected_results_with_custom_exit_code(self): 472 459 # Test that we update expectations in place. 
If the expectation … … 736 723 self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run) 737 724 725 def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self): 726 fs = unit_test_filesystem() 727 res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, filesystem=fs, record_results=True) 728 file_list = fs.written_files.keys() 729 file_list.remove('/tmp/layout-test-results/tests_run0.txt') 730 json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json') 731 self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1) 732 self.assertTrue(json_string.find('"num_regressions":4') != -1) 733 self.assertTrue(json_string.find('"num_flaky":0') != -1) 734 self.assertTrue(json_string.find('"num_missing":1') != -1) 735 738 736 def test_additional_platform_directory(self): 739 737 self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo'])) … … 779 777 res, out, err, user = logging_run(record_results=True, tests_included=True, filesystem=fs) 780 778 781 # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test782 # FIXME: It's nice to have a routine in port/test.py that returns this number.783 unexpected_tests_count = 8784 785 779 self.assertEquals(res, unexpected_tests_count) 786 780 results = self.parse_full_results(fs.files['/tmp/layout-test-results/full_results.json']) … … 791 785 # Check that we attempted to display the results page in a browser. 792 786 self.assertTrue(user.opened_urls) 787 788 def test_reftest_with_two_notrefs(self): 789 # Test that we update expectations in place. If the expectation 790 # is missing, update the expected generic location. 
791 fs = unit_test_filesystem() 792 res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, filesystem=fs, record_results=True) 793 file_list = fs.written_files.keys() 794 file_list.remove('/tmp/layout-test-results/tests_run0.txt') 795 json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json') 796 json = self.parse_full_results(json_string) 797 self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"]) 798 self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"]) 799 self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"]) 800 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"], 801 {"expected": "PASS", "ref_file": "reftests/foo/second-mismatching-ref.html", "actual": "IMAGE", 'is_reftest': True}) 802 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"], 803 {"expected": "PASS", "ref_file": "reftests/foo/matching-ref.html", "actual": "IMAGE", "is_mismatch_reftest": True}) 804 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"], 805 {"expected": "PASS", "ref_file": "reftests/foo/matching-ref.html", "actual": "IMAGE", "is_mismatch_reftest": True}) 806 793 807 794 808 class RebaselineTest(unittest.TestCase):
Note: See TracChangeset for help on using the changeset viewer.