Changeset 106784 in webkit
- Timestamp:
- Feb 6, 2012 1:07:20 AM (12 years ago)
- Location:
- trunk/Tools
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Tools/ChangeLog
trunk/Tools/ChangeLog (r106761 → r106784)

+ 2012-02-06  Sergio Villar Senin  <svillar@igalia.com>
+
+         Incorrect statistics shown when running run-webkit-tests with --repeat-each or --iterations
+         https://bugs.webkit.org/show_bug.cgi?id=77672
+
+         Reviewed by Dirk Pranke.
+
+         Test repetitions must be taken into account when working out
+         the statistics shown by run-webkit-tests.
+
+         * Scripts/webkitpy/layout_tests/controllers/manager.py:
+         (Manager.prepare_lists_and_print_output):
+         (Manager._print_result_summary):
+         * Scripts/webkitpy/layout_tests/models/result_summary.py:
+         (ResultSummary.__init__):
+         (ResultSummary.add):
+         * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+         (MainTest.test_repeat_each_iterations_num_tests):
+
  2012-02-05  Dan Bernstein  <mitz@apple.com>
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (r106035 → r106784)

@@ lines 504-510 @@
          self._test_files_list = self._test_files_list * self._options.iterations

-         result_summary = ResultSummary(self._expectations, self._test_files | skipped)
+         iterations = \
+             (self._options.repeat_each if self._options.repeat_each else 1) * \
+             (self._options.iterations if self._options.iterations else 1)
+         result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)
          self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
          self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")

@@ lines 519-530 @@
              result = test_results.TestResult(test)
              result.type = test_expectations.SKIP
-             result_summary.add(result, expected=True)
+             iterations = \
+                 (self._options.repeat_each if self._options.repeat_each else 1) * \
+                 (self._options.iterations if self._options.iterations else 1)
+             for iteration in range(iterations):
+                 result_summary.add(result, expected=True)
          self._printer.print_expected('')

@@ lines 1321-1333 @@
              result_summary: information to log
          """
-         failed = len(result_summary.failures)
-         skipped = len(
-             result_summary.tests_by_expectation[test_expectations.SKIP])
+         failed = result_summary.total_failures
+         skipped = result_summary.total_tests_by_expectation[test_expectations.SKIP]
          total = result_summary.total
          passed = total - failed - skipped
trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py (r90538 → r106784)

@@ lines 32-66 @@
  class ResultSummary(object):
-     def __init__(self, expectations, test_files):
-         self.total = len(test_files)
+     def __init__(self, expectations, test_files, iterations=1):
+         self.total = len(test_files) * iterations
          self.remaining = self.total
          self.expectations = expectations
          ...
          self.unexpected_crashes = 0
          self.unexpected_timeouts = 0
+         self.total_tests_by_expectation = {}
          self.tests_by_expectation = {}
          self.tests_by_timeline = {}
          ...
          self.unexpected_results = {}
          self.failures = {}
+         self.total_failures = 0
+         self.total_tests_by_expectation[SKIP] = 0
          self.tests_by_expectation[SKIP] = set()
          for expectation in TestExpectations.EXPECTATIONS.values():
              self.tests_by_expectation[expectation] = set()
+             self.total_tests_by_expectation[expectation] = 0
          for timeline in TestExpectations.TIMELINES.values():
              self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)

      def add(self, test_result, expected):
+         self.total_tests_by_expectation[test_result.type] += 1
          self.tests_by_expectation[test_result.type].add(test_result.test_name)
          self.results[test_result.test_name] = test_result
          self.remaining -= 1
          if len(test_result.failures):
+             self.total_failures += 1
              self.failures[test_result.test_name] = test_result.failures
          if expected:
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (r106416 → r106784)

@@ lines 391-406 @@
          self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

+     def test_repeat_each_iterations_num_tests(self):
+         # The total number of tests should be: number_of_tests *
+         # repeat_each * iterations
+         host = MockHost()
+         res, out, err, _ = logging_run(['--iterations', '2',
+                                         '--repeat-each', '4',
+                                         '--print', 'everything',
+                                         'passes/text.html', 'failures/expected/text.html'],
+                                        tests_included=True, host=host, record_results=True)
+         self.assertTrue("=> Results: 8/16 tests passed (50.0%)\n" in out.get())
+         self.assertTrue(err.get()[-2] == "All 16 tests ran as expected.\n")
+
      def test_run_chunk(self):
          # Test that we actually select the right chunk
Note: See TracChangeset for help on using the changeset viewer.