Changeset 57934 in WebKit
- Timestamp: Apr 20, 2010 3:43:51 PM
- Location: trunk/WebKitTools
- Files: 4 edited

trunk/WebKitTools/ChangeLog
(r57920 → r57934)

+2010-04-20  Dirk Pranke  <dpranke@chromium.org>
+
+        This patch to new-run-webkit-tests adds a --log 'trace' option
+        that prints out detailed info about a given test as it executes
+        (where the baselines are, what the expectation is, what we got,
+        how long it took).
+
+        https://bugs.webkit.org/show_bug.cgi?id=37726
+
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+          - use the newly exposed TestResult class and implement
+            --log trace
+        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+          - rename TestStats to TestResult and make it more public, resulting
+            in cleaner code
+        * Scripts/webkitpy/layout_tests/layout_package/test_expectations.py:
+          - add expectation_to_string() as a separate callable function
+
 2010-04-20  Eric Seidel  <eric@webkit.org>
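For a sense of what the new option produces, here is a hypothetical invocation and the trace output it would generate. The test name, baseline paths, and timing are made up for illustration; the line format follows _print_test_trace() in run_webkit_tests.py below:

    new-run-webkit-tests --log trace fast/example/sample-test.html

    trace: fast/example/sample-test.html
      txt: fast/example/sample-test-expected.txt
      png: <none>
      exp: PASS
      got: PASS
     took: 0.123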
trunk/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
(r57920 → r57934)

 …
         output_dir, crash, timeout, test_run_time, actual_checksum,
         output, error):
-    """Receives the output from a DumpRenderTree process, subjects it to a number
-    of tests, and returns a list of failure types the test produced.
+    """Receives the output from a DumpRenderTree process, subjects it to a
+    number of tests, and returns a list of failure types the test produced.

     Args:
 …
       output_dir: directory to put crash stack traces into

-    Returns: a list of failure objects and times for the test being processed
+    Returns: a TestResult object
     """
     failures = []
 …
     total_time_for_all_diffs = time.time() - start_diff_time
-    return TestStats(test_info.filename, failures, test_run_time,
-                     total_time_for_all_diffs, time_for_diffs)
-
-
-class TestStats:
+    return TestResult(test_info.filename, failures, test_run_time,
+                      total_time_for_all_diffs, time_for_diffs)
+
+
+class TestResult(object):

     def __init__(self, filename, failures, test_run_time,
                  total_time_for_all_diffs, time_for_diffs):
+        self.failures = failures
         self.filename = filename
-        self.failures = failures
         self.test_run_time = test_run_time
+        self.time_for_diffs = time_for_diffs
         self.total_time_for_all_diffs = total_time_for_all_diffs
-        self.time_for_diffs = time_for_diffs
+        self.type = test_failures.determine_result_type(failures)

 …
             test_info.image_hash)
         end = time.time()
-        self._test_stats = process_output(self._port,
+        self._test_result = process_output(self._port,
             test_info, self._test_types, self._test_args,
             self._configuration, self._output_dir, crash, timeout, end - start,
 …
         driver.stop()

-    def get_test_stats(self):
-        return self._test_stats
+    def get_test_result(self):
+        return self._test_result

 …
         self._exception_info = None
         self._directory_timing_stats = {}
-        self._test_stats = []
+        self._test_results = []
         self._num_tests = 0
         self._start_time = 0
 …
         return self._directory_timing_stats

-    def get_individual_test_stats(self):
-        """Returns a list of (test_filename, time_to_run_test,
-        total_time_for_all_diffs, time_for_diffs) tuples."""
-        return self._test_stats
+    def get_test_results(self):
+        """Return the list of all tests run on this thread.
+
+        This is used to calculate per-thread statistics.
+
+        """
+        return self._test_results

     def cancel(self):
 …
             self._num_tests += 1
             if self._options.run_singly:
-                failures = self._run_test_singly(test_info)
+                result = self._run_test_singly(test_info)
             else:
-                failures = self._run_test(test_info)
+                result = self._run_test(test_info)

             filename = test_info.filename
             tests_run_file.write(filename + "\n")
-            if failures:
-                # Check and kill DumpRenderTree if we need too.
-                if len([1 for f in failures if f.should_kill_dump_render_tree()]):
+            if result.failures:
+                # Check and kill DumpRenderTree if we need to.
+                if len([1 for f in result.failures
+                        if f.should_kill_dump_render_tree()]):
                     self._kill_dump_render_tree()
                     # Reset the batch count since the shell just bounced.
                     batch_count = 0
                 # Print the error message(s).
-                error_str = '\n'.join(['  ' + f.message() for f in failures])
+                error_str = '\n'.join(['  ' + f.message() for
+                                       f in result.failures])
                 _log.debug("%s %s failed:\n%s" % (self.getName(),
                            self._port.relative_test_filename(filename),
 …
                 _log.debug("%s %s passed" % (self.getName(),
                            self._port.relative_test_filename(filename)))
-            self._result_queue.put((filename, failures))
+            self._result_queue.put(result)

             if batch_size > 0 and batch_count > batch_size:
 …
           test_info: Object containing the test filename, uri and timeout

-        Return:
-          A list of TestFailure objects describing the error.
+        Returns:
+          A TestResult
+
         """
         worker = SingleTestThread(self._port, self._image_path,
 …
         worker.start()

-        # When we're running one test per DumpRenderTree process, we can enforce
-        # a hard timeout.  the DumpRenderTree watchdog uses 2.5x the timeout
-        # We want to be larger than that.
+        # When we're running one test per DumpRenderTree process, we can
+        # enforce a hard timeout.  The DumpRenderTree watchdog uses 2.5x
+        # the timeout; we want to be larger than that.
         worker.join(int(test_info.timeout) * 3.0 / 1000.0)
         if worker.isAlive():
 …
             # DumpRenderTrees to free it up. If we're running more than
             # one DumpRenderTree thread, we'll end up killing the other
-            # DumpRenderTrees too, introducing spurious crashes. We accept that
-            # tradeoff in order to avoid losing the rest of this thread's
-            # results.
+            # DumpRenderTrees too, introducing spurious crashes. We accept
+            # that tradeoff in order to avoid losing the rest of this
+            # thread's results.
             _log.error('Test thread hung: killing all DumpRenderTrees')
             worker._driver.stop()

         try:
-            stats = worker.get_test_stats()
-            self._test_stats.append(stats)
-            failures = stats.failures
+            result = worker.get_test_result()
         except AttributeError, e:
             failures = []
             _log.error('Cannot get results of test: %s' %
                        test_info.filename)
-
-        return failures
+            result = TestResult(test_info.filename, failures=[],
+                test_run_time=0, total_time_for_all_diffs=0,
+                time_for_diffs=0)
+
+        return result

     def _run_test(self, test_info):
 …
           test_info: Object containing the test filename, uri and timeout

-        Return:
+        Returns:
           A list of TestFailure objects describing the error.
+
         """
         self._ensure_dump_render_tree_is_running()
 …
         end = time.time()

-        stats = process_output(self._port, test_info, self._test_types,
-                               self._test_args, self._options.configuration,
-                               self._options.results_directory, crash,
-                               timeout, end - start, actual_checksum,
-                               output, error)
-
-        self._test_stats.append(stats)
-        return stats.failures
+        result = process_output(self._port, test_info, self._test_types,
+                                self._test_args, self._options.configuration,
+                                self._options.results_directory, crash,
+                                timeout, end - start, actual_checksum,
+                                output, error)
+        self._test_results.append(result)
+        return result

     def _ensure_dump_render_tree_is_running(self):
-        """Start the shared DumpRenderTree, if it's not running. Not for use when
-        running tests singly, since those each start a separate DumpRenderTree in
-        their own thread.
+        """Start the shared DumpRenderTree, if it's not running.
+
+        This is not for use when running tests singly, since those each start
+        a separate DumpRenderTree in their own thread.
+
         """
         if (not self._driver or self._driver.poll() is not None):
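A minimal, self-contained sketch of the TestStats-to-TestResult refactoring above. The result constants and the determine_result_type() stub are stand-ins for illustration; the real lookup lives in test_failures:

    # Stand-ins for the real result constants and for
    # test_failures.determine_result_type(), so the sketch runs on its own.
    PASS, TEXT = 0, 1

    def determine_result_type(failures):
        # Stub: the real function maps TestFailure objects to a result code.
        return TEXT if failures else PASS

    class TestResult(object):
        """Bundles a test's failures, timing, and computed result type."""

        def __init__(self, filename, failures, test_run_time,
                     total_time_for_all_diffs, time_for_diffs):
            self.failures = failures
            self.filename = filename
            self.test_run_time = test_run_time
            self.time_for_diffs = time_for_diffs
            self.total_time_for_all_diffs = total_time_for_all_diffs
            self.type = determine_result_type(failures)

    result = TestResult('fast/example.html', failures=[], test_run_time=0.03,
                        total_time_for_all_diffs=0.002, time_for_diffs={})
    assert result.type == PASS

Carrying one object through the result queue replaces the old (filename, failures) tuples and lets consumers read .type directly instead of recomputing it.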
trunk/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
(r57443 → r57934)

 …
 (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)

+
 def result_was_expected(result, expected_results, test_needs_rebaselining,
                         test_is_skipped):
 …
         return True
     if result == MISSING and test_needs_rebaselining:
-      return True
+        return True
     if result == SKIP and test_is_skipped:
-      return True
+        return True
     return False
+

 def remove_pixel_failures(expected_results):
 …
         for expectation in expectations:
-            for item in TestExpectationsFile.EXPECTATIONS.items():
-                if item[1] == expectation:
-                    retval.append(item[0])
-                    break
-
-        return " ".join(retval).upper()
+            retval.append(self.expectation_to_string(expectation))
+
+        return " ".join(retval)
+
+    def expectation_to_string(self, expectation):
+        """Return the uppercased string equivalent of a given expectation."""
+        for item in TestExpectationsFile.EXPECTATIONS.items():
+            if item[1] == expectation:
+                return item[0].upper()
+        return ""

     def get_timeline_for_test(self, test):
 …
                 set_of_tests.remove(test)

-    def _already_seen_test(self, test, test_list_path, lineno, allow_overrides):
+    def _already_seen_test(self, test, test_list_path, lineno,
+                           allow_overrides):
         """Returns true if we've already seen a more precise path for this test
         than the test_list_path.
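The new expectation_to_string() is simply a reverse lookup over the EXPECTATIONS map. A runnable sketch, assuming a stand-in dictionary (the real TestExpectationsFile.EXPECTATIONS keywords and codes may differ):

    # Stand-in for TestExpectationsFile.EXPECTATIONS: keyword -> code.
    EXPECTATIONS = {'pass': 0, 'fail': 1, 'timeout': 2, 'crash': 3}

    def expectation_to_string(expectation):
        """Return the uppercased keyword for a given expectation code."""
        for keyword, code in EXPECTATIONS.items():
            if code == expectation:
                return keyword.upper()
        return ""

    print(expectation_to_string(2))  # TIMEOUT

Factoring this out of get_expectations_string() lets the trace logging in run_webkit_tests.py convert a single result code on its own.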
trunk/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
(r57858 → r57934)

 …
 LOG_SUMMARY = 'summary'

+# "Trace" the test - log the expected result, the actual result, and the
+# baselines used
+LOG_TRACE = 'trace'
+
 # Log any unexpected results while running (instead of just at the end).
 LOG_UNEXPECTED = 'unexpected'
-
-# Log any unexpected results at the end
 LOG_UNEXPECTED_RESULTS = 'unexpected-results'
 …
             expectations.get_tests_with_timeline(timeline))

-    def add(self, test, failures, result, expected):
-        """Add a result into the appropriate bin.
-
-        Args:
-          test: test file name
-          failures: list of failure objects from test execution
-          result: result of test (PASS, IMAGE, etc.).
+    def add(self, result, expected):
+        """Add a TestResult into the appropriate bin.
+
+        Args:
+          result: TestResult from dump_render_tree_thread.
           expected: whether the result was what we expected it to be.
         """

-        self.tests_by_expectation[result].add(test)
-        self.results[test] = result
+        self.tests_by_expectation[result.type].add(result.filename)
+        self.results[result.filename] = result.type
         self.remaining -= 1
-        if len(failures):
-            self.failures[test] = failures
+        if len(result.failures):
+            self.failures[result.filename] = result.failures
         if expected:
             self.expected += 1
         else:
-            self.unexpected_results[test] = result
+            self.unexpected_results[result.filename] = result.type
             self.unexpected += 1
 …
         # results here so the statistics can remain accurate.
         for test in skip_chunk:
-            result_summary.add(test, [], test_expectations.SKIP,
-                               expected=True)
+            result = dump_render_tree_thread.TestResult(test,
+                failures=[], test_run_time=0, total_time_for_all_diffs=0,
+                time_for_diffs=0)
+            result.type = test_expectations.SKIP
+            result_summary.add(result, expected=True)
         write("")
 …
                 test_timings.update(thread.get_directory_timing_stats())
                 individual_test_timings.extend(
-                    thread.get_individual_test_stats())
+                    thread.get_test_results())
         except KeyboardInterrupt:
             for thread in threads:
 …

     def update_summary(self, result_summary):
-        """Update the summary while running tests."""
+        """Update the summary and print results with any completed tests."""
         while True:
             try:
-                (test, fail_list) = self._result_queue.get_nowait()
-                result = test_failures.determine_result_type(fail_list)
-                expected = self._expectations.matches_an_expected_result(test,
-                    result, self._options.pixel_tests)
-                result_summary.add(test, fail_list, result, expected)
-                if (LOG_DETAILED_PROGRESS in self._options.log and
-                    (self._options.experimental_fully_parallel or
-                     self._is_single_threaded())):
-                    self._display_detailed_progress(result_summary)
-                else:
-                    if not expected and LOG_UNEXPECTED in self._options.log:
-                        self._print_unexpected_test_result(test, result)
-                    if LOG_PROGRESS in self._options.log:
-                        self._display_one_line_progress(result_summary)
+                result = self._result_queue.get_nowait()
             except Queue.Empty:
                 return
-
-    def _display_one_line_progress(self, result_summary):
+            expected = self._expectations.matches_an_expected_result(
+                result.filename, result.type, self._options.pixel_tests)
+            result_summary.add(result, expected)
+            self._print_test_results(result, expected, result_summary)
+
+    def _print_test_results(self, result, expected, result_summary):
+        """Print the result of the test as determined by the --log switches."""
+        if LOG_TRACE in self._options.log:
+            self._print_test_trace(result)
+        elif (LOG_DETAILED_PROGRESS in self._options.log and
+              (self._options.experimental_fully_parallel or
+               self._is_single_threaded())):
+            self._print_detailed_progress(result_summary)
+        else:
+            if (not expected and LOG_UNEXPECTED in self._options.log):
+                self._print_unexpected_test_result(result)
+            self._print_one_line_progress(result_summary)
+
+    def _print_test_trace(self, result):
+        """Print detailed results of a test (triggered by --log trace).
+        For each test, print:
+          - location of the expected baselines
+          - expected results
+          - actual result
+          - timing info
+        """
+        filename = result.filename
+        test_name = self._port.relative_test_filename(filename)
+        _log.info('trace: %s' % test_name)
+        _log.info('  txt: %s' %
+                  self._port.relative_test_filename(
+                      self._port.expected_filename(filename, '.txt')))
+        png_file = self._port.expected_filename(filename, '.png')
+        if os.path.exists(png_file):
+            _log.info('  png: %s' %
+                      self._port.relative_test_filename(filename))
+        else:
+            _log.info('  png: <none>')
+        _log.info('  exp: %s' %
+                  self._expectations.get_expectations_string(filename))
+        _log.info('  got: %s' %
+                  self._expectations.expectation_to_string(result.type))
+        _log.info(' took: %-.3f' % result.test_run_time)
+        _log.info('')
+
+    def _print_one_line_progress(self, result_summary):
         """Displays the progress through the test run."""
         percent_complete = 100 * (result_summary.expected +
 …
                result_summary.unexpected, result_summary.remaining))

-    def _display_detailed_progress(self, result_summary):
+    def _print_detailed_progress(self, result_summary):
         """Display detailed progress output where we print the directory name
         and one dot for each completed test. This is triggered by
 …
         else:
             self._meter.progress("%s\n" % (self._current_progress_str))
+
+    def _print_unexpected_test_result(self, result):
+        """Prints one unexpected test result line."""
+        desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
+        self._meter.write("  %s -> unexpected %s\n" %
+                          (self._port.relative_test_filename(result.filename),
+                           desc))

     def _get_failures(self, result_summary, include_crashes):
 …
         if len(unexpected_results['tests']) and self._options.verbose:
             print "-" * 78
-
-    def _print_unexpected_test_result(self, test, result):
-        """Prints one unexpected test result line."""
-        desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0]
-        self._meter.write("  %s -> unexpected %s\n" %
-                          (self._port.relative_test_filename(test), desc))

     def _write_results_html_file(self, result_summary):
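The reworked update_summary() drains the shared result queue without blocking. A minimal sketch of that pattern, with illustrative function and variable names (Python 2 module naming, matching the code above; in Python 3 this would be the queue module):

    import Queue

    def drain_results(result_queue, handle_result):
        # Consume completed results until the queue is momentarily empty,
        # then return instead of blocking the caller.
        while True:
            try:
                result = result_queue.get_nowait()
            except Queue.Empty:
                return
            handle_result(result)

    # Hypothetical usage:
    q = Queue.Queue()
    q.put('fast/example.html')
    drain_results(q, lambda r: None)

Pulling the except clause tight around get_nowait() means a failure inside result handling can no longer be mistaken for an empty queue, which the old all-in-one try block allowed.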