Changeset 124967 in webkit
- Timestamp: Aug 7, 2012 6:50:11 PM (12 years ago)
- Location: trunk/Tools
- Files: 5 edited
Legend:
- Unmodified
- Added
- Removed
trunk/Tools/ChangeLog
r124958 r124967 1 2012-08-07 Dirk Pranke <dpranke@chromium.org> 2 3 [NRWT] Would like an output mode similar to ORWT verbose one 4 https://bugs.webkit.org/show_bug.cgi?id=88702 5 6 Reviewed by Ryosuke Niwa. 7 8 Change the --verbose logging for new-run-webkit-tests so that 9 it matches ORWT more; we just print one line per test. Use 10 --debug-rwt-logging to get the full debug stream (aka old ORWT 11 --verbose). 12 13 * Scripts/webkitpy/layout_tests/models/test_expectations.py: 14 (TestExpectations): 15 * Scripts/webkitpy/layout_tests/run_webkit_tests.py: 16 (_set_up_derived_options): 17 (parse_args): 18 * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py: 19 (MainTest.test_retrying_and_flaky_tests): 20 * Scripts/webkitpy/layout_tests/views/printing.py: 21 (print_options): 22 (Printer._print_result_summary_entry): 23 (Printer._print_one_line_summary): 24 (Printer._print_test_result): 25 (Printer._print_baseline): 26 (Printer._print_unexpected_results): 27 1 28 2012-08-07 Dirk Pranke <dpranke@chromium.org> 2 29 -
trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
r124131 r124967 691 691 'missing': MISSING} 692 692 693 EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'), 694 PASS: ('pass', 'passes'), 695 TEXT: ('text diff mismatch', 696 'text diff mismatch'), 697 IMAGE: ('image mismatch', 'image mismatch'), 698 IMAGE_PLUS_TEXT: ('image and text mismatch', 699 'image and text mismatch'), 700 AUDIO: ('audio mismatch', 'audio mismatch'), 701 CRASH: ('crash', 'crashes'), 702 TIMEOUT: ('test timed out', 'tests timed out'), 703 MISSING: ('no expected result found', 704 'no expected results found')} 693 # (aggregated by category, pass/fail/skip, type) 694 EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped', ''), 695 PASS: ('passes', 'passed', ''), 696 TEXT: ('text failures', 'failed', ' (text diff)'), 697 IMAGE: ('image-only failures', 'failed', ' (image diff)'), 698 IMAGE_PLUS_TEXT: ('both image and text failures', 'failed', ' (both image and text diffs'), 699 AUDIO: ('audio failures', 'failed', ' (audio diff)'), 700 CRASH: ('crashes', 'crashed', ''), 701 TIMEOUT: ('timeouts', 'timed out', ''), 702 MISSING: ('missing results', 'is missing an expected result', '')} 705 703 706 704 EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, TEXT, IMAGE, AUDIO, SKIP) -
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
r124809 r124967 178 178 options.pixel_test_directories = list(varified_dirs) 179 179 180 if options.run_singly: 181 options.verbose = True 182 180 183 return warnings 181 184 … … 390 393 help=("Run a the tests in batches (n), after every n tests, " 391 394 "DumpRenderTree is relaunched."), type="int", default=None), 392 # old-run-webkit-tests has --run-singly imply --verbose.393 395 optparse.make_option("--run-singly", action="store_true", 394 default=False, help="run a separate DumpRenderTree for each test "),396 default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"), 395 397 optparse.make_option("--child-processes", 396 398 help="Number of DumpRenderTrees to run in parallel."), -
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
r124958 r124967 765 765 self.assertTrue('Clobbering old results' in err.getvalue()) 766 766 self.assertTrue('flaky/text.html' in err.getvalue()) 767 self.assertTrue('Unexpected text diff' in out.getvalue())767 self.assertTrue('Unexpected text failures' in out.getvalue()) 768 768 self.assertFalse('Unexpected flakiness' in out.getvalue()) 769 769 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt')) -
trunk/Tools/Scripts/webkitpy/layout_tests/views/printing.py
r124870 r124967 47 47 optparse.make_option('-q', '--quiet', action='store_true', default=False, 48 48 help='run quietly (errors, warnings, and progress only)'), 49 optparse.make_option('-v', '--verbose', action='store_true', default=False, dest='debug_rwt_logging',50 help=' same as --debug-rwt-logging (for now)'),49 optparse.make_option('-v', '--verbose', action='store_true', default=False, 50 help='print a summarized result for every test (one line per test)'), 51 51 optparse.make_option('--details', action='store_true', default=False, 52 52 help='print detailed results for every test'), … … 294 294 if not_passing and len(results): 295 295 pct = len(results) * 100.0 / not_passing 296 self._print_for_bot(" %5d %-24s (%4.1f%%)" % (len(results), desc[ len(results) != 1], pct))296 self._print_for_bot(" %5d %-24s (%4.1f%%)" % (len(results), desc[0], pct)) 297 297 298 298 def _print_one_line_summary(self, total, expected, unexpected): … … 303 303 incomplete_str = " (%d didn't run)" % incomplete 304 304 305 if self._options. 
debug_rwt_logging or unexpected:305 if self._options.verbose or self._options.debug_rwt_logging or unexpected: 306 306 self.writeln("") 307 307 … … 328 328 if self._options.details: 329 329 self._print_test_trace(result, exp_str, got_str) 330 elif not expected: 331 self._print_unexpected_test_result(result) 330 elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected: 331 desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type] 332 self.writeln("%s %s%s%s" % (result.test_name, desc[1], "" if expected else " unexpectedly", desc[2])) 332 333 333 334 def _print_test_trace(self, result, exp_str, got_str): … … 356 357 relpath = '<none>' 357 358 self._print_default(' %s: %s' % (extension[1:], relpath)) 358 359 def _print_unexpected_test_result(self, result):360 desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type][0]361 self._print_quiet(" %s -> unexpected %s" % (result.test_name, desc))362 359 363 360 def _print_progress(self, result_summary, retrying, test_list): … … 419 416 for key, tests in flaky.iteritems(): 420 417 result = TestExpectations.EXPECTATIONS[key.lower()] 421 self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][ 1], len(tests)))418 self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][0], len(tests))) 422 419 tests.sort() 423 420 … … 436 433 for key, tests in regressions.iteritems(): 437 434 result = TestExpectations.EXPECTATIONS[key.lower()] 438 self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][ 1], len(tests)))435 self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][0], len(tests))) 439 436 tests.sort() 440 437 for test in tests:
Note: See TracChangeset for help on using the changeset viewer.