Changeset 136585 in WebKit
- Timestamp:
- Dec 4, 2012, 3:41:37 PM (13 years ago)
- Location:
- trunk/Tools
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Tools/ChangeLog
r136583 r136585 1 2012-12-04 Dirk Pranke <dpranke@chromium.org> 2 3 nrwt: make paths and test_names passed arguments in Manager._prepare_lists et al 4 https://bugs.webkit.org/show_bug.cgi?id=104047 5 6 Reviewed by Eric Seidel. 7 8 The code becomes cleaner if we are just passing values around rather 9 than hanging them off the manager object, helps move _prepare_lists() 10 to a pure function, and is needed to eventually make the 11 result_summary object something returned from runner.run_tests() 12 (note that two more patches are needed for that to happen). 13 14 * Scripts/webkitpy/layout_tests/controllers/manager.py: 15 (Manager.__init__): 16 (Manager._http_tests): 17 (Manager._prepare_lists): 18 (Manager.needs_servers): 19 (Manager._set_up_run): 20 (Manager.run): 21 (Manager._run_tests): 22 (Manager._upload_json_files): 23 * Scripts/webkitpy/layout_tests/controllers/manager_unittest.py: 24 (ManagerTest.test_needs_servers.get_manager): 25 (ManagerTest.test_needs_servers): 26 (ManagerTest.integration_test_needs_servers.get_manager): 27 (ManagerTest.integration_test_needs_servers): 28 (ManagerTest.test_look_for_new_crash_logs.get_manager): 29 (ManagerTest): 30 (ManagerTest.test_look_for_new_crash_logs): 31 * Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py: 32 (JSONLayoutResultsGenerator.__init__): 33 (JSONLayoutResultsGenerator._get_modifier_char): 34 1 35 2012-12-04 Adam Barth <abarth@webkit.org> 2 36 -
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
r136582 r136585 282 282 # options.results_directory, use_tls=True, port=9323) 283 283 284 self._paths = set()285 self._test_names = None286 284 self._results_directory = self._port.results_directory() 287 285 self._finder = LayoutTestFinder(self._port, self._options) … … 297 295 return self.WEBSOCKET_SUBDIR in test 298 296 299 def _http_tests(self ):300 return set(test for test in self._test_names if self._is_http_test(test))297 def _http_tests(self, test_names): 298 return set(test for test in test_names if self._is_http_test(test)) 301 299 302 300 def _is_perf_test(self, test): 303 301 return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test 304 302 305 def _prepare_lists(self ):306 tests_to_skip = self._finder.skip_tests( self._paths, self._test_names, self._expectations, self._http_tests())307 self._test_names = [test for test in self._test_names if test not in tests_to_skip]303 def _prepare_lists(self, paths, test_names): 304 tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names)) 305 tests_to_run = [test for test in test_names if test not in tests_to_skip] 308 306 309 307 # Create a sorted list of test files so the subset chunk, 310 308 # if used, contains alphabetically consecutive tests. 
311 309 if self._options.order == 'natural': 312 self._test_names.sort(key=self._port.test_key)310 tests_to_run.sort(key=self._port.test_key) 313 311 elif self._options.order == 'random': 314 random.shuffle( self._test_names)315 316 self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(self._test_names)312 random.shuffle(tests_to_run) 313 314 tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) 317 315 self._expectations.add_skipped_tests(tests_in_other_chunks) 318 316 tests_to_skip.update(tests_in_other_chunks) 319 317 320 if self._options.repeat_each > 1: 321 list_with_repetitions = [] 322 for test in self._test_names: 323 list_with_repetitions += ([test] * self._options.repeat_each) 324 self._test_names = list_with_repetitions 325 326 if self._options.iterations > 1: 327 self._test_names = self._test_names * self._options.iterations 328 329 summary = ResultSummary(self._expectations, len(self._test_names)) 318 summary = ResultSummary(self._expectations, len(tests_to_run) * self._options.repeat_each * self._options.iterations) 319 330 320 for test_name in set(tests_to_skip): 331 321 result = test_results.TestResult(test_name) 332 322 result.type = test_expectations.SKIP 333 323 summary.add(result, expected=True, test_is_slow=self._test_is_slow(test_name)) 334 return summary 324 return summary, tests_to_run 335 325 336 326 def _test_input_for_file(self, test_file): … … 349 339 return self._expectations.has_modifier(test_file, test_expectations.SLOW) 350 340 351 def needs_servers(self ):352 return any(self._test_requires_lock(test_name) for test_name in self._test_names) and self._options.http353 354 def _set_up_run(self ):341 def needs_servers(self, test_names): 342 return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http 343 344 def _set_up_run(self, test_names): 355 345 self._printer.write_update("Checking build ...") 356 if not self._port.check_build(self.needs_servers( 
)):346 if not self._port.check_build(self.needs_servers(test_names)): 357 347 _log.error("Build check failed") 358 348 return False … … 367 357 if not self._options.nocheck_sys_deps: 368 358 self._printer.write_update("Checking system dependencies ...") 369 if not self._port.check_sys_deps(self.needs_servers( )):359 if not self._port.check_sys_deps(self.needs_servers(test_names)): 370 360 self._port.stop_helper() 371 361 return False … … 384 374 self._printer.write_update("Collecting tests ...") 385 375 try: 386 self._paths, self._test_names = self._collect_tests(args)376 paths, test_names = self._collect_tests(args) 387 377 except IOError as exception: 388 378 # This is raised if --test-list doesn't exist … … 390 380 391 381 self._printer.write_update("Parsing expectations ...") 392 self._expectations = test_expectations.TestExpectations(self._port, self._test_names)393 394 num_all_test_files_found = len( self._test_names)395 result_summary = self._prepare_lists()382 self._expectations = test_expectations.TestExpectations(self._port, test_names) 383 384 num_all_test_files_found = len(test_names) 385 result_summary, test_names = self._prepare_lists(paths, test_names) 396 386 397 387 # Check to make sure we're not skipping every test. 
398 if not self._test_names:388 if not test_names: 399 389 _log.critical('No tests to run.') 400 390 return -1 401 391 402 self._printer.print_found(num_all_test_files_found, len( self._test_names), self._options.repeat_each, self._options.iterations)392 self._printer.print_found(num_all_test_files_found, len(test_names), self._options.repeat_each, self._options.iterations) 403 393 self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type) 404 394 405 if not self._set_up_run( ):395 if not self._set_up_run(test_names): 406 396 return -1 407 397 … … 409 399 410 400 try: 411 result_summary = self._run_tests( self._test_names, result_summary, int(self._options.child_processes), retrying=False)401 result_summary = self._run_tests(test_names, result_summary, int(self._options.child_processes), retrying=False) 412 402 413 403 # We exclude the crashes from the list of results to retry, because … … 455 445 456 446 def _run_tests(self, tests, result_summary, num_workers, retrying): 457 test_inputs = [self._test_input_for_file(test) for test in tests]458 447 needs_http = self._port.requires_http_server() or any(self._is_http_test(test) for test in tests) 459 448 needs_websockets = any(self._is_websocket_test(test) for test in tests) 449 test_inputs = [] 450 for _ in xrange(self._options.iterations): 451 for test in tests: 452 for _ in xrange(self._options.repeat_each): 453 test_inputs.append(self._test_input_for_file(test)) 460 454 return self._runner.run_tests(test_inputs, self._expectations, result_summary, num_workers, needs_http, needs_websockets, retrying) 461 455 … … 539 533 self._options.build_number, self._results_directory, 540 534 BUILDER_BASE_URL, 541 self._expectations, result_summary, self._test_names,535 self._expectations, result_summary, 542 536 self._options.test_results_server, 543 537 "layout-tests", -
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
r136582 r136585 46 46 class ManagerTest(unittest.TestCase): 47 47 def test_needs_servers(self): 48 def get_manager _with_tests(test_names):48 def get_manager(): 49 49 port = Mock() # FIXME: Use a tighter mock. 50 50 port.TEST_PATH_SEPARATOR = '/' 51 51 manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=Mock()) 52 manager._test_names = test_names53 52 return manager 54 53 55 manager = get_manager _with_tests(['fast/html'])56 self.assertFalse(manager.needs_servers( ))57 58 manager = get_manager _with_tests(['http/tests/misc'])59 self.assertTrue(manager.needs_servers( ))54 manager = get_manager() 55 self.assertFalse(manager.needs_servers(['fast/html'])) 56 57 manager = get_manager() 58 self.assertTrue(manager.needs_servers(['http/tests/misc'])) 60 59 61 60 def integration_test_needs_servers(self): 62 def get_manager _with_tests(test_names):61 def get_manager(): 63 62 host = MockHost() 64 63 port = host.port_factory.get() 65 64 manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock()) 66 manager._collect_tests(test_names)67 65 return manager 68 66 69 manager = get_manager _with_tests(['fast/html'])70 self.assertFalse(manager.needs_servers( ))71 72 manager = get_manager _with_tests(['http/tests/mime'])73 self.assertTrue(manager.needs_servers( ))67 manager = get_manager() 68 self.assertFalse(manager.needs_servers(['fast/html'])) 69 70 manager = get_manager() 71 self.assertTrue(manager.needs_servers(['http/tests/mime'])) 74 72 75 73 if sys.platform == 'win32': 76 manager = get_manager _with_tests(['fast\\html'])77 self.assertFalse(manager.needs_servers( ))78 79 manager = get_manager _with_tests(['http\\tests\\mime'])80 self.assertTrue(manager.needs_servers( ))74 manager = get_manager() 75 self.assertFalse(manager.needs_servers(['fast\\html'])) 76 77 manager = get_manager() 78 self.assertTrue(manager.needs_servers(['http\\tests\\mime'])) 81 79 82 80 def test_look_for_new_crash_logs(self): 
83 def get_manager _with_tests(test_names):81 def get_manager(): 84 82 host = MockHost() 85 83 port = host.port_factory.get('test-mac-leopard') 86 84 manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock()) 87 manager._collect_tests(test_names)88 85 return manager 89 86 host = MockHost() … … 92 89 expectations = test_expectations.TestExpectations(port, tests) 93 90 rs = ResultSummary(expectations, len(tests)) 94 manager = get_manager _with_tests(tests)91 manager = get_manager() 95 92 manager._look_for_new_crash_logs(rs, time.time()) 96 93 -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
r136579 r136585 53 53 def __init__(self, port, builder_name, build_name, build_number, 54 54 results_file_base_path, builder_base_url, 55 expectations, result_summary, all_tests,55 expectations, result_summary, 56 56 test_results_server=None, test_type="", master_name=""): 57 57 """Modifies the results.json file. Grabs it off the archive directory … … 71 71 self._result_summary = result_summary 72 72 self._failures = dict((test_name, result_summary.results[test_name].type) for test_name in result_summary.failures) 73 self._all_tests = all_tests74 73 self._test_timings = result_summary.results 75 74 … … 109 108 # override 110 109 def _get_modifier_char(self, test_name): 111 if test_name not in self._ all_tests:110 if test_name not in self._result_summary.results: 112 111 return self.NO_DATA_RESULT 113 112
Note:
See TracChangeset
for help on using the changeset viewer.