Changeset 136585 in WebKit


Timestamp: Dec 4, 2012, 3:41:37 PM
Author: dpranke@chromium.org
Message:

nrwt: make paths and test_names passed arguments in Manager._prepare_lists et al
https://bugs.webkit.org/show_bug.cgi?id=104047

Reviewed by Eric Seidel.

The code becomes cleaner if we are just passing values around rather
than hanging them off the manager object, helps move _prepare_lists()
to a pure function, and is needed to eventually make the
result_summary object something returned from runner.run_tests()
(note that two more patches are needed for that to happen).
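
(For illustration only: a minimal sketch of the pattern described above, using hypothetical names; SketchManager, skip_prefixes and prepare_lists are stand-ins, not webkitpy APIs. The helper takes the test list as an argument and returns its results instead of reading and mutating attributes stashed on the manager object, so it can be exercised with plain lists in a unit test.)

    # Minimal sketch of the "pass values, don't hang them off the object" pattern.
    # SketchManager, skip_prefixes and prepare_lists are hypothetical stand-ins.
    class SketchManager(object):
        def __init__(self, skip_prefixes):
            # Immutable configuration only; no per-run state is stored here.
            self._skip_prefixes = tuple(skip_prefixes)

        def prepare_lists(self, test_names):
            # Everything the caller needs comes back as return values, so the
            # method behaves like a pure function with respect to run state.
            tests_to_skip = set(t for t in test_names if t.startswith(self._skip_prefixes))
            tests_to_run = [t for t in test_names if t not in tests_to_skip]
            return tests_to_run, tests_to_skip

    if __name__ == '__main__':
        manager = SketchManager(skip_prefixes=['http/'])
        to_run, to_skip = manager.prepare_lists(['fast/html/a.html', 'http/tests/misc/b.html'])
        assert to_run == ['fast/html/a.html']
        assert to_skip == set(['http/tests/misc/b.html'])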

  • Scripts/webkitpy/layout_tests/controllers/manager.py:

(Manager.__init__):
(Manager._http_tests):
(Manager._prepare_lists):
(Manager.needs_servers):
(Manager._set_up_run):
(Manager.run):
(Manager._run_tests):
(Manager._upload_json_files):

  • Scripts/webkitpy/layout_tests/controllers/manager_unittest.py:

(ManagerTest.test_needs_servers.get_manager):
(ManagerTest.test_needs_servers):
(ManagerTest.integration_test_needs_servers.get_manager):
(ManagerTest.integration_test_needs_servers):
(ManagerTest.test_look_for_new_crash_logs.get_manager):
(ManagerTest):
(ManagerTest.test_look_for_new_crash_logs):

  • Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py:

(JSONLayoutResultsGenerator.__init__):
(JSONLayoutResultsGenerator._get_modifier_char):

Location: trunk/Tools
Files: 4 edited

  • trunk/Tools/ChangeLog

    r136583 → r136585
    +2012-12-04  Dirk Pranke  <dpranke@chromium.org>
    +
    +        nrwt: make paths and test_names passed arguments in Manager._prepare_lists et al
    +        https://bugs.webkit.org/show_bug.cgi?id=104047
    +
    +        Reviewed by Eric Seidel.
    +
    +        The code becomes cleaner if we are just passing values around rather
    +        than hanging them off the manager object, helps move _prepare_lists()
    +        to a pure function, and is needed to eventually make the
    +        result_summary object something returned from runner.run_tests()
    +        (note that two more patches are needed for that to happen).
    +
    +        * Scripts/webkitpy/layout_tests/controllers/manager.py:
    +        (Manager.__init__):
    +        (Manager._http_tests):
    +        (Manager._prepare_lists):
    +        (Manager.needs_servers):
    +        (Manager._set_up_run):
    +        (Manager.run):
    +        (Manager._run_tests):
    +        (Manager._upload_json_files):
    +        * Scripts/webkitpy/layout_tests/controllers/manager_unittest.py:
    +        (ManagerTest.test_needs_servers.get_manager):
    +        (ManagerTest.test_needs_servers):
    +        (ManagerTest.integration_test_needs_servers.get_manager):
    +        (ManagerTest.integration_test_needs_servers):
    +        (ManagerTest.test_look_for_new_crash_logs.get_manager):
    +        (ManagerTest):
    +        (ManagerTest.test_look_for_new_crash_logs):
    +        * Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py:
    +        (JSONLayoutResultsGenerator.__init__):
    +        (JSONLayoutResultsGenerator._get_modifier_char):
    +
     2012-12-04  Adam Barth  <abarth@webkit.org>

  • trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py

    r136582 → r136585
             #        options.results_directory, use_tls=True, port=9323)

    -        self._paths = set()
    -        self._test_names = None
             self._results_directory = self._port.results_directory()
             self._finder = LayoutTestFinder(self._port, self._options)
    …
             return self.WEBSOCKET_SUBDIR in test

    -    def _http_tests(self):
    -        return set(test for test in self._test_names if self._is_http_test(test))
    +    def _http_tests(self, test_names):
    +        return set(test for test in test_names if self._is_http_test(test))

         def _is_perf_test(self, test):
             return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    -    def _prepare_lists(self):
    -        tests_to_skip = self._finder.skip_tests(self._paths, self._test_names, self._expectations, self._http_tests())
    -        self._test_names = [test for test in self._test_names if test not in tests_to_skip]
    +    def _prepare_lists(self, paths, test_names):
    +        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
    +        tests_to_run = [test for test in test_names if test not in tests_to_skip]

             # Create a sorted list of test files so the subset chunk,
             # if used, contains alphabetically consecutive tests.
             if self._options.order == 'natural':
    -            self._test_names.sort(key=self._port.test_key)
    +            tests_to_run.sort(key=self._port.test_key)
             elif self._options.order == 'random':
    -            random.shuffle(self._test_names)
    -
    -        self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(self._test_names)
    +            random.shuffle(tests_to_run)
    +
    +        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
             self._expectations.add_skipped_tests(tests_in_other_chunks)
             tests_to_skip.update(tests_in_other_chunks)

    -        if self._options.repeat_each > 1:
    -            list_with_repetitions = []
    -            for test in self._test_names:
    -                list_with_repetitions += ([test] * self._options.repeat_each)
    -            self._test_names = list_with_repetitions
    -
    -        if self._options.iterations > 1:
    -            self._test_names = self._test_names * self._options.iterations
    -
    -        summary = ResultSummary(self._expectations, len(self._test_names))
    +        summary = ResultSummary(self._expectations, len(tests_to_run) * self._options.repeat_each * self._options.iterations)
    +
             for test_name in set(tests_to_skip):
                 result = test_results.TestResult(test_name)
                 result.type = test_expectations.SKIP
                 summary.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))
    -        return summary
    +        return summary, tests_to_run

         def _test_input_for_file(self, test_file):
    …
             return self._expectations.has_modifier(test_file, test_expectations.SLOW)

    -    def needs_servers(self):
    -        return any(self._test_requires_lock(test_name) for test_name in self._test_names) and self._options.http
    -
    -    def _set_up_run(self):
    +    def needs_servers(self, test_names):
    +        return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http
    +
    +    def _set_up_run(self, test_names):
             self._printer.write_update("Checking build ...")
    -        if not self._port.check_build(self.needs_servers()):
    +        if not self._port.check_build(self.needs_servers(test_names)):
                 _log.error("Build check failed")
                 return False
    …
             if not self._options.nocheck_sys_deps:
                 self._printer.write_update("Checking system dependencies ...")
    -            if not self._port.check_sys_deps(self.needs_servers()):
    +            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                     self._port.stop_helper()
                     return False
    …
             self._printer.write_update("Collecting tests ...")
             try:
    -            self._paths, self._test_names = self._collect_tests(args)
    +            paths, test_names = self._collect_tests(args)
             except IOError as exception:
                 # This is raised if --test-list doesn't exist
    …

             self._printer.write_update("Parsing expectations ...")
    -        self._expectations = test_expectations.TestExpectations(self._port, self._test_names)
    -
    -        num_all_test_files_found = len(self._test_names)
    -        result_summary = self._prepare_lists()
    +        self._expectations = test_expectations.TestExpectations(self._port, test_names)
    +
    +        num_all_test_files_found = len(test_names)
    +        result_summary, test_names = self._prepare_lists(paths, test_names)

             # Check to make sure we're not skipping every test.
    -        if not self._test_names:
    +        if not test_names:
                 _log.critical('No tests to run.')
                 return -1

    -        self._printer.print_found(num_all_test_files_found, len(self._test_names), self._options.repeat_each, self._options.iterations)
    +        self._printer.print_found(num_all_test_files_found, len(test_names), self._options.repeat_each, self._options.iterations)
             self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type)

    -        if not self._set_up_run():
    +        if not self._set_up_run(test_names):
                 return -1

    …

             try:
    -            result_summary = self._run_tests(self._test_names, result_summary, int(self._options.child_processes), retrying=False)
    +            result_summary = self._run_tests(test_names, result_summary, int(self._options.child_processes), retrying=False)

                 # We exclude the crashes from the list of results to retry, because
    …

         def _run_tests(self, tests, result_summary, num_workers, retrying):
    -        test_inputs = [self._test_input_for_file(test) for test in tests]
             needs_http = self._port.requires_http_server() or any(self._is_http_test(test) for test in tests)
             needs_websockets = any(self._is_websocket_test(test) for test in tests)
    +        test_inputs = []
    +        for _ in xrange(self._options.iterations):
    +            for test in tests:
    +                for _ in xrange(self._options.repeat_each):
    +                    test_inputs.append(self._test_input_for_file(test))
             return self._runner.run_tests(test_inputs, self._expectations, result_summary, num_workers, needs_http, needs_websockets, retrying)

    …
                 self._options.build_number, self._results_directory,
                 BUILDER_BASE_URL,
    -            self._expectations, result_summary, self._test_names,
    +            self._expectations, result_summary,
                 self._options.test_results_server,
                 "layout-tests",

  • trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py

    r136582 → r136585
     class ManagerTest(unittest.TestCase):
         def test_needs_servers(self):
    -        def get_manager_with_tests(test_names):
    +        def get_manager():
                 port = Mock()  # FIXME: Use a tighter mock.
                 port.TEST_PATH_SEPARATOR = '/'
                 manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=Mock())
    -            manager._test_names = test_names
                 return manager

    -        manager = get_manager_with_tests(['fast/html'])
    -        self.assertFalse(manager.needs_servers())
    -
    -        manager = get_manager_with_tests(['http/tests/misc'])
    -        self.assertTrue(manager.needs_servers())
    +        manager = get_manager()
    +        self.assertFalse(manager.needs_servers(['fast/html']))
    +
    +        manager = get_manager()
    +        self.assertTrue(manager.needs_servers(['http/tests/misc']))

         def integration_test_needs_servers(self):
    -        def get_manager_with_tests(test_names):
    +        def get_manager():
                 host = MockHost()
                 port = host.port_factory.get()
                 manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
    -            manager._collect_tests(test_names)
                 return manager

    -        manager = get_manager_with_tests(['fast/html'])
    -        self.assertFalse(manager.needs_servers())
    -
    -        manager = get_manager_with_tests(['http/tests/mime'])
    -        self.assertTrue(manager.needs_servers())
    +        manager = get_manager()
    +        self.assertFalse(manager.needs_servers(['fast/html']))
    +
    +        manager = get_manager()
    +        self.assertTrue(manager.needs_servers(['http/tests/mime']))

             if sys.platform == 'win32':
    -            manager = get_manager_with_tests(['fast\\html'])
    -            self.assertFalse(manager.needs_servers())
    -
    -            manager = get_manager_with_tests(['http\\tests\\mime'])
    -            self.assertTrue(manager.needs_servers())
    +            manager = get_manager()
    +            self.assertFalse(manager.needs_servers(['fast\\html']))
    +
    +            manager = get_manager()
    +            self.assertTrue(manager.needs_servers(['http\\tests\\mime']))

         def test_look_for_new_crash_logs(self):
    -        def get_manager_with_tests(test_names):
    +        def get_manager():
                 host = MockHost()
                 port = host.port_factory.get('test-mac-leopard')
                 manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
    -            manager._collect_tests(test_names)
                 return manager
             host = MockHost()
    …
             expectations = test_expectations.TestExpectations(port, tests)
             rs = ResultSummary(expectations, len(tests))
    -        manager = get_manager_with_tests(tests)
    +        manager = get_manager()
             manager._look_for_new_crash_logs(rs, time.time())

  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py

    r136579 → r136585
         def __init__(self, port, builder_name, build_name, build_number,
             results_file_base_path, builder_base_url,
    -        expectations, result_summary, all_tests,
    +        expectations, result_summary,
             test_results_server=None, test_type="", master_name=""):
             """Modifies the results.json file. Grabs it off the archive directory
    …
             self._result_summary = result_summary
             self._failures = dict((test_name, result_summary.results[test_name].type) for test_name in result_summary.failures)
    -        self._all_tests = all_tests
             self._test_timings = result_summary.results

    …
         # override
         def _get_modifier_char(self, test_name):
    -        if test_name not in self._all_tests:
    +        if test_name not in self._result_summary.results:
                 return self.NO_DATA_RESULT