Changeset 239739 in WebKit


Timestamp: Jan 8, 2019 1:15:04 PM
Author: commit-queue@webkit.org
Message:

Layout tests will generate a perf metrics file in the results directory.
https://bugs.webkit.org/show_bug.cgi?id=192030
<rdar://problem/32779516>

Patch by Zhifei Fang <zhifei_fang@apple.com> on 2019-01-08
Reviewed by Aakash Jain.

Layout test running times will be collected into a perf metrics file.
For now, instead of outputting the running time of every test (which would be huge),
we aggregate them by test directories that are at most two levels deep.
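
To illustrate the aggregation, the resulting layout_test_perf_metrics.json looks roughly
like the sketch below (the directory names and numbers are made up; per-directory times
are in milliseconds, and the structure follows perf_metrics_for_test in the diff further down):

    {
        "layout_tests_run_time": {"metrics": {"Time": {"current": [1200]}}},
        "layout_tests": {
            "metrics": {"Time": ["Total", "Arithmetic"]},
            "tests": {
                "fast": {
                    "metrics": {"Time": ["Total", "Arithmetic"]},
                    "tests": {
                        "css": {"metrics": {"Time": {"current": [81520]}}}
                    }
                }
            }
        }
    }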

  • Scripts/webkitpy/layout_tests/controllers/manager.py:

(Manager._end_test_run):
(Manager._output_perf_metrics):
(Manager._print_expectation_line_for_test):

  • Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:

(add_test_perf_metric):
(test_perf_metrics):

  • Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:

(JSONGeneratorTest.test_test_timings_trie):
(JSONGeneratorTest):
(JSONGeneratorTest.test_test_perf_metrics):

  • Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:

(RebaselineTest.test_reset_results):
(RebaselineTest.test_missing_results):
(RebaselineTest.test_new_baseline):

Location: trunk/Tools
Files: 5 edited

  • trunk/Tools/ChangeLog

    r239729 r239739

    + 2019-01-08  Zhifei Fang  <zhifei_fang@apple.com>
    +
    +         Layout tests will generate a perf metrics file in the results directory.
    +         https://bugs.webkit.org/show_bug.cgi?id=192030
    +         <rdar://problem/32779516>
    +
    +         Reviewed by Aakash Jain.
    +
    +         Layout test running times will be collected into a perf metrics file.
    +         For now, instead of outputting the running time of every test (which would be huge),
    +         we aggregate them by test directories that are at most two levels deep.
    +
    +         * Scripts/webkitpy/layout_tests/controllers/manager.py:
    +         (Manager._end_test_run):
    +         (Manager._output_perf_metrics):
    +         (Manager._print_expectation_line_for_test):
    +         * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
    +         (add_test_perf_metric):
    +         (test_perf_metrics):
    +         * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
    +         (JSONGeneratorTest.test_test_timings_trie):
    +         (JSONGeneratorTest):
    +         (JSONGeneratorTest.test_test_perf_metrics):
    +         * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
    +         (RebaselineTest.test_reset_results):
    +         (RebaselineTest.test_missing_results):
    +         (RebaselineTest.test_new_baseline):
    +
      2019-01-08  Patrick Griffis  <pgriffis@igalia.com>
  • trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py

    r238944 r239739

              if not self._options.dry_run:
                  self._port.print_leaks_summary()
    +             self._output_perf_metrics(end_time - start_time, initial_results)
                  self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

                          (result.type != test_expectations.CRASH or include_crashes))]

    +     def _output_perf_metrics(self, run_time, initial_results):
    +         perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
    +         perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
    +         self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))
    +
          def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
              """Writes the results of the test run as JSON files into the results

              line = self._expectations.model().get_expectation_line(test)
              print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or ''))
    -
    +
          def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
              format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
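
The hunk above writes layout_test_perf_metrics.json into the results directory. As a quick
sanity check after a local run, the file could be inspected with something like the sketch
below (not part of the patch; the path is only an example and depends on where
run-webkit-tests puts its results):

    import json

    # Example path; the metrics file sits next to the other JSON result files
    # in the results directory (typically layout-test-results).
    with open("layout-test-results/layout_test_perf_metrics.json") as f:
        metrics = json.load(f)

    # Overall harness run time recorded by _output_perf_metrics (end_time - start_time).
    print(metrics["layout_tests_run_time"]["metrics"]["Time"]["current"][0])

    # Aggregated times (milliseconds) for test directories, at most two levels deep.
    for name, entry in sorted(metrics["layout_tests"]["tests"].items()):
        print(name, entry["metrics"]["Time"])
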
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py

    r225733 r239739

          for test_result in individual_test_timings:
              test = test_result.test_name
    -
              add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)

          return trie
    +
    +
    + def _add_perf_metric_for_test(path, time, tests, depth, depth_limit):
    +     """
    +     Aggregate test time into the result for a given test at the specified depth_limit.
    +     """
    +     if not "/" in path:
    +         tests["tests"][path] = {
    +             "metrics": {
    +                 "Time": {
    +                     "current": [time],
    +                 }}}
    +         return
    +
    +     directory, slash, rest = path.partition("/")
    +     if depth == depth_limit:
    +         if directory not in tests["tests"]:
    +             tests["tests"][directory] = {
    +                 "metrics": {
    +                     "Time": {
    +                         "current": [time],
    +                     }}}
    +         else:
    +             tests["tests"][directory]["metrics"]["Time"]["current"][0] += time
    +         return
    +     else:
    +         if directory not in tests["tests"]:
    +             tests["tests"][directory] = {
    +                 "metrics": {
    +                     "Time": ["Total", "Arithmetic"],
    +                 },
    +                 "tests": {}
    +             }
    +         _add_perf_metric_for_test(rest, time, tests["tests"][directory], depth + 1, depth_limit)
    +
    +
    + def perf_metrics_for_test(run_time, individual_test_timings):
    +     """
    +     Output two performance metrics:
    +     1. run time, which is how much time is consumed by the layout tests script
    +     2. run time of the first and second levels of test directories
    +     """
    +     total_run_time = 0
    +
    +     for test_result in individual_test_timings:
    +         total_run_time += int(1000 * test_result.test_run_time)
    +
    +     perf_metric = {
    +         "layout_tests": {
    +             "metrics": {
    +                 "Time": ["Total", "Arithmetic"],
    +             },
    +             "tests": {}
    +         },
    +         "layout_tests_run_time": {
    +             "metrics": {
    +                 "Time": {"current": [run_time]},
    +             }}}
    +     for test_result in individual_test_timings:
    +         test = test_result.test_name
    +         # for now, we only send two levels of directories
    +         _add_perf_metric_for_test(test, int(1000 * test_result.test_run_time), perf_metric["layout_tests"], 1, 2)
    +     return perf_metric
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py

    r174136 r239739

          self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
    +
    +     def test_perf_metrics_for_test(self):
    +         individual_test_timings = []
    +         individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2))
    +         individual_test_timings.append(json_results_generator.TestResult('foo/bar/ba.html', elapsed_time=1.4))
    +         individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
    +         metrics = json_results_generator.perf_metrics_for_test(1200, individual_test_timings)
    +
    +         expected_metrics = {
    +             "layout_tests": {
    +                 "metrics": {
    +                     "Time": ["Total", "Arithmetic"],
    +                 },
    +                 "tests": {
    +                     "foo": {
    +                         "metrics": {
    +                             "Time": ["Total", "Arithmetic"],
    +                         },
    +                         "tests": {
    +                             "bar": {
    +                                 "metrics": {
    +                                     "Time": {"current": [2600]},
    +                                 }
    +                             }
    +                         }
    +                     },
    +                     "bar.html": {
    +                         "metrics": {
    +                             "Time": {"current": [0]},
    +                         }
    +                     }
    +                 }
    +             },
    +             "layout_tests_run_time": {
    +                 "metrics": {
    +                     "Time": {"current": [1200]},
    +                 }
    +             }}
    +         self.assertEqual(json.dumps(metrics), json.dumps(expected_metrics))
  • trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py

    r235764 r239739

          file_list = host.filesystem.written_files.keys()
          self.assertEqual(details.exit_code, 0)
    -     self.assertEqual(len(file_list), 8)
    +     self.assertEqual(len(file_list), 9)
          self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
          self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

          file_list = host.filesystem.written_files.keys()
          self.assertEqual(details.exit_code, 0)
    -     self.assertEqual(len(file_list), 10)
    +     self.assertEqual(len(file_list), 11)
          self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
          self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)

          file_list = host.filesystem.written_files.keys()
          self.assertEqual(details.exit_code, 0)
    -     self.assertEqual(len(file_list), 8)
    +     self.assertEqual(len(file_list), 9)
          self.assertBaselines(file_list,
              "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)