Changeset 104856 in webkit


Timestamp:
Jan 12, 2012 2:08:41 PM
Author:
rniwa@webkit.org
Message:

Need a script to run tests in PerformanceTests
https://bugs.webkit.org/show_bug.cgi?id=76132

Reviewed by Adam Barth.

Add run-perf-tests to run performance tests using parser/resources/runner.js.
Unfortunately, there isn't an easy way of telling which test uses which format,
so hard-code the directories that use Chromium perf-bot style (only inspector perf tests for now).

All test outputs are re-formatted to match Chromium perf-bot style.

  • Scripts/run-inspector-perf-tests.py:
  • Scripts/run-perf-tests: Added.
  • Scripts/webkitpy/performance_tests/perftestsrunner.py:

(PerfTestsRunner):
(PerfTestsRunner.__init__):
(PerfTestsRunner._collect_tests):
(PerfTestsRunner.run):
(PerfTestsRunner._print_status):
(PerfTestsRunner._run_tests_set):
(PerfTestsRunner._process_chromium_style_test_result):
(PerfTestsRunner._should_ignore_line_in_parser_test_result):
(PerfTestsRunner._process_parser_test_result):
(PerfTestsRunner._run_single_test):

  • Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:

(MainTest.TestDriver.run_test):
(create_runner):
(run_test):
(test_run_test_set):
(test_run_test_set_for_parser_tests):
(test_collect_tests):
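
For illustration, here is a minimal, self-contained sketch of the re-formatting described above: how a parser-style result (runner.js output) is reduced to the two Chromium perf-bot style lines. This is not the committed code; the helper name format_parser_output is hypothetical, and the sample numbers are taken from the new unit test further down.

    import re

    # Hypothetical helper mirroring what PerfTestsRunner._process_parser_test_result
    # does in the patch below: pick out the summary statistics and emit them in
    # Chromium perf-bot style.
    def format_parser_output(category, test_name, text):
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(' + '|'.join(keys) + r')\s+([0-9.]+)')
        results = {}
        for line in text.split('\n'):
            match = score_regex.match(line)
            if match:
                results[match.group(1)] = match.group(2)
        # The RESULT line carries the average; the other statistics follow on a second line.
        first = 'RESULT %s: %s= %s ms' % (category, test_name, results['avg'])
        second = ', '.join('%s= %s ms' % (key, results[key]) for key in keys[1:])
        return first, second

    sample = 'Running 20 times\nIgnoring warm-up run (1502)\n1504\n1505\n\navg 1489.05\nmedian 1487\nstdev 14.46\nmin 1471\nmax 1510\n'
    print(format_parser_output('Bindings', 'event-target-wrapper', sample))
    # ('RESULT Bindings: event-target-wrapper= 1489.05 ms',
    #  'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms')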

Location:
trunk/Tools
Files:
1 added
4 edited

  • trunk/Tools/ChangeLog

    r104843 → r104856

    + 2012-01-12  Ryosuke Niwa  <rniwa@webkit.org>
    +
    +         Need a script to run tests in PerformanceTests
    +         https://bugs.webkit.org/show_bug.cgi?id=76132
    +
    +         Reviewed by Adam Barth.
    +
    +         Add run-perf-tests to run performance tests using parser/resources/runner.js.
    +         Unfortunately, there isn't an easy way of telling which test uses which format
    +         so hard-code directory that uses Chromium perf-bot style (only inspector perf. tests for now).
    +
    +         All test outputs are re-formatted to match Chromium perf-bot style.
    +
    +         * Scripts/run-inspector-perf-tests.py:
    +         * Scripts/run-perf-tests: Added.
    +         * Scripts/webkitpy/performance_tests/perftestsrunner.py:
    +         (PerfTestsRunner):
    +         (PerfTestsRunner.__init__):
    +         (PerfTestsRunner._collect_tests):
    +         (PerfTestsRunner.run):
    +         (PerfTestsRunner._print_status):
    +         (PerfTestsRunner._run_tests_set):
    +         (PerfTestsRunner._process_chromium_style_test_result):
    +         (PerfTestsRunner._should_ignore_line_in_parser_test_result):
    +         (PerfTestsRunner._process_parser_test_result):
    +         (PerfTestsRunner._run_single_test):
    +         * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
    +         (MainTest.TestDriver.run_test):
    +         (create_runner):
    +         (run_test):
    +         (test_run_test_set):
    +         (test_run_test_set_for_parser_tests):
    +         (test_collect_tests):
    +
      2012-01-12  Dirk Pranke  <dpranke@chromium.org>
  • trunk/Tools/Scripts/run-inspector-perf-tests.py

    r101618 → r104856

      if '__main__' == __name__:
          logging.basicConfig(level=logging.INFO, format="%(message)s")
    -     sys.exit(PerfTestsRunner('inspector').run())
    +     sys.exit(PerfTestsRunner(args=['inspector']).run())
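
The new Scripts/run-perf-tests entry point is listed as added above, but its contents are not shown in this changeset view. Judging from the updated run-inspector-perf-tests.py, a sketch of such a driver script might look like the following (an assumption, not the committed file):

    #!/usr/bin/env python
    # Sketch only; the actual Scripts/run-perf-tests added in r104856 is not shown here.
    import logging
    import sys

    from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner

    if '__main__' == __name__:
        logging.basicConfig(level=logging.INFO, format="%(message)s")
        # No directory argument: collect every test under PerformanceTests.
        sys.exit(PerfTestsRunner().run())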
  • trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py

    r103289 → r104856

      class PerfTestsRunner(object):
          _perf_tests_base_dir = 'PerformanceTests'
    -     _result_regex = re.compile('^RESULT .*$')
    -
    -     def __init__(self, perf_tests_dir, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
    -         self._perf_tests_dir = perf_tests_dir
    +     _test_directories_for_chromium_style_tests = ['inspector']
    +
    +     def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
              self._buildbot_output = buildbot_output
              self._options, self._args = self._parse_args(args)
    …
              self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
              self._webkit_base_dir_len = len(self._port.webkit_base())
    +         self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)

          def _parse_args(self, args=None):
    …
              return optparse.OptionParser(option_list=option_list).parse_args(args)

    -     def _collect_tests(self, webkit_base, filesystem=None):
    +     def _collect_tests(self):
              """Return the list of tests found."""

    …
                  return filename.endswith('.html')

    -         filesystem = filesystem or self._host.filesystem
    -         base_dir = filesystem.join(webkit_base, self._perf_tests_base_dir, self._perf_tests_dir)
    -         return find_files.find(filesystem, base_dir, paths=self._args, file_filter=_is_test_file)
    +         return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file)

          def run(self):
    …
              unexpected = -1
              try:
    -             tests = self._collect_tests(self._port.webkit_base())
    +             tests = self._collect_tests()
                  unexpected = self._run_tests_set(tests, self._port)
              finally:
    …

              return unexpected
    +
    +     def _print_status(self, tests, expected, unexpected):
    +         if len(tests) == expected + unexpected:
    +             status = "Ran %d tests" % len(tests)
    +         else:
    +             status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
    +         if unexpected:
    +             status += " (%d didn't run)" % unexpected
    +         self._printer.write(status)

          def _run_tests_set(self, tests, port):
    …
              expected = 0
              unexpected = 0
    -         self._printer.print_one_line_summary(result_count, 0, 0)
              driver_need_restart = False
              driver = None
    …
                      driver = port.create_driver(worker_number=1)

    -             test_failed, driver_need_restart = self._run_single_test(test, driver)
    +             relative_test_path = self._host.filesystem.relpath(test, self._base_path)
    +             self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))
    +
    +             is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
    +             test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
                  if test_failed:
                      unexpected = unexpected + 1
    …
                      expected = expected + 1

    -             self._printer.print_one_line_summary(result_count, expected, unexpected)
    +             self._printer.write('')

              if driver:
    …
              return unexpected

    -     def _run_single_test(self, test, driver):
    +     _inspector_result_regex = re.compile('^RESULT .*$')
    +
    +     def _process_chromium_style_test_result(self, test, output):
    +         test_failed = False
    +         got_a_result = False
    +         for line in re.split('\n', output.text):
    +             if self._inspector_result_regex.match(line):
    +                 self._buildbot_output.write("%s\n" % line)
    +                 got_a_result = True
    +             elif not len(line) == 0:
    +                 test_failed = True
    +                 self._printer.write("%s" % line)
    +         return test_failed or not got_a_result
    +
    +     _lines_to_ignore_in_parser_result = [
    +         re.compile(r'^Running \d+ times$'),
    +         re.compile(r'^Ignoring warm-up '),
    +         re.compile(r'^\d+$'),
    +     ]
    +
    +     def _should_ignore_line_in_parser_test_result(self, line):
    +         if not line:
    +             return True
    +         for regex in self._lines_to_ignore_in_parser_result:
    +             if regex.match(line):
    +                 return True
    +         return False
    +
    +     def _process_parser_test_result(self, test, output):
    +         got_a_result = False
    +         test_failed = False
    +         filesystem = self._host.filesystem
    +         category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
    +         test_name = filesystem.splitext(test_name)[0]
    +         results = {}
    +         keys = ['avg', 'median', 'stdev', 'min', 'max']
    +         score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
    +         for line in re.split('\n', output.text):
    +             score = score_regex.match(line)
    +             if score:
    +                 results[score.group(1)] = score.group(2)
    +                 continue
    +
    +             if not self._should_ignore_line_in_parser_test_result(line):
    +                 test_failed = True
    +                 self._printer.write("%s" % line)
    +
    +         if test_failed or set(keys) != set(results.keys()):
    +             return True
    +         self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
    +         self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
    +         return False
    +
    +     def _run_single_test(self, test, driver, is_chromium_style):
              test_failed = False
              driver_need_restart = False
    …
                  test_failed = True
              else:
    -             got_a_result = False
    -             for line in re.split('\n', output.text):
    -                 if self._result_regex.match(line):
    -                     self._buildbot_output.write("%s\n" % line)
    -                     got_a_result = True
    -                 elif not len(line) == 0:
    -                     test_failed = True
    -                     self._printer.write("%s" % line)
    -             test_failed = test_failed or not got_a_result
    +             if is_chromium_style:
    +                 test_failed = self._process_chromium_style_test_result(test, output)
    +             else:
    +                 test_failed = self._process_parser_test_result(test, output)

              if len(output.error):
    …
                  test_failed = True

    +         if test_failed:
    +             self._printer.write('FAILED')
    +
              return test_failed, driver_need_restart
  • trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

    r101618 → r104856

                  timeout = False
                  crash = False
    -             if driver_input.test_name == 'pass.html':
    +             if driver_input.test_name.endswith('pass.html'):
                      text = 'RESULT group_name: test_name= 42 ms'
    -             elif driver_input.test_name == 'timeout.html':
    +             elif driver_input.test_name.endswith('timeout.html'):
                      timeout = True
    -             elif driver_input.test_name == 'failed.html':
    +             elif driver_input.test_name.endswith('failed.html'):
                      text = None
    -             elif driver_input.test_name == 'tonguey.html':
    +             elif driver_input.test_name.endswith('tonguey.html'):
                      text = 'we are not expecting an output from perf tests but RESULT blablabla'
    -             elif driver_input.test_name == 'crash.html':
    +             elif driver_input.test_name.endswith('crash.html'):
                      crash = True
    +             elif driver_input.test_name.endswith('event-target-wrapper.html'):
    +                 text = """Running 20 times
    + Ignoring warm-up run (1502)
    + 1504
    + 1505
    + 1510
    + 1504
    + 1507
    + 1509
    + 1510
    + 1487
    + 1488
    + 1472
    + 1472
    + 1488
    + 1473
    + 1472
    + 1475
    + 1487
    + 1486
    + 1486
    + 1475
    + 1471
    +
    + avg 1489.05
    + median 1487
    + stdev 14.46
    + min 1471
    + max 1510
    + """
    +             elif driver_input.test_name.endswith('some-parser.html'):
    +                 text = """Running 20 times
    + Ignoring warm-up run (1115)
    +
    + avg 1100
    + median 1101
    + stdev 11
    + min 1080
    + max 1120
    + """
                  return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

    …
              buildbot_output = buildbot_output or array_stream.ArrayStream()
              regular_output = array_stream.ArrayStream()
    -         return PerfTestsRunner('', regular_output, buildbot_output, args=[])
    +         return PerfTestsRunner(regular_output, buildbot_output, args=[])

          def run_test(self, test_name):
              runner = self.create_runner()
              driver = MainTest.TestDriver()
    -         return runner._run_single_test(test_name, driver)
    +         return runner._run_single_test(test_name, driver, is_chromium_style=True)

          def test_run_passing_test(self):
    …
              buildbot_output = array_stream.ArrayStream()
              runner = self.create_runner(buildbot_output)
    +         runner._base_path = '/test.checkout/PerformanceTests'
              port = MainTest.TestPort()
    -         tests = ['pass.html', 'silent.html', 'failed.html', 'tonguey.html', 'timeout.html', 'crash.html']
    +         dirname = runner._base_path + '/inspector/'
    +         tests = [dirname + 'pass.html', dirname + 'silent.html', dirname + 'failed.html',
    +             dirname + 'tonguey.html', dirname + 'timeout.html', dirname + 'crash.html']
              unexpected_result_count = runner._run_tests_set(tests, port)
              self.assertEqual(unexpected_result_count, len(tests) - 1)
    …
              self.assertEqual(buildbot_output.get()[0], 'RESULT group_name: test_name= 42 ms\n')

    +     def test_run_test_set_for_parser_tests(self):
    +         buildbot_output = array_stream.ArrayStream()
    +         runner = self.create_runner(buildbot_output)
    +         runner._base_path = '/test.checkout/PerformanceTests/'
    +         port = MainTest.TestPort()
    +         tests = [runner._base_path + 'Bindings/event-target-wrapper.html', runner._base_path + 'Parser/some-parser.html']
    +         unexpected_result_count = runner._run_tests_set(tests, port)
    +         self.assertEqual(unexpected_result_count, 0)
    +         self.assertEqual(buildbot_output.get()[0], 'RESULT Bindings: event-target-wrapper= 1489.05 ms\n')
    +         self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
    +         self.assertEqual(buildbot_output.get()[2], 'RESULT Parser: some-parser= 1100 ms\n')
    +         self.assertEqual(buildbot_output.get()[3], 'median= 1101 ms, stdev= 11 ms, min= 1080 ms, max= 1120 ms\n')
    +
          def test_collect_tests(self):
              runner = self.create_runner()
    -         webkit_base = '/test.checkout'
    +         runner._base_path = '/test.checkout/PerformanceTests'
              filesystem = MockFileSystem()
    -         filename = filesystem.join(webkit_base, 'PerformanceTests', 'a_file.html')
    +         filename = filesystem.join(runner._base_path, 'inspector', 'a_file.html')
    +         filesystem.maybe_make_directory(runner._base_path, 'inspector')
              filesystem.files[filename] = 'a content'
    -         tests = runner._collect_tests(webkit_base, filesystem)
    +         runner._host.filesystem = filesystem
    +         tests = runner._collect_tests()
              self.assertEqual(len(tests), 1)