Changeset 114076 in webkit


Ignore:
Timestamp:
Apr 12, 2012 8:11:20 PM (12 years ago)
Author:
rniwa@webkit.org
Message:

Extract PerfTest class from PerfTestRunner
https://bugs.webkit.org/show_bug.cgi?id=83847

Reviewed by Hajime Morita.

Extracted PerfTest and ChromiumStylePerfTest from PerfTestRunner. This class abstracts a test
that was previously represented by a tuple.

Also moved the logic to determine whether a given test is chromium style or not from run() to
_collect_tests(). And moved the output parsing algorithms for parser style and chromium style
tests from PerfTestRunner to PerfTest and ChromiumStylePerfTest respectively so that we may
add new types of tests more easily.

  • Scripts/webkitpy/performance_tests/perftest.py: Added.

(PerfTest):
(PerfTest.__init__):
(PerfTest.test_name):
(PerfTest.dirname):
(PerfTest.path_or_url):
(PerfTest._should_ignore_line_in_parser_test_result):
(PerfTest.parse_output):
(ChromiumStylePerfTest):
(ChromiumStylePerfTest.__init__):
(ChromiumStylePerfTest.parse_output):

  • Scripts/webkitpy/performance_tests/perftestsrunner.py:

(PerfTestsRunner._collect_tests):
(PerfTestsRunner.run):
(PerfTestsRunner._run_tests_set):
(PerfTestsRunner._run_single_test):

  • Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:

(run_test):
(_tests_for_runner):
(test_run_test_set_with_json_output):
(test_run_test_set_with_json_source):
(test_run_test_set_with_multiple_repositories):
(_collect_tests_and_sort_test_name):
(test_collect_tests):
(test_collect_tests_with_skipped_list):

Location:
trunk/Tools
Files:
1 added
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/Tools/ChangeLog

    r114042 r114076  
     12012-04-12  Ryosuke Niwa  <rniwa@webkit.org>
     2
     3        Extract PerfTest class from PerfTestRunner
     4        https://bugs.webkit.org/show_bug.cgi?id=83847
     5
     6        Reviewed by Hajime Morita.
     7
     8        Extracted PerfTest and ChromiumStylePerfTest from PerfTestRunner. This class abstracts a test
     9        that was previously represented by a tuple.
     10
     11        Also moved the logic to determine whether a given test is chromium style or not from run() to
     12        _collect_tests(). And moved the output parsing algorithms for parser style and chromium style
     13        tests from PerfTestRunner to PerfTest and ChromiumStylePerfTest respectively so that we may
     14        add new types of tests more easily.
     15
     16        * Scripts/webkitpy/performance_tests/perftest.py: Added.
     17        (PerfTest):
     18        (PerfTest.__init__):
     19        (PerfTest.test_name):
     20        (PerfTest.dirname):
     21        (PerfTest.path_or_url):
     22        (PerfTest._should_ignore_line_in_parser_test_result):
     23        (PerfTest.parse_output):
     24        (ChromiumStylePerfTest):
     25        (ChromiumStylePerfTest.__init__):
     26        (ChromiumStylePerfTest.parse_output):
     27        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
     28        (PerfTestsRunner._collect_tests):
     29        (PerfTestsRunner.run):
     30        (PerfTestsRunner._run_tests_set):
     31        (PerfTestsRunner._run_single_test):
     32        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
     33        (run_test):
     34        (_tests_for_runner):
     35        (test_run_test_set_with_json_output):
     36        (test_run_test_set_with_json_source):
     37        (test_run_test_set_with_multiple_repositories):
     38        (_collect_tests_and_sort_test_name):
     39        (test_collect_tests):
     40        (test_collect_tests_with_skipped_list):
     41
    1422012-04-12  Balazs Kelemen  <kbalazs@webkit.org>
    243
  • trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py

    r113839 r114076  
    4242from webkitpy.layout_tests.port.driver import DriverInput
    4343from webkitpy.layout_tests.views import printing
     44from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
     45from webkitpy.performance_tests.perftest import PerfTest
    4446
    4547_log = logging.getLogger(__name__)
     
    113115            return filename.endswith('.html')
    114116
     117        filesystem = self._host.filesystem
     118
    115119        paths = []
    116120        for arg in self._args:
    117121            paths.append(arg)
    118             relpath = self._host.filesystem.relpath(arg, self._base_path)
     122            relpath = filesystem.relpath(arg, self._base_path)
    119123            if relpath:
    120124                paths.append(relpath)
    121125
    122126        skipped_directories = set(['.svn', 'resources'])
    123         test_files = find_files.find(self._host.filesystem, self._base_path, paths, skipped_directories, _is_test_file)
     127        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
    124128        tests = []
    125129        for path in test_files:
    126             test_name = self._port.relative_perf_test_filename(path)
    127             if self._port.skips_perf_test(test_name):
     130            relative_path = self._port.relative_perf_test_filename(path)
     131            if self._port.skips_perf_test(relative_path):
    128132                continue
    129             tests.append((test_name.replace('\\', '/'), path))
     133            test_name = relative_path.replace('\\', '/')
     134            dirname = filesystem.dirname(path)
     135            if self._host.filesystem.dirname(relative_path) in self._test_directories_for_chromium_style_tests:
     136                tests.append(ChromiumStylePerfTest(test_name, dirname, path))
     137            else:
     138                tests.append(PerfTest(test_name, dirname, path))
     139
    130140        return tests
    131141
     
    145155        try:
    146156            tests = self._collect_tests()
    147             unexpected = self._run_tests_set(sorted(list(tests)), self._port)
     157            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
    148158        finally:
    149159            self._printer.cleanup()
     
    225235        driver = None
    226236
    227         for (test_name, test_path) in tests:
     237        for test in tests:
    228238            driver = port.create_driver(worker_number=1, no_timeout=True)
    229239
     
    234244                    return unexpected
    235245
    236             self._printer.write('Running %s (%d of %d)' % (test_name, expected + unexpected + 1, len(tests)))
    237 
    238             is_chromium_style = self._host.filesystem.dirname(test_name) in self._test_directories_for_chromium_style_tests
    239             if self._run_single_test(test_name, test_path, driver, is_chromium_style):
     246            self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
     247            if self._run_single_test(test, driver):
    240248                expected = expected + 1
    241249            else:
     
    265273        return test_failed or not got_a_result
    266274
    267     _lines_to_ignore_in_parser_result = [
    268         re.compile(r'^Running \d+ times$'),
    269         re.compile(r'^Ignoring warm-up '),
    270         re.compile(r'^Info:'),
    271         re.compile(r'^\d+(.\d+)?$'),
    272         # Following are for handle existing test like Dromaeo
    273         re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
    274         re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
    275         re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]
    276 
    277     def _should_ignore_line_in_parser_test_result(self, line):
    278         if not line:
    279             return True
    280         for regex in self._lines_to_ignore_in_parser_result:
    281             if regex.search(line):
    282                 return True
    283         return False
    284 
    285     def _process_parser_test_result(self, test_name, output):
    286         got_a_result = False
    287         test_failed = False
    288         filesystem = self._host.filesystem
    289         results = {}
    290         keys = ['avg', 'median', 'stdev', 'min', 'max']
    291         score_regex = re.compile(r'^(?P<key>' + r'|'.join(keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
    292         unit = "ms"
    293 
    294         for line in re.split('\n', output.text):
    295             score = score_regex.match(line)
    296             if score:
    297                 results[score.group('key')] = float(score.group('value'))
    298                 if score.group('unit'):
    299                     unit = score.group('unit')
    300                 continue
    301 
    302             if not self._should_ignore_line_in_parser_test_result(line):
    303                 test_failed = True
    304                 self._printer.write("%s" % line)
    305 
    306         if test_failed or set(keys) != set(results.keys()):
    307             return True
    308 
    309         results['unit'] = unit
    310 
    311         test_name = re.sub(r'\.\w+$', '', test_name)
    312         self._results[test_name] = results
    313         self._buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
    314         self._buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in keys[1:]]) + '\n')
    315         return False
    316 
    317     def _run_single_test(self, test_name, test_path, driver, is_chromium_style):
    318         test_failed = False
     275    def _run_single_test(self, test, driver):
    319276        start_time = time.time()
    320277
    321         output = driver.run_test(DriverInput(test_path, self._options.time_out_ms, None, False))
     278        output = driver.run_test(DriverInput(test.path_or_url(), self._options.time_out_ms, None, False))
     279        new_results = None
    322280
    323281        if output.text == None:
    324             test_failed = True
     282            pass
    325283        elif output.timeout:
    326             self._printer.write('timeout: %s' % test_name)
    327             test_failed = True
     284            self._printer.write('timeout: %s' % test.test_name())
    328285        elif output.crash:
    329             self._printer.write('crash: %s' % test_name)
    330             test_failed = True
     286            self._printer.write('crash: %s' % test.test_name())
    331287        else:
    332             if is_chromium_style:
    333                 test_failed = self._process_chromium_style_test_result(test_name, output)
    334             else:
    335                 test_failed = self._process_parser_test_result(test_name, output)
     288            new_results = test.parse_output(output, self._printer, self._buildbot_output)
    336289
    337290        if len(output.error):
    338291            self._printer.write('error:\n%s' % output.error)
    339             test_failed = True
    340 
    341         if test_failed:
     292            new_results = None
     293
     294        if new_results:
     295            self._results.update(new_results)
     296        else:
    342297            self._printer.write('FAILED')
    343298
    344299        self._printer.write("Finished: %f s" % (time.time() - start_time))
    345300
    346         return not test_failed
     301        return new_results != None
  • trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

    r113839 r114076  
    4040from webkitpy.layout_tests.port.test import TestPort
    4141from webkitpy.layout_tests.views import printing
     42from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
     43from webkitpy.performance_tests.perftest import PerfTest
    4244from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
    4345
     
    127129        runner = self.create_runner()
    128130        driver = MainTest.TestDriver()
    129         return runner._run_single_test(test_name, test_name, driver, is_chromium_style=True)
     131        return runner._run_single_test(ChromiumStylePerfTest(test_name, 'some-dir',
     132            runner._host.filesystem.join('some-dir', test_name)), driver)
    130133
    131134    def test_run_passing_test(self):
     
    147150        self.assertFalse(self.run_test('crash.html'))
    148151
    149     def _tests_for_runner(self, runner, tests):
    150         return [(test, runner._base_path + '/' + test) for test in tests]
     152    def _tests_for_runner(self, runner, test_names):
     153        filesystem = runner._host.filesystem
     154        tests = []
     155        for test in test_names:
     156            path = filesystem.join(runner._base_path, test)
     157            dirname = filesystem.dirname(path)
     158            if test.startswith('inspector/'):
     159                tests.append(ChromiumStylePerfTest(test, dirname, path))
     160            else:
     161                tests.append(PerfTest(test, dirname, path))
     162        return tests
    151163
    152164    def test_run_test_set(self):
     
    222234            "timestamp": 123456789, "results":
    223235            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
    224             "group_name:test_name": 42},
     236            "inspector/pass.html:group_name:test_name": 42},
    225237            "webkit-revision": 5678})
    226238
     
    241253            "timestamp": 123456789, "results":
    242254            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
    243             "group_name:test_name": 42},
     255            "inspector/pass.html:group_name:test_name": 42},
    244256            "webkit-revision": 5678,
    245257            "key": "value"})
     
    254266
    255267        self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
    256             "timestamp": 123456789, "results": {"group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
     268            "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
    257269
    258270    def test_run_with_upload_json(self):
     
    333345        self.assertEqual(len(tests), 1)
    334346
     347    def _collect_tests_and_sort_test_name(self, runner):
     348        return sorted([test.test_name() for test in runner._collect_tests()])
     349
    335350    def test_collect_tests(self):
    336351        runner = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
     
    343358        add_file('test3.html')
    344359        runner._host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
    345         self.assertEqual(sorted([test[0] for test in runner._collect_tests()]), ['test1.html', 'test2.html'])
     360        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
    346361
    347362    def test_collect_tests_with_skipped_list(self):
     
    359374        add_file('unsupported', 'unsupported_test2.html')
    360375        runner._port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
    361         self.assertEqual(sorted([test[0] for test in runner._collect_tests()]), ['inspector/test1.html', 'inspector/test2.html'])
     376        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
    362377
    363378    def test_parse_args(self):
Note: See TracChangeset for help on using the changeset viewer.