Changeset 104856 in webkit
- Timestamp:
- Jan 12, 2012 2:08:41 PM (12 years ago)
- Location:
- trunk/Tools
- Files:
- 1 added
- 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/Tools/ChangeLog
r104843 r104856 1 2012-01-12 Ryosuke Niwa <rniwa@webkit.org> 2 3 Need a script to run tests in PerformanceTests 4 https://bugs.webkit.org/show_bug.cgi?id=76132 5 6 Reviewed by Adam Barth. 7 8 Add run-perf-tests to run performance tests using parser/resources/runner.js. 9 Unfortunately, there isn't an easy way of telling which test uses which format 10 so hard-code directory that uses Chromium perf-bot style (only inspector perf. tests for now). 11 12 All test outputs are re-formatted to match Chromium perf-bot style. 13 14 * Scripts/run-inspector-perf-tests.py: 15 * Scripts/run-perf-tests: Added. 16 * Scripts/webkitpy/performance_tests/perftestsrunner.py: 17 (PerfTestsRunner): 18 (PerfTestsRunner.__init__): 19 (PerfTestsRunner._collect_tests): 20 (PerfTestsRunner.run): 21 (PerfTestsRunner._print_status): 22 (PerfTestsRunner._run_tests_set): 23 (PerfTestsRunner._process_chromium_style_test_result): 24 (PerfTestsRunner._should_ignore_line_in_parser_test_result): 25 (PerfTestsRunner._process_parser_test_result): 26 (PerfTestsRunner._run_single_test): 27 * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py: 28 (MainTest.TestDriver.run_test): 29 (create_runner): 30 (run_test): 31 (test_run_test_set): 32 (test_run_test_set_for_parser_tests): 33 (test_collect_tests): 34 1 35 2012-01-12 Dirk Pranke <dpranke@chromium.org> 2 36 -
trunk/Tools/Scripts/run-inspector-perf-tests.py
r101618 r104856 39 39 if '__main__' == __name__: 40 40 logging.basicConfig(level=logging.INFO, format="%(message)s") 41 sys.exit(PerfTestsRunner( 'inspector').run())41 sys.exit(PerfTestsRunner(args=['inspector']).run()) -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
r103289 r104856 45 45 class PerfTestsRunner(object): 46 46 _perf_tests_base_dir = 'PerformanceTests' 47 _result_regex = re.compile('^RESULT .*$') 48 49 def __init__(self, perf_tests_dir, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None): 50 self._perf_tests_dir = perf_tests_dir 47 _test_directories_for_chromium_style_tests = ['inspector'] 48 49 def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None): 51 50 self._buildbot_output = buildbot_output 52 51 self._options, self._args = self._parse_args(args) … … 56 55 self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False) 57 56 self._webkit_base_dir_len = len(self._port.webkit_base()) 57 self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir) 58 58 59 59 def _parse_args(self, args=None): … … 76 76 return optparse.OptionParser(option_list=option_list).parse_args(args) 77 77 78 def _collect_tests(self , webkit_base, filesystem=None):78 def _collect_tests(self): 79 79 """Return the list of tests found.""" 80 80 … … 82 82 return filename.endswith('.html') 83 83 84 filesystem = filesystem or self._host.filesystem 85 base_dir = filesystem.join(webkit_base, self._perf_tests_base_dir, self._perf_tests_dir) 86 return find_files.find(filesystem, base_dir, paths=self._args, file_filter=_is_test_file) 84 return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file) 87 85 88 86 def run(self): … … 100 98 unexpected = -1 101 99 try: 102 tests = self._collect_tests( self._port.webkit_base())100 tests = self._collect_tests() 103 101 unexpected = self._run_tests_set(tests, self._port) 104 102 finally: … … 106 104 107 105 return unexpected 106 107 def _print_status(self, tests, expected, unexpected): 108 if len(tests) == expected + unexpected: 109 status = "Ran %d tests" % len(tests) 110 else: 111 status = "Running %d of %d tests" % (expected + 
unexpected + 1, len(tests)) 112 if unexpected: 113 status += " (%d didn't run)" % unexpected 114 self._printer.write(status) 108 115 109 116 def _run_tests_set(self, tests, port): … … 111 118 expected = 0 112 119 unexpected = 0 113 self._printer.print_one_line_summary(result_count, 0, 0)114 120 driver_need_restart = False 115 121 driver = None … … 123 129 driver = port.create_driver(worker_number=1) 124 130 125 test_failed, driver_need_restart = self._run_single_test(test, driver) 131 relative_test_path = self._host.filesystem.relpath(test, self._base_path) 132 self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests))) 133 134 is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests 135 test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style) 126 136 if test_failed: 127 137 unexpected = unexpected + 1 … … 129 139 expected = expected + 1 130 140 131 self._printer. 
print_one_line_summary(result_count, expected, unexpected)141 self._printer.write('') 132 142 133 143 if driver: … … 136 146 return unexpected 137 147 138 def _run_single_test(self, test, driver): 148 _inspector_result_regex = re.compile('^RESULT .*$') 149 150 def _process_chromium_style_test_result(self, test, output): 151 test_failed = False 152 got_a_result = False 153 for line in re.split('\n', output.text): 154 if self._inspector_result_regex.match(line): 155 self._buildbot_output.write("%s\n" % line) 156 got_a_result = True 157 elif not len(line) == 0: 158 test_failed = True 159 self._printer.write("%s" % line) 160 return test_failed or not got_a_result 161 162 _lines_to_ignore_in_parser_result = [ 163 re.compile(r'^Running \d+ times$'), 164 re.compile(r'^Ignoring warm-up '), 165 re.compile(r'^\d+$'), 166 ] 167 168 def _should_ignore_line_in_parser_test_result(self, line): 169 if not line: 170 return True 171 for regex in self._lines_to_ignore_in_parser_result: 172 if regex.match(line): 173 return True 174 return False 175 176 def _process_parser_test_result(self, test, output): 177 got_a_result = False 178 test_failed = False 179 filesystem = self._host.filesystem 180 category, test_name = filesystem.split(filesystem.relpath(test, self._base_path)) 181 test_name = filesystem.splitext(test_name)[0] 182 results = {} 183 keys = ['avg', 'median', 'stdev', 'min', 'max'] 184 score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)') 185 for line in re.split('\n', output.text): 186 score = score_regex.match(line) 187 if score: 188 results[score.group(1)] = score.group(2) 189 continue 190 191 if not self._should_ignore_line_in_parser_test_result(line): 192 test_failed = True 193 self._printer.write("%s" % line) 194 195 if test_failed or set(keys) != set(results.keys()): 196 return True 197 self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg'])) 198 self._buildbot_output.write(', '.join(['%s= %s ms' % (key, 
results[key]) for key in keys[1:]]) + '\n') 199 return False 200 201 def _run_single_test(self, test, driver, is_chromium_style): 139 202 test_failed = False 140 203 driver_need_restart = False … … 152 215 test_failed = True 153 216 else: 154 got_a_result = False 155 for line in re.split('\n', output.text): 156 if self._result_regex.match(line): 157 self._buildbot_output.write("%s\n" % line) 158 got_a_result = True 159 elif not len(line) == 0: 160 test_failed = True 161 self._printer.write("%s" % line) 162 test_failed = test_failed or not got_a_result 217 if is_chromium_style: 218 test_failed = self._process_chromium_style_test_result(test, output) 219 else: 220 test_failed = self._process_parser_test_result(test, output) 163 221 164 222 if len(output.error): … … 166 224 test_failed = True 167 225 226 if test_failed: 227 self._printer.write('FAILED') 228 168 229 return test_failed, driver_need_restart -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
r101618 r104856 49 49 timeout = False 50 50 crash = False 51 if driver_input.test_name == 'pass.html':51 if driver_input.test_name.endswith('pass.html'): 52 52 text = 'RESULT group_name: test_name= 42 ms' 53 elif driver_input.test_name == 'timeout.html':53 elif driver_input.test_name.endswith('timeout.html'): 54 54 timeout = True 55 elif driver_input.test_name == 'failed.html':55 elif driver_input.test_name.endswith('failed.html'): 56 56 text = None 57 elif driver_input.test_name == 'tonguey.html':57 elif driver_input.test_name.endswith('tonguey.html'): 58 58 text = 'we are not expecting an output from perf tests but RESULT blablabla' 59 elif driver_input.test_name == 'crash.html':59 elif driver_input.test_name.endswith('crash.html'): 60 60 crash = True 61 elif driver_input.test_name.endswith('event-target-wrapper.html'): 62 text = """Running 20 times 63 Ignoring warm-up run (1502) 64 1504 65 1505 66 1510 67 1504 68 1507 69 1509 70 1510 71 1487 72 1488 73 1472 74 1472 75 1488 76 1473 77 1472 78 1475 79 1487 80 1486 81 1486 82 1475 83 1471 84 85 avg 1489.05 86 median 1487 87 stdev 14.46 88 min 1471 89 max 1510 90 """ 91 elif driver_input.test_name.endswith('some-parser.html'): 92 text = """Running 20 times 93 Ignoring warm-up run (1115) 94 95 avg 1100 96 median 1101 97 stdev 11 98 min 1080 99 max 1120 100 """ 61 101 return DriverOutput(text, '', '', '', crash=crash, timeout=timeout) 62 102 … … 67 107 buildbot_output = buildbot_output or array_stream.ArrayStream() 68 108 regular_output = array_stream.ArrayStream() 69 return PerfTestsRunner( '',regular_output, buildbot_output, args=[])109 return PerfTestsRunner(regular_output, buildbot_output, args=[]) 70 110 71 111 def run_test(self, test_name): 72 112 runner = self.create_runner() 73 113 driver = MainTest.TestDriver() 74 return runner._run_single_test(test_name, driver )114 return runner._run_single_test(test_name, driver, is_chromium_style=True) 75 115 76 116 def test_run_passing_test(self): … … 107 147 
buildbot_output = array_stream.ArrayStream() 108 148 runner = self.create_runner(buildbot_output) 149 runner._base_path = '/test.checkout/PerformanceTests' 109 150 port = MainTest.TestPort() 110 tests = ['pass.html', 'silent.html', 'failed.html', 'tonguey.html', 'timeout.html', 'crash.html'] 151 dirname = runner._base_path + '/inspector/' 152 tests = [dirname + 'pass.html', dirname + 'silent.html', dirname + 'failed.html', 153 dirname + 'tonguey.html', dirname + 'timeout.html', dirname + 'crash.html'] 111 154 unexpected_result_count = runner._run_tests_set(tests, port) 112 155 self.assertEqual(unexpected_result_count, len(tests) - 1) … … 114 157 self.assertEqual(buildbot_output.get()[0], 'RESULT group_name: test_name= 42 ms\n') 115 158 159 def test_run_test_set_for_parser_tests(self): 160 buildbot_output = array_stream.ArrayStream() 161 runner = self.create_runner(buildbot_output) 162 runner._base_path = '/test.checkout/PerformanceTests/' 163 port = MainTest.TestPort() 164 tests = [runner._base_path + 'Bindings/event-target-wrapper.html', runner._base_path + 'Parser/some-parser.html'] 165 unexpected_result_count = runner._run_tests_set(tests, port) 166 self.assertEqual(unexpected_result_count, 0) 167 self.assertEqual(buildbot_output.get()[0], 'RESULT Bindings: event-target-wrapper= 1489.05 ms\n') 168 self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n') 169 self.assertEqual(buildbot_output.get()[2], 'RESULT Parser: some-parser= 1100 ms\n') 170 self.assertEqual(buildbot_output.get()[3], 'median= 1101 ms, stdev= 11 ms, min= 1080 ms, max= 1120 ms\n') 171 116 172 def test_collect_tests(self): 117 173 runner = self.create_runner() 118 webkit_base = '/test.checkout'174 runner._base_path = '/test.checkout/PerformanceTests' 119 175 filesystem = MockFileSystem() 120 filename = filesystem.join(webkit_base, 'PerformanceTests', 'a_file.html') 176 filename = filesystem.join(runner._base_path, 'inspector', 'a_file.html') 177 
filesystem.maybe_make_directory(runner._base_path, 'inspector') 121 178 filesystem.files[filename] = 'a content' 122 tests = runner._collect_tests(webkit_base, filesystem) 179 runner._host.filesystem = filesystem 180 tests = runner._collect_tests() 123 181 self.assertEqual(len(tests), 1) 124 182
Note: See TracChangeset for help on using the changeset viewer.