Changeset 114076 in WebKit

- Timestamp:
- Apr 12, 2012, 8:11:20 PM
- Location:
- trunk/Tools
- Files:
- 1 added
- 3 edited
trunk/Tools/ChangeLog
--- trunk/Tools/ChangeLog (r114042)
+++ trunk/Tools/ChangeLog (r114076)
@@ -1,2 +1,43 @@
+2012-04-12  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Extract PerfTest class from PerfTestRunner
+        https://bugs.webkit.org/show_bug.cgi?id=83847
+
+        Reviewed by Hajime Morita.
+
+        Extracted PerfTest and ChromiumStylePerfTest from PerfTestRunner. These classes abstract a test
+        that was previously represented by a tuple.
+
+        Also moved the logic to determine whether a given test is chromium style or not from run() to
+        _collect_tests(), and moved the output parsing algorithms for parser style and chromium style
+        tests from PerfTestRunner to PerfTest and ChromiumStylePerfTest respectively so that we may
+        add new types of tests more easily.
+
+        * Scripts/webkitpy/performance_tests/perftest.py: Added.
+        (PerfTest):
+        (PerfTest.__init__):
+        (PerfTest.test_name):
+        (PerfTest.dirname):
+        (PerfTest.path_or_url):
+        (PerfTest._should_ignore_line_in_parser_test_result):
+        (PerfTest.parse_output):
+        (ChromiumStylePerfTest):
+        (ChromiumStylePerfTest.__init__):
+        (ChromiumStylePerfTest.parse_output):
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner._collect_tests):
+        (PerfTestsRunner.run):
+        (PerfTestsRunner._run_tests_set):
+        (PerfTestsRunner._run_single_test):
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (run_test):
+        (_tests_for_runner):
+        (test_run_test_set_with_json_output):
+        (test_run_test_set_with_json_source):
+        (test_run_test_set_with_multiple_repositories):
+        (_collect_tests_and_sort_test_name):
+        (test_collect_tests):
+        (test_collect_tests_with_skipped_list):
+
 2012-04-12  Balazs Kelemen  <kbalazs@webkit.org>
 
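perftest.py is the one file this changeset adds, but the viewer shows no diff for it. A rough sketch of the shape the ChangeLog describes, reconstructed from the method list above and from the parser-result code removed from perftestsrunner.py below; the method bodies are indicative, not a verbatim copy of what landed in r114076:

# Sketch only: reconstructed from the ChangeLog entry and the code deleted
# from perftestsrunner.py in this changeset; not the literal perftest.py.
import re


class PerfTest(object):
    def __init__(self, test_name, dirname, path_or_url):
        self._test_name = test_name
        self._dirname = dirname
        self._path_or_url = path_or_url

    def test_name(self):
        return self._test_name

    def dirname(self):
        return self._dirname

    def path_or_url(self):
        return self._path_or_url

    # Expected noise in test output (see the regexes removed from
    # PerfTestsRunner below; the Dromaeo onunload patterns are elided here).
    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^Info:'),
        re.compile(r'^\d+(.\d+)?$')]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        return any(regex.search(line) for regex in self._lines_to_ignore_in_parser_result)

    def parse_output(self, output, printer, buildbot_output):
        """Returns a dictionary of results to merge into the run's results,
        or None on failure -- the contract _run_single_test relies on."""
        test_failed = False
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(?P<key>' + r'|'.join(keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
        unit = 'ms'

        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group('key')] = float(score.group('value'))
                if score.group('unit'):
                    unit = score.group('unit')
                continue
            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                printer.write("%s" % line)

        # All five statistics must be present for the run to count.
        if test_failed or set(keys) != set(results.keys()):
            return None

        results['unit'] = unit
        test_name = re.sub(r'\.\w+$', '', self.test_name())  # strip the file extension
        buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
        buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in keys[1:]]) + '\n')
        return {test_name: results}

The essential contract is the return value of parse_output(): a results dictionary on success, None on failure. The perftestsrunner.py diff below shows the runner merging that dictionary into its accumulated results instead of parsing output itself.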
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (r113839)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (r114076)
@@ -42,4 +42,6 @@
 from webkitpy.layout_tests.port.driver import DriverInput
 from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
 
 _log = logging.getLogger(__name__)
@@ -113,18 +115,26 @@
             return filename.endswith('.html')
 
+        filesystem = self._host.filesystem
+
         paths = []
         for arg in self._args:
             paths.append(arg)
-            relpath = self._host.filesystem.relpath(arg, self._base_path)
+            relpath = filesystem.relpath(arg, self._base_path)
             if relpath:
                 paths.append(relpath)
 
         skipped_directories = set(['.svn', 'resources'])
-        test_files = find_files.find(self._host.filesystem, self._base_path, paths, skipped_directories, _is_test_file)
+        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
         tests = []
         for path in test_files:
-            test_name = self._port.relative_perf_test_filename(path)
-            if self._port.skips_perf_test(test_name):
+            relative_path = self._port.relative_perf_test_filename(path)
+            if self._port.skips_perf_test(relative_path):
                 continue
-            tests.append((test_name.replace('\\', '/'), path))
+            test_name = relative_path.replace('\\', '/')
+            dirname = filesystem.dirname(path)
+            if self._host.filesystem.dirname(relative_path) in self._test_directories_for_chromium_style_tests:
+                tests.append(ChromiumStylePerfTest(test_name, dirname, path))
+            else:
+                tests.append(PerfTest(test_name, dirname, path))
+
         return tests
@@ -145,5 +155,5 @@
         try:
             tests = self._collect_tests()
-            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
+            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
         finally:
             self._printer.cleanup()
@@ -225,5 +235,5 @@
         driver = None
 
-        for (test_name, test_path) in tests:
+        for test in tests:
             driver = port.create_driver(worker_number=1, no_timeout=True)
 
@@ -234,8 +244,6 @@
                     return unexpected
 
-            self._printer.write('Running %s (%d of %d)' % (test_name, expected + unexpected + 1, len(tests)))
-
-            is_chromium_style = self._host.filesystem.dirname(test_name) in self._test_directories_for_chromium_style_tests
-            if self._run_single_test(test_name, test_path, driver, is_chromium_style):
+            self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
+            if self._run_single_test(test, driver):
                 expected = expected + 1
             else:
@@ -265,82 +273,29 @@
         return test_failed or not got_a_result
 
-    _lines_to_ignore_in_parser_result = [
-        re.compile(r'^Running \d+ times$'),
-        re.compile(r'^Ignoring warm-up '),
-        re.compile(r'^Info:'),
-        re.compile(r'^\d+(.\d+)?$'),
-        # Following are for handle existing test like Dromaeo
-        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
-        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
-        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]
-
-    def _should_ignore_line_in_parser_test_result(self, line):
-        if not line:
-            return True
-        for regex in self._lines_to_ignore_in_parser_result:
-            if regex.search(line):
-                return True
-        return False
-
-    def _process_parser_test_result(self, test_name, output):
-        got_a_result = False
-        test_failed = False
-        filesystem = self._host.filesystem
-        results = {}
-        keys = ['avg', 'median', 'stdev', 'min', 'max']
-        score_regex = re.compile(r'^(?P<key>' + r'|'.join(keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
-        unit = "ms"
-
-        for line in re.split('\n', output.text):
-            score = score_regex.match(line)
-            if score:
-                results[score.group('key')] = float(score.group('value'))
-                if score.group('unit'):
-                    unit = score.group('unit')
-                continue
-
-            if not self._should_ignore_line_in_parser_test_result(line):
-                test_failed = True
-                self._printer.write("%s" % line)
-
-        if test_failed or set(keys) != set(results.keys()):
-            return True
-
-        results['unit'] = unit
-
-        test_name = re.sub(r'\.\w+$', '', test_name)
-        self._results[test_name] = results
-        self._buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
-        self._buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in keys[1:]]) + '\n')
-        return False
-
-    def _run_single_test(self, test_name, test_path, driver, is_chromium_style):
-        test_failed = False
+    def _run_single_test(self, test, driver):
         start_time = time.time()
 
-        output = driver.run_test(DriverInput(test_path, self._options.time_out_ms, None, False))
+        output = driver.run_test(DriverInput(test.path_or_url(), self._options.time_out_ms, None, False))
+        new_results = None
 
         if output.text == None:
-            test_failed = True
+            pass
         elif output.timeout:
-            self._printer.write('timeout: %s' % test_name)
-            test_failed = True
+            self._printer.write('timeout: %s' % test.test_name())
         elif output.crash:
-            self._printer.write('crash: %s' % test_name)
-            test_failed = True
+            self._printer.write('crash: %s' % test.test_name())
         else:
-            if is_chromium_style:
-                test_failed = self._process_chromium_style_test_result(test_name, output)
-            else:
-                test_failed = self._process_parser_test_result(test_name, output)
+            new_results = test.parse_output(output, self._printer, self._buildbot_output)
 
         if len(output.error):
             self._printer.write('error:\n%s' % output.error)
-            test_failed = True
-
-        if test_failed:
+            new_results = None
+
+        if new_results:
+            self._results.update(new_results)
+        else:
             self._printer.write('FAILED')
 
         self._printer.write("Finished: %f s" % (time.time() - start_time))
 
-        return not test_failed
+        return new_results != None
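Per the ChangeLog, the point of this extraction is that _run_single_test no longer branches on is_chromium_style; it calls test.parse_output() and merges whatever dictionary comes back, so a new kind of test only has to subclass PerfTest and be instantiated by _collect_tests. A hypothetical illustration, reusing the PerfTest sketch above; SingleValuePerfTest and its one-number output format are invented for this example, not part of the changeset:

# Hypothetical example of the extension point this refactoring creates
# (SingleValuePerfTest is invented here, not part of r114076).
class SingleValuePerfTest(PerfTest):
    def parse_output(self, output, printer, buildbot_output):
        try:
            value = float(output.text.strip())  # test page prints a single number
        except (AttributeError, ValueError):
            return None  # runner prints FAILED when parse_output returns None
        buildbot_output.write('RESULT %s= %s ms\n' % (self.test_name(), value))
        return {self.test_name(): {'avg': value, 'unit': 'ms'}}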
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py (r113839)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py (r114076)
@@ -40,3 +40,5 @@
 from webkitpy.layout_tests.port.test import TestPort
 from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
@@ -127,5 +129,6 @@
         runner = self.create_runner()
         driver = MainTest.TestDriver()
-        return runner._run_single_test(test_name, test_name, driver, is_chromium_style=True)
+        return runner._run_single_test(ChromiumStylePerfTest(test_name, 'some-dir',
+            runner._host.filesystem.join('some-dir', test_name)), driver)
 
     def test_run_passing_test(self):
@@ -147,6 +150,15 @@
         self.assertFalse(self.run_test('crash.html'))
 
-    def _tests_for_runner(self, runner, tests):
-        return [(test, runner._base_path + '/' + test) for test in tests]
+    def _tests_for_runner(self, runner, test_names):
+        filesystem = runner._host.filesystem
+        tests = []
+        for test in test_names:
+            path = filesystem.join(runner._base_path, test)
+            dirname = filesystem.dirname(path)
+            if test.startswith('inspector/'):
+                tests.append(ChromiumStylePerfTest(test, dirname, path))
+            else:
+                tests.append(PerfTest(test, dirname, path))
+        return tests
 
     def test_run_test_set(self):
@@ -222,4 +234,4 @@
             "timestamp": 123456789, "results":
             {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
-            "group_name:test_name": 42},
+            "inspector/pass.html:group_name:test_name": 42},
             "webkit-revision": 5678})
@@ -241,5 +253,5 @@
             "timestamp": 123456789, "results":
             {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
-            "group_name:test_name": 42},
+            "inspector/pass.html:group_name:test_name": 42},
             "webkit-revision": 5678,
             "key": "value"})
@@ -254,5 +266,5 @@
 
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
-            "timestamp": 123456789, "results": {"group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
+            "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
 
     def test_run_with_upload_json(self):
@@ -333,4 +345,7 @@
         self.assertEqual(len(tests), 1)
 
+    def _collect_tests_and_sort_test_name(self, runner):
+        return sorted([test.test_name() for test in runner._collect_tests()])
+
     def test_collect_tests(self):
         runner = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
@@ -343,5 +358,5 @@
         add_file('test3.html')
         runner._host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
-        self.assertEqual(sorted([test[0] for test in runner._collect_tests()]), ['test1.html', 'test2.html'])
+        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
 
     def test_collect_tests_with_skipped_list(self):
@@ -359,5 +374,5 @@
         add_file('unsupported', 'unsupported_test2.html')
         runner._port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
-        self.assertEqual(sorted([test[0] for test in runner._collect_tests()]), ['inspector/test1.html', 'inspector/test2.html'])
+        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
 
     def test_parse_args(self):
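The expected JSON keys above ("inspector/pass.html:group_name:test_name") show how ChromiumStylePerfTest.parse_output must build its result dictionary from chromium-style "RESULT group_name: test_name= 42 ms" output lines. Continuing the perftest.py sketch from earlier; the regex and key construction are reverse-engineered from these tests, so the real r114076 code may differ in detail:

# Sketch of ChromiumStylePerfTest, reconstructed from the expected result
# keys in the unit tests above; not a verbatim copy of r114076.
class ChromiumStylePerfTest(PerfTest):
    # Matches lines such as 'RESULT group_name: test_name= 42 ms'.
    _result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)?$')

    def parse_output(self, output, printer, buildbot_output):
        test_failed = False
        results = {}
        for line in re.split('\n', output.text):
            result_line = self._result_regex.match(line)
            if result_line:
                # 'group_name: test_name' collapses to 'group_name:test_name',
                # prefixed with the test name to give the JSON key seen above.
                key = self.test_name() + ':' + result_line.group('name').replace(' ', '')
                results[key] = float(result_line.group('value'))
                buildbot_output.write("%s\n" % line)
            elif line:
                test_failed = True
                printer.write("%s" % line)
        return results if results and not test_failed else None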