Changeset 129091 in webkit
- Timestamp:
- Sep 19, 2012 10:14:16 PM (12 years ago)
- Location:
- trunk
- Files:
-
- 9 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/LayoutTests/ChangeLog
r129090 r129091 1 2012-09-19 Ryosuke Niwa <rniwa@webkit.org> 2 3 run-perf-tests should record individual values instead of statistics 4 https://bugs.webkit.org/show_bug.cgi?id=97155 5 6 Reviewed by Hajime Morita. 7 8 The expected result now contains individual values. 9 10 * fast/harness/perftests/runs-per-second-log-expected.txt: 11 1 12 2012-09-19 David Grogan <dgrogan@chromium.org> 2 13 -
trunk/LayoutTests/fast/harness/perftests/runs-per-second-log-expected.txt
r125194 r129091 10 10 11 11 Time: 12 values 1, 2, 3, 4, 5 runs/s 12 13 avg 3 runs/s 13 14 median 3 runs/s -
trunk/PerformanceTests/ChangeLog
r128779 r129091 1 2012-09-19 Ryosuke Niwa <rniwa@webkit.org> 2 3 run-perf-tests should record individual values instead of statistics 4 https://bugs.webkit.org/show_bug.cgi?id=97155 5 6 Reviewed by Hajime Morita. 7 8 Report the list of values as "values" so that run-perf-tests can parse them. 9 10 * resources/runner.js: 11 (PerfTestRunner.computeStatistics): 12 (PerfTestRunner.printStatistics): 13 1 14 2012-09-17 Ryosuke Niwa <rniwa@webkit.org> 2 15 -
trunk/PerformanceTests/resources/runner.js
r128649 r129091 75 75 // Compute the mean and variance using a numerically stable algorithm. 76 76 var squareSum = 0; 77 result.values = times; 77 78 result.mean = data[0]; 78 79 result.sum = data[0]; … … 100 101 this.log(""); 101 102 this.log(title); 103 this.log("values " + statistics.values.join(', ') + " " + statistics.unit) 102 104 this.log("avg " + statistics.mean + " " + statistics.unit); 103 105 this.log("median " + statistics.median + " " + statistics.unit); -
trunk/Tools/ChangeLog
r129083 r129091 1 2012-09-19 Ryosuke Niwa <rniwa@webkit.org> 2 3 run-perf-tests should record individual values instead of statistics 4 https://bugs.webkit.org/show_bug.cgi?id=97155 5 6 Reviewed by Hajime Morita. 7 8 Parse the list of individual values reported by tests and include them as "values". 9 We strip "values" from the output JSON when uploading it to the perf-o-matic 10 since it doesn't know how to parse "values" or ignore it. 11 12 * Scripts/webkitpy/performance_tests/perftest.py: 13 (PerfTest): 14 (PerfTest.parse_output): Parse and report "values". 15 (PageLoadingPerfTest.run): Report individual page loading times in "values". 16 * Scripts/webkitpy/performance_tests/perftest_unittest.py: 17 (MainTest.test_parse_output): 18 (MainTest.test_parse_output_with_failing_line): 19 (TestPageLoadingPerfTest.test_run): 20 * Scripts/webkitpy/performance_tests/perftestsrunner.py: 21 (PerfTestsRunner._generate_and_show_results): Strip "values" from each result 22 until we update perf-o-matic. 23 * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py: 24 (test_run_memory_test): 25 (test_run_with_json_output): 26 (test_run_with_description): 27 (test_run_with_slave_config_json): 28 (test_run_with_multiple_repositories): 29 1 30 2012-09-19 Dirk Pranke <dpranke@chromium.org> 2 31 -
trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py
r126512 r129091 115 115 _result_classes = ['Time', 'JS Heap', 'Malloc'] 116 116 _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):') 117 _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit' ]118 _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value> [0-9\.]+)\s*(?P<unit>.*)')117 _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values'] 118 _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)') 119 119 120 120 def parse_output(self, output): … … 139 139 if score: 140 140 key = score.group('key') 141 value = float(score.group('value')) 141 if ', ' in score.group('value'): 142 value = [float(number) for number in score.group('value').split(', ')] 143 else: 144 value = float(score.group('value')) 142 145 unit = score.group('unit') 143 146 name = test_name … … 209 212 test_times.append(output.test_time * 1000) 210 213 211 test_times = sorted(test_times)214 sorted_test_times = sorted(test_times) 212 215 213 216 # Compute the mean and variance using a numerically stable algorithm. 214 217 squareSum = 0 215 218 mean = 0 216 valueSum = sum( test_times)217 for i, time in enumerate( test_times):219 valueSum = sum(sorted_test_times) 220 for i, time in enumerate(sorted_test_times): 218 221 delta = time - mean 219 222 sweep = i + 1.0 … … 222 225 223 226 middle = int(len(test_times) / 2) 224 results = {'avg': mean, 225 'min': min(test_times), 226 'max': max(test_times), 227 'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2, 227 results = {'values': test_times, 228 'avg': mean, 229 'min': sorted_test_times[0], 230 'max': sorted_test_times[-1], 231 'median': sorted_test_times[middle] if len(sorted_test_times) % 2 else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2, 228 232 'stdev': math.sqrt(squareSum), 229 233 'unit': 'ms'} -
trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
r126512 r129091 51 51 '', 52 52 'Time:', 53 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms', 53 54 'avg 1100 ms', 54 55 'median 1101 ms', … … 61 62 test = PerfTest(None, 'some-test', '/path/some-dir/some-test') 62 63 self.assertEqual(test.parse_output(output), 63 {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}}) 64 {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms', 65 'values': [i for i in range(1, 20)]}}) 64 66 finally: 65 67 pass … … 77 79 '', 78 80 'Time:' 81 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms', 79 82 'avg 1100 ms', 80 83 'median 1101 ms', … … 110 113 def test_run(self): 111 114 test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test') 112 driver = TestPageLoadingPerfTest.MockDriver( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])115 driver = TestPageLoadingPerfTest.MockDriver(range(1, 21)) 113 116 output_capture = OutputCapture() 114 117 output_capture.capture_output() 115 118 try: 116 119 self.assertEqual(test.run(driver, None), 117 {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}}) 120 {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms', 121 'values': [i * 1000 for i in range(2, 21)]}}) 118 122 finally: 119 123 actual_stdout, actual_stderr, actual_logs = output_capture.restore_output() -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
r129055 r129091 192 192 return self.EXIT_CODE_BAD_MERGE 193 193 results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html' 194 else: 195 # FIXME: Remove this code once webkit-perf.appspot.com supported "values". 196 for result in output['results'].values(): 197 if isinstance(result, dict) and 'values' in result: 198 del result['values'] 194 199 195 200 self._generate_output_files(output_json_path, results_page_path, output) -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
r129055 r129091 93 93 94 94 Time: 95 values 1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471 ms 95 96 avg 1489.05 ms 96 97 median 1487 ms … … 104 105 105 106 Time: 107 values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms 106 108 avg 1100 ms 107 109 median 1101 ms … … 115 117 116 118 Time: 119 values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms 117 120 avg 1100 ms 118 121 median 1101 ms … … 122 125 123 126 JS Heap: 127 values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes 124 128 avg 832000 bytes 125 129 median 829000 bytes … … 129 133 130 134 Malloc: 135 values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes 131 136 avg 532000 bytes 132 137 median 529000 bytes … … 287 292 '', ''])) 288 293 results = runner.load_output_json()[0]['results'] 289 self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms'}) 290 self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes'}) 291 self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'}) 294 values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] 295 self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values}) 296 self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values}) 297 self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 
'values': values}) 292 298 293 299 def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, expected_exit_code=0): … … 331 337 332 338 _event_target_wrapper_and_inspector_results = { 339 "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms", 340 "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]}, 341 "inspector/pass.html:group_name:test_name": 42} 342 343 # FIXME: Remove this variance once perf-o-matic supported "values". 344 _event_target_wrapper_and_inspector_results_without_values = { 333 345 "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"}, 334 346 "inspector/pass.html:group_name:test_name": 42} … … 339 351 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 340 352 self.assertEqual(runner.load_output_json(), { 341 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results ,353 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values, 342 354 "webkit-revision": "5678", "branch": "webkit-trunk"}) 343 355 … … 348 360 self.assertEqual(runner.load_output_json(), { 349 361 "timestamp": 123456789, "description": "some description", 350 "results": self._event_target_wrapper_and_inspector_results ,362 "results": self._event_target_wrapper_and_inspector_results_without_values, 351 363 "webkit-revision": "5678", "branch": "webkit-trunk"}) 352 364 … … 438 450 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 439 451 self.assertEqual(runner.load_output_json(), { 440 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results ,452 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values, 441 453 "webkit-revision": "5678", "branch": 
"webkit-trunk", "key": "value"}) 442 454 … … 457 469 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 458 470 self.assertEqual(runner.load_output_json(), { 459 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results ,471 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values, 460 472 "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"}) 461 473
Note: See TracChangeset
for help on using the changeset viewer.