Changeset 162183 in webkit
- Timestamp:
- Jan 16, 2014 10:06:36 PM (10 years ago)
- Location:
- trunk
- Files:
-
- 9 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/PerformanceTests/ChangeLog
r162065 r162183 1 2014-01-16 Ryosuke Niwa <rniwa@webkit.org> 2 3 Automate DoYouEvenBench 4 https://bugs.webkit.org/show_bug.cgi?id=124497 5 6 Reviewed by Geoffrey Garen. 7 8 Enable DoYouEvenBench/Full.html on perf bots by default. 9 10 Put a space between the time and ms, and fixed a typo in runner.js so that the aggregator name will be reported. 11 12 * DoYouEvenBench/Full.html: 13 * Skipped: 14 * resources/runner.js: 15 1 16 2014-01-15 Manuel Rego Casasnovas <rego@igalia.com> 2 17 -
trunk/PerformanceTests/DoYouEvenBench/Full.html
r162058 r162183 18 18 values.push(measuredValues.total); 19 19 iterationNumber++; 20 pre.appendChild(document.createTextNode('Iteration ' + iterationNumber + ': ' + measuredValues.total + ' ms\n'));20 pre.appendChild(document.createTextNode('Iteration ' + iterationNumber + ': ' + measuredValues.total + ' ms\n')); 21 21 }, 22 22 didFinishLastIteration: function () { … … 24 24 for (var i = 0; i < values.length; i++) 25 25 sum += values[i]; 26 pre.appendChild(document.createTextNode('Average: ' + (sum / iterationNumber) + ' ms\n'));26 pre.appendChild(document.createTextNode('Average: ' + (sum / iterationNumber) + ' ms\n')); 27 27 pre.style.paddingTop = 0; 28 28 } -
trunk/PerformanceTests/Skipped
r162065 r162183 90 90 Layout/LineLayoutJapanese.html 91 91 92 # New DOM benchmark is not ready for the prime time yet.93 DoYouEvenBench 92 # Don't run the interactive runner. We run Full.html 93 DoYouEvenBench/benchmark.html -
trunk/PerformanceTests/resources/runner.js
r162058 r162183 222 222 PerfTestRunner.log("Description: " + currentTest.description); 223 223 metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[PerfTestRunner.unit]; 224 var suffix = currentTest.aggregat ion ? ':' + currentTest.aggregation: '';224 var suffix = currentTest.aggregator ? ':' + currentTest.aggregator : ''; 225 225 PerfTestRunner.logStatistics(results, PerfTestRunner.unit, prefix + ":" + metric + suffix); 226 226 if (jsHeapResults.length) { -
trunk/Tools/ChangeLog
r162179 r162183 1 2014-01-16 Ryosuke Niwa <rniwa@webkit.org> 2 3 Automate DoYouEvenBench 4 https://bugs.webkit.org/show_bug.cgi?id=124497 5 6 Reviewed by Geoffrey Garen. 7 8 * Scripts/webkitpy/performance_tests/perftest.py: 9 (PerfTestMetric.__init__): Added the aggregator name as an argument. 10 (PerfTestMetric.aggregator): Added. 11 (PerfTest._metrics_regex): Made the subtest name match non-greedy so that the metric names 12 won't be eagerly parsed as a part of the subtest name. e.g. "Time" and "Total" in "a:Time:Total" 13 should be parsed as the metric and the aggregator respectively. 14 (PerfTest._run_with_driver): Pass in the aggregator name. 15 (PerfTest._ensure_metrics): Ditto. Also split the subtest name by / as required by DoYouEvenBench 16 which generates subtests of subtests within a single test file. 17 18 * Scripts/webkitpy/performance_tests/perftest_unittest.py: 19 (test_parse_output_with_subtests_and_total): Added. 20 21 * Scripts/webkitpy/performance_tests/perftestsrunner.py: 22 (_generate_results_dict): Add the aggregator name to the JSON when one is available. 23 24 * Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py: 25 (TestWithSubtestsData): Added a subtest with an aggregator and a sub-sub test. 26 1 27 2014-01-16 Chris Fleizach <cfleizach@apple.com> 2 28 -
trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py
r162058 r162183 51 51 52 52 class PerfTestMetric(object): 53 def __init__(self, path, test_file_name, metric, unit=None, iterations=None):53 def __init__(self, path, test_file_name, metric, unit=None, aggregator=None, iterations=None): 54 54 # FIXME: Fix runner.js to report correct metric names 55 55 self._iterations = iterations or [] 56 56 self._unit = unit or self.metric_to_unit(metric) 57 self._aggregator = aggregator 57 58 self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric 58 59 self._path = path … … 61 62 def name(self): 62 63 return self._metric 64 65 def aggregator(self): 66 return self._aggregator 63 67 64 68 def path(self): … … 169 173 170 174 _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE) 171 _metrics_regex = re.compile(r'^(?P<subtest>[A-Za-z0-9\(\[].+ )?:(?P<metric>[A-Z][A-Za-z]+)(:(?P<aggregator>[A-Z][A-Za-z]+))? -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')175 _metrics_regex = re.compile(r'^(?P<subtest>[A-Za-z0-9\(\[].+?)?:(?P<metric>[A-Z][A-Za-z]+)(:(?P<aggregator>[A-Z][A-Za-z]+))? 
-> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$') 172 176 173 177 def _run_with_driver(self, driver, time_out_ms): … … 189 193 return False 190 194 191 metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('subtest'), metric_match.group('unit') )195 metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('subtest'), metric_match.group('unit'), metric_match.group('aggregator')) 192 196 metric.append_group(map(lambda value: float(value), metric_match.group('values').split(', '))) 193 197 194 198 return True 195 199 196 def _ensure_metrics(self, metric_name, subtest_name='', unit=None ):200 def _ensure_metrics(self, metric_name, subtest_name='', unit=None, aggregator=None): 197 201 try: 198 202 subtest = next(subtest for subtest in self._metrics if subtest['name'] == subtest_name) … … 206 210 path = self.test_name_without_file_extension().split('/') 207 211 if subtest_name: 208 path += [subtest_name]209 metric = PerfTestMetric(path, self._test_name, metric_name, unit )212 path += subtest_name.split('/') 213 metric = PerfTestMetric(path, self._test_name, metric_name, unit, aggregator) 210 214 subtest['metrics'].append(metric) 211 215 return metric -
trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
r159805 r162183 212 212 """) 213 213 214 def test_parse_output_with_subtests_and_total(self): 215 output = DriverOutput(""" 216 :Time:Total -> [2324, 2328, 2345, 2314, 2312] ms 217 EmberJS-TodoMVC:Time:Total -> [1462, 1473, 1490, 1465, 1458] ms 218 EmberJS-TodoMVC/a:Time -> [1, 2, 3, 4, 5] ms 219 BackboneJS-TodoMVC:Time -> [862, 855, 855, 849, 854] ms 220 """, image=None, image_hash=None, audio=None) 221 output_capture = OutputCapture() 222 output_capture.capture_output() 223 try: 224 test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test') 225 test.run_single = lambda driver, path, time_out_ms: output 226 self.assertTrue(test.run(10)) 227 finally: 228 actual_stdout, actual_stderr, actual_logs = output_capture.restore_output() 229 230 subtests = test._metrics 231 self.assertEqual(map(lambda test: test['name'], subtests), [None, 'EmberJS-TodoMVC', 'EmberJS-TodoMVC/a', 'BackboneJS-TodoMVC']) 232 233 main_metrics = subtests[0]['metrics'] 234 self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time']) 235 self.assertEqual(main_metrics[0].aggregator(), 'Total') 236 self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test']) 237 self.assertEqual(main_metrics[0].flattened_iteration_values(), [2324, 2328, 2345, 2314, 2312] * 4) 238 239 some_test_metrics = subtests[1]['metrics'] 240 self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time']) 241 self.assertEqual(some_test_metrics[0].aggregator(), 'Total') 242 self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC']) 243 self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1462, 1473, 1490, 1465, 1458] * 4) 244 245 some_test_metrics = subtests[2]['metrics'] 246 self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time']) 247 self.assertEqual(some_test_metrics[0].aggregator(), None) 248 self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC', 'a']) 249 
self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4) 250 251 some_test_metrics = subtests[3]['metrics'] 252 self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time']) 253 self.assertEqual(some_test_metrics[0].aggregator(), None) 254 self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'BackboneJS-TodoMVC']) 255 self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [862, 855, 855, 849, 854] * 4) 256 257 self.assertEqual(actual_stdout, '') 258 self.assertEqual(actual_stderr, '') 259 self.assertEqual(actual_logs, """RESULT some-dir: some-test: Time= 2324.6 ms 260 median= 2324.0 ms, stdev= 12.1326007105 ms, min= 2312.0 ms, max= 2345.0 ms 261 """) 262 214 263 215 264 class TestSingleProcessPerfTest(unittest.TestCase): -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
r159805 r162183 270 270 current_test.setdefault('metrics', {}) 271 271 assert metric.name() not in current_test['metrics'] 272 current_test['metrics'][metric.name()] = {'current': metric.grouped_iteration_values()} 272 test_results = {'current': metric.grouped_iteration_values()} 273 if metric.aggregator(): 274 test_results['aggregators'] = [metric.aggregator()] 275 current_test['metrics'][metric.name()] = test_results 273 276 else: 274 277 current_test.setdefault('tests', {}) -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
r159805 r162183 99 99 class TestWithSubtestsData: 100 100 text = """subtest:Time -> [1, 2, 3, 4, 5] ms 101 total-test:Time:Total -> [1, 2, 3, 4, 5] ms 102 total-test/subsubtest:Time -> [1, 2, 3, 4, 5] ms 101 103 :Time -> [1080, 1120, 1095, 1101, 1104] ms 102 104 """ … … 114 116 'subtest': { 115 117 'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html', 116 'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}}} 118 'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}, 119 'total-test': { 120 'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html', 121 'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4, "aggregators": ["Total"]}}, 122 'tests': { 123 'subsubtest': 124 {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html', 125 'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}}}}} 117 126 118 127
Note: See TracChangeset for help on using the changeset viewer.