Changeset 159805 in webkit
Timestamp: Nov 26, 2013, 9:33:17 PM
Location: trunk
Files: 10 edited
trunk/LayoutTests/ChangeLog
r159797 → r159805

+2013-11-26  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Record subtest values in Dromaeo tests
+        https://bugs.webkit.org/show_bug.cgi?id=124498
+
+        Reviewed by Andreas Kling.
+
+        Rebaselined the test.
+
+        * fast/harness/perftests/runs-per-second-log-expected.txt:
+
 2013-11-26  Nick Diego Yamane  <nick.yamane@openbossa.org>
trunk/LayoutTests/fast/harness/perftests/runs-per-second-log-expected.txt
r159465 → r159805

…
-:Time -> [2, 4, 5, 8, 10] runs/s
+:Runs -> [2, 4, 5, 8, 10] runs/s
…
trunk/PerformanceTests/ChangeLog
r159803 → r159805

+2013-11-26  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Record subtest values in Dromaeo tests
+        https://bugs.webkit.org/show_bug.cgi?id=124498
+
+        Reviewed by Andreas Kling.
+
+        Made Dromaeo's test runner report values in DRT.progress via the newly added PerfTestRunner.reportValues.
+
+        * Dromaeo/resources/dromaeorunner.js:
+        (.): Moved the definition of ITERATION_COUNT out of DRT.setup.
+        (DRT.setup): Ditto.
+        (DRT.testObject): Extracted from DRT.setup. Sets the subtest name and continueTesting;
+        continueTesting is set to true for subtests, i.e. when a name is specified.
+        (DRT.progress): Call PerfTestRunner.reportValues to report subtest results.
+        (DRT.teardown): Call PerfTestRunner.reportValues instead of measureValueAsync.
+
+        * resources/runner.js: Made various changes for the newly added PerfTestRunner.reportValues.
+        (.): Moved the initialization of completedIterations, results, jsHeapResults, and mallocHeapResults into
+        start, since they need to be initialized before running each subtest. Initialize logLines here, since we
+        need to use the same logger for all subtests.
+        (.start): Initialize the variables mentioned above here. Also respect doNotLogStart, used by reportValues.
+        (ignoreWarmUpAndLog): Added doNotLogProgress, used by reportValues since it reports all values at once.
+        (finish): Compute the metric name, such as FrameRate or Runs, from the unit. Also don't log or notify done
+        when continueTesting is set on the test object.
+        (PerfTestRunner.reportValues): Added. Reports all values for the main test or a subtest.
+
 2013-11-26  Ryosuke Niwa  <rniwa@webkit.org>
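For clarity, here is the test descriptor that DRT.testObject(name) builds, transliterated into a small Python sketch (the actual code is the JavaScript in the diff below; test_object is a hypothetical helper name used only for this illustration). The point is the continueTesting flag: named subtests keep the harness alive, and only the final, unnamed report lets the harness call testRunner.notifyDone().

    # Python sketch of DRT.testObject(name) from dromaeorunner.js; illustrative only.
    ITERATION_COUNT = 5

    def test_object(name=None):
        return {
            'dromaeoIterationCount': ITERATION_COUNT,
            'doNotMeasureMemoryUsage': True,
            'doNotIgnoreInitialRun': True,
            'unit': 'runs/s',
            'name': name,
            # Only named subtests keep the harness running; the final, unnamed
            # report allows finish() to call testRunner.notifyDone().
            'continueTesting': bool(name),
        }

    assert test_object('jQuery - addClass')['continueTesting'] is True
    assert test_object()['continueTesting'] is False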
trunk/PerformanceTests/Dromaeo/resources/dromaeorunner.js
r159465 → r159805

 (function(){
+    var ITERATION_COUNT = 5;
     var DRT = {
         baseURL: "./resources/dromaeo/web/index.html",

         setup: function(testName) {
-            var ITERATION_COUNT = 5;
-            PerfTestRunner.prepareToMeasureValuesAsync({dromaeoIterationCount: ITERATION_COUNT, doNotMeasureMemoryUsage: true, doNotIgnoreInitialRun: true, unit: 'runs/s'});
-
             var iframe = document.createElement("iframe");
             var url = DRT.baseURL + "?" + testName + '&numTests=' + ITERATION_COUNT;
…
         },

+        testObject: function(name) {
+            return {dromaeoIterationCount: ITERATION_COUNT, doNotMeasureMemoryUsage: true, doNotIgnoreInitialRun: true, unit: 'runs/s',
+                name: name, continueTesting: !!name};
+        },
+
         start: function() {
             DRT.targetWindow.postMessage({ name: "dromaeo:start" } , "*");
…
             var score = message.status.score;
             if (score)
-                DRT.log(score.name + ' -> [' + score.times.join(', ') + ']');
+                PerfTestRunner.reportValues(this.testObject(score.name), score.times);
         },
…
         }

-        for (var i = 0; i < times.length; ++i)
-            PerfTestRunner.measureValueAsync(1 / times[i]);
+        PerfTestRunner.reportValues(this.testObject(), times.map(function (time) { return 1 / time; }));
     },
trunk/PerformanceTests/resources/runner.js
r159465 → r159805

 (function () {
-    var logLines = null;
+    var logLines = window.testRunner ? [] : null;
     var verboseLogging = false;
-    var completedIterations = -1;
+    var completedIterations;
     var callsPerIteration = 1;
     var currentTest = null;
-    var results = [];
-    var jsHeapResults = [];
-    var mallocHeapResults = [];
+    var results;
+    var jsHeapResults;
+    var mallocHeapResults;
     var iterationCount = undefined;
…
     }

-    function start(test, runner) {
+    function start(test, runner, doNotLogStart) {
         if (!test) {
             logFatalError("Got a bad test object.");
…
         // FIXME: Don't hard code the number of in-process iterations to use inside a test runner.
         iterationCount = test.dromaeoIterationCount || (window.testRunner ? 5 : 20);
-        logLines = window.testRunner ? [] : null;
+        completedIterations = -1;
+        results = [];
+        jsHeapResults = [];
+        mallocHeapResults = [];
         verboseLogging = !window.testRunner;
-        PerfTestRunner.logInfo("Running " + iterationCount + " times");
+        if (!doNotLogStart) {
+            PerfTestRunner.logInfo('');
+            PerfTestRunner.logInfo("Running " + iterationCount + " times");
+        }
         if (test.doNotIgnoreInitialRun)
             completedIterations++;
…
     }

-    function ignoreWarmUpAndLog(measuredValue) {
+    function ignoreWarmUpAndLog(measuredValue, doNotLogProgress) {
         var labeledResult = measuredValue + " " + PerfTestRunner.unit;
-        if (completedIterations <= 0)
-            PerfTestRunner.logDetail(completedIterations, labeledResult + " (Ignored warm-up run)");
-        else {
-            results.push(measuredValue);
-            if (window.internals && !currentTest.doNotMeasureMemoryUsage) {
-                jsHeapResults.push(getUsedJSHeap());
-                mallocHeapResults.push(getUsedMallocHeap());
-            }
+        if (completedIterations <= 0) {
+            if (!doNotLogProgress)
+                PerfTestRunner.logDetail(completedIterations, labeledResult + " (Ignored warm-up run)");
+            return;
+        }
+
+        results.push(measuredValue);
+        if (window.internals && !currentTest.doNotMeasureMemoryUsage) {
+            jsHeapResults.push(getUsedJSHeap());
+            mallocHeapResults.push(getUsedMallocHeap());
+        }
+        if (!doNotLogProgress)
             PerfTestRunner.logDetail(completedIterations, labeledResult);
-        }
     }

     function finish() {
         try {
+            var prefix = currentTest.name || '';
             if (currentTest.description)
                 PerfTestRunner.log("Description: " + currentTest.description);
-            PerfTestRunner.logStatistics(results, PerfTestRunner.unit, ":Time");
+            metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[PerfTestRunner.unit]
+            PerfTestRunner.logStatistics(results, PerfTestRunner.unit, prefix + ":" + metric);
             if (jsHeapResults.length) {
-                PerfTestRunner.logStatistics(jsHeapResults, "bytes", ":JSHeap");
-                PerfTestRunner.logStatistics(mallocHeapResults, "bytes", ":Malloc");
-            }
-            if (logLines)
-                logLines.forEach(logInDocument);
+                PerfTestRunner.logStatistics(jsHeapResults, "bytes", prefix + ":JSHeap");
+                PerfTestRunner.logStatistics(mallocHeapResults, "bytes", prefix + ":Malloc");
+            }
             if (currentTest.done)
                 currentTest.done();
+
+            if (logLines && !currentTest.continueTesting)
+                logLines.forEach(logInDocument);
         } catch (exception) {
             logInDocument("Got an exception while finalizing the test with name=" + exception.name + ", message=" + exception.message);
         }

-        if (window.testRunner)
-            testRunner.notifyDone();
+        if (!currentTest.continueTesting) {
+            if (window.testRunner)
+                testRunner.notifyDone();
+            return;
+        }
+
+        currentTest = null;
     }
…
         return true;
     }
+
+    PerfTestRunner.reportValues = function (test, values) {
+        PerfTestRunner.unit = test.unit;
+        start(test, null, true);
+        for (var i = 0; i < values.length; i++) {
+            completedIterations++;
+            ignoreWarmUpAndLog(values[i], true);
+        }
+        finish();
+    }
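To see how these pieces combine, here is a simplified Python model of the reportValues() path (the real code is the JavaScript above and runs through start()/ignoreWarmUpAndLog()/finish(); this sketch keeps only the naming logic, and report_values/UNIT_TO_METRIC are names invented for the illustration). The real finish() also routes output through PerfTestRunner.logStatistics and reports JSHeap/Malloc statistics.

    # Simplified Python model of PerfTestRunner.reportValues() from runner.js (r159805).
    UNIT_TO_METRIC = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}  # mapping from finish()

    def report_values(test, values):
        prefix = test.get('name') or ''        # subtest name; empty for the main test
        metric = UNIT_TO_METRIC[test['unit']]  # e.g. 'runs/s' -> 'Runs'
        # finish() labels the statistics line "<prefix>:<metric>"; because all
        # values arrive at once, per-iteration progress logging is suppressed.
        return '%s:%s -> [%s] %s' % (prefix, metric, ', '.join(map(str, values)), test['unit'])

    print(report_values({'name': 'Dojo - div > div', 'unit': 'runs/s'}, [10158, 10172, 10180]))
    # Dojo - div > div:Runs -> [10158, 10172, 10180] runs/s
    print(report_values({'name': None, 'unit': 'runs/s'}, [2, 4, 5, 8, 10]))
    # :Runs -> [2, 4, 5, 8, 10] runs/s

The second line is exactly the form now expected by the rebaselined runs-per-second-log-expected.txt, and the subtest-prefixed form is what the webkitpy parser below consumes.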
trunk/Tools/ChangeLog
r159804 → r159805

+2013-11-26  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Record subtest values in Dromaeo tests
+        https://bugs.webkit.org/show_bug.cgi?id=124498
+
+        Reviewed by Andreas Kling.
+
+        Added support for parsing subtest results.
+
+        * Scripts/webkitpy/performance_tests/perftest.py: Replaced _metrics with an ordered list of subtests, each
+        of which is a dictionary holding the subtest's name and an ordered list of its metrics.
+        (PerfTest.__init__): Initialize _metrics as a list.
+        (PerfTest.run): Go through each subtest and its metrics to create the list of TestMetrics.
+        (PerfTest._run_with_driver):
+        (PerfTest._ensure_metrics): Look for a subtest, then a metric, in _metrics.
+
+        * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+        (TestPerfTest._assert_results_are_correct): Updated the assertions per the changes to _metrics.
+        (TestPerfTest.test_parse_output): Ditto.
+        (TestPerfTest.test_parse_output_with_subtests): Added the metric and the unit to each subtest result, as
+        well as assertions to ensure subtest results are parsed properly.
+        (TestReplayPerfTest.test_run_with_driver_accumulates_results): Updated the assertions per the changes to _metrics.
+        (TestReplayPerfTest.test_run_with_driver_accumulates_memory_results): Ditto.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (_generate_results_dict): When the metric for a subtest is processed before that of the main test, the url
+        is incorrectly suffixed with '/'. Fix it up afterwards by re-computing the url with
+        TestPerfMetric.test_file_name when adding new results.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py:
+        (TestWithSubtestsData): Added.
+        (TestDriver.run_test):
+        (MainTest.test_run_test_with_subtests): Added.
+
 2013-11-26  Ryosuke Niwa  <rniwa@webkit.org>
trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py
r159803 → r159805

         self._test_path = test_path
         self._description = None
-        self._metrics = {}
-        self._ordered_metrics_name = []
+        self._metrics = []
         self._test_runner_count = test_runner_count
…
         results = []
-        for metric_name in self._ordered_metrics_name:
-            metric = self._metrics[metric_name]
-            results.append(metric)
-            if should_log:
-                legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
-                self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
-                    metric.flattened_iteration_values(), metric.unit())
+        for subtest in self._metrics:
+            for metric in subtest['metrics']:
+                results.append(metric)
+                if should_log and not subtest['name']:
+                    legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
+                    self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
+                        metric.flattened_iteration_values(), metric.unit())

         return results
…
     _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
-    _metrics_regex = re.compile(r'^:(?P<metric>Time|Malloc|JSHeap) -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)')
+    _metrics_regex = re.compile(r'^(?P<subtest>[A-Za-z0-9\(\[].+)?:(?P<metric>[A-Z][A-Za-z]+) -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')

     def _run_with_driver(self, driver, time_out_ms):
…
             return False

-            metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('unit'))
+            metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('subtest'), metric_match.group('unit'))
             metric.append_group(map(lambda value: float(value), metric_match.group('values').split(', ')))

         return True

-    def _ensure_metrics(self, metric_name, unit=None):
-        if metric_name not in self._metrics:
-            self._metrics[metric_name] = PerfTestMetric(self.test_name_without_file_extension().split('/'), self._test_name, metric_name, unit)
-            self._ordered_metrics_name.append(metric_name)
-        return self._metrics[metric_name]
+    def _ensure_metrics(self, metric_name, subtest_name='', unit=None):
+        try:
+            subtest = next(subtest for subtest in self._metrics if subtest['name'] == subtest_name)
+        except StopIteration:
+            subtest = {'name': subtest_name, 'metrics': []}
+            self._metrics.append(subtest)
+
+        try:
+            return next(metric for metric in subtest['metrics'] if metric.name() == metric_name)
+        except StopIteration:
+            path = self.test_name_without_file_extension().split('/')
+            if subtest_name:
+                path += [subtest_name]
+            metric = PerfTestMetric(path, self._test_name, metric_name, unit)
+            subtest['metrics'].append(metric)
+            return metric

     def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
…
         re.compile(r"CONSOLE MESSAGE: (line \d+: )?Blocked script execution in '[A-Za-z0-9\-\.:]+' because the document's frame is sandboxed and the 'allow-scripts' permission is not set."),
         re.compile(r"CONSOLE MESSAGE: (line \d+: )?Not allowed to load local resource"),
-        # Dromaeo reports values for subtests. Ignore them for now.
-        # FIXME: Remove once subtests are supported
-        re.compile(r'^[A-Za-z0-9\(\[].+( -> )(\[?[0-9\., ]+\])( [a-z/]+)?$'),
     ]
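The new _metrics_regex carries the subtest support. The following self-contained snippet, with the regex copied verbatim from the diff above and sample lines taken from the updated unit test, shows how both main-test and subtest lines parse; note that the greedy, optional subtest group correctly leaves colons inside subtest names (such as 'div:only-child') intact.

    import re

    # Copied verbatim from PerfTest._metrics_regex in perftest.py (r159805).
    metrics_regex = re.compile(
        r'^(?P<subtest>[A-Za-z0-9\(\[].+)?:(?P<metric>[A-Z][A-Za-z]+)'
        r' -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')

    for line in [':Time -> [1080, 1120, 1095, 1101, 1104] ms',                        # main test
                 'Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms']:  # subtest
        match = metrics_regex.match(line)
        values = [float(value) for value in match.group('values').split(', ')]
        # The <subtest> group is None for main-test lines.
        print(match.group('subtest'), match.group('metric'), match.group('unit'), values)
    # None Time ms [1080.0, 1120.0, 1095.0, 1101.0, 1104.0]
    # Dojo - div:only-child Time ms [7825.0, 7910.0, 7950.0, 7958.0, 7970.0]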
trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
r159803 → r159805

     def _assert_results_are_correct(self, test, output):
         test.run_single = lambda driver, path, time_out_ms: output
-        self.assertTrue(test._run_with_driver(None, None))
-        self.assertEqual(test._metrics.keys(), ['Time'])
-        self.assertEqual(test._metrics['Time'].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104])
+        self.assertTrue(test.run(10))
+        subtests = test._metrics
+        self.assertEqual(map(lambda test: test['name'], subtests), [None])
+        metrics = subtests[0]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), metrics), ['Time'])
+        self.assertEqual(metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)

     def test_parse_output(self):
…
         self.assertEqual(actual_stdout, '')
         self.assertEqual(actual_stderr, '')
-        self.assertEqual(actual_logs, '')
+        self.assertEqual(actual_logs, """RESULT some-test: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
+""")

     def _assert_failed_on_line(self, output_text, expected_log):
…
         output = DriverOutput("""
 Description: this is a test description.
-some test -> [1, 2, 3, 4, 5]
-some other test = else -> [6, 7, 8, 9, 10]
-Array Construction, [] -> [11, 12, 13, 14, 15]
-Concat String -> [15163, 15304, 15386, 15608, 15622]
-jQuery - addClass -> [2785, 2815, 2826, 2841, 2861]
-Dojo - div:only-child -> [7825, 7910, 7950, 7958, 7970]
-Dojo - div:nth-child(2n+1) -> [3620, 3623, 3633, 3641, 3658]
-Dojo - div > div -> [10158, 10172, 10180, 10183, 10231]
-Dojo - div ~ div -> [6673, 6675, 6714, 6848, 6902]
+some test:Time -> [1, 2, 3, 4, 5] ms
+some other test = else:Time -> [6, 7, 8, 9, 10] ms
+some other test = else:Malloc -> [11, 12, 13, 14, 15] bytes
+Array Construction, []:Time -> [11, 12, 13, 14, 15] ms
+Concat String:Time -> [15163, 15304, 15386, 15608, 15622] ms
+jQuery - addClass:Time -> [2785, 2815, 2826, 2841, 2861] ms
+Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms
+Dojo - div:nth-child(2n+1):Time -> [3620, 3623, 3633, 3641, 3658] ms
+Dojo - div > div:Time -> [10158, 10172, 10180, 10183, 10231] ms
+Dojo - div ~ div:Time -> [6673, 6675, 6714, 6848, 6902] ms

 :Time -> [1080, 1120, 1095, 1101, 1104] ms
…
         output_capture.capture_output()
         try:
-            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
-            self._assert_results_are_correct(test, output)
+            test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
+            test.run_single = lambda driver, path, time_out_ms: output
+            self.assertTrue(test.run(10))
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        subtests = test._metrics
+        self.assertEqual(map(lambda test: test['name'], subtests), ['some test', 'some other test = else',
+            'Array Construction, []', 'Concat String', 'jQuery - addClass', 'Dojo - div:only-child',
+            'Dojo - div:nth-child(2n+1)', 'Dojo - div > div', 'Dojo - div ~ div', None])
+
+        some_test_metrics = subtests[0]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
+        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'some test'])
+        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)
+
+        some_other_test_metrics = subtests[1]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), some_other_test_metrics), ['Time', 'Malloc'])
+        self.assertEqual(some_other_test_metrics[0].path(), ['some-dir', 'some-test', 'some other test = else'])
+        self.assertEqual(some_other_test_metrics[0].flattened_iteration_values(), [6, 7, 8, 9, 10] * 4)
+        self.assertEqual(some_other_test_metrics[1].path(), ['some-dir', 'some-test', 'some other test = else'])
+        self.assertEqual(some_other_test_metrics[1].flattened_iteration_values(), [11, 12, 13, 14, 15] * 4)
+
+        main_metrics = subtests[len(subtests) - 1]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
+        self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
+        self.assertEqual(main_metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)

         self.assertEqual(actual_stdout, '')
         self.assertEqual(actual_stderr, '')
-        self.assertEqual(actual_logs, '')
+        self.assertEqual(actual_logs, """DESCRIPTION: this is a test description.
+RESULT some-dir: some-test: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
+""")
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
r159803 → r159805

         for i in range(0, len(path)):
             is_last_token = i + 1 == len(path)
-            url = view_source_url('PerformanceTests/' + (metric.test_file_name() if is_last_token else '/'.join(path[0:i + 1])))
+            url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
             tests.setdefault(path[i], {'url': url})
             current_test = tests[path[i]]
             if is_last_token:
+                current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                 current_test.setdefault('metrics', {})
                 assert metric.name() not in current_test['metrics']
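A minimal sketch of the bug and the fix, under stated assumptions: view_source_url here is a stand-in for the webkitpy helper, the 'tests' nesting mirrors the integration-test fixture below, and descent details of the real loop are simplified. The old code relied on setdefault to write the file-name url, so when a subtest's metric was processed first, the main test's node kept the directory-style url; re-setting the url unconditionally at the last token fixes that.

    def view_source_url(path):
        # Stand-in for webkitpy's helper; real links point at trac.webkit.org.
        return 'http://trac.webkit.org/browser/trunk/' + path

    tests = {}
    test_file_name = 'Parser/test-with-subtests.html'

    # Process the subtest's metric first, then the main test's, as in the bug report.
    for path in (['Parser', 'test-with-subtests', 'subtest'],
                 ['Parser', 'test-with-subtests']):
        node = tests
        for i in range(len(path)):
            is_last_token = i + 1 == len(path)
            url = view_source_url('PerformanceTests/' + '/'.join(path[:i + 1]))
            node.setdefault(path[i], {'url': url})
            current_test = node[path[i]]
            if is_last_token:
                # The fix: re-set the url from the file name. With setdefault alone,
                # the subtest pass would leave a directory-style url on this node.
                current_test['url'] = view_source_url('PerformanceTests/' + test_file_name)
            else:
                node = current_test.setdefault('tests', {})

    assert tests['Parser']['tests']['test-with-subtests']['url'].endswith('test-with-subtests.html')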
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
r159465 → r159805

+class TestWithSubtestsData:
+    text = """subtest:Time -> [1, 2, 3, 4, 5] ms
+:Time -> [1080, 1120, 1095, 1101, 1104] ms
+"""
+
+    output = """Running 1 tests
+Running Parser/test-with-subtests.html (1 of 1)
+RESULT Parser: test-with-subtests: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+Finished: 0.1 s
+"""
+
+    results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
+        'metrics': {'Time': {'current': [[1080.0, 1120.0, 1095.0, 1101.0, 1104.0]] * 4}},
+        'tests': {
+            'subtest': {
+                'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
+                'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}}}
+
+
 class TestDriver:
     def run_test(self, driver_input, stop_when_done):
…
         elif driver_input.test_name.endswith('memory-test.html'):
             text = MemoryTestData.text
+        elif driver_input.test_name.endswith('test-with-subtests.html'):
+            text = TestWithSubtestsData.text
         return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
…
         self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
         self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
+
+    def test_run_test_with_subtests(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        runner._timestamp = 123456789
+        port.host.filesystem.write_text_file(runner._base_path + '/Parser/test-with-subtests.html', 'some content')
+
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner.run()
+        finally:
+            stdout, stderr, log = output.restore_output()
+
+        self.assertEqual(unexpected_result_count, 0)
+        self.assertEqual(self._normalize_output(log), TestWithSubtestsData.output + '\nMOCK: user.open_url: file://...\n')
+        parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
+        self.maxDiff = None
+        self.assertEqual(parser_tests['test-with-subtests'], TestWithSubtestsData.results)

     def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):