Changeset 144141 in webkit
- Timestamp:
- Feb 26, 2013 9:02:14 PM (11 years ago)
- Location:
- trunk
- Files:
-
- 1 added
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/PerformanceTests/ChangeLog
r142460 r144141 1 2013-02-25 Ryosuke Niwa <rniwa@webkit.org> 2 3 Use perf.webkit.org JSON format in results page 4 https://bugs.webkit.org/show_bug.cgi?id=110842 5 6 Reviewed by Benjamin Poulain. 7 8 Updated the results page template to use the new JSON format. 9 10 Since new JSON format doesn't contain statistics such as stdev and min, added statistics.js to compute 11 these values. Also use 95% percentile confidence interval instead of standard deviation in various places. 12 13 * resources/results-template.html: Added statistics.js as dependency. 14 (TestResult): Updated to take a metric instead of its test. Replaced stdev() with confidenceIntervalDelta() 15 now that we have a fancy Statistics class. 16 17 (TestRun.webkitRevision): 18 (PerfTestMetric): Renamed from PerfTest since this object now encapsulates each measurement (such as time, 19 JS heap, and malloc) in test. Also added a conversion table from a metric name to a unit since new format 20 doesn't contain units. 21 (PerfTestMetric.name): Updated to compute the full metric name from test name and metric name, matching 22 the old behavior. 23 (PerfTestMetric.isMemoryTest): Explicitly look for 'JSHeap' and 'Malloc' tests. 24 (PerfTestMetric.smallerIsBetter): 25 26 (attachPlot): Deleted the code to deal with tests that don't provide individual iteration measurement 27 since such tests no longer exist. Also fixed up the code compute y-axis range. 28 29 (createTableRow.markupForRun): Updated to use confidenceIntervalDelta() instead of stdev(). 30 31 (init.addTests): Added. Recursively add metrics. 32 33 * resources/statistics.js: Added. Imported from perf.webkit.org. 34 (Statistics.max): 35 (Statistics.min): 36 (Statistics.sum): 37 (Statistics.squareSum): 38 (Statistics.sampleStandardDeviation): 39 (Statistics.supportedConfidenceLevels): 40 (Statistics.confidenceIntervalDelta): 41 (Statistics.confidenceInterval): 42 1 43 2013-02-11 Alexei Filippov <alph@chromium.org> 2 44 -
trunk/PerformanceTests/resources/results-template.html
r140070 r144141 138 138 139 139 (function () { 140 var jQuery = ['PerformanceTests/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js']; 141 var plugins = ['PerformanceTests/resources/jquery.flot.min.js', 'PerformanceTests/resources/jquery.tablesorter.min.js']; 140 var jQuery = 'PerformanceTests/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js'; 141 var plugins = ['PerformanceTests/resources/jquery.flot.min.js', 'PerformanceTests/resources/jquery.tablesorter.min.js', 142 'PerformanceTests/resources/statistics.js']; 142 143 var localPath = '%AbsolutePathToWebKitTrunk%'; 143 144 var remotePath = 'https://svn.webkit.org/repository/webkit/trunk'; … … 184 185 })(); 185 186 186 function TestResult(associatedTest, result, associatedRun) { 187 this.unit = function () { return result.unit; } 188 this.test = function () { return associatedTest; } 189 this.values = function () { return result.values ? result.values.map(function (value) { return associatedTest.scalingFactor() * value; }) : undefined; } 190 this.unscaledMean = function () { return result.avg; } 191 this.mean = function () { return associatedTest.scalingFactor() * result.avg; } 192 this.min = function () { return associatedTest.scalingFactor() * result.min; } 193 this.max = function () { return associatedTest.scalingFactor() * result.max; } 194 this.stdev = function () { return associatedTest.scalingFactor() * result.stdev; } 195 this.stdevRatio = function () { return result.stdev / result.avg; } 196 this.percentDifference = function(other) { return (other.mean() - this.mean()) / this.mean(); } 187 function TestResult(metric, values, associatedRun) { 188 this.test = function () { return metric; } 189 this.values = function () { return values.map(function (value) { return metric.scalingFactor() * value; }); } 190 this.unscaledMean = function () { return Statistics.sum(values) / values.length; } 191 this.mean = function () { return metric.scalingFactor() * this.unscaledMean(); } 192 this.min = function () { return 
metric.scalingFactor() * Statistics.min(values); } 193 this.max = function () { return metric.scalingFactor() * Statistics.max(values); } 194 this.confidenceIntervalDelta = function () { 195 return metric.scalingFactor() * Statistics.confidenceIntervalDelta(0.95, values.length, 196 Statistics.sum(values), Statistics.squareSum(values)); 197 } 198 this.confidenceIntervalDeltaRatio = function () { return this.confidenceIntervalDelta() / this.mean(); } 199 this.percentDifference = function(other) { return (other.unscaledMean() - this.unscaledMean()) / this.unscaledMean(); } 197 200 this.isStatisticallySignificant = function (other) { 198 201 var diff = Math.abs(other.mean() - this.mean()); 199 return diff > this. stdev() && diff > other.stdev();202 return diff > this.confidenceIntervalDelta() && diff > other.confidenceIntervalDelta(); 200 203 } 201 204 this.run = function () { return associatedRun; } … … 204 207 function TestRun(entry) { 205 208 this.description = function () { return entry['description']; } 206 this.webkitRevision = function () { return entry[' webkit-revision']; }209 this.webkitRevision = function () { return entry['revisions']['WebKit']['revision']; } 207 210 this.label = function () { 208 211 var label = 'r' + this.webkitRevision(); … … 213 216 } 214 217 215 function PerfTest (name) {218 function PerfTestMetric(name, metric) { 216 219 var testResults = []; 217 220 var cachedUnit = null; 218 221 var cachedScalingFactor = null; 222 var unit = {'FrameRate': 'fps', 'Runs': 'runs/s', 'Time': 'ms', 'Malloc': 'bytes', 'JSHeap': 'bytes'}[metric]; 219 223 220 224 // We can't do this in TestResult because all results for each test need to share the same unit and the same scaling factor. … … 225 229 return; 226 230 227 var unit = testResults[0].unit(); // FIXME: We should verify that all results have the same unit.228 231 var mean = testResults[0].unscaledMean(); // FIXME: We should look at all values. 229 232 var kilo = unit == 'bytes' ? 
1024 : 1000; … … 240 243 } 241 244 242 this.name = function () { return name ; }243 this.isMemoryTest = function () { return name.indexOf(':') >= 0; }245 this.name = function () { return name + ':' + metric; } 246 this.isMemoryTest = function () { return metric == 'JSHeap' || metric == 'Malloc'; } 244 247 this.addResult = function (newResult) { 245 248 testResults.push(newResult); … … 256 259 return cachedUnit; 257 260 } 258 this.smallerIsBetter = function () { return testResults[0].unit() == 'ms' || testResults[0].unit()== 'bytes'; }261 this.smallerIsBetter = function () { return unit == 'ms' || unit == 'bytes'; } 259 262 } 260 263 … … 354 357 }, []); 355 358 356 var plotData = []; 357 if (values.length) 358 plotData = [$.extend(true, {}, subpointsPlotOptions, {data: values})]; 359 else { 360 function makeSubpoints(id, callback) { return $.extend(true, {}, subpointsPlotOptions, {id: id, data: results.map(callback)}); } 361 plotData = [makeSubpoints('min', function (result, index) { return [index, result.min()]; }), 362 makeSubpoints('max', function (result, index) { return [index, result.max()]; }), 363 makeSubpoints('-σ', function (result, index) { return [index, result.mean() - result.stdev()]; }), 364 makeSubpoints('+σ', function (result, index) { return [index, result.mean() + result.stdev()]; })]; 365 } 366 359 var plotData = [$.extend(true, {}, subpointsPlotOptions, {data: values})]; 367 360 plotData.push({id: 'μ', data: results.map(function (result, index) { return [index, result.mean()]; }), color: plotColor}); 368 361 362 var overallMax = Statistics.max(results.map(function (result, index) { return result.max(); })); 363 var overallMin = Statistics.min(results.map(function (result, index) { return result.min(); })); 364 var margin = (overallMax - overallMin) * 0.1; 369 365 var currentPlotOptions = $.extend(true, {}, mainPlotOptions, {yaxis: { 370 min: minIsZero ? 
0 : Math.min.apply(Math, results.map(function (result, index) { return result.min(); })) * 0.98,371 max: Math.max.apply(Math, results.map(function (result, index) { return result.max(); })) * (minIsZero ? 1.1 : 1.01)}});366 min: minIsZero ? 0 : overallMin - margin, 367 max: minIsZero ? overallMax * 1.1 : overallMax + margin}}); 372 368 373 369 currentPlotOptions.xaxis.max = results.length - 0.5; … … 476 472 } 477 473 478 var statistics = 'σ=' + toFixedWidthPrecision(result. stdev()) + ', min=' + toFixedWidthPrecision(result.min())474 var statistics = 'σ=' + toFixedWidthPrecision(result.confidenceIntervalDelta()) + ', min=' + toFixedWidthPrecision(result.min()) 479 475 + ', max=' + toFixedWidthPrecision(result.max()) + '\n' + regressionAnalysis; 480 476 481 477 // Tablesorter doesn't know about the second cell so put the comparison in the invisible element. 482 478 return '<td class="result" title="' + statistics + '">' + toFixedWidthPrecision(result.mean()) + hiddenValue 483 + '</td><td class=" stdev" title="' + statistics + '">± '484 + formatPercentage(result. 
stdevRatio()) + warning + '</td>' + comparisonCell;479 + '</td><td class="confidenceIntervalDelta" title="' + statistics + '">± ' 480 + formatPercentage(result.confidenceIntervalDeltaRatio()) + warning + '</td>' + comparisonCell; 485 481 } 486 482 … … 548 544 549 545 var runs = []; 550 var tests = {};546 var metrics = {}; 551 547 $.each(JSON.parse(document.getElementById('json').textContent), function (index, entry) { 552 548 var run = new TestRun(entry); 553 549 runs.push(run); 554 $.each(entry.results, function (test, result) { 555 if (!tests[test]) 556 tests[test] = new PerfTest(test); 557 tests[test].addResult(new TestResult(tests[test], result, run)); 558 }); 550 551 function addTests(tests, parentFullName) { 552 for (var testName in tests) { 553 var fullTestName = parentFullName + '/' + testName; 554 var rawMetrics = tests[testName].metrics; 555 556 for (var metricName in rawMetrics) { 557 var fullMetricName = fullTestName + ':' + metricName; 558 var metric = metrics[fullMetricName]; 559 if (!metric) { 560 metric = new PerfTestMetric(fullTestName, metricName); 561 metrics[fullMetricName] = metric; 562 } 563 metric.addResult(new TestResult(metric, rawMetrics[metricName].current, run)); 564 } 565 566 if (tests[testName].tests) 567 addTests(tests[testName].tests, fullTestName); 568 } 569 } 570 571 addTests(entry.tests, ''); 559 572 }); 560 573 561 574 var shouldIgnoreMemory= true; 562 575 var referenceIndex = 0; 563 createTable(tests, runs, shouldIgnoreMemory, referenceIndex); 576 577 createTable(metrics, runs, shouldIgnoreMemory, referenceIndex); 564 578 565 579 $('#time-memory').bind('change', function (event, checkedElement) { 566 580 shouldIgnoreMemory = checkedElement.textContent == 'Time'; 567 createTable( tests, runs, shouldIgnoreMemory, referenceIndex);581 createTable(metrics, runs, shouldIgnoreMemory, referenceIndex); 568 582 }); 569 583 … … 574 588 $('#reference').bind('change', function (event, checkedElement) { 575 589 referenceIndex = 
parseInt(checkedElement.getAttribute('value')); 576 createTable( tests, runs, shouldIgnoreMemory, referenceIndex);590 createTable(metrics, runs, shouldIgnoreMemory, referenceIndex); 577 591 }); 578 592 -
trunk/Tools/ChangeLog
r144136 r144141 1 2013-02-25 Ryosuke Niwa <rniwa@webkit.org> 2 3 Use perf.webkit.org JSON format in results page 4 https://bugs.webkit.org/show_bug.cgi?id=110842 5 6 Reviewed by Benjamin Poulain. 7 8 Change the default JSON format from that of webkit-perf.appspot.com to that of perf.webkit.org. 9 10 A whole bunch of integration tests have been updated to use the new JSON format. 11 12 * Scripts/webkitpy/performance_tests/perftestsrunner.py: 13 (PerfTestsRunner._generate_and_show_results): Renamed output and output_path to legacy_output 14 and legacy_output_json_path respectively. 15 (PerfTestsRunner._generate_results_dict): Don't assume meta build information is always available. 16 (PerfTestsRunner._generate_output_files): Make json_output, which is used to generate the default 17 JSON file and the results page out of perf_webkit_output instead of legacy_output. 18 19 * Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py: 20 (MainTest.test_run_memory_test): 21 (MainTest._test_run_with_json_output.mock_upload_json): 22 (MainTest): 23 (MainTest.test_run_with_json_output): 24 (MainTest.test_run_with_description): 25 (MainTest.test_run_generates_json_by_default): 26 (MainTest.test_run_merges_output_by_default): 27 (MainTest.test_run_respects_reset_results): 28 (MainTest.test_run_generates_and_show_results_page): 29 (MainTest.test_run_with_slave_config_json): 30 (MainTest.test_run_with_multiple_repositories): 31 (MainTest.test_run_with_upload_json): 32 (MainTest.test_run_with_upload_json_should_generate_perf_webkit_json): 33 1 34 2013-02-26 Adam Barth <abarth@webkit.org> 2 35 -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
r143903 r144141 208 208 def _generate_and_show_results(self): 209 209 options = self._options 210 output_json_path = self._output_json_path()211 output, perf_webkit_output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)210 perf_webkit_json_path = self._output_json_path() 211 legacy_output, perf_webkit_output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number) 212 212 213 213 if options.slave_config_json_path: 214 output, perf_webkit_output = self._merge_slave_config_json(options.slave_config_json_path,output, perf_webkit_output)215 if not output:214 legacy_output, perf_webkit_output = self._merge_slave_config_json(options.slave_config_json_path, legacy_output, perf_webkit_output) 215 if not legacy_output: 216 216 return self.EXIT_CODE_BAD_SOURCE_JSON 217 217 218 output = self._merge_outputs_if_needed(output_json_path,output)219 if not output:218 perf_webkit_output = self._merge_outputs_if_needed(perf_webkit_json_path, perf_webkit_output) 219 if not perf_webkit_output: 220 220 return self.EXIT_CODE_BAD_MERGE 221 perf_webkit_output = [perf_webkit_output]222 223 results_page_path = self._host.filesystem.splitext( output_json_path)[0] + '.html'224 perf_webkit_json_path = self._host.filesystem.splitext(output_json_path)[0] + '-perf-webkit.json' if options.test_results_server else None225 self._generate_output_files( output_json_path, perf_webkit_json_path, results_page_path,output, perf_webkit_output)221 legacy_output = [legacy_output] 222 223 results_page_path = self._host.filesystem.splitext(perf_webkit_json_path)[0] + '.html' 224 legacy_output_json_path = self._host.filesystem.splitext(perf_webkit_json_path)[0] + '-legacy.json' if options.test_results_server else None 225 self._generate_output_files(legacy_output_json_path, perf_webkit_json_path, results_page_path, legacy_output, perf_webkit_output) 226 226 
227 227 if options.test_results_server: 228 if not self._upload_json(options.test_results_server, output_json_path):228 if not self._upload_json(options.test_results_server, legacy_output_json_path): 229 229 return self.EXIT_CODE_FAILED_UPLOADING 230 230 … … 254 254 contents[key] = value 255 255 256 contents_for_perf_webkit = { 257 'builderName': builder_name, 258 'buildNumber': str(build_number), 256 contents_for_perf_webkit = {'tests': {}} 257 if description: 258 contents_for_perf_webkit['description'] = description 259 260 meta_info = { 259 261 'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp), 260 262 'platform': platform, 261 263 'revisions': revisions_for_perf_webkit, 262 'tests': {}} 264 'builderName': builder_name, 265 'buildNumber': int(build_number) if build_number else None} 266 267 for key, value in meta_info.items(): 268 if value: 269 contents_for_perf_webkit[key] = value 263 270 264 271 # FIXME: Make this function shorter once we've transitioned to use perf.webkit.org. … … 323 330 return None 324 331 325 def _generate_output_files(self, output_json_path, perf_webkit_json_path, results_page_path, output, perf_webkit_output):332 def _generate_output_files(self, output_json_path, perf_webkit_json_path, results_page_path, legacy_output, perf_webkit_output): 326 333 filesystem = self._host.filesystem 327 334 328 json_output = json.dumps(output) 329 filesystem.write_text_file(output_json_path, json_output) 330 335 if output_json_path: 336 filesystem.write_text_file(output_json_path, json.dumps(legacy_output)) 337 338 json_output = json.dumps(perf_webkit_output) 331 339 if perf_webkit_json_path: 332 340 filesystem.write_text_file(perf_webkit_json_path, json.dumps(perf_webkit_output)) -
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
r143833 r144141 93 93 """ 94 94 95 results = { "max": 1510, "avg": 1490, "median": 1488, "min": 1471, "stdev": 15.13935, "unit": "ms",96 "values": [1486, 1471, 1510, 1505, 1478, 1490]}95 results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html', 96 'metrics': {'Time': {'current': [1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]}}} 97 97 98 98 … … 158 158 """ 159 159 160 results = {'values': [1080, 1120, 1095, 1101, 1104], 'avg': 1100, 'min': 1080, 'max': 1120, 161 'stdev': 14.50861, 'median': 1101, 'unit': 'ms'} 162 js_heap_results = {'values': [825000, 811000, 848000, 837000, 829000], 'avg': 830000, 'min': 811000, 'max': 848000, 163 'stdev': 13784.04875, 'median': 829000, 'unit': 'bytes'} 164 malloc_results = {'values': [529000, 511000, 548000, 536000, 521000], 'avg': 529000, 'min': 511000, 'max': 548000, 165 'stdev': 14124.44689, 'median': 529000, 'unit': 'bytes'} 160 results = {'current': [1080, 1120, 1095, 1101, 1104]} 161 js_heap_results = {'current': [825000, 811000, 848000, 837000, 829000]} 162 malloc_results = {'current': [529000, 511000, 548000, 536000, 521000]} 166 163 167 164 … … 303 300 self.assertEqual(unexpected_result_count, 0) 304 301 self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n') 305 results = self._load_output_json(runner)[0]['results'] 306 values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] 307 308 # Stdev for test doesn't match on some bots 309 self.assertEqual(sorted(results['Parser/memory-test'].keys()), sorted(MemoryTestData.results.keys())) 310 for key in MemoryTestData.results: 311 if key == 'stdev': 312 self.assertAlmostEqual(results['Parser/memory-test'][key], MemoryTestData.results[key], places=4) 313 else: 314 self.assertEqual(results['Parser/memory-test'][key], MemoryTestData.results[key]) 315 self.assertEqual(results['Parser/memory-test'], MemoryTestData.results) 316 
self.assertEqual(results['Parser/memory-test:JSHeap'], MemoryTestData.js_heap_results) 317 self.assertEqual(results['Parser/memory-test:Malloc'], MemoryTestData.malloc_results) 302 parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests'] 303 self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results) 304 self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results) 305 self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results) 318 306 319 307 def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, results_shown=True, expected_exit_code=0): … … 326 314 # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition. 327 315 self.assertIn(hostname, ['some.host', 'perf.webkit.org']) 328 self.assertIn(json_path, ['/mock-checkout/output.json', '/mock-checkout/output- perf-webkit.json'])316 self.assertIn(json_path, ['/mock-checkout/output.json', '/mock-checkout/output-legacy.json']) 329 317 self.assertIn(host_path, [None, '/api/report']) 330 318 uploaded[0] = upload_suceeds … … 352 340 353 341 _event_target_wrapper_and_inspector_results = { 354 "Bindings/event-target-wrapper": EventTargetWrapperTestData.results, 355 "inspector/pass.html:group_name:test_name": 42} 342 "Bindings": 343 {"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings", 344 "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}} 356 345 357 346 def test_run_with_json_output(self): … … 360 349 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 361 350 self.assertEqual(self._load_output_json(runner), [{ 362 " timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,363 " webkit-revision": "5678", "branch": "webkit-trunk"}])351 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 352 "revisions": 
{"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 364 353 365 354 filesystem = port.host.filesystem … … 372 361 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 373 362 self.assertEqual(self._load_output_json(runner), [{ 374 " timestamp": 123456789, "description": "some description",375 " results": self._event_target_wrapper_and_inspector_results,376 " webkit-revision": "5678", "branch": "webkit-trunk"}])363 "buildTime": "2013-02-08T15:19:37.460000", "description": "some description", 364 "tests": self._event_target_wrapper_and_inspector_results, 365 "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 377 366 378 367 def create_runner_and_setup_results_template(self, args=[]): … … 403 392 404 393 self.assertEqual(self._load_output_json(runner), [{ 405 " timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,406 " webkit-revision": "5678", "branch": "webkit-trunk"}])394 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 395 "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 407 396 408 397 self.assertTrue(filesystem.isfile(output_json_path)) … … 419 408 420 409 self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, { 421 " timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,422 " webkit-revision": "5678", "branch": "webkit-trunk"}])410 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 411 "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 423 412 self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html')) 424 413 … … 433 422 434 423 self.assertEqual(self._load_output_json(runner), [{ 435 " timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,436 " 
webkit-revision": "5678", "branch": "webkit-trunk"}])424 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 425 "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 437 426 self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html')) 438 427 pass … … 445 434 self._test_run_with_json_output(runner, filesystem, results_shown=False) 446 435 447 expected_entry = {" timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,448 " webkit-revision": "5678", "branch": "webkit-trunk"}436 expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 437 "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}} 449 438 450 439 self.maxDiff = None … … 492 481 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 493 482 self.assertEqual(self._load_output_json(runner), [{ 494 " timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,495 " webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"}])483 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 484 "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}]) 496 485 497 486 def test_run_with_bad_slave_config_json(self): … … 511 500 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 512 501 self.assertEqual(self._load_output_json(runner), [{ 513 "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results, 514 "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"}]) 502 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 503 "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": 
"5678"}, 504 "some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 515 505 516 506 def test_run_with_upload_json(self): … … 521 511 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) 522 512 self.assertEqual(generated_json[0]['platform'], 'platform1') 523 self.assertEqual(generated_json[0]['builder -name'], 'builder1')524 self.assertEqual(generated_json[0]['build -number'], 123)513 self.assertEqual(generated_json[0]['builderName'], 'builder1') 514 self.assertEqual(generated_json[0]['buildNumber'], 123) 525 515 526 516 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING) … … 533 523 534 524 self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True) 535 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output -perf-webkit.json'])525 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) 536 526 self.assertTrue(isinstance(generated_json, list)) 537 527 self.assertEqual(len(generated_json), 1) … … 540 530 self.maxDiff = None 541 531 self.assertEqual(output['platform'], 'platform1') 542 self.assertEqual(output['buildNumber'], '123')532 self.assertEqual(output['buildNumber'], 123) 543 533 self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000') 544 534 self.assertEqual(output['builderName'], 'builder1')
Note: See TracChangeset for help on using the changeset viewer.