Changeset 240319 in webkit
- Timestamp:
- Jan 22, 2019 8:28:57 PM (5 years ago)
- Location:
- trunk/Websites/perf.webkit.org
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Websites/perf.webkit.org/ChangeLog
r240200 r240319 1 2019-01-17 Dewei Zhu <dewei_zhu@apple.com> 2 3 Analyzing a chart that does not exist should not halt whole run-analysis script. 4 https://bugs.webkit.org/show_bug.cgi?id=193563 5 6 Reviewed by Ryosuke Niwa. 7 8 Halting whole run-analysis script while there is any invalid chart specified in Manifest makes the script fragile. 9 Run-analysis is also responsible for adding retry and sending notification which should not be blocked by this error. 10 Skipping analyzing the corresponding configuration seems reasonable. 11 12 * public/v3/models/measurement-set.js: 13 (MeasurementSet.prototype._ensureClusterPromise): Only add callback when callback is specified. 14 This will help to fix 'UnhandledPromiseRejectionWarning' while running the test. 15 * tools/js/measurement-set-analyzer.js: 16 (MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): Catch the exception while failing to fetch a measurement set and skip the analysis for this config. 17 * unit-tests/measurement-set-analyzer-tests.js: Added unit tests for this. 18 1 19 2019-01-17 Dewei Zhu <dewei_zhu@apple.com> 2 20 -
trunk/Websites/perf.webkit.org/public/v3/models/measurement-set.js
r233564 r240319 63 63 this._primaryClusterPromise = this._fetchPrimaryCluster(noCache); 64 64 var self = this; 65 this._primaryClusterPromise.catch(callback); 65 if (callback) 66 this._primaryClusterPromise.catch(callback); 66 67 return this._primaryClusterPromise.then(function () { 67 68 self._allFetches[self._primaryClusterEndTime] = self._primaryClusterPromise; … … 77 78 this._callbackMap.set(clusterEndTime, new Set); 78 79 var callbackSet = this._callbackMap.get(clusterEndTime); 79 callbackSet.add(callback); 80 if (callback) 81 callbackSet.add(callback); 80 82 81 83 var promise = this._allFetches[clusterEndTime]; 82 if (promise) 83 promise.then(callback, callback); 84 else { 84 if (promise) { 85 if (callback) 86 promise.then(callback, callback); 87 } else { 85 88 promise = this._fetchSecondaryCluster(clusterEndTime); 86 89 for (var existingCallback of callbackSet) -
trunk/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js
r233326 r240319 50 50 const platform = Platform.findById(measurementSet.platformId()); 51 51 this._logger.info(`==== "${metric.fullName()}" on "${platform.name()}" ====`); 52 await measurementSet.fetchBetween(this._startTime, this._endTime); 52 try { 53 await measurementSet.fetchBetween(this._startTime, this._endTime); 54 } catch (error) { 55 if (error != 'ConfigurationNotFound') 56 throw error; 57 this._logger.warn(`Skipping analysis for "${metric.fullName()}" on "${platform.name()}" as time series does not exit.`); 58 return; 59 } 53 60 const currentTimeSeries = measurementSet.fetchedTimeSeries('current', false, false); 54 61 const rawValues = currentTimeSeries.values(); -
trunk/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js
r233326 r240319 36 36 const info_logs = []; 37 37 const error_logs =[]; 38 const warn_logs =[]; 38 39 return { 39 40 info: (message) => info_logs.push(message), 41 warn: (message) => warn_logs.push(message), 40 42 error: (message) => error_logs.push(message), 41 info_logs, error_logs43 info_logs, warn_logs, error_logs 42 44 }; 43 45 } … … 87 89 await analysisPromise; 88 90 assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']); 91 assert.deepEqual(logger.error_logs, []); 92 }); 93 94 it('should not analyze if no corresponding time series for a measurement set', async () => { 95 const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); 96 const logger = mockLogger(); 97 const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger); 98 const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet); 99 assert.equal(requests.length, 1); 100 assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`); 101 requests[0].reject('ConfigurationNotFound'); 102 103 try { 104 await analysisPromise; 105 } catch (error) { 106 assert(false, 'Should not throw any exception here'); 107 } 108 assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']); 109 assert.deepEqual(logger.warn_logs, [`Skipping analysis for "${MockModels.someMetric.fullName()}" on "${MockModels.somePlatform.name()}" as time series does not exit.`]); 110 assert.deepEqual(logger.error_logs, []); 111 }); 112 113 it('should throw an error if "measurementSet.fetchBetween" is not failed due to "ConfugurationNotFound"', async () => { 114 const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); 115 const logger = mockLogger(); 116 const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger); 117 
const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet); 118 assert.equal(requests.length, 1); 119 assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`); 120 requests[0].reject('SomeError'); 121 122 try { 123 await analysisPromise; 124 } catch (error) { 125 assert.equal(error, 'SomeError'); 126 } 127 assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']); 128 assert.deepEqual(logger.warn_logs, []); 89 129 assert.deepEqual(logger.error_logs, []); 90 130 }); … … 297 337 assert.deepEqual(logger.error_logs, []); 298 338 }); 299 300 339 301 340 it('should not create confirming A/B tests if a new regression is detected but no triggerable available', async () => {
Note: See TracChangeset
for help on using the changeset viewer.