Changeset 240319 in webkit


Ignore:
Timestamp:
Jan 22, 2019 8:28:57 PM (5 years ago)
Author:
Dewei Zhu
Message:

Analyzing a chart that does not exist should not halt whole run-analysis script.
https://bugs.webkit.org/show_bug.cgi?id=193563

Reviewed by Ryosuke Niwa.

Halting whole run-analysis script while there is any invalid chart specified in Manifest makes the script fragile.
Run-analysis is also responsible for adding retries and sending notifications, which should not be blocked by this error.
Skipping analyzing the corresponding configuration seems reasonable.

  • public/v3/models/measurement-set.js:

(MeasurementSet.prototype._ensureClusterPromise): Only add callback when callback is specified.
This will help to fix 'UnhandledPromiseRejectionWarning' while running the test.

  • tools/js/measurement-set-analyzer.js:

(MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): Catch the exception while failing to fetch a measurement set and skip the analysis for this config.

  • unit-tests/measurement-set-analyzer-tests.js: Added unit tests for this.
Location:
trunk/Websites/perf.webkit.org
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/Websites/perf.webkit.org/ChangeLog

    r240200 r240319  
     12019-01-17  Dewei Zhu  <dewei_zhu@apple.com>
     2
     3        Analyzing a chart that does not exist should not halt whole run-analysis script.
     4        https://bugs.webkit.org/show_bug.cgi?id=193563
     5
     6        Reviewed by Ryosuke Niwa.
     7
     8        Halting whole run-analysis script while there is any invalid chart specified in Manifest makes the script fragile.
     9        Run-analysis is also responsible for adding retries and sending notifications, which should not be blocked by this error.
     10        Skipping analyzing the corresponding configuration seems reasonable.
     11
     12        * public/v3/models/measurement-set.js:
     13        (MeasurementSet.prototype._ensureClusterPromise): Only add callback when callback is specified.
     14        This will help to fix 'UnhandledPromiseRejectionWarning' while running the test.
     15        * tools/js/measurement-set-analyzer.js:
     16        (MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): Catch the exception while failing to fetch a measurement set and skip the analysis for this config.
     17        * unit-tests/measurement-set-analyzer-tests.js: Added unit tests for this.
     18
    1192019-01-17  Dewei Zhu  <dewei_zhu@apple.com>
    220
  • trunk/Websites/perf.webkit.org/public/v3/models/measurement-set.js

    r233564 r240319  
    6363            this._primaryClusterPromise = this._fetchPrimaryCluster(noCache);
    6464        var self = this;
    65         this._primaryClusterPromise.catch(callback);
     65        if (callback)
     66            this._primaryClusterPromise.catch(callback);
    6667        return this._primaryClusterPromise.then(function () {
    6768            self._allFetches[self._primaryClusterEndTime] = self._primaryClusterPromise;
     
    7778            this._callbackMap.set(clusterEndTime, new Set);
    7879        var callbackSet = this._callbackMap.get(clusterEndTime);
    79         callbackSet.add(callback);
     80        if (callback)
     81            callbackSet.add(callback);
    8082
    8183        var promise = this._allFetches[clusterEndTime];
    82         if (promise)
    83             promise.then(callback, callback);
    84         else {
     84        if (promise) {
     85            if (callback)
     86                promise.then(callback, callback);
     87        } else {
    8588            promise = this._fetchSecondaryCluster(clusterEndTime);
    8689            for (var existingCallback of callbackSet)
  • trunk/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js

    r233326 r240319  
    5050        const platform = Platform.findById(measurementSet.platformId());
    5151        this._logger.info(`==== "${metric.fullName()}" on "${platform.name()}" ====`);
    52         await measurementSet.fetchBetween(this._startTime, this._endTime);
     52        try {
     53            await measurementSet.fetchBetween(this._startTime, this._endTime);
     54        } catch (error) {
     55            if (error != 'ConfigurationNotFound')
     56                throw error;
     57            this._logger.warn(`Skipping analysis for "${metric.fullName()}" on "${platform.name()}" as time series does not exit.`);
     58            return;
     59        }
    5360        const currentTimeSeries = measurementSet.fetchedTimeSeries('current', false, false);
    5461        const rawValues = currentTimeSeries.values();
  • trunk/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js

    r233326 r240319  
    3636        const info_logs = [];
    3737        const error_logs =[];
     38        const warn_logs =[];
    3839        return {
    3940            info: (message) => info_logs.push(message),
     41            warn: (message) => warn_logs.push(message),
    4042            error: (message) => error_logs.push(message),
    41             info_logs, error_logs
     43            info_logs, warn_logs, error_logs
    4244        };
    4345    }
     
    8789            await analysisPromise;
    8890            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
     91            assert.deepEqual(logger.error_logs, []);
     92        });
     93
     94        it('should not analyze if no corresponding time series for a measurement set', async () => {
     95            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
     96            const logger = mockLogger();
     97            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
     98            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
     99            assert.equal(requests.length, 1);
     100            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
     101            requests[0].reject('ConfigurationNotFound');
     102
     103            try {
     104                await analysisPromise;
     105            } catch (error) {
     106                assert(false, 'Should not throw any exception here');
     107            }
     108            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
     109            assert.deepEqual(logger.warn_logs, [`Skipping analysis for "${MockModels.someMetric.fullName()}" on "${MockModels.somePlatform.name()}" as time series does not exit.`]);
     110            assert.deepEqual(logger.error_logs, []);
     111        });
     112
     113        it('should throw an error if "measurementSet.fetchBetween" is not failed due to "ConfugurationNotFound"', async () => {
     114            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
     115            const logger = mockLogger();
     116            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
     117            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
     118            assert.equal(requests.length, 1);
     119            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
     120            requests[0].reject('SomeError');
     121
     122            try {
     123                await analysisPromise;
     124            } catch (error) {
     125                assert.equal(error, 'SomeError');
     126            }
     127            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
     128            assert.deepEqual(logger.warn_logs, []);
    89129            assert.deepEqual(logger.error_logs, []);
    90130        });
     
    297337            assert.deepEqual(logger.error_logs, []);
    298338        });
    299 
    300339
    301340        it('should not create confirming A/B tests if a new regression is detected but no triggerable available', async () => {
Note: See TracChangeset for help on using the changeset viewer.