Modified: trunk/Websites/perf.webkit.org/ChangeLog (240318 => 240319)
--- trunk/Websites/perf.webkit.org/ChangeLog 2019-01-23 04:17:06 UTC (rev 240318)
+++ trunk/Websites/perf.webkit.org/ChangeLog 2019-01-23 04:28:57 UTC (rev 240319)
@@ -1,5 +1,23 @@
2019-01-17 Dewei Zhu <[email protected]>
+ Analyzing a chart that does not exist should not halt the whole run-analysis script.
+ https://bugs.webkit.org/show_bug.cgi?id=193563
+
+ Reviewed by Ryosuke Niwa.
+
+ Halting the whole run-analysis script whenever an invalid chart is specified in the manifest makes the script fragile.
+ The run-analysis script is also responsible for adding retries and sending notifications, which should not be blocked by this error.
+ Skipping the analysis for the corresponding configuration is the more reasonable behavior.
+
+ * public/v3/models/measurement-set.js:
+ (MeasurementSet.prototype._ensureClusterPromise): Only add the callback when one is specified.
+ This helps fix the 'UnhandledPromiseRejectionWarning' emitted while running the tests.
+ * tools/js/measurement-set-analyzer.js:
+ (MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): Catch the exception raised when fetching a measurement set fails, and skip the analysis for that configuration.
+ * unit-tests/measurement-set-analyzer-tests.js: Added unit tests for this.
+
+2019-01-17 Dewei Zhu <[email protected]>
+
Updating commit in OSBuildFetcher should respect revision range in config.
https://bugs.webkit.org/show_bug.cgi?id=193558
Modified: trunk/Websites/perf.webkit.org/public/v3/models/measurement-set.js (240318 => 240319)
--- trunk/Websites/perf.webkit.org/public/v3/models/measurement-set.js 2019-01-23 04:17:06 UTC (rev 240318)
+++ trunk/Websites/perf.webkit.org/public/v3/models/measurement-set.js 2019-01-23 04:28:57 UTC (rev 240319)
@@ -62,7 +62,8 @@
if (!this._primaryClusterPromise)
this._primaryClusterPromise = this._fetchPrimaryCluster(noCache);
var self = this;
- this._primaryClusterPromise.catch(callback);
+ if (callback)
+ this._primaryClusterPromise.catch(callback);
return this._primaryClusterPromise.then(function () {
self._allFetches[self._primaryClusterEndTime] = self._primaryClusterPromise;
return Promise.all(self.findClusters(startTime, endTime).map(function (clusterEndTime) {
@@ -76,12 +77,14 @@
if (!this._callbackMap.has(clusterEndTime))
this._callbackMap.set(clusterEndTime, new Set);
var callbackSet = this._callbackMap.get(clusterEndTime);
- callbackSet.add(callback);
+ if (callback)
+ callbackSet.add(callback);
var promise = this._allFetches[clusterEndTime];
- if (promise)
- promise.then(callback, callback);
- else {
+ if (promise) {
+ if (callback)
+ promise.then(callback, callback);
+ } else {
promise = this._fetchSecondaryCluster(clusterEndTime);
for (var existingCallback of callbackSet)
promise.then(existingCallback, existingCallback);
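For clarity, why the new `if (callback)` guards matter: passing `undefined` to `.then()` or `.catch()` still creates a derived promise, and if the underlying fetch rejects, that orphaned promise rejects with no handler attached, which is what produces the 'UnhandledPromiseRejectionWarning' during the tests. A minimal illustrative snippet, not part of this patch:

    // Illustrative only; not part of this patch.
    const fetchPromise = Promise.reject('ConfigurationNotFound');

    // Old behavior: .catch(undefined) still creates a derived promise that re-rejects,
    // and nothing ever handles it, so Node reports an UnhandledPromiseRejectionWarning.
    fetchPromise.catch(undefined);

    // Callers such as run-analysis simply await the returned promise and handle the
    // rejection themselves, so chaining a non-existent callback is unnecessary.
    fetchPromise.catch((error) => console.log('handled:', error));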
Modified: trunk/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js (240318 => 240319)
--- trunk/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js 2019-01-23 04:17:06 UTC (rev 240318)
+++ trunk/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js 2019-01-23 04:28:57 UTC (rev 240319)
@@ -49,7 +49,14 @@
const metric = Metric.findById(measurementSet.metricId());
const platform = Platform.findById(measurementSet.platformId());
this._logger.info(`==== "${metric.fullName()}" on "${platform.name()}" ====`);
- await measurementSet.fetchBetween(this._startTime, this._endTime);
+ try {
+ await measurementSet.fetchBetween(this._startTime, this._endTime);
+ } catch (error) {
+ if (error != 'ConfigurationNotFound')
+ throw error;
+ this._logger.warn(`Skipping analysis for "${metric.fullName()}" on "${platform.name()}" as time series does not exist.`);
+ return;
+ }
const currentTimeSeries = measurementSet.fetchedTimeSeries('current', false, false);
const rawValues = currentTimeSeries.values();
if (!rawValues || rawValues.length < 2)
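For context, the intended effect on the overall run-analysis pass can be sketched as follows; the loop shape below is an assumption for illustration, not code from this patch:

    // Rough sketch, assuming the analyzer iterates over measurement sets like this
    // (the loop shape is an assumption, not code from this patch).
    for (const measurementSet of this._measurementSetList) {
        // _analyzeMeasurementSet now logs a warning and returns early on
        // 'ConfigurationNotFound', so one missing time series no longer aborts the
        // remaining analyses, retries, or notifications.
        await this._analyzeMeasurementSet(measurementSet);
    }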
Modified: trunk/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js (240318 => 240319)
--- trunk/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js 2019-01-23 04:17:06 UTC (rev 240318)
+++ trunk/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js 2019-01-23 04:28:57 UTC (rev 240319)
@@ -35,10 +35,12 @@
{
const info_logs = [];
const error_logs =[];
+ const warn_logs =[];
return {
info: (message) => info_logs.push(message),
+ warn: (message) => warn_logs.push(message),
error: (message) => error_logs.push(message),
- info_logs, error_logs
+ info_logs, warn_logs, error_logs
};
}
@@ -89,6 +91,44 @@
assert.deepEqual(logger.error_logs, []);
});
+ it('should not analyze if there is no corresponding time series for a measurement set', async () => {
+ const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+ const logger = mockLogger();
+ const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+ const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+ assert.equal(requests.length, 1);
+ assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+ requests[0].reject('ConfigurationNotFound');
+
+ try {
+ await analysisPromise;
+ } catch (error) {
+ assert(false, 'Should not throw any exception here');
+ }
+ assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+ assert.deepEqual(logger.warn_logs, [`Skipping analysis for "${MockModels.someMetric.fullName()}" on "${MockModels.somePlatform.name()}" as time series does not exist.`]);
+ assert.deepEqual(logger.error_logs, []);
+ });
+
+ it('should throw an error if "measurementSet.fetchBetween" fails for a reason other than "ConfigurationNotFound"', async () => {
+ const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+ const logger = mockLogger();
+ const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+ const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+ assert.equal(requests.length, 1);
+ assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+ requests[0].reject('SomeError');
+
+ try {
+ await analysisPromise;
+ } catch (error) {
+ assert.equal(error, 'SomeError');
+ }
+ assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+ assert.deepEqual(logger.warn_logs, []);
+ assert.deepEqual(logger.error_logs, []);
+ });
+
it('should not analyze if there is only one data point in the measurement set', async () => {
const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
const logger = mockLogger();
@@ -297,7 +337,6 @@
assert.deepEqual(logger.error_logs, []);
});
-
it('should not create confirming A/B tests if a new regression is detected but no triggerable available', async () => {
PrivilegedAPI.configure('test', 'password');
const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);