saxenapranav commented on code in PR #5446:
URL: https://github.com/apache/hadoop/pull/5446#discussion_r1122694142
##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java:
##########
@@ -272,6 +273,30 @@
DefaultValue = DEFAULT_ANALYSIS_PERIOD_MS)
private int analysisPeriod;
+ @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_MIN_ACCEPTABLE_ERROR_PERCENTAGE,
+ DefaultValue = DEFAULT_MIN_ACCEPTABLE_ERROR_PERCENTAGE)
+ private double minAcceptableErrorPercentage;
+
+ @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_MAX_EQUILIBRIUM_ERROR_PERCENTAGE,
+ DefaultValue = DEFAULT_MAX_EQUILIBRIUM_ERROR_PERCENTAGE)
+ private double maxEquilibriumErrorPercentage;
+
+ @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_RAPID_SLEEP_DECREASE_FACTOR,
+ DefaultValue = DEFAULT_RAPID_SLEEP_DECREASE_FACTOR)
+ private double rapidSleepDecreaseFactor;
+
+ @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_RAPID_SLEEP_DECREASE_TRANSITION_MS,
+ DefaultValue = DEFAULT_RAPID_SLEEP_DECREASE_TRANSITION_PERIOD_MS)
+ private double rapidSleepDecreaseTransitionPeriodMs;
+
+ @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_SLEEP_DECREASE_FACTOR,
+ DefaultValue = DEFAULT_SLEEP_DECREASE_FACTOR)
+ private double sleepDecreaseFactor;
+
+ @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_SLEEP_INCREASE_FACTOR,
+ DefaultValue = DEFAULT_SLEEP_INCREASE_FACTOR)
+ private double sleepIncreaseFactor;
Review Comment:
Let's add documentation of what each field means.
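For illustration, roughly the kind of Javadoc being asked for is shown below for two of the fields (the descriptions are assumptions about the intended semantics of the client-throttling settings, not text from the patch); the remaining fields would get the same treatment:
```java
  /**
   * Error percentage below which the client-side throttling analyzer
   * treats traffic as healthy and starts reducing its sleep duration.
   * Set via FS_AZURE_MIN_ACCEPTABLE_ERROR_PERCENTAGE.
   * (Description assumed for illustration.)
   */
  @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_MIN_ACCEPTABLE_ERROR_PERCENTAGE,
      DefaultValue = DEFAULT_MIN_ACCEPTABLE_ERROR_PERCENTAGE)
  private double minAcceptableErrorPercentage;

  /**
   * Factor by which the sleep duration shrinks once errors have been
   * absent for longer than the rapid-decrease transition period.
   * Set via FS_AZURE_RAPID_SLEEP_DECREASE_FACTOR.
   * (Description assumed for illustration.)
   */
  @DoubleConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_RAPID_SLEEP_DECREASE_FACTOR,
      DefaultValue = DEFAULT_RAPID_SLEEP_DECREASE_FACTOR)
  private double rapidSleepDecreaseFactor;
```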
##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java:
##########
@@ -285,6 +293,67 @@ public void testAbfsConfigConstructor() throws Exception {
Assert.assertEquals("Delta backoff interval was not set as expected.",
expectedDeltaBackoff, policy.getDeltaBackoff());
}
+// public void testClientBackoffOnlyNewRequest() throws IOException {
+@Test
+public void testClientBackoffOnlyNewWriteRequest() throws IOException, InterruptedException {
+ AzureBlobFileSystem fs = getFileSystem();
+ AbfsClient client = fs.getAbfsStore().getClient();
+ AbfsConfiguration configuration = client.getAbfsConfiguration();
+ Assume.assumeTrue(configuration.isAutoThrottlingEnabled());
+ AbfsCounters counters = client.getAbfsCounters();
+
+ URL dummyUrl = client.createRequestUrl("/", "");
+ String dummyMethod = AbfsHttpConstants.HTTP_METHOD_PUT;
+
+ AbfsRestOperationType testOperationType = AbfsRestOperationType.Append;
+
+ AbfsRestOperation restOp = new AbfsRestOperation(testOperationType, client, dummyMethod, dummyUrl, new ArrayList<>());
+
+ Long writeThrottleStatBefore = counters.toMap().get(AbfsStatistic.WRITE_THROTTLES.getStatName());
+ Thread.sleep(10000);
Review Comment:
Why is the sleep there?
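If the 10-second pause is meant to let the throttling analyzer's analysis window elapse, it could at least be derived from configuration instead of being hard-coded (sketch only; `getAnalysisPeriod()` is an assumed accessor name on AbfsConfiguration for the analysis-period setting):
```java
// Sleep just past one analysis window so the analyzer has had a chance
// to recompute its sleep duration. getAnalysisPeriod() is an assumed
// accessor; replace with however the configured period is exposed.
long analysisPeriodMs = configuration.getAnalysisPeriod();
Thread.sleep(analysisPeriodMs + 1000L);
```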
##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java:
##########
@@ -334,6 +337,21 @@ private boolean executeHttpOperation(final int retryCount,
return true;
}
+ /**
+ * Makes a call for client side throttling based on
+ * the request count.
+ * @param operationType operation type of current request
+ * @param abfsCounters AbfsCounters instance
+ */
+ @VisibleForTesting
+ boolean applyThrottlingBackoff(int retryCount, AbfsRestOperationType operationType, AbfsCounters abfsCounters) {
+ if (retryCount == 0) {
+ intercept.sendingRequest(operationType, abfsCounters);
+ return true;
+ }
+ return false;
+ }
+
Review Comment:
Once the test is refactored, let's keep
```
if (retryCount == 0) {
  intercept.sendingRequest(operationType, abfsCounters);
  return true;
}
```
inline at line 286.
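For reference, roughly what the inlined form inside executeHttpOperation could look like (sketch only; the signature matches the hunk header above, but the rest of the method body is elided and the exact placement is an assumption):
```java
private boolean executeHttpOperation(final int retryCount,
    TracingContext tracingContext) throws AzureBlobFileSystemException {
  // Notify the client-side throttling interceptor only on the first
  // attempt; retries of the same request should not be counted again.
  if (retryCount == 0) {
    intercept.sendingRequest(operationType, abfsCounters);
  }
  // ... existing request construction and execution continues here ...
}
```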
##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java:
##########
@@ -285,6 +293,67 @@ public void testAbfsConfigConstructor() throws Exception {
Assert.assertEquals("Delta backoff interval was not set as expected.",
expectedDeltaBackoff, policy.getDeltaBackoff());
}
+// public void testClientBackoffOnlyNewRequest() throws IOException {
+@Test
+public void testClientBackoffOnlyNewWriteRequest() throws IOException, InterruptedException {
+ AzureBlobFileSystem fs = getFileSystem();
+ AbfsClient client = fs.getAbfsStore().getClient();
+ AbfsConfiguration configuration = client.getAbfsConfiguration();
+ Assume.assumeTrue(configuration.isAutoThrottlingEnabled());
+ AbfsCounters counters = client.getAbfsCounters();
+
+ URL dummyUrl = client.createRequestUrl("/", "");
+ String dummyMethod = AbfsHttpConstants.HTTP_METHOD_PUT;
+
+ AbfsRestOperationType testOperationType = AbfsRestOperationType.Append;
+
+ AbfsRestOperation restOp = new AbfsRestOperation(testOperationType, client, dummyMethod, dummyUrl, new ArrayList<>());
+
+ Long writeThrottleStatBefore = counters.toMap().get(AbfsStatistic.WRITE_THROTTLES.getStatName());
+ Thread.sleep(10000);
+ boolean appliedBackoff = restOp.applyThrottlingBackoff(0, testOperationType, counters);
Review Comment:
Let's not test just this method.
Let's test the whole executeRequest method: mock the httpOperation, change the behaviour of httpOperation.processResponse so it fails 2-3 times, and then assert.
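Roughly that shape could look like the sketch below, living next to the other tests in TestExponentialRetryPolicy. Assumptions worth flagging: it relies on AbfsRestOperation exposing a stubbable createHttpOperation() hook for a Mockito spy, on processResponse(byte[], int, int) being the call to fail, and the real client may need extra stubbing (getConnection(), request signing) to survive with a mocked AbfsHttpOperation:
```java
@Test
public void testThrottlingInterceptSeenOncePerRequest() throws Exception {
  AzureBlobFileSystem fs = getFileSystem();
  AbfsClient client = fs.getAbfsStore().getClient();
  AbfsCounters counters = client.getAbfsCounters();

  URL url = client.createRequestUrl("/", "");
  AbfsRestOperation op = Mockito.spy(new AbfsRestOperation(
      AbfsRestOperationType.Append, client,
      AbfsHttpConstants.HTTP_METHOD_PUT, url, new ArrayList<>()));

  // Mocked HTTP operation: processResponse throws twice, then succeeds,
  // so execute() has to go through the retry loop.
  AbfsHttpOperation httpOperation = Mockito.mock(AbfsHttpOperation.class);
  Mockito.doReturn(httpOperation).when(op).createHttpOperation(); // assumed test hook
  Mockito.doThrow(new SocketException("Connection reset"))
      .doThrow(new SocketException("Connection reset"))
      .doNothing()
      .when(httpOperation)
      .processResponse(Mockito.nullable(byte[].class), Mockito.anyInt(), Mockito.anyInt());
  Mockito.doReturn(HttpURLConnection.HTTP_OK).when(httpOperation).getStatusCode();

  Long writeThrottlesBefore =
      counters.toMap().get(AbfsStatistic.WRITE_THROTTLES.getStatName());
  op.execute(getTestTracingContext(fs, true));
  Long writeThrottlesAfter =
      counters.toMap().get(AbfsStatistic.WRITE_THROTTLES.getStatName());

  // The interceptor should have been told about one new request,
  // not once per retry attempt.
  Assertions.assertThat(writeThrottlesAfter - writeThrottlesBefore)
      .isLessThanOrEqualTo(1);
}
```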
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]