[ https://issues.apache.org/jira/browse/HADOOP-18695?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17714027#comment-17714027 ]

ASF GitHub Bot commented on HADOOP-18695:
-----------------------------------------

steveloughran commented on code in PR #5548:
URL: https://github.com/apache/hadoop/pull/5548#discussion_r1171080025


##########
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java:
##########
@@ -31,21 +31,29 @@
 import java.util.concurrent.TimeUnit;
 
 import com.amazonaws.AmazonClientException;
-import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.api.UnsupportedRequestException;
 import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.util.Preconditions;
 
-import static org.apache.hadoop.io.retry.RetryPolicies.*;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.Constants.RETRY_INTERVAL;

Review Comment:
   roll back this import reordering to reduce backport pain



##########
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java:
##########
@@ -93,12 +101,15 @@ public class S3ARetryPolicy implements RetryPolicy {
   /** Exponential policy for the base of normal failures. */
   protected final RetryPolicy baseExponentialRetry;
 
-  /** Idempotent calls which raise IOEs are retried.
-   *  */
+  /**
+   * Idempotent calls which raise IOEs are retried.
+   */
   protected final RetryPolicy retryIdempotentCalls;
 
-  /** Policy for throttle requests, which are considered repeatable, even for
-   * non-idempotent calls, as the service rejected the call entirely. */
+  /**
+   * Policy for throttle requests, which are considered repeatable, even for
+   * non-idempotent calls, as the service rejected the call entirely.

Review Comment:
   revert this javadoc-only reformatting



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java:
##########
@@ -184,6 +193,7 @@ public void test_010_CreateHugeFile() throws IOException {
     Statistic putRequestsActive = Statistic.OBJECT_PUT_REQUESTS_ACTIVE;
     Statistic putBytesPending = Statistic.OBJECT_PUT_BYTES_PENDING;
 
+

Review Comment:
   cut this stray blank line





> S3A: reject multipart copy requests when disabled
> -------------------------------------------------
>
>                 Key: HADOOP-18695
>                 URL: https://issues.apache.org/jira/browse/HADOOP-18695
>             Project: Hadoop Common
>          Issue Type: Improvement
>          Components: fs/s3
>    Affects Versions: 3.4.0
>            Reporter: Steve Loughran
>            Assignee: Steve Loughran
>            Priority: Minor
>              Labels: pull-request-available
>
> Follow-on to HADOOP-18637: support huge file uploads against stores which
> don't support multipart uploads (MPU).
> * prevent use of the multipart APIs against any S3 store when they are
> disabled, using the logging auditor to reject the requests (see the sketch
> after this message)
> * tests to verify that rename of huge files still works, by setting a large
> part size (see the configuration sketch below)
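
A minimal sketch of the rejection path described in the first bullet,
assuming a hook which sees each outgoing SDK request (the real change wires
this into the S3A logging auditor). Only UnsupportedRequestException,
visible in the import diff above, is taken from the PR; the class and
method names below are illustrative, and a plain IOException stands in for
the PR's own exception type:

import java.io.IOException;

import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.services.s3.model.CopyPartRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;

/**
 * Illustrative request check: fail multipart initiation and part-copy
 * requests when multipart IO is disabled. The PR itself raises the
 * o.a.h.fs.s3a.api.UnsupportedRequestException imported in the diff above.
 */
public final class MultipartIORejectionSketch {

  /** Is multipart IO enabled for this store? */
  private final boolean multipartEnabled;

  public MultipartIORejectionSketch(final boolean multipartEnabled) {
    this.multipartEnabled = multipartEnabled;
  }

  /**
   * Inspect an outgoing request before execution, rejecting any multipart
   * initiation or part-copy request when multipart IO is disabled.
   * @param request the outgoing SDK request
   * @return the request, unchanged, if it is allowed
   * @throws IOException if the request must be rejected
   */
  public <T extends AmazonWebServiceRequest> T beforeExecution(T request)
      throws IOException {
    if (!multipartEnabled
        && (request instanceof InitiateMultipartUploadRequest
            || request instanceof CopyPartRequest)) {
      throw new IOException("Multipart IO is disabled; rejecting "
          + request.getClass().getSimpleName());
    }
    return request;
  }
}

Rejecting at the auditor level has the advantage that the check sits on the
request path itself, rather than in each caller which might issue a
multipart request.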
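
And a sketch of the test-side configuration from the second bullet.
fs.s3a.multipart.size is the standard S3A part-size option; the
enable/disable key name is assumed to be the one introduced by
HADOOP-18637:

import org.apache.hadoop.conf.Configuration;

public final class HugeFileRenameConfigSketch {

  /**
   * Build a configuration under which rename of a "huge" file must
   * succeed without multipart copy: multipart IO is disabled, and the
   * part size is raised so the whole file fits in a single part.
   */
  public static Configuration prepare() {
    Configuration conf = new Configuration();
    // assumed key name from HADOOP-18637: disable multipart IO entirely
    conf.setBoolean("fs.s3a.multipart.uploads.enabled", false);
    // raise the part size towards the S3 single-part ceiling of 5 GB
    conf.set("fs.s3a.multipart.size", "5G");
    return conf;
  }
}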


