[ 
https://issues.apache.org/jira/browse/HADOOP-19256?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17924490#comment-17924490
 ] 

ASF GitHub Bot commented on HADOOP-19256:
-----------------------------------------

saikatroy038 commented on code in PR #7329:
URL: https://github.com/apache/hadoop/pull/7329#discussion_r1944538173


##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3APutIfMatch.java:
##########
@@ -101,24 +99,26 @@ private static void createFileWithIfNoneMatchFlag(
             Path path,
             byte[] data,
             String ifMatchTag) throws Exception {
-          FSDataOutputStreamBuilder builder = fs.createFile(path);
-          builder.must(FS_S3A_CONDITIONAL_FILE_CREATE, ifMatchTag);
-          FSDataOutputStream stream = builder.create().build();
-          if (data != null && data.length > 0) {
-              stream.write(data);
-          }
-          stream.close();
-          IOUtils.closeStream(stream);
+        FSDataOutputStreamBuilder builder = fs.createFile(path);
+        builder.must(FS_S3A_CONDITIONAL_FILE_CREATE, "true");
+        builder.opt(FS_S3A_CREATE_HEADER + "." + IF_NONE_MATCH, ifMatchTag);
+        FSDataOutputStream stream = builder.create().build();
+        if (data != null && data.length > 0) {
+            stream.write(data);
+        }
+        stream.close();
+        IOUtils.closeStream(stream);
     }
 
     @Test
     public void testPutIfAbsentConflict() throws Throwable {
         FileSystem fs = getFileSystem();
         Path testFile = methodPath();
-
         fs.mkdirs(testFile.getParent());
         byte[] fileBytes = dataset(TEST_FILE_LEN, 0, 255);
 
+        createFileWithIfNoneMatchFlag(fs, testFile, fileBytes, "*");
+
         RemoteFileChangedException firstException = 
intercept(RemoteFileChangedException.class,
                 () -> createFileWithIfNoneMatchFlag(fs, testFile, fileBytes, 
"*"));
         assertS3ExceptionStatusCode(412, firstException);

Review Comment:
   done



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3APutIfMatch.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSDataOutputStreamBuilder;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest;
+import org.apache.hadoop.fs.s3a.RemoteFileChangedException;
+import org.apache.hadoop.fs.s3a.S3ATestUtils;
+import org.apache.hadoop.io.IOUtils;
+
+import org.junit.Test;
+import software.amazon.awssdk.services.s3.model.S3Exception;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
+import static 
org.apache.hadoop.fs.s3a.Constants.FS_S3A_CONDITIONAL_FILE_CREATE;
+import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_HEADER;
+import static org.apache.hadoop.fs.s3a.Constants.MIN_MULTIPART_THRESHOLD;
+import static org.apache.hadoop.fs.s3a.Constants.MULTIPART_MIN_SIZE;
+import static org.apache.hadoop.fs.s3a.Constants.MULTIPART_SIZE;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfNotEnabled;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static org.apache.hadoop.fs.s3a.impl.AWSHeaders.IF_NONE_MATCH;
+import static 
org.apache.hadoop.fs.s3a.impl.InternalConstants.UPLOAD_PART_COUNT_LIMIT;
+import static org.apache.hadoop.fs.s3a.scale.S3AScaleTestBase._1MB;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+
+public class ITestS3APutIfMatch extends AbstractS3ACostTest {
+
+    private Configuration conf;

Review Comment:
   done





> S3A: Support S3 Conditional Writes
> ----------------------------------
>
>                 Key: HADOOP-19256
>                 URL: https://issues.apache.org/jira/browse/HADOOP-19256
>             Project: Hadoop Common
>          Issue Type: Sub-task
>          Components: fs/s3
>            Reporter: Ahmar Suhail
>            Priority: Major
>              Labels: pull-request-available
>
> S3 Conditional Write (Put-if-absent) capability is now generally available - 
> [https://aws.amazon.com/about-aws/whats-new/2024/08/amazon-s3-conditional-writes/]
>  
> S3A should allow passing in this put-if-absent header to prevent overwriting 
> of files. 
> There is a feature branch for this: HADOOP-19256-s3-conditional-writes
> + support etags to allow an overwrite to be restricted to overwriting a 
> specific version. This can be done through a createFile option.
> https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-writes.html
> Fun fact: third-party stores will not reject overwrites if they don't 
> recognise the headers, so there's no way to be sure they are supported 
> without testing.
> We need a flag to enable/disable conditional writes, which can be exposed in a 
> hasPathCapability()



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to