This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new d10a822ddd HDDS-10653. Support custom metadata for MPU key (#6489)
d10a822ddd is described below

commit d10a822ddd5df89053dc87b20621e3785c2acb9f
Author: Ivan Andika <[email protected]>
AuthorDate: Mon Apr 22 13:19:17 2024 +0800

    HDDS-10653. Support custom metadata for MPU key (#6489)
---
 .../apache/hadoop/ozone/client/OzoneBucket.java    | 11 +++-
 .../ozone/client/protocol/ClientProtocol.java      | 25 ++++++--
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  | 12 ++++
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  1 +
 .../src/main/smoketest/s3/MultipartUpload.robot    | 18 +++++-
 .../dist/src/main/smoketest/s3/objectputget.robot  |  5 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 42 ++++++++++++-
 .../S3InitiateMultipartUploadRequest.java          |  2 +
 .../S3InitiateMultipartUploadRequestWithFSO.java   |  2 +
 .../S3MultipartUploadCompleteRequest.java          |  4 ++
 .../ozone/om/request/OMRequestTestUtils.java       | 23 ++++++-
 .../TestS3InitiateMultipartUploadRequest.java      | 12 +++-
 ...estS3InitiateMultipartUploadRequestWithFSO.java | 11 +++-
 .../s3/multipart/TestS3MultipartRequest.java       | 52 +++++++++++++++-
 .../TestS3MultipartUploadCompleteRequest.java      | 21 +++++--
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     | 16 +++++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   | 13 ++--
 .../hadoop/ozone/client/ClientProtocolStub.java    | 11 +++-
 .../hadoop/ozone/client/OzoneBucketStub.java       | 70 +++++++++++++++++-----
 .../s3/endpoint/TestMultipartUploadComplete.java   | 59 ++++++++++++++++--
 .../ozone/s3/endpoint/TestPermissionCheck.java     |  3 +-
 21 files changed, 361 insertions(+), 52 deletions(-)
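
For context, a minimal usage sketch of the new client-side overload added by this change (not part of the patch; the client setup, volume/bucket names and part size below are illustrative assumptions):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

public final class MpuCustomMetadataSketch {
  public static void main(String[] args) throws Exception {
    try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      // Assumes vol1/bucket1 already exist.
      OzoneBucket bucket = client.getObjectStore().getVolume("vol1").getBucket("bucket1");

      Map<String, String> customMetadata = new HashMap<>();
      customMetadata.put("custom-key1", "custom-value1");

      // New overload: custom metadata is attached when the multipart upload is
      // initiated and is carried over to the key on completeMultipartUpload.
      OmMultipartInfo info = bucket.initiateMultipartUpload("mpuKey",
          ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
          customMetadata);

      // Upload a single illustrative part; completing the upload is omitted here.
      byte[] part = new byte[5 * 1024 * 1024];
      try (OzoneOutputStream out = bucket.createMultipartKey(
          "mpuKey", part.length, 1, info.getUploadID())) {
        out.write(part);
      }
    }
  }
}

Once the upload is completed, the custom metadata is returned by S3 head-object and shown by `ozone sh key info`, as exercised by the updated smoketests below.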

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 6976d1842c..207bd54528 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -680,7 +680,16 @@ public class OzoneBucket extends WithMetadata {
   public OmMultipartInfo initiateMultipartUpload(String keyName,
       ReplicationConfig config)
       throws IOException {
-    return proxy.initiateMultipartUpload(volumeName, name, keyName, config);
+    return initiateMultipartUpload(keyName, config, Collections.emptyMap());
+  }
+
+  /**
+   * Initiate multipart upload for a specified key.
+   */
+  public OmMultipartInfo initiateMultipartUpload(String keyName,
+      ReplicationConfig config, Map<String, String> metadata)
+      throws IOException {
+    return proxy.initiateMultipartUpload(volumeName, name, keyName, config, metadata);
   }
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 492cd31b67..912a3138c4 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -327,7 +327,7 @@ public interface ClientProtocol {
    * @param bucketName Name of the Bucket
    * @param keyName Name of the Key
    * @param size Size of the data
-   * @param metadata custom key value metadata
+   * @param metadata Custom key value metadata
    * @return {@link OzoneOutputStream}
    *
    */
@@ -509,10 +509,10 @@ public interface ClientProtocol {
 
   /**
    * Initiate Multipart upload.
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param replicationConfig
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param replicationConfig Replication Config
    * @return {@link OmMultipartInfo}
    * @throws IOException
    */
@@ -520,6 +520,21 @@ public interface ClientProtocol {
       bucketName, String keyName, ReplicationConfig replicationConfig)
       throws IOException;
 
+  /**
+   * Initiate Multipart upload.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param replicationConfig Replication config
+   * @param metadata Custom key value metadata
+   * @return {@link OmMultipartInfo}
+   * @throws IOException
+   */
+  OmMultipartInfo initiateMultipartUpload(String volumeName, String
+      bucketName, String keyName, ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException;
+
   /**
    * Create a part key for a multipart upload key.
    * @param volumeName
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 0806ffb847..a03760cd4e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1811,6 +1811,17 @@ public class RpcClient implements ClientProtocol {
       String keyName,
       ReplicationConfig replicationConfig)
       throws IOException {
+    return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig,
+        Collections.emptyMap());
+  }
+
+  @Override
+  public OmMultipartInfo initiateMultipartUpload(String volumeName,
+      String bucketName,
+      String keyName,
+      ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
     HddsClientUtils.checkNotNull(keyName);
@@ -1829,6 +1840,7 @@ public class RpcClient implements ClientProtocol {
         .setKeyName(keyName)
         .setReplicationConfig(replicationConfig)
         .setAcls(getAclList())
+        .addAllMetadataGdpr(metadata)
         .build();
     OmMultipartInfo multipartInfo = ozoneManagerClient
         .initiateMultipartUpload(keyArgs);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 3c6b6647a6..98e558e347 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -1605,6 +1605,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setVolumeName(omKeyArgs.getVolumeName())
         .setBucketName(omKeyArgs.getBucketName())
         .setKeyName(omKeyArgs.getKeyName())
+        .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata()))
         .addAllAcls(omKeyArgs.getAcls().stream().map(a ->
             OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 3a6ae0e45d..96feec2f81 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -87,7 +87,7 @@ Test Multipart Upload
 
 
 Test Multipart Upload Complete
-    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1
+    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true"
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
                         Should contain          ${result}    ${BUCKET}
                         Should contain          ${result}    ${PREFIX}/multipartKey
@@ -117,6 +117,16 @@ Test Multipart Upload Complete
                                 Should contain                ${result}    ETag
                                 Should Be Equal As Strings    ${resultETag}    "${expectedResultETag}-2"
 
+#check whether the user defined metadata can be retrieved
+    ${result} =                 Execute AWSS3ApiCli           head-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1
+                                Should contain                ${result}    \"custom-key1\": \"custom-value1\"
+                                Should contain                ${result}    \"custom-key2\": \"custom-value2\"
+
+    ${result} =                 Execute                       ozone sh key info /s3v/${BUCKET}/${PREFIX}/multipartKey1
+                                Should contain                ${result}    \"custom-key1\" : \"custom-value1\"
+                                Should contain                ${result}    \"custom-key2\" : \"custom-value2\"
+                                Should not contain            ${result}    \"gdprEnabled\": \"true\"
+
 #read file and check the key
     ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result
                                 Execute                    cat /tmp/part1 /tmp/part2 > /tmp/${PREFIX}-multipartKey1
@@ -128,6 +138,12 @@ Test Multipart Upload Complete
     ${result} =                 Execute AWSS3ApiCli        get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 /tmp/${PREFIX}-multipartKey1-part2.result
     Compare files               /tmp/part2        /tmp/${PREFIX}-multipartKey1-part2.result
 
+Test Multipart Upload with user defined metadata size larger than 2 KB
+    ${custom_metadata_value} =  Execute                               printf 'v%.0s' {1..3000}
+    ${result} =                 Execute AWSS3APICli and checkrc       create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/mpuWithLargeMetadata --metadata="custom-key1=${custom_metadata_value}"    255
+                                Should contain                        ${result}   MetadataTooLarge
+                                Should not contain                    ${result}   custom-key1: ${custom_metadata_value}
+
 Test Multipart Upload Complete Entity too small
     ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey2
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
index 05348fbcba..bbff89e71f 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
@@ -185,8 +185,9 @@ Create file with user defined metadata with gdpr enabled value in request
 Create file with user defined metadata size larger than 2 KB
                                 Execute                    echo "Randomtext" > /tmp/testfile2
     ${custom_metadata_value} =  Execute                    printf 'v%.0s' {1..3000}
-    ${result} =                 Execute AWSS3APICli and ignore error        put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2 --body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}"
-                                Should not contain                          ${result}   custom-key1: ${custom_metadata_value}
+    ${result} =                 Execute AWSS3APICli and checkrc       put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2 --body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}"    255
+                                Should contain                        ${result}   MetadataTooLarge
+                                Should not contain                    ${result}   custom-key1: ${custom_metadata_value}
 
 Create small file and expect ETag (MD5) in a reponse header
                                 Execute                    head -c 1MB </dev/urandom > /tmp/small_file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index a77edd3abc..632076e2ee 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -25,6 +25,7 @@ import java.security.PrivilegedExceptionAction;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -2960,6 +2961,26 @@ public abstract class TestOzoneRpcClientAbstract {
             keyName, sampleData.length(), 10001, uploadID));
   }
 
+  @ParameterizedTest
+  @MethodSource("replicationConfigs")
+  public void testMultipartUploadWithCustomMetadata(ReplicationConfig replication) throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Create custom metadata
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
+    doMultipartUpload(bucket, keyName, (byte) 98, replication, customMetadata);
+  }
+
   @Test
   public void testAbortUploadFail() throws Exception {
     String volumeName = UUID.randomUUID().toString();
@@ -3593,8 +3614,14 @@ public abstract class TestOzoneRpcClientAbstract {
   private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
       ReplicationConfig replication)
       throws Exception {
+    doMultipartUpload(bucket, keyName, val, replication, Collections.emptyMap());
+  }
+
+  private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
+      ReplicationConfig replication, Map<String, String> customMetadata)
+      throws Exception {
     // Initiate Multipart upload request
-    String uploadID = initiateMultipartUpload(bucket, keyName, replication);
+    String uploadID = initiateMultipartUpload(bucket, keyName, replication, customMetadata);
 
     // Upload parts
     Map<Integer, String> partsMap = new TreeMap<>();
@@ -3661,12 +3688,23 @@ public abstract class TestOzoneRpcClientAbstract {
     latestVersionLocations.getBlocksLatestVersionOnly()
         .forEach(omKeyLocationInfo ->
             assertNotEquals(-1, omKeyLocationInfo.getPartNumber()));
+
+    Map<String, String> keyMetadata = omKeyInfo.getMetadata();
+    assertNotNull(keyMetadata.get(ETAG));
+    if (customMetadata != null && !customMetadata.isEmpty()) {
+      assertThat(keyMetadata).containsAllEntriesOf(customMetadata);
+    }
   }
 
   private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
       ReplicationConfig replicationConfig) throws Exception {
+    return initiateMultipartUpload(bucket, keyName, replicationConfig, Collections.emptyMap());
+  }
+
+  private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
+      ReplicationConfig replicationConfig, Map<String, String> customMetadata) throws Exception {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        replicationConfig);
+        replicationConfig, customMetadata);
 
     String uploadID = multipartInfo.getUploadID();
     assertNotNull(uploadID);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index e1772d4009..914a707deb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -211,6 +212,7 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
           .setUpdateID(transactionLogIndex)
           .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
               OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
+          .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
           .build();
 
       // Add to cache
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index d1c865fbc7..f2423736a3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
@@ -187,6 +188,7 @@ public class S3InitiateMultipartUploadRequestWithFSO
           .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
               OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
           .setParentObjectID(pathInfoFSO.getLastKnownParentId())
+          .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
           .build();
       
       // validate and update namespace for missing parent directory
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 5a2359560a..0ffcac8fa0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -491,6 +491,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
           .setOmKeyLocationInfos(
               Collections.singletonList(keyLocationInfoGroup))
           .setAcls(dbOpenKeyInfo.getAcls())
+          .addAllMetadata(dbOpenKeyInfo.getMetadata())
           .addMetadata(OzoneConsts.ETAG,
               multipartUploadedKeyHash(partKeyInfoMap));
       // Check if db entry has ObjectID. This check is required because
@@ -521,6 +522,9 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       omKeyInfo.setDataSize(dataSize);
       omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig());
+      if (dbOpenKeyInfo.getMetadata() != null) {
+        omKeyInfo.setMetadata(dbOpenKeyInfo.getMetadata());
+      }
       omKeyInfo.getMetadata().put(OzoneConsts.ETAG,
           multipartUploadedKeyHash(partKeyInfoMap));
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index 0ebd6946bd..8103f6616c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -28,6 +28,7 @@ import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import javax.xml.bind.DatatypeConverter;
@@ -49,6 +50,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -991,11 +993,28 @@ public final class OMRequestTestUtils {
    */
   public static OMRequest createInitiateMPURequest(String volumeName,
       String bucketName, String keyName) {
+    return createInitiateMPURequest(volumeName, bucketName, keyName, Collections.emptyMap());
+  }
+
+  /**
+   * Create OMRequest which encapsulates InitiateMultipartUpload request.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @param metadata
+   */
+  public static OMRequest createInitiateMPURequest(String volumeName,
+      String bucketName, String keyName, Map<String, String> metadata) {
     MultipartInfoInitiateRequest
         multipartInfoInitiateRequest =
         MultipartInfoInitiateRequest.newBuilder().setKeyArgs(
-            KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName)
-                .setBucketName(bucketName)).build();
+            KeyArgs.newBuilder()
+                .setVolumeName(volumeName)
+                .setKeyName(keyName)
+                .setBucketName(bucketName)
+                .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+            )
+            .build();
 
     return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
         .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
index a4c512b25a..0165716231 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
@@ -25,7 +25,9 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -62,8 +64,12 @@ public class TestS3InitiateMultipartUploadRequest
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
     OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
+        bucketName, keyName, customMetadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
         getS3InitiateMultipartUploadReq(modifiedRequest);
@@ -84,6 +90,10 @@ public class TestS3InitiateMultipartUploadRequest
     assertNotNull(openMPUKeyInfo);
     assertNotNull(openMPUKeyInfo.getLatestVersionLocations());
     assertTrue(openMPUKeyInfo.getLatestVersionLocations().isMultipartKey());
+    assertNotNull(openMPUKeyInfo.getMetadata());
+    assertEquals("custom-value1", 
openMPUKeyInfo.getMetadata().get("custom-key1"));
+    assertEquals("custom-value2", 
openMPUKeyInfo.getMetadata().get("custom-key2"));
+
     assertNotNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
 
     assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
index cbdea75720..dd8eb00edb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
@@ -36,7 +36,9 @@ import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -62,11 +64,15 @@ public class TestS3InitiateMultipartUploadRequestWithFSO
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
     final long volumeId = omMetadataManager.getVolumeId(volumeName);
     final long bucketId = omMetadataManager.getBucketId(volumeName,
             bucketName);
     OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName,
-        bucketName, keyName);
+        bucketName, keyName, customMetadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO =
         getS3InitiateMultipartUploadReq(modifiedRequest);
@@ -102,6 +108,9 @@ public class TestS3InitiateMultipartUploadRequestWithFSO
         "FileName mismatches!");
     assertEquals(parentID, omKeyInfo.getParentObjectID(),
         "ParentId mismatches!");
+    assertNotNull(omKeyInfo.getMetadata());
+    assertEquals("custom-value1", omKeyInfo.getMetadata().get("custom-key1"));
+    assertEquals("custom-value2", omKeyInfo.getMetadata().get("custom-key2"));
 
     OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
             .getMultipartInfoTable().get(multipartFileKey);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 16cb9b6821..1972fee69b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -21,10 +21,13 @@ package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager;
 import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer;
@@ -130,9 +133,24 @@ public class TestS3MultipartRequest {
    */
   protected OMRequest doPreExecuteInitiateMPU(
       String volumeName, String bucketName, String keyName) throws Exception {
+    return doPreExecuteInitiateMPU(volumeName, bucketName, keyName, Collections.emptyMap());
+  }
+
+  /**
+   * Perform preExecute of Initiate Multipart upload request for given
+   * volume, bucket and key name.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @param metadata
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPU(
+      String volumeName, String bucketName, String keyName,
+      Map<String, String> metadata) throws Exception {
     OMRequest omRequest =
         OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName,
-            keyName);
+            keyName, metadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
         getS3InitiateMultipartUploadReq(omRequest);
@@ -147,6 +165,14 @@ public class TestS3MultipartRequest {
     assertThat(modifiedRequest.getInitiateMultiPartUploadRequest()
         .getKeyArgs().getModificationTime()).isGreaterThan(0);
 
+    if (metadata != null) {
+      Map<String, String> modifiedKeyMetadata = KeyValueUtil.getFromProtobuf(
+          modifiedRequest.getInitiateMultiPartUploadRequest()
+              .getKeyArgs().getMetadataList());
+
+      assertThat(modifiedKeyMetadata).containsAllEntriesOf(metadata);
+    }
+
     return modifiedRequest;
   }
 
@@ -247,9 +273,24 @@ public class TestS3MultipartRequest {
    */
   protected OMRequest doPreExecuteInitiateMPUWithFSO(
       String volumeName, String bucketName, String keyName) throws Exception {
+    return doPreExecuteInitiateMPUWithFSO(volumeName, bucketName, keyName, Collections.emptyMap());
+  }
+
+  /**
+   * Perform preExecute of Initiate Multipart upload request for given
+   * volume, bucket and key name.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @param metadata
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPUWithFSO(
+      String volumeName, String bucketName, String keyName,
+      Map<String, String> metadata) throws Exception {
     OMRequest omRequest =
         OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName,
-            keyName);
+            keyName, metadata);
 
     S3InitiateMultipartUploadRequestWithFSO
         s3InitiateMultipartUploadRequestWithFSO =
@@ -265,6 +306,13 @@ public class TestS3MultipartRequest {
         .getKeyArgs().getMultipartUploadID());
     assertThat(modifiedRequest.getInitiateMultiPartUploadRequest()
         .getKeyArgs().getModificationTime()).isGreaterThan(0);
+    if (metadata != null) {
+      Map<String, String> modifiedKeyMetadata = KeyValueUtil.getFromProtobuf(
+          modifiedRequest.getInitiateMultiPartUploadRequest()
+          .getKeyArgs().getMetadataList());
+
+      assertThat(modifiedKeyMetadata).containsAllEntriesOf(metadata);
+    }
 
     return modifiedRequest;
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index 34e32b0e18..663f2925cb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -26,7 +26,9 @@ import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -72,13 +74,21 @@ public class TestS3MultipartUploadCompleteRequest
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
     String uploadId = checkValidateAndUpdateCacheSuccess(
-        volumeName, bucketName, keyName);
+        volumeName, bucketName, keyName, customMetadata);
     checkDeleteTableCount(volumeName, bucketName, keyName, 0, uploadId);
 
+    customMetadata.remove("custom-key1");
+    customMetadata.remove("custom-key2");
+    customMetadata.put("custom-key3", "custom-value3");
+
     // Do it twice to test overwrite
     uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName,
-        keyName);
+        keyName, customMetadata);
     // After overwrite, one entry must be in delete table
     checkDeleteTableCount(volumeName, bucketName, keyName, 1, uploadId);
   }
@@ -106,10 +116,10 @@ public class TestS3MultipartUploadCompleteRequest
   }
 
   private String checkValidateAndUpdateCacheSuccess(String volumeName,
-      String bucketName, String keyName) throws Exception {
+      String bucketName, String keyName, Map<String, String> metadata) throws Exception {
 
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
-        bucketName, keyName);
+        bucketName, keyName, metadata);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
         getS3InitiateMultipartUploadReq(initiateMPURequest);
@@ -175,6 +185,9 @@ public class TestS3MultipartUploadCompleteRequest
     assertNotNull(multipartKeyInfo.getLatestVersionLocations());
     assertTrue(multipartKeyInfo.getLatestVersionLocations()
         .isMultipartKey());
+    if (metadata != null) {
+      assertThat(multipartKeyInfo.getMetadata()).containsAllEntriesOf(metadata);
+    }
 
     OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable()
         .getCacheValue(new CacheKey<>(
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 5810c4ec2a..136e47c776 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.s3.endpoint;
 
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.container.ContainerRequestContext;
@@ -308,6 +309,20 @@ public abstract class EndpointBase implements Auditor {
         customMetadata.put(mapKey, value);
       }
     }
+
+    // If the request contains a custom metadata header "x-amz-meta-ETag",
+    // replace the metadata key to "etag-custom" to prevent key metadata collision with
+    // the ETag calculated by hashing the object when storing the key in OM table.
+    // The custom ETag metadata header will be rebuilt during the headObject operation.
+    if (customMetadata.containsKey(HttpHeaders.ETAG)
+        || customMetadata.containsKey(HttpHeaders.ETAG.toLowerCase())) {
+      String customETag = customMetadata.get(HttpHeaders.ETAG) != null ?
+          customMetadata.get(HttpHeaders.ETAG) : customMetadata.get(HttpHeaders.ETAG.toLowerCase());
+      customMetadata.remove(HttpHeaders.ETAG);
+      customMetadata.remove(HttpHeaders.ETAG.toLowerCase());
+      customMetadata.put(ETAG_CUSTOM, customETag);
+    }
+
     return customMetadata;
   }
 
@@ -321,6 +336,7 @@ public abstract class EndpointBase implements Auditor {
       }
       String metadataKey = entry.getKey();
       if (metadataKey.equals(ETAG_CUSTOM)) {
+        // Rebuild the ETag custom metadata header
         metadataKey = ETAG.toLowerCase();
       }
       responseBuilder
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 68daf80cfb..1ca17d5428 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -291,14 +291,6 @@ public class ObjectEndpoint extends EndpointBase {
       // Normal put object
       Map<String, String> customMetadata =
           getCustomMetadataFromHeaders(headers.getRequestHeaders());
-      if (customMetadata.containsKey(ETAG)
-          || customMetadata.containsKey(ETAG.toLowerCase())) {
-        String customETag = customMetadata.get(ETAG) != null ?
-            customMetadata.get(ETAG) : customMetadata.get(ETAG.toLowerCase());
-        customMetadata.remove(ETAG);
-        customMetadata.remove(ETAG.toLowerCase());
-        customMetadata.put(ETAG_CUSTOM, customETag);
-      }
 
       if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
           .equals(headers.getHeaderString("x-amz-content-sha256"))) {
@@ -756,11 +748,14 @@ public class ObjectEndpoint extends EndpointBase {
       OzoneBucket ozoneBucket = getBucket(bucket);
       String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
 
+      Map<String, String> customMetadata =
+          getCustomMetadataFromHeaders(headers.getRequestHeaders());
+
       ReplicationConfig replicationConfig =
           getReplicationConfig(ozoneBucket, storageType);
 
       OmMultipartInfo multipartInfo =
-          ozoneBucket.initiateMultipartUpload(key, replicationConfig);
+          ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata);
 
       MultipartUploadInitiateResponse multipartUploadInitiateResponse = new
           MultipartUploadInitiateResponse();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 0400bc6050..bc562d5d93 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.security.token.Token;
 
 import java.io.IOException;
 import java.net.URI;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -309,8 +310,16 @@ public class ClientProtocolStub implements ClientProtocol {
   public OmMultipartInfo initiateMultipartUpload(String volumeName,
         String bucketName, String keyName, ReplicationConfig replicationConfig)
       throws IOException {
+    return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig, Collections.emptyMap());
+  }
+
+  @Override
+  public OmMultipartInfo initiateMultipartUpload(String volumeName,
+         String bucketName, String keyName, ReplicationConfig replicationConfig,
+         Map<String, String> metadata)
+      throws IOException {
     return getBucket(volumeName, bucketName)
-        .initiateMultipartUpload(keyName, replicationConfig);
+        .initiateMultipartUpload(keyName, replicationConfig, metadata);
   }
 
   @Override
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index d272360fc3..b5f37aaef3 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -26,6 +26,7 @@ import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -72,7 +73,7 @@ public final class OzoneBucketStub extends OzoneBucket {
 
   private Map<String, byte[]> keyContents = new HashMap<>();
 
-  private Map<String, String> multipartUploadIdMap = new HashMap<>();
+  private Map<String, MultipartInfoStub> keyToMultipartUpload = new HashMap<>();
 
   private Map<String, Map<Integer, Part>> partList = new HashMap<>();
 
@@ -210,8 +211,8 @@ public final class OzoneBucketStub extends OzoneBucket {
                                                         int partNumber,
                                                         String uploadID)
       throws IOException {
-    String multipartUploadID = multipartUploadIdMap.get(key);
-    if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) {
+    MultipartInfoStub multipartInfo = keyToMultipartUpload.get(key);
+    if (multipartInfo == null || !multipartInfo.getUploadId().equals(uploadID)) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
       ByteBufferStreamOutput byteBufferStreamOutput =
@@ -275,6 +276,7 @@ public final class OzoneBucketStub extends OzoneBucket {
           ozoneKeyDetails.getCreationTime().toEpochMilli(),
           ozoneKeyDetails.getModificationTime().toEpochMilli(),
           ozoneKeyDetails.getReplicationConfig(),
+          ozoneKeyDetails.getMetadata(),
           ozoneKeyDetails.isFile());
     } else {
       throw new OMException(ResultCodes.KEY_NOT_FOUND);
@@ -358,16 +360,22 @@ public final class OzoneBucketStub extends OzoneBucket {
                                                  ReplicationType type,
                                                  ReplicationFactor factor)
       throws IOException {
-    String uploadID = UUID.randomUUID().toString();
-    multipartUploadIdMap.put(keyName, uploadID);
-    return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID);
+    return initiateMultipartUpload(keyName, ReplicationConfig.fromTypeAndFactor(type, factor),
+        Collections.emptyMap());
   }
 
   @Override
   public OmMultipartInfo initiateMultipartUpload(String keyName,
       ReplicationConfig repConfig) throws IOException {
+    return initiateMultipartUpload(keyName, repConfig, Collections.emptyMap());
+  }
+
+  @Override
+  public OmMultipartInfo initiateMultipartUpload(String keyName,
+       ReplicationConfig config, Map<String, String> metadata)
+      throws IOException {
     String uploadID = UUID.randomUUID().toString();
-    multipartUploadIdMap.put(keyName, uploadID);
+    keyToMultipartUpload.put(keyName, new MultipartInfoStub(uploadID, metadata));
     return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID);
   }
 
@@ -375,8 +383,8 @@ public final class OzoneBucketStub extends OzoneBucket {
   public OzoneOutputStream createMultipartKey(String key, long size,
                                               int partNumber, String uploadID)
       throws IOException {
-    String multipartUploadID = multipartUploadIdMap.get(key);
-    if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) {
+    MultipartInfoStub multipartInfo = keyToMultipartUpload.get(key);
+    if (multipartInfo == null || !multipartInfo.getUploadId().equals(uploadID)) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
       ByteArrayOutputStream byteArrayOutputStream =
@@ -402,13 +410,11 @@ public final class OzoneBucketStub extends OzoneBucket {
   public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
       String uploadID, Map<Integer, String> partsMap) throws IOException {
 
-    if (multipartUploadIdMap.get(key) == null) {
+    if (keyToMultipartUpload.get(key) == null) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
       final Map<Integer, Part> partsList = partList.get(key);
 
-      int count = 1;
-
       ByteArrayOutputStream output = new ByteArrayOutputStream();
 
       int prevPartNumber = 0;
@@ -429,6 +435,18 @@ public final class OzoneBucketStub extends OzoneBucket {
         }
         keyContents.put(key, output.toByteArray());
       }
+      
+      keyDetails.put(key, new OzoneKeyDetails(
+          getVolumeName(),
+          getName(),
+          key,
+          keyContents.get(key) != null ? keyContents.get(key).length : 0,
+          System.currentTimeMillis(),
+          System.currentTimeMillis(),
+          new ArrayList<>(), getReplicationConfig(),
+          keyToMultipartUpload.get(key).getMetadata(), null,
+          () -> readKey(key), true
+      ));
     }
 
     return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key,
@@ -438,17 +456,17 @@ public final class OzoneBucketStub extends OzoneBucket {
   @Override
   public void abortMultipartUpload(String keyName, String uploadID) throws
       IOException {
-    if (multipartUploadIdMap.get(keyName) == null) {
+    if (keyToMultipartUpload.get(keyName) == null) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     } else {
-      multipartUploadIdMap.remove(keyName);
+      keyToMultipartUpload.remove(keyName);
     }
   }
 
   @Override
   public OzoneMultipartUploadPartListParts listParts(String key,
       String uploadID, int partNumberMarker, int maxParts) throws IOException {
-    if (multipartUploadIdMap.get(key) == null) {
+    if (keyToMultipartUpload.get(key) == null) {
       throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
     }
     List<PartInfo> partInfoList = new ArrayList<>();
@@ -642,4 +660,26 @@ public final class OzoneBucketStub extends OzoneBucket {
     }
   }
 
+  /**
+   * Multipart upload stub to store MPU related information.
+   */
+  private static class MultipartInfoStub {
+    
+    private final String uploadId;
+    private final Map<String, String> metadata;
+
+    MultipartInfoStub(String uploadId, Map<String, String> metadata) {
+      this.uploadId = uploadId;
+      this.metadata = metadata;
+    }
+
+    public String getUploadId() {
+      return uploadId;
+    }
+
+    public Map<String, String> getMetadata() {
+      return metadata;
+    }
+  }
+
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
index 3c0c87a177..b23dbfb9c0 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
@@ -30,16 +30,22 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
 import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedHashMap;
+import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -54,6 +60,7 @@ import static org.mockito.Mockito.when;
 public class TestMultipartUploadComplete {
 
   private static final ObjectEndpoint REST = new ObjectEndpoint();
+  private static final HttpHeaders HEADERS = mock(HttpHeaders.class);
   private static final OzoneClient CLIENT = new OzoneClientStub();
 
   @BeforeAll
@@ -61,18 +68,30 @@ public class TestMultipartUploadComplete {
 
     CLIENT.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET);
 
-
-    HttpHeaders headers = mock(HttpHeaders.class);
-    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
+    when(HEADERS.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
         "STANDARD");
 
-    REST.setHeaders(headers);
+    REST.setHeaders(HEADERS);
     REST.setClient(CLIENT);
     REST.setOzoneConfiguration(new OzoneConfiguration());
   }
 
   private String initiateMultipartUpload(String key) throws IOException,
       OS3Exception {
+    return initiateMultipartUpload(key, Collections.emptyMap());
+  }
+
+  private String initiateMultipartUpload(String key, Map<String, String> metadata) throws IOException,
+      OS3Exception {
+    MultivaluedMap<String, String> metadataHeaders = new MultivaluedHashMap<>();
+
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      metadataHeaders.computeIfAbsent(CUSTOM_METADATA_HEADER_PREFIX + entry.getKey(), k -> new ArrayList<>())
+          .add(entry.getValue());
+          .add(entry.getValue());
+    }
+
+    when(HEADERS.getRequestHeaders()).thenReturn(metadataHeaders);
+
     Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
         key);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
@@ -83,7 +102,6 @@ public class TestMultipartUploadComplete {
     assertEquals(200, response.getStatus());
 
     return uploadID;
-
   }
 
   private Part uploadPart(String key, String uploadID, int partNumber, String
@@ -152,6 +170,37 @@ public class TestMultipartUploadComplete {
 
   }
 
+  @Test
+  public void testMultipartWithCustomMetadata() throws Exception {
+    String key = UUID.randomUUID().toString();
+
+    Map<String, String> customMetadata = new HashMap<>();
+    customMetadata.put("custom-key1", "custom-value1");
+    customMetadata.put("custom-key2", "custom-value2");
+
+    String uploadID = initiateMultipartUpload(key, customMetadata);
+
+    List<Part> partsList = new ArrayList<>();
+
+    // Upload parts
+    String content = "Multipart Upload 1";
+    int partNumber = 1;
+
+    Part part1 = uploadPart(key, uploadID, partNumber, content);
+    partsList.add(part1);
+
+    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
+        CompleteMultipartUploadRequest();
+    completeMultipartUploadRequest.setPartList(partsList);
+
+    completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
+
+    Response headResponse = REST.head(OzoneConsts.S3_BUCKET, key);
+
+    assertEquals("custom-value1", 
headResponse.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + "custom-key1"));
+    assertEquals("custom-value2", 
headResponse.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + "custom-key2"));
+  }
+
 
   @Test
   public void testMultipartInvalidPartOrderError() throws Exception {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
index d891573d5f..ec262cdf21 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
@@ -45,6 +45,7 @@ import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyMap;
 import static org.mockito.Mockito.anyString;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.isNull;
@@ -278,7 +279,7 @@ public class TestPermissionCheck {
   @Test
   public void testMultiUploadKey() throws IOException {
     when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
-    doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any());
+    doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any(), anyMap());
     ObjectEndpoint objectEndpoint = new ObjectEndpoint();
     objectEndpoint.setClient(client);
     objectEndpoint.setHeaders(headers);

