This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 7370676dcf HDDS-9680. Use md5 hash of multipart object part's content 
as ETag (#5668)
7370676dcf is described below

commit 7370676dcf476a01094f7efa97aba98780a5073f
Author: Slava Tutrinov <[email protected]>
AuthorDate: Tue Feb 13 12:23:07 2024 +0300

    HDDS-9680. Use md5 hash of multipart object part's content as ETag (#5668)
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   5 +
 .../docs/content/feature/S3-Tenant-Commands.md     |   2 +-
 .../client/OzoneMultipartUploadPartListParts.java  |   9 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   3 +-
 .../helpers/OmMultipartCommitUploadPartInfo.java   |  11 +-
 .../om/helpers/OmMultipartUploadCompleteList.java  |   5 +-
 .../om/helpers/OmMultipartUploadListParts.java     |   3 +-
 .../apache/hadoop/ozone/om/helpers/OmPartInfo.java |  11 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   3 +-
 .../src/main/smoketest/s3/MultipartUpload.robot    |  33 ++--
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java |   9 +-
 .../hadoop/ozone/TestMultipartObjectGet.java       |   4 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |  18 ++-
 .../rpc/TestOzoneClientMultipartUploadWithFSO.java | 173 +++++++++++++--------
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 128 +++++++++------
 .../client/rpc/TestOzoneRpcClientWithRatis.java    |  10 +-
 .../ozone/om/TestObjectStoreWithLegacyFS.java      |  11 +-
 .../om/TestOzoneManagerHAWithStoppedNodes.java     |  10 +-
 .../src/main/proto/OmClientProtocol.proto          |   5 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   6 +-
 .../S3MultipartUploadCommitPartRequest.java        |  10 +-
 .../S3MultipartUploadCompleteRequest.java          |  85 ++++++++--
 .../ozone/om/request/OMRequestTestUtils.java       |  38 ++++-
 .../TestS3MultipartUploadCompleteRequest.java      |  16 +-
 .../s3/multipart/TestS3MultipartResponse.java      |   2 +-
 .../service/TestMultipartUploadCleanupService.java |   4 +
 .../om/service/TestOpenKeyCleanupService.java      |   4 +
 .../hadoop/ozone/s3/commontypes/KeyMetadata.java   |   4 +-
 .../endpoint/CompleteMultipartUploadRequest.java   |  10 +-
 .../endpoint/CompleteMultipartUploadResponse.java  |   3 +-
 .../ozone/s3/endpoint/CopyObjectResponse.java      |   3 +-
 .../hadoop/ozone/s3/endpoint/CopyPartResult.java   |   3 +-
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |   3 +-
 .../ozone/s3/endpoint/ListPartsResponse.java       |   3 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  13 +-
 .../ozone/s3/endpoint/ObjectEndpointStreaming.java |  21 +--
 .../hadoop/ozone/client/OzoneBucketStub.java       |  32 +++-
 .../ozone/client/OzoneDataStreamOutputStub.java    |   4 +-
 .../hadoop/ozone/client/OzoneOutputStreamStub.java |   5 +-
 ...CompleteMultipartUploadRequestUnmarshaller.java |   4 +-
 .../hadoop/ozone/s3/endpoint/TestListParts.java    |   6 +-
 .../s3/endpoint/TestMultipartUploadComplete.java   |   6 +-
 .../s3/endpoint/TestMultipartUploadWithCopy.java   |  13 +-
 .../hadoop/ozone/s3/endpoint/TestPartUpload.java   |  10 +-
 .../s3/endpoint/TestPartUploadWithStream.java      |  11 +-
 45 files changed, 527 insertions(+), 245 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 653af11ce8..f3c08b252b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -587,4 +587,9 @@ public final class OzoneConsts {
    */
   public static final String COMPACTION_LOG_TABLE =
       "compactionLogTable";
+
+  /**
+   * S3G multipart upload request's ETag header key.
+   */
+  public static final String ETAG = "ETag";
 }
diff --git a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md 
b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md
index f9ea5f6084..23c0155150 100644
--- a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md
+++ b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md
@@ -432,7 +432,7 @@ bash-4.2$ aws s3api --endpoint-url http://s3g:9878 
list-objects --bucket bucket-
         {
             "Key": "file1",
             "LastModified": "2022-02-16T00:10:00.000Z",
-            "ETag": "2022-02-16T00:10:00.000Z",
+            "ETag": "e99f93dedfe22e9a133dc3c634f14634",
             "Size": 3811,
             "StorageClass": "STANDARD"
         }
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
index c1902cdb60..67f8edf314 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
@@ -104,12 +104,15 @@ public class OzoneMultipartUploadPartListParts {
     private String partName;
     private long modificationTime;
     private long size;
+    private String eTag;
 
-    public PartInfo(int number, String name, long time, long size) {
+    public PartInfo(int number, String name, long time, long size,
+                    String eTag) {
       this.partNumber = number;
       this.partName = name;
       this.modificationTime = time;
       this.size = size;
+      this.eTag = eTag;
     }
 
     public int getPartNumber() {
@@ -127,5 +130,9 @@ public class OzoneMultipartUploadPartListParts {
     public long getSize() {
       return size;
     }
+
+    public String getETag() {
+      return eTag;
+    }
   }
 }
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index b3d853be6b..94d6ae9769 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1952,7 +1952,8 @@ public class RpcClient implements ClientProtocol {
       ozoneMultipartUploadPartListParts.addPart(
           new OzoneMultipartUploadPartListParts.PartInfo(
               omPartInfo.getPartNumber(), omPartInfo.getPartName(),
-              omPartInfo.getModificationTime(), omPartInfo.getSize()));
+              omPartInfo.getModificationTime(), omPartInfo.getSize(),
+              omPartInfo.getETag()));
     }
     return ozoneMultipartUploadPartListParts;
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
index 646cb421e4..bbf1a1bdae 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
@@ -24,8 +24,15 @@ public class OmMultipartCommitUploadPartInfo {
 
   private final String partName;
 
-  public OmMultipartCommitUploadPartInfo(String name) {
-    this.partName = name;
+  private final String eTag;
+
+  public OmMultipartCommitUploadPartInfo(String partName, String eTag) {
+    this.partName = partName;
+    this.eTag = eTag;
+  }
+
+  public String getETag() {
+    return eTag;
   }
 
   public String getPartName() {
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
index 63e6353c18..ff39661d01 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
@@ -56,8 +56,9 @@ public class OmMultipartUploadCompleteList {
    */
   public List<Part> getPartsList() {
     List<Part> partList = new ArrayList<>();
-    multipartMap.forEach((partNumber, partName) -> partList.add(Part
-        
.newBuilder().setPartName(partName).setPartNumber(partNumber).build()));
+    multipartMap.forEach((partNumber, eTag) -> partList.add(Part
+        // set partName equal to eTag for backward compatibility (partName is 
a required property)
+        
.newBuilder().setPartName(eTag).setETag(eTag).setPartNumber(partNumber).build()));
     return partList;
   }
 }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
index fbf519c226..0ba0e26acd 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
@@ -79,6 +79,7 @@ public class OmMultipartUploadListParts {
   public void addProtoPartList(List<PartInfo> partInfos) {
     partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo(
         partInfo.getPartNumber(), partInfo.getPartName(),
-        partInfo.getModificationTime(), partInfo.getSize())));
+        partInfo.getModificationTime(), partInfo.getSize(),
+        partInfo.getETag())));
   }
 }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
index 2d753a5caa..e908c5a025 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
@@ -28,12 +28,15 @@ public class OmPartInfo {
   private String partName;
   private long modificationTime;
   private long size;
+  private String eTag;
 
-  public OmPartInfo(int number, String name, long time, long size) {
+  public OmPartInfo(int number, String name, long time, long size,
+                    String eTag) {
     this.partNumber = number;
     this.partName = name;
     this.modificationTime = time;
     this.size = size;
+    this.eTag = eTag;
   }
 
   public int getPartNumber() {
@@ -52,9 +55,13 @@ public class OmPartInfo {
     return size;
   }
 
+  public String getETag() {
+    return eTag;
+  }
+
   public PartInfo getProto() {
     return 
PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName)
        .setModificationTime(modificationTime)
-       .setSize(size).build();
+       .setSize(size).setETag(eTag).build();
   }
 }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 67d7987326..5864102758 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -1632,7 +1632,8 @@ public final class 
OzoneManagerProtocolClientSideTranslatorPB
         .getCommitMultiPartUploadResponse();
 
     OmMultipartCommitUploadPartInfo info = new
-        OmMultipartCommitUploadPartInfo(response.getPartName());
+        OmMultipartCommitUploadPartInfo(response.getPartName(),
+          response.getETag());
     return info;
   }
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot 
b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 04cce8fefc..3a6ae0e45d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -94,21 +94,28 @@ Test Multipart Upload Complete
                         Should contain          ${result}    UploadId
 
 #upload parts
-    Run Keyword         Create Random file      5
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} 
--key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id 
${uploadID}
-    ${eTag1} =          Execute and checkrc     echo '${result}' | jq -r 
'.ETag'   0
-                        Should contain          ${result}    ETag
-
-                        Execute                 echo "Part2" > /tmp/part2
-    ${result} =         Execute AWSS3APICli     upload-part --bucket ${BUCKET} 
--key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id 
${uploadID}
-    ${eTag2} =          Execute and checkrc     echo '${result}' | jq -r 
'.ETag'   0
-                        Should contain          ${result}    ETag
+    Run Keyword         Create Random file            5
+    ${result} =         Execute AWSS3APICli           upload-part --bucket 
${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 
--upload-id ${uploadID}
+    ${eTag1} =          Execute and checkrc           echo '${result}' | jq -r 
'.ETag'   0
+                        Should contain                ${result}    ETag
+    ${part1Md5Sum} =    Execute                       md5sum /tmp/part1 | awk 
'{print $1}'
+                        Should Be Equal As Strings    ${eTag1}  ${part1Md5Sum}
+
+                        Execute                       echo "Part2" > /tmp/part2
+    ${result} =         Execute AWSS3APICli           upload-part --bucket 
${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 
--upload-id ${uploadID}
+    ${eTag2} =          Execute and checkrc           echo '${result}' | jq -r 
'.ETag'   0
+                        Should contain                ${result}    ETag
+    ${part2Md5Sum} =    Execute                       md5sum /tmp/part2 | awk 
'{print $1}'
+                        Should Be Equal As Strings    ${eTag2}  ${part2Md5Sum}
 
 #complete multipart upload
-    ${result} =         Execute AWSS3APICli     complete-multipart-upload 
--upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 
--multipart-upload 
'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
-                        Should contain          ${result}    ${BUCKET}
-                        Should contain          ${result}    
${PREFIX}/multipartKey1
-                        Should contain          ${result}    ETag
+    ${result} =                 Execute AWSS3APICli           
complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key 
${PREFIX}/multipartKey1 --multipart-upload 
'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
+                                Should contain                ${result}    
${BUCKET}
+                                Should contain                ${result}    
${PREFIX}/multipartKey1
+    ${resultETag} =             Execute and checkrc           echo '${result}' 
| jq -r '.ETag'   0
+    ${expectedResultETag} =     Execute                       echo -n 
${eTag1}${eTag2} | md5sum | awk '{print $1}'
+                                Should contain                ${result}    ETag
+                                Should Be Equal As Strings    ${resultETag}    
 "${expectedResultETag}-2"
 
 #read file and check the key
     ${result} =                 Execute AWSS3ApiCli        get-object --bucket 
${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index 6dccd60420..a41dcd80ac 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.ozone;
 
+import javax.xml.bind.DatatypeConverter;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -48,6 +49,7 @@ import org.junit.jupiter.api.Timeout;
 
 import java.io.FileNotFoundException;
 import java.net.URI;
+import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -57,6 +59,8 @@ import java.util.List;
 import java.util.Map;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
+import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
 import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -293,10 +297,13 @@ public class TestOzoneFSWithObjectStoreCreate {
 
     // This should succeed, as we check during creation of part or during
     // complete MPU.
+    ozoneOutputStream.getMetadata().put(ETAG,
+        DatatypeConverter.printHexBinary(MessageDigest.getInstance(MD5_HASH)
+            .digest(b)).toLowerCase());
     ozoneOutputStream.close();
 
     Map<Integer, String> partsMap = new HashMap<>();
-    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
+    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag());
 
     // Should fail, as we have directory with same name.
     OMException ex = assertThrows(OMException.class, () -> 
ozoneBucket.completeMultipartUpload(keyName,
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
index 852f351ee2..cb49f3b320 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
@@ -141,11 +141,11 @@ public class TestMultipartObjectGet {
     Response response = REST.put(BUCKET, KEY, content.length(),
         partNumber, uploadID, body);
     assertEquals(200, response.getStatus());
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
     CompleteMultipartUploadRequest.Part
         part = new CompleteMultipartUploadRequest.Part();
-    part.seteTag(response.getHeaderString("ETag"));
+    part.setETag(response.getHeaderString(OzoneConsts.ETAG));
     part.setPartNumber(partNumber);
     return part;
   }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index 919654d82a..0b0149b4d9 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.time.Instant;
 import java.util.ArrayList;
@@ -34,6 +35,7 @@ import java.util.TreeMap;
 import java.util.UUID;
 
 import com.google.common.cache.Cache;
+import javax.xml.bind.DatatypeConverter;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
@@ -120,6 +122,7 @@ class TestOzoneAtRestEncryption {
   private static final int DEFAULT_CRYPTO_BUFFER_SIZE = 8 * 1024; // 8KB
   // (this is the default Crypto Buffer size as determined by the config
   // hadoop.security.crypto.buffer.size)
+  private static MessageDigest eTagProvider;
 
   @BeforeAll
   static void init() throws Exception {
@@ -169,6 +172,7 @@ class TestOzoneAtRestEncryption {
 
     // create test key
     createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
+    eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH);
   }
 
   @AfterAll
@@ -631,14 +635,17 @@ class TestOzoneAtRestEncryption {
 
     ByteBuffer dataBuffer = ByteBuffer.wrap(data);
     multipartStreamKey.write(dataBuffer, 0, length);
+    multipartStreamKey.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     multipartStreamKey.close();
 
     OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
         multipartStreamKey.getCommitUploadPartInfo();
 
     assertNotNull(omMultipartCommitUploadPartInfo);
-    assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
-    return omMultipartCommitUploadPartInfo.getPartName();
+    assertNotNull(omMultipartCommitUploadPartInfo.getETag());
+    return omMultipartCommitUploadPartInfo.getETag();
   }
 
   private String uploadPart(OzoneBucket bucket, String keyName,
@@ -646,14 +653,17 @@ class TestOzoneAtRestEncryption {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, partNumber, uploadID);
     ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
         ozoneOutputStream.getCommitUploadPartInfo();
 
     assertNotNull(omMultipartCommitUploadPartInfo);
-    assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
-    return omMultipartCommitUploadPartInfo.getPartName();
+    assertNotNull(omMultipartCommitUploadPartInfo.getETag());
+    return omMultipartCommitUploadPartInfo.getETag();
   }
 
   private void completeMultipartUpload(OzoneBucket bucket, String keyName,
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 268a192640..1e75a4d10a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -17,8 +17,14 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.util.HashMap;
+
+import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -98,6 +104,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
   private static ObjectStore store = null;
   private static MiniOzoneCluster cluster = null;
   private static OzoneClient ozClient = null;
+  private static MessageDigest eTagProvider;
 
   private String volumeName;
   private String bucketName;
@@ -118,6 +125,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
     conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
     OMRequestTestUtils.configureFSOptimizedPaths(conf, true);
     startCluster(conf);
+    eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH);
   }
 
   /**
@@ -188,6 +196,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
+    assertEquals(volumeName, multipartInfo.getVolumeName());
+    assertEquals(bucketName, multipartInfo.getBucketName());
+    assertEquals(keyName, multipartInfo.getKeyName());
     assertNotNull(multipartInfo.getUploadID());
 
     // Call initiate multipart upload for the same key again, this should
@@ -195,6 +206,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
     multipartInfo = bucket.initiateMultipartUpload(keyName);
 
     assertNotNull(multipartInfo);
+    assertEquals(volumeName, multipartInfo.getVolumeName());
+    assertEquals(bucketName, multipartInfo.getBucketName());
+    assertEquals(keyName, multipartInfo.getKeyName());
     assertNotEquals(multipartInfo.getUploadID(), uploadID);
     assertNotNull(multipartInfo.getUploadID());
   }
@@ -208,13 +222,14 @@ public class TestOzoneClientMultipartUploadWithFSO {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         sampleData.length(), 1, uploadID);
     ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, 
DigestUtils.md5Hex(sampleData));
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
         .getCommitUploadPartInfo();
 
     assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    assertNotNull(commitUploadPartInfo.getETag());
   }
 
   @Test
@@ -224,12 +239,12 @@ public class TestOzoneClientMultipartUploadWithFSO {
         ReplicationType.RATIS, THREE);
 
     int partNumber = 1;
-    String partName = uploadPart(bucket, keyName, uploadID, partNumber,
-        sampleData.getBytes(UTF_8));
+    Pair<String, String> partNameAndETag = uploadPart(bucket, keyName, 
uploadID,
+        partNumber, sampleData.getBytes(UTF_8));
 
     //Overwrite the part by creating part key with same part number.
-    String partNameNew = uploadPart(bucket, keyName, uploadID, partNumber,
-        "name".getBytes(UTF_8));
+    Pair<String, String> partNameAndETagNew = uploadPart(bucket, keyName,
+        uploadID, partNumber, "name".getBytes(UTF_8));
 
     // PartName should be same from old part Name.
     // AWS S3 for same content generates same partName during upload part.
@@ -239,7 +254,10 @@ public class TestOzoneClientMultipartUploadWithFSO {
     // So, when a part is override partNames will still be same irrespective
     // of content in ozone s3. This will make S3 Mpu completeMPU pass when
     // comparing part names and large file uploads work using aws cp.
-    assertEquals(partName, partNameNew, "Part names should be same");
+    assertEquals(partNameAndETag.getKey(), partNameAndETagNew.getKey());
+
+    // ETags are not equal due to content differences
+    assertNotEquals(partNameAndETag.getValue(), partNameAndETagNew.getValue());
 
     // old part bytes written needs discard and have only
     // new part bytes in quota for this bucket
@@ -249,7 +267,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
   }
 
   @Test
-  public void testUploadTwiceWithEC() throws IOException {
+  public void testUploadTwiceWithEC()
+      throws IOException, NoSuchAlgorithmException {
     bucketName = UUID.randomUUID().toString();
     bucket = getOzoneECBucket(bucketName);
 
@@ -260,12 +279,12 @@ public class TestOzoneClientMultipartUploadWithFSO {
     String uploadID = multipartInfo.getUploadID();
     int partNumber = 1;
 
-    String partName = uploadPart(bucket, keyName, uploadID, partNumber,
-        data);
-
-    Map<Integer, String> partsMap = new HashMap<>();
-    partsMap.put(partNumber, partName);
-    bucket.completeMultipartUpload(keyName, uploadID, partsMap);
+    Pair<String, String> partNameAndETag = uploadPart(bucket, keyName, 
uploadID,
+        partNumber, data);
+    
+    Map<Integer, String> eTagsMap = new HashMap<>();
+    eTagsMap.put(partNumber, partNameAndETag.getValue());
+    bucket.completeMultipartUpload(keyName, uploadID, eTagsMap);
 
     long replicatedSize = QuotaUtil.getReplicatedSize(data.length,
         bucket.getReplicationConfig());
@@ -276,12 +295,12 @@ public class TestOzoneClientMultipartUploadWithFSO {
     multipartInfo = bucket.initiateMultipartUpload(keyName);
     uploadID = multipartInfo.getUploadID();
 
-    partName = uploadPart(bucket, keyName, uploadID, partNumber,
+    partNameAndETag = uploadPart(bucket, keyName, uploadID, partNumber,
         data);
 
-    partsMap = new HashMap<>();
-    partsMap.put(partNumber, partName);
-    bucket.completeMultipartUpload(keyName, uploadID, partsMap);
+    eTagsMap = new HashMap<>();
+    eTagsMap.put(partNumber, partNameAndETag.getValue());
+    bucket.completeMultipartUpload(keyName, uploadID, eTagsMap);
 
     // used sized should remain same, overwrite previous upload
     assertEquals(volume.getBucket(bucketName).getUsedBytes(),
@@ -289,7 +308,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
   }
 
   @Test
-  public void testUploadAbortWithEC() throws IOException {
+  public void testUploadAbortWithEC()
+      throws IOException, NoSuchAlgorithmException {
     byte[] data = generateData(81920, (byte) 97);
 
     bucketName = UUID.randomUUID().toString();
@@ -332,19 +352,19 @@ public class TestOzoneClientMultipartUploadWithFSO {
         ONE);
 
     // Upload Parts
-    Map<Integer, String> partsMap = new TreeMap<>();
+    Map<Integer, String> eTagsMap = new TreeMap<>();
     // Uploading part 1 with less than min size
-    String partName = uploadPart(bucket, keyName, uploadID, 1,
-        "data".getBytes(UTF_8));
-    partsMap.put(1, partName);
+    Pair<String, String> partNameAndETag = uploadPart(bucket, keyName, 
uploadID,
+        1, "data".getBytes(UTF_8));
+    eTagsMap.put(1, partNameAndETag.getValue());
 
-    partName = uploadPart(bucket, keyName, uploadID, 2,
-        "data".getBytes(UTF_8));
-    partsMap.put(2, partName);
+    partNameAndETag = uploadPart(bucket, keyName, uploadID, 2,
+            "data".getBytes(UTF_8));
+    eTagsMap.put(2, partNameAndETag.getValue());
 
     // Complete multipart upload
     OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+        () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap));
   }
 
   @Test
@@ -355,22 +375,24 @@ public class TestOzoneClientMultipartUploadWithFSO {
     byte[] data = generateData(10000000, (byte) 97);
 
     // Upload Parts
-    Map<Integer, String> partsMap = new TreeMap<>();
+    Map<Integer, String> eTagsMap = new TreeMap<>();
 
-    // Upload part 1 and add it to the partsMap for completing the upload.
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1, data);
-    partsMap.put(1, partName1);
+    // Upload part 1 and add it to the eTagsMap for completing the upload.
+    Pair<String, String> partNameAndETag1 = uploadPart(bucket, keyName,
+        uploadID, 1, data);
+    eTagsMap.put(1, partNameAndETag1.getValue());
 
-    // Upload part 2 and add it to the partsMap for completing the upload.
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2, data);
-    partsMap.put(2, partName2);
+    // Upload part 2 and add it to the eTagsMap for completing the upload.
+    Pair<String, String> partNameAndETag2 = uploadPart(bucket, keyName,
+        uploadID, 2, data);
+    eTagsMap.put(2, partNameAndETag2.getValue());
 
-    // Upload part 3 but do not add it to the partsMap.
+    // Upload part 3 but do not add it to the eTagsMap.
     uploadPart(bucket, keyName, uploadID, 3, data);
 
-    completeMultipartUpload(bucket, keyName, uploadID, partsMap);
+    completeMultipartUpload(bucket, keyName, uploadID, eTagsMap);
 
-    // Check the bucket size. Since part number 3 was not added to the 
partsMap,
+    // Check the bucket size. Since part number 3 was not added to the 
eTagsMap,
     // the unused part size should be discarded from the bucket size,
     // 30000000 - 10000000 = 20000000
     long bucketSize = volume.getBucket(bucketName).getUsedBytes();
@@ -457,6 +479,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, 1, uploadID);
     ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
@@ -465,10 +490,13 @@ public class TestOzoneClientMultipartUploadWithFSO {
     // Do not close output stream for part 2.
     ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, 2, uploadID);
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.write(data, 0, data.length);
 
     Map<Integer, String> partsMap = new LinkedHashMap<>();
-    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getETag());
     OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
         bucket.completeMultipartUpload(keyName,
             uploadID, partsMap);
@@ -541,12 +569,13 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
     String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
-    String partName = uploadPart(bucket, keyName, uploadID, 1,
-        "data".getBytes(UTF_8));
+    Pair<String, String> partNameAndETag = uploadPart(bucket, keyName, uploadID,
+        1, "data".getBytes(UTF_8));
 
     OMMetadataManager metadataMgr =
         cluster.getOzoneManager().getMetadataManager();
-    String multipartKey = verifyUploadedPart(uploadID, partName, metadataMgr);
+    String multipartKey = verifyUploadedPart(uploadID, partNameAndETag.getKey(),
+        metadataMgr);
 
     bucket.abortMultipartUpload(keyName, uploadID);
 
@@ -572,17 +601,17 @@ public class TestOzoneClientMultipartUploadWithFSO {
     Map<Integer, String> partsMap = new TreeMap<>();
     String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(1, partName1);
+    Pair<String, String> partNameAndETag1 = uploadPart(bucket, keyName,
+        uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partNameAndETag1.getKey());
 
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(2, partName2);
+    Pair<String, String> partNameAndETag2 = uploadPart(bucket, keyName,
+        uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partNameAndETag2.getKey());
 
-    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(3, partName3);
+    Pair<String, String> partNameAndETag3 = uploadPart(bucket, keyName,
+        uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partNameAndETag3.getKey());
 
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 3);
@@ -640,7 +669,6 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
       listPartNames.remove(partKeyName);
     }
-
     assertThat(listPartNames).withFailMessage("Wrong partKeyName format in DB!").isEmpty();
   }
 
@@ -662,17 +690,17 @@ public class TestOzoneClientMultipartUploadWithFSO {
     Map<Integer, String> partsMap = new TreeMap<>();
     String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(1, partName1);
+    Pair<String, String> partNameAndETag1 = uploadPart(bucket, keyName,
+        uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partNameAndETag1.getKey());
 
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(2, partName2);
+    Pair<String, String> partNameAndETag2 = uploadPart(bucket, keyName,
+        uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partNameAndETag2.getKey());
 
-    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(3, partName3);
+    Pair<String, String> partNameAndETag3 = uploadPart(bucket, keyName,
+        uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partNameAndETag3.getKey());
 
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 2);
@@ -734,8 +762,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
         bucket.listParts(keyName, uploadID, 100, 2);
 
     // Should return empty
-
     assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size());
+
     assertEquals(
         RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
         ozoneMultipartUploadPartListParts.getReplicationConfig());
@@ -870,27 +898,37 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
+    assertEquals(volumeName, multipartInfo.getVolumeName());
+    assertEquals(bucketName, multipartInfo.getBucketName());
+    assertEquals(kName, multipartInfo.getKeyName());
     assertNotNull(multipartInfo.getUploadID());
 
     return uploadID;
   }
 
-  private String uploadPart(OzoneBucket oBucket, String kName, String
-      uploadID, int partNumber, byte[] data) throws IOException {
+  private Pair<String, String> uploadPart(OzoneBucket oBucket, String kName,
+                                          String uploadID, int partNumber,
+                                          byte[] data)
+      throws IOException, NoSuchAlgorithmException {
 
     OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName,
         data.length, partNumber, uploadID);
-    ozoneOutputStream.write(data, 0,
-        data.length);
+    ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
         ozoneOutputStream.getCommitUploadPartInfo();
 
     assertNotNull(omMultipartCommitUploadPartInfo);
+    assertNotNull(omMultipartCommitUploadPartInfo.getETag());
+
     assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
 
-    return omMultipartCommitUploadPartInfo.getPartName();
+    return Pair.of(omMultipartCommitUploadPartInfo.getPartName(),
+        omMultipartCommitUploadPartInfo.getETag());
   }
 
   private void completeMultipartUpload(OzoneBucket oBucket, String kName,
@@ -899,6 +937,11 @@ public class TestOzoneClientMultipartUploadWithFSO {
         .completeMultipartUpload(kName, uploadID, partsMap);
 
     assertNotNull(omMultipartUploadCompleteInfo);
+    assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket
+        .getName());
+    assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket
+        .getVolumeName());
+    assertEquals(omMultipartUploadCompleteInfo.getKey(), kName);
     assertNotNull(omMultipartUploadCompleteInfo.getHash());
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 2869737907..f2efe84b9c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.client.rpc;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.time.Instant;
 import java.util.ArrayList;
@@ -38,6 +40,9 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Stream;
 
+import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec;
@@ -137,7 +142,9 @@ import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
 import static org.apache.hadoop.ozone.OzoneConsts.GB;
+import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME;
@@ -161,6 +168,7 @@ import static org.junit.jupiter.api.Assertions.fail;
 import static org.slf4j.event.Level.DEBUG;
 
 import org.apache.ozone.test.tag.Unhealthy;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.MethodOrderer;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.TestMethodOrder;
@@ -194,6 +202,12 @@ public abstract class TestOzoneRpcClientAbstract {
       READ, ACCESS);
   private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP,
       remoteGroupName, READ, ACCESS);
+  private static MessageDigest eTagProvider;
+
+  @BeforeAll
+  public static void initialize() throws NoSuchAlgorithmException {
+    eTagProvider = MessageDigest.getInstance(MD5_HASH);
+  }
 
   /**
    * Create a MiniOzoneCluster for testing.
@@ -1481,6 +1495,7 @@ public abstract class TestOzoneRpcClientAbstract {
         sampleData.length(), 1, uploadID);
     ozoneOutputStream.write(string2Bytes(sampleData), 0,
         sampleData.length());
+    ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData));
     ozoneOutputStream.close();
 
     assertEquals(valueLength, store.getVolume(volumeName)
@@ -2627,13 +2642,14 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         sampleData.length(), 1, uploadID);
     ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData));
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
         .getCommitUploadPartInfo();
 
     assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    assertNotNull(commitUploadPartInfo.getETag());
   }
 
   @ParameterizedTest
@@ -2661,6 +2677,7 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         sampleData.length(), partNumber, uploadID);
     ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData));
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
@@ -2668,7 +2685,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
     assertNotNull(commitUploadPartInfo);
     String partName = commitUploadPartInfo.getPartName();
-    assertNotNull(commitUploadPartInfo.getPartName());
+    assertNotNull(commitUploadPartInfo.getETag());
 
     // Overwrite the part by creating part key with same part number
     // and different content.
@@ -2676,13 +2693,14 @@ public abstract class TestOzoneRpcClientAbstract {
     ozoneOutputStream = bucket.createMultipartKey(keyName,
         sampleData.length(), partNumber, uploadID);
     ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
+    ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData));
     ozoneOutputStream.close();
 
     commitUploadPartInfo = ozoneOutputStream
         .getCommitUploadPartInfo();
 
     assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    assertNotNull(commitUploadPartInfo.getETag());
 
     // AWS S3 for same content generates same partName during upload part.
     // In AWS S3 ETag is generated from md5sum. In Ozone right now we
@@ -2808,12 +2826,13 @@ public abstract class TestOzoneRpcClientAbstract {
 
       // Upload part
       byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 1);
-      String partName = uploadPart(bucket, keyName2, uploadId, 1, data);
-      Map<Integer, String> partsMap = new TreeMap<>();
-      partsMap.put(1, partName);
+      Pair<String, String> partNameAndETag = uploadPart(bucket, keyName2,
+          uploadId, 1, data);
+      Map<Integer, String> eTagsMaps = new TreeMap<>();
+      eTagsMaps.put(1, partNameAndETag.getValue());
 
       // Complete multipart upload request
-      completeMultipartUpload(bucket2, keyName2, uploadId, partsMap);
+      completeMultipartUpload(bucket2, keyName2, uploadId, eTagsMaps);
 
       // User without permission cannot read multi-uploaded object
       OMException ex = assertThrows(OMException.class, () -> {
@@ -2863,21 +2882,21 @@ public abstract class TestOzoneRpcClientAbstract {
         anyReplication());
 
     // Upload Parts
-    Map<Integer, String> partsMap = new TreeMap<>();
+    Map<Integer, String> eTagsMaps = new TreeMap<>();
     // Uploading part 1 with less than min size
-    String partName = uploadPart(bucket, keyName, uploadID, 1,
-        "data".getBytes(UTF_8));
-    partsMap.put(1, partName);
+    Pair<String, String> partNameAndETag = uploadPart(bucket, keyName,
+        uploadID, 1, "data".getBytes(UTF_8));
+    eTagsMaps.put(1, partNameAndETag.getValue());
 
-    partName = uploadPart(bucket, keyName, uploadID, 2,
+    partNameAndETag = uploadPart(bucket, keyName, uploadID, 2,
         "data".getBytes(UTF_8));
-    partsMap.put(2, partName);
+    eTagsMaps.put(2, partNameAndETag.getValue());
 
 
     // Complete multipart upload
 
     OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+        () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps));
 
   }
   @Test
@@ -2924,11 +2943,11 @@ public abstract class TestOzoneRpcClientAbstract {
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
     // We have not uploaded any parts, but passing some list it should throw
     // error.
-    TreeMap<Integer, String> partsMap = new TreeMap<>();
-    partsMap.put(1, UUID.randomUUID().toString());
+    TreeMap<Integer, String> eTagsMaps = new TreeMap<>();
+    eTagsMaps.put(1, DigestUtils.md5Hex(UUID.randomUUID().toString()));
 
     OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+        () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps));
 
   }
 
@@ -2950,11 +2969,11 @@ public abstract class TestOzoneRpcClientAbstract {
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
     // We have not uploaded any parts, but passing some list it should throw
     // error.
-    TreeMap<Integer, String> partsMap = new TreeMap<>();
-    partsMap.put(3, "random");
+    TreeMap<Integer, String> eTagsMap = new TreeMap<>();
+    eTagsMap.put(3, DigestUtils.md5Hex("random"));
 
     OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART,
-        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+        () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap));
   }
 
   @Test
@@ -3053,6 +3072,9 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, 1, uploadID);
     ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.getMetadata().put(ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
@@ -3061,10 +3083,13 @@ public abstract class TestOzoneRpcClientAbstract {
     // Do not close output stream for part 2.
     ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, 2, omMultipartInfo.getUploadID());
+    ozoneOutputStream.getMetadata().put(ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.write(data, 0, data.length);
 
     Map<Integer, String> partsMap = new LinkedHashMap<>();
-    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getETag());
     OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
         bucket.completeMultipartUpload(keyName,
         uploadID, partsMap);
@@ -3134,17 +3159,17 @@ public abstract class TestOzoneRpcClientAbstract {
 
     Map<Integer, String> partsMap = new TreeMap<>();
     String uploadID = initiateMultipartUpload(bucket, keyName, replication);
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(1, partName1);
+    Pair<String, String> partNameAndETag1 = uploadPart(bucket, keyName,
+        uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partNameAndETag1.getKey());
 
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(2, partName2);
+    Pair<String, String> partNameAndETag2 = uploadPart(bucket, keyName,
+        uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partNameAndETag2.getKey());
 
-    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(3, partName3);
+    Pair<String, String> partNameAndETag3 = uploadPart(bucket, keyName,
+        uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partNameAndETag3.getKey());
 
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 3);
@@ -3185,17 +3210,17 @@ public abstract class TestOzoneRpcClientAbstract {
 
     Map<Integer, String> partsMap = new TreeMap<>();
     String uploadID = initiateMultipartUpload(bucket, keyName, replication);
-    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(1, partName1);
+    Pair<String, String> partNameAndETag1 = uploadPart(bucket, keyName,
+        uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partNameAndETag1.getKey());
 
-    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(2, partName2);
+    Pair<String, String> partNameAndETag2 = uploadPart(bucket, keyName,
+        uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partNameAndETag2.getKey());
 
-    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
-        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
-    partsMap.put(3, partName3);
+    Pair<String, String> partNameAndETag3 = uploadPart(bucket, keyName,
+        uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partNameAndETag3.getKey());
 
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 2);
@@ -3632,19 +3657,20 @@ public abstract class TestOzoneRpcClientAbstract {
     // than 5mb
     int length = 0;
     byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val);
-    String partName = uploadPart(bucket, keyName, uploadID, 1, data);
-    partsMap.put(1, partName);
+    Pair<String, String> partNameAndEtag = uploadPart(bucket, keyName, uploadID,
+        1, data);
+    partsMap.put(1, partNameAndEtag.getValue());
     length += data.length;
 
 
-    partName = uploadPart(bucket, keyName, uploadID, 2, data);
-    partsMap.put(2, partName);
+    partNameAndEtag = uploadPart(bucket, keyName, uploadID, 2, data);
+    partsMap.put(2, partNameAndEtag.getValue());
     length += data.length;
 
     String part3 = UUID.randomUUID().toString();
-    partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes(
+    partNameAndEtag = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes(
         UTF_8));
-    partsMap.put(3, partName);
+    partsMap.put(3, partNameAndEtag.getValue());
     length += part3.getBytes(UTF_8).length;
 
     // Complete multipart upload request
@@ -3701,20 +3727,26 @@ public abstract class TestOzoneRpcClientAbstract {
     return uploadID;
   }
 
-  private String uploadPart(OzoneBucket bucket, String keyName, String
-      uploadID, int partNumber, byte[] data) throws Exception {
+  private Pair<String, String> uploadPart(OzoneBucket bucket, String keyName,
+                                          String uploadID, int partNumber,
+                                          byte[] data) throws Exception {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, partNumber, uploadID);
     ozoneOutputStream.write(data, 0,
         data.length);
+    ozoneOutputStream.getMetadata().put(ETAG,
+        DatatypeConverter.printHexBinary(eTagProvider.digest(data))
+            .toLowerCase());
     ozoneOutputStream.close();
 
     OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
         ozoneOutputStream.getCommitUploadPartInfo();
 
     assertNotNull(omMultipartCommitUploadPartInfo);
+    assertNotNull(omMultipartCommitUploadPartInfo.getETag());
     assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
-    return omMultipartCommitUploadPartInfo.getPartName();
+    return Pair.of(omMultipartCommitUploadPartInfo.getPartName(),
+        omMultipartCommitUploadPartInfo.getETag());
 
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
index ffd80f359f..febb6fd41c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
@@ -24,12 +24,15 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.util.HashMap;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
+import javax.xml.bind.DatatypeConverter;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -37,6 +40,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.FaultInjector;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -166,7 +170,8 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
   }
 
   @Test
-  public void testMultiPartUploadWithStream() throws IOException {
+  public void testMultiPartUploadWithStream()
+      throws IOException, NoSuchAlgorithmException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String keyName = UUID.randomUUID().toString();
@@ -196,6 +201,9 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
         keyName, valueLength, 1, uploadID);
     ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0,
         valueLength);
+    ozoneStreamOutput.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH)
+            .digest(sampleData)).toLowerCase());
     ozoneStreamOutput.close();
 
     OzoneMultipartUploadPartListParts parts =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
index be2e0a9652..9c7a0a7032 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om;
 
+import javax.xml.bind.DatatypeConverter;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.RandomUtils;
@@ -52,6 +53,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
 import java.util.LinkedHashMap;
 import java.util.Map;
@@ -213,7 +216,8 @@ public class TestObjectStoreWithLegacyFS {
   }
 
   private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists(
-      OzoneBucket bucket, String keyName) throws IOException {
+      OzoneBucket bucket, String keyName)
+      throws IOException, NoSuchAlgorithmException {
     OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
         RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
 
@@ -226,6 +230,9 @@ public class TestObjectStoreWithLegacyFS {
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
         data.length, 1, uploadID);
     ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG,
+        DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH)
+            .digest(data)).toLowerCase());
     ozoneOutputStream.close();
 
     if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) {
@@ -245,7 +252,7 @@ public class TestObjectStoreWithLegacyFS {
         ozoneOutputStream.getCommitUploadPartInfo();
 
     Map<Integer, String> partsMap = new LinkedHashMap<>();
-    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getETag());
     OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
         bucket.completeMultipartUpload(keyName,
             uploadID, partsMap);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
index ab9f6382f0..63202805ec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.om;
 
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -24,6 +25,7 @@ import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
@@ -187,11 +189,12 @@ public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA {
     OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(
         keyName, value.length(), 1, uploadID);
     ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value));
     ozoneOutputStream.close();
 
 
     Map<Integer, String> partsMap = new HashMap<>();
-    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
+    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag());
     OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
         ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap);
 
@@ -362,7 +365,7 @@ public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA {
 
     for (int i = 0; i < partsMap.size(); i++) {
       assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()),
-          partInfoList.get(i).getPartName());
+          partInfoList.get(i).getETag());
 
     }
 
@@ -379,9 +382,10 @@ public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA {
     OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(
         keyName, value.length(), partNumber, uploadID);
     ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
+    ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value));
     ozoneOutputStream.close();
 
-    return ozoneOutputStream.getCommitUploadPartInfo().getPartName();
+    return ozoneOutputStream.getCommitUploadPartInfo().getETag();
   }
 
   @Test
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 6a1a51bc32..5c737fdad9 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1584,8 +1584,9 @@ message MultipartCommitUploadPartRequest {
 }
 
 message MultipartCommitUploadPartResponse {
-    // This one is returned as Etag for S3.
     optional string partName = 1;
+    // This one is returned as Etag for S3.
+    optional string eTag = 2;
 }
 
 message MultipartUploadCompleteRequest {
@@ -1603,6 +1604,7 @@ message MultipartUploadCompleteResponse {
 message Part {
     required uint32 partNumber = 1;
     required string partName = 2;
+    optional string eTag = 3;
 }
 
 message MultipartUploadAbortRequest {
@@ -1675,6 +1677,7 @@ message PartInfo {
     required string partName = 2;
     required uint64 modificationTime = 3;
     required uint64 size = 4;
+    optional string eTag = 5;
 }
 
 /**
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index d932ed1eff..2c9419e78d 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -121,6 +121,7 @@ import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SE
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT_DEFAULT;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT;
@@ -824,7 +825,10 @@ public class KeyManagerImpl implements KeyManager {
             OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(),
                 partName,
                 partKeyInfo.getPartKeyInfo().getModificationTime(),
-                partKeyInfo.getPartKeyInfo().getDataSize());
+                partKeyInfo.getPartKeyInfo().getDataSize(),
+                partKeyInfo.getPartKeyInfo().getMetadataList().stream()
+                    .filter(keyValue -> keyValue.getKey().equals(ETAG))
+                    .findFirst().get().getValue());
             omPartInfoList.add(omPartInfo);
 
             //if there are parts, use replication type from one of the parts
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index f461bbd171..a3e7840ccc 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -238,9 +238,13 @@ public class S3MultipartUploadCommitPartRequest extends 
OMKeyRequest {
           correctedSpace);
       omBucketInfo.incrUsedBytes(correctedSpace);
 
-      omResponse.setCommitMultiPartUploadResponse(
-          MultipartCommitUploadPartResponse.newBuilder()
-              .setPartName(partName));
+      MultipartCommitUploadPartResponse.Builder commitResponseBuilder = MultipartCommitUploadPartResponse.newBuilder()
+          .setPartName(partName);
+      String eTag = omKeyInfo.getMetadata().get(OzoneConsts.ETAG);
+      if (eTag != null) {
+        commitResponseBuilder.setETag(eTag);
+      }
+      omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder);
       omClientResponse =
           getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
               omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(),
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 99c98e3b48..83b46de7fd 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -27,6 +27,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
 
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.ratis.server.protocol.TermIndex;
@@ -80,6 +82,32 @@ public class S3MultipartUploadCompleteRequest extends 
OMKeyRequest {
   private static final Logger LOG =
       LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class);
 
+  private BiFunction<OzoneManagerProtocolProtos.Part, PartKeyInfo, MultipartCommitRequestPart> eTagBasedValidator =
+      (part, partKeyInfo) -> {
+        String eTag = part.getETag();
+        AtomicReference<String> dbPartETag = new AtomicReference<>();
+        String dbPartName = null;
+        if (partKeyInfo != null) {
+          partKeyInfo.getPartKeyInfo().getMetadataList()
+              .stream()
+              .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG))
+              .findFirst().ifPresent(kv -> dbPartETag.set(kv.getValue()));
+          dbPartName = partKeyInfo.getPartName();
+        }
+        return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null :
+            dbPartETag.get(), StringUtils.equals(eTag, dbPartETag.get()) || StringUtils.equals(eTag, dbPartName));
+      };
+  private BiFunction<OzoneManagerProtocolProtos.Part, PartKeyInfo, MultipartCommitRequestPart> partNameBasedValidator =
+      (part, partKeyInfo) -> {
+        String partName = part.getPartName();
+        String dbPartName = null;
+        if (partKeyInfo != null) {
+          dbPartName = partKeyInfo.getPartName();
+        }
+        return new MultipartCommitRequestPart(partName, partKeyInfo == null ? null :
+            dbPartName, StringUtils.equals(partName, dbPartName));
+      };
+
   public S3MultipartUploadCompleteRequest(OMRequest omRequest,
       BucketLayout bucketLayout) {
     super(omRequest, bucketLayout);
@@ -249,7 +277,7 @@ public class S3MultipartUploadCompleteRequest extends 
OMKeyRequest {
                 .setVolume(requestedVolume)
                 .setBucket(requestedBucket)
                 .setKey(keyName)
-                .setHash(omKeyInfo.getMetadata().get("ETag")));
+                .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG)));
 
         long volumeId = omMetadataManager.getVolumeId(volumeName);
         long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
@@ -389,7 +417,7 @@ public class S3MultipartUploadCompleteRequest extends 
OMKeyRequest {
           .setOmKeyLocationInfos(
               Collections.singletonList(keyLocationInfoGroup))
           .setAcls(dbOpenKeyInfo.getAcls())
-          .addMetadata("ETag",
+          .addMetadata(OzoneConsts.ETAG,
               multipartUploadedKeyHash(partKeyInfoMap));
       // Check if db entry has ObjectID. This check is required because
       // it is possible that between multipart key uploads and complete,
@@ -419,7 +447,7 @@ public class S3MultipartUploadCompleteRequest extends 
OMKeyRequest {
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       omKeyInfo.setDataSize(dataSize);
       omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig());
-      omKeyInfo.getMetadata().put("ETag",
+      omKeyInfo.getMetadata().put(OzoneConsts.ETAG,
           multipartUploadedKeyHash(partKeyInfoMap));
     }
     omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
@@ -491,24 +519,19 @@ public class S3MultipartUploadCompleteRequest extends 
OMKeyRequest {
       OzoneManager ozoneManager) throws OMException {
     long dataSize = 0;
     int currentPartCount = 0;
+    boolean eTagBasedValidationAvailable = partsList.stream().allMatch(OzoneManagerProtocolProtos.Part::hasETag);
     // Now do actual logic, and check for any Invalid part during this.
     for (OzoneManagerProtocolProtos.Part part : partsList) {
       currentPartCount++;
       int partNumber = part.getPartNumber();
-      String partName = part.getPartName();
-
       PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
-
-      String dbPartName = null;
-      if (partKeyInfo != null) {
-        dbPartName = partKeyInfo.getPartName();
-      }
-      if (!StringUtils.equals(partName, dbPartName)) {
-        String omPartName = partKeyInfo == null ? null : dbPartName;
+      MultipartCommitRequestPart requestPart = eTagBasedValidationAvailable ?
+          eTagBasedValidator.apply(part, partKeyInfo) : partNameBasedValidator.apply(part, partKeyInfo);
+      if (!requestPart.isValid()) {
         throw new OMException(
             failureMessage(requestedVolume, requestedBucket, keyName) +
-            ". Provided Part info is { " + partName + ", " + partNumber +
-            "}, whereas OM has partName " + omPartName,
+            ". Provided Part info is { " + requestPart.getRequestPartId() + ", 
" + partNumber +
+            "}, whereas OM has eTag " + requestPart.getOmPartId(),
             OMException.ResultCodes.INVALID_PART);
       }
 
@@ -641,11 +664,41 @@ public class S3MultipartUploadCompleteRequest extends 
OMKeyRequest {
       OmMultipartKeyInfo.PartKeyInfoMap partsList) {
     StringBuffer keysConcatenated = new StringBuffer();
     for (PartKeyInfo partKeyInfo: partsList) {
-      keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo
-          .getPartKeyInfo().getMetadataList()).get("ETag"));
+      String partPropertyToComputeHash = KeyValueUtil.getFromProtobuf(partKeyInfo.getPartKeyInfo().getMetadataList())
+          .get(OzoneConsts.ETAG);
+      if (partPropertyToComputeHash == null) {
+        partPropertyToComputeHash = partKeyInfo.getPartName();
+      }
+      keysConcatenated.append(partPropertyToComputeHash);
     }
     return DigestUtils.md5Hex(keysConcatenated.toString()) + "-"
         + partsList.size();
   }
 
+  private static class MultipartCommitRequestPart {
+    private String requestPartId;
+
+    private String omPartId;
+
+    private boolean isValid;
+
+    MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) {
+      this.requestPartId = requestPartId;
+      this.omPartId = omPartId;
+      this.isValid = isValid;
+    }
+
+    public String getRequestPartId() {
+      return requestPartId;
+    }
+
+    public String getOmPartId() {
+      return omPartId;
+    }
+
+    public boolean isValid() {
+      return isValid;
+    }
+  }
+
 }
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index 21b94ce5f0..1bd642fce7 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -19,12 +19,19 @@
 
 package org.apache.hadoop.ozone.om.request;
 
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.security.DigestInputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 
+import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -1005,14 +1012,31 @@ public final class OMRequestTestUtils {
       String bucketName, String keyName, long clientID, long size,
       String multipartUploadID, int partNumber) {
 
+    MessageDigest eTagProvider;
+    try {
+      eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH);
+    } catch (NoSuchAlgorithmException e) {
+      throw new RuntimeException(e);
+    }
+
     // Just set dummy size.
-    KeyArgs.Builder keyArgs =
-        KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName)
-            .setBucketName(bucketName)
-            .setDataSize(size)
-            .setMultipartNumber(partNumber)
-            .setMultipartUploadID(multipartUploadID)
-            .addAllKeyLocations(new ArrayList<>());
+    KeyArgs.Builder  keyArgs = KeyArgs.newBuilder().setVolumeName(volumeName)
+        .setKeyName(keyName)
+        .setBucketName(bucketName)
+        .setDataSize(size)
+        .setMultipartNumber(partNumber)
+        .setMultipartUploadID(multipartUploadID)
+        .addAllKeyLocations(new ArrayList<>())
+        .addMetadata(HddsProtos.KeyValue.newBuilder()
+            .setKey(OzoneConsts.ETAG)
+            .setValue(DatatypeConverter.printHexBinary(
+                new DigestInputStream(
+                    new ByteArrayInputStream(
+                        RandomStringUtils.randomAlphanumeric((int) size)
+                            .getBytes(StandardCharsets.UTF_8)),
+                    eTagProvider)
+                    .getMessageDigest().digest()))
+            .build());
     // Just adding dummy list. As this is for UT only.
 
     MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index 0a1ce8f724..34e32b0e18 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -133,9 +134,14 @@ public class TestS3MultipartUploadCompleteRequest
 
     List<Part> partList = new ArrayList<>();
 
-    String partName = getPartName(volumeName, bucketName, keyName,
-        multipartUploadID, 1);
-    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+    String eTag = s3MultipartUploadCommitPartRequest.getOmRequest()
+        .getCommitMultiPartUploadRequest()
+        .getKeyArgs()
+        .getMetadataList()
+        .stream()
+        .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG))
+        .findFirst().get().getValue();
+    
partList.add(Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1)
         .build());
 
     OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
@@ -223,10 +229,10 @@ public class TestS3MultipartUploadCompleteRequest
     String partName = getPartName(volumeName, bucketName, keyName,
         multipartUploadID, 23);
 
-    
partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23).build());
+    
partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(23).build());
 
     partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 
1);
-    
partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1).build());
+    
partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(1).build());
 
     OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
         bucketName, keyName, multipartUploadID, partList);
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 23b543b6ec..51963a00a1 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -287,7 +287,7 @@ public class TestS3MultipartResponse {
             .setStatus(status).setSuccess(true)
             .setCommitMultiPartUploadResponse(
                     
OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse
-                            .newBuilder().setPartName(volumeName)).build();
+                            
.newBuilder().setETag(volumeName).setPartName(volumeName)).build();
 
     return new S3MultipartUploadCommitPartResponseWithFSO(omResponse,
         multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo,
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
index 762d874056..9fc0f5c0c1 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
@@ -19,11 +19,13 @@
 
 package org.apache.hadoop.ozone.om.service;
 
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmTestManagers;
@@ -241,6 +243,8 @@ class TestMultipartUploadCleanupService {
               .setMultipartUploadID(omMultipartInfo.getUploadID())
               .setMultipartUploadPartNumber(i)
               .setAcls(Collections.emptyList())
+              .addMetadata(OzoneConsts.ETAG,
+                  DigestUtils.md5Hex(UUID.randomUUID().toString()))
               .setReplicationConfig(
                   StandaloneReplicationConfig.getInstance(ONE))
               .setLocationInfoList(Collections.emptyList())
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
index 418608e855..2ef6c341ae 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.ozone.om.service;
 
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -27,6 +28,7 @@ import 
org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.ExpiredOpenKeys;
 import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -473,6 +475,8 @@ class TestOpenKeyCleanupService {
                 .setReplicationConfig(RatisReplicationConfig.getInstance(
                     HddsProtos.ReplicationFactor.ONE))
                 .setLocationInfoList(Collections.emptyList())
+                .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID()
+                    .toString()))
                 .build();
 
         writeClient.commitMultipartUploadPart(commitPartKeyArgs,
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java
index 47b59cfcc0..8ae48ca4f8 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java
@@ -21,6 +21,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
+import org.apache.hadoop.ozone.OzoneConsts;
+
 import java.time.Instant;
 
 /**
@@ -37,7 +39,7 @@ public class KeyMetadata {
   @XmlElement(name = "LastModified")
   private Instant lastModified;
 
-  @XmlElement(name = "ETag")
+  @XmlElement(name = OzoneConsts.ETAG)
   private String eTag;
 
   @XmlElement(name = "Size")
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java
index 72289470c2..af5eafc9f4 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java
@@ -23,6 +23,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.ozone.OzoneConsts;
+
 import java.util.ArrayList;
 import java.util.List;
 
@@ -55,7 +57,7 @@ public class CompleteMultipartUploadRequest {
     @XmlElement(name = "PartNumber")
     private int partNumber;
 
-    @XmlElement(name = "ETag")
+    @XmlElement(name = OzoneConsts.ETAG)
     private String eTag;
 
     public int getPartNumber() {
@@ -66,12 +68,12 @@ public class CompleteMultipartUploadRequest {
       this.partNumber = partNumber;
     }
 
-    public String geteTag() {
+    public String getETag() {
       return eTag;
     }
 
-    public void seteTag(String eTag) {
-      this.eTag = eTag;
+    public void setETag(String eTagHash) {
+      this.eTag = eTagHash;
     }
   }
 
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java
index c636f36b17..2aa30d6b83 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java
@@ -22,6 +22,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 /**
  * Complete Multipart Upload request response.
@@ -41,7 +42,7 @@ public class CompleteMultipartUploadResponse {
   @XmlElement(name = "Key")
   private String key;
 
-  @XmlElement(name = "ETag")
+  @XmlElement(name = OzoneConsts.ETAG)
   private String eTag;
 
   public String getLocation() {
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java
index 6e114c2e0c..d1136fe9ed 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
 
 import javax.xml.bind.annotation.XmlAccessType;
@@ -39,7 +40,7 @@ public class CopyObjectResponse {
   @XmlElement(name = "LastModified")
   private Instant lastModified;
 
-  @XmlElement(name = "ETag")
+  @XmlElement(name = OzoneConsts.ETAG)
   private String eTag;
 
 
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
index c4e65aa38f..ab30c1f0e7 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
@@ -25,6 +25,7 @@ import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
 import java.time.Instant;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
 
 /**
@@ -39,7 +40,7 @@ public class CopyPartResult {
   @XmlElement(name = "LastModified")
   private Instant lastModified;
 
-  @XmlElement(name = "ETag")
+  @XmlElement(name = OzoneConsts.ETAG)
   private String eTag;
 
   public CopyPartResult() {
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 5694d6f9f4..5810c4ec2a 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -65,6 +65,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
 import static org.apache.hadoop.ozone.OzoneConsts.KB;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static 
org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
@@ -74,8 +75,6 @@ import static 
org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PR
  */
 public abstract class EndpointBase implements Auditor {
 
-  protected static final String ETAG = "ETag";
-
   protected static final String ETAG_CUSTOM = "etag-custom";
 
   @Inject
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java
index fc9da14133..8f3fad7354 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
 
 import javax.xml.bind.annotation.XmlAccessType;
@@ -154,7 +155,7 @@ public class ListPartsResponse {
     @XmlElement(name = "LastModified")
     private Instant lastModified;
 
-    @XmlElement(name = "ETag")
+    @XmlElement(name = OzoneConsts.ETAG)
     private String eTag;
 
 
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 1e247c8eb8..4a36ad9e62 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -101,6 +101,7 @@ import java.util.Map;
 import java.util.OptionalLong;
 
 import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH;
+import static javax.ws.rs.core.HttpHeaders.ETAG;
 import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
@@ -150,7 +151,7 @@ public class ObjectEndpoint extends EndpointBase {
   static {
     E_TAG_PROVIDER = ThreadLocal.withInitial(() -> {
       try {
-        return MessageDigest.getInstance("Md5");
+        return MessageDigest.getInstance(OzoneConsts.MD5_HASH);
       } catch (NoSuchAlgorithmException e) {
         throw new RuntimeException(e);
       }
@@ -807,7 +808,7 @@ public class ObjectEndpoint extends EndpointBase {
     OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
     try {
       for (CompleteMultipartUploadRequest.Part part : partList) {
-        partsMap.put(part.getPartNumber(), part.geteTag());
+        partsMap.put(part.getPartNumber(), part.getETag());
       }
       if (LOG.isDebugEnabled()) {
         LOG.debug("Parts map {}", partsMap);
@@ -955,6 +956,8 @@ public class ObjectEndpoint extends EndpointBase {
                   getMetrics().updateCopyKeyMetadataStats(startNanos);
               copyLength = IOUtils.copyLarge(
                   sourceObject, ozoneOutputStream, 0, length);
+              ozoneOutputStream.getMetadata()
+                  .putAll(sourceKeyDetails.getMetadata());
               keyOutputStream = ozoneOutputStream.getKeyOutputStream();
             }
           } else {
@@ -964,6 +967,8 @@ public class ObjectEndpoint extends EndpointBase {
               metadataLatencyNs =
                   getMetrics().updateCopyKeyMetadataStats(startNanos);
               copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream);
+              ozoneOutputStream.getMetadata()
+                  .putAll(sourceKeyDetails.getMetadata());
               keyOutputStream = ozoneOutputStream.getKeyOutputStream();
             }
           }
@@ -993,7 +998,7 @@ public class ObjectEndpoint extends EndpointBase {
       assert keyOutputStream != null;
       OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
           keyOutputStream.getCommitUploadPartInfo();
-      String eTag = omMultipartCommitUploadPartInfo.getPartName();
+      String eTag = omMultipartCommitUploadPartInfo.getETag();
 
       if (copyHeader != null) {
         getMetrics().updateCopyObjectSuccessStats(startNanos);
@@ -1064,7 +1069,7 @@ public class ObjectEndpoint extends EndpointBase {
       ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> {
         ListPartsResponse.Part part = new ListPartsResponse.Part();
         part.setPartNumber(partInfo.getPartNumber());
-        part.setETag(partInfo.getPartName());
+        part.setETag(partInfo.getETag());
         part.setSize(partInfo.getSize());
         part.setLastModified(Instant.ofEpochMilli(
             partInfo.getModificationTime()));
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
index e509acb05b..bbb743ee35 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
@@ -21,12 +21,11 @@ import javax.ws.rs.core.Response;
 import javax.xml.bind.DatatypeConverter;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.KeyMetadataAware;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics;
@@ -110,7 +109,7 @@ final class ObjectEndpointStreaming {
       eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest())
           .toLowerCase();
       perf.appendMetaLatencyNanos(metadataLatencyNs);
-      ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag);
+      ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, 
eTag);
     }
     return Pair.of(eTag, writeLen);
   }
@@ -161,11 +160,6 @@ final class ObjectEndpointStreaming {
       throws IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     String eTag;
-    // OmMultipartCommitUploadPartInfo can only be gotten after the
-    // OzoneDataStreamOutput is closed, so we need to save the
-    // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the
-    // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed.
-    KeyDataStreamOutput keyDataStreamOutput = null;
     try {
       try (OzoneDataStreamOutput streamOutput = ozoneBucket
           .createMultipartStreamKey(key, length, partNumber, uploadID)) {
@@ -174,11 +168,10 @@ final class ObjectEndpointStreaming {
             writeToStreamOutput(streamOutput, body, chunkSize, length);
         eTag = DatatypeConverter.printHexBinary(
             body.getMessageDigest().digest()).toLowerCase();
-        ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag);
+        ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, 
eTag);
         METRICS.incPutKeySuccessLength(putLength);
         perf.appendMetaLatencyNanos(metadataLatencyNs);
         perf.appendSizeBytes(putLength);
-        keyDataStreamOutput = streamOutput.getKeyDataStreamOutput();
       }
     } catch (OMException ex) {
       if (ex.getResult() ==
@@ -190,13 +183,7 @@ final class ObjectEndpointStreaming {
             ozoneBucket.getName() + "/" + key);
       }
       throw ex;
-    } finally {
-      if (keyDataStreamOutput != null) {
-        OmMultipartCommitUploadPartInfo commitUploadPartInfo =
-            keyDataStreamOutput.getCommitUploadPartInfo();
-        eTag = commitUploadPartInfo.getPartName();
-      }
     }
-    return Response.ok().header("ETag", eTag).build();
+    return Response.ok().header(OzoneConsts.ETAG, eTag).build();
   }
 }
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index fad3386c61..39ae9cc4af 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -23,6 +23,8 @@ import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -32,6 +34,7 @@ import java.util.TreeMap;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
+import javax.xml.bind.DatatypeConverter;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -53,6 +56,8 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.util.Time;
 
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
+import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
@@ -267,7 +272,8 @@ public class OzoneBucketStub extends OzoneBucket {
               byte[] bytes = new byte[position];
               buffer.get(bytes);
 
-              Part part = new Part(key + size, bytes);
+              Part part = new Part(key + size, bytes,
+                  getMetadata().get(ETAG));
               if (partList.get(key) == null) {
                 Map<Integer, Part> parts = new TreeMap<>();
                 parts.put(partNumber, part);
@@ -425,7 +431,7 @@ public class OzoneBucketStub extends OzoneBucket {
             @Override
             public void close() throws IOException {
               Part part = new Part(key + size,
-                  toByteArray());
+                  toByteArray(), getMetadata().get(ETAG));
               if (partList.get(key) == null) {
                 Map<Integer, Part> parts = new TreeMap<>();
                 parts.put(partNumber, part);
@@ -463,7 +469,7 @@ public class OzoneBucketStub extends OzoneBucket {
       for (Map.Entry<Integer, String> part: partsMap.entrySet()) {
         Part recordedPart = partsList.get(part.getKey());
         if (recordedPart == null ||
-            !recordedPart.getPartName().equals(part.getValue())) {
+            !recordedPart.getETag().equals(part.getValue())) {
           throw new OMException(ResultCodes.INVALID_PART);
         } else {
           output.write(recordedPart.getContent());
@@ -506,13 +512,21 @@ public class OzoneBucketStub extends OzoneBucket {
       int count = 0;
       int nextPartNumberMarker = 0;
       boolean truncated = false;
+      MessageDigest eTagProvider;
+      try {
+        eTagProvider = MessageDigest.getInstance(MD5_HASH);
+      } catch (NoSuchAlgorithmException e) {
+        throw new RuntimeException(e);
+      }
       while (count < maxParts && partIterator.hasNext()) {
         Map.Entry<Integer, Part> partEntry = partIterator.next();
         nextPartNumberMarker = partEntry.getKey();
         if (partEntry.getKey() > partNumberMarker) {
           PartInfo partInfo = new PartInfo(partEntry.getKey(),
               partEntry.getValue().getPartName(),
-              Time.now(), partEntry.getValue().getContent().length);
+              Time.now(), partEntry.getValue().getContent().length,
+              DatatypeConverter.printHexBinary(eTagProvider.digest(partEntry
+                  .getValue().getContent())).toLowerCase());
           partInfoList.add(partInfo);
           count++;
         }
@@ -563,9 +577,12 @@ public class OzoneBucketStub extends OzoneBucket {
     private String partName;
     private byte[] content;
 
-    public Part(String name, byte[] data) {
+    private String eTag;
+
+    public Part(String name, byte[] data, String eTag) {
       this.partName = name;
       this.content = data.clone();
+      this.eTag = eTag;
     }
 
     public String getPartName() {
@@ -575,6 +592,11 @@ public class OzoneBucketStub extends OzoneBucket {
     public byte[] getContent() {
       return content.clone();
     }
+
+    public String getETag() {
+      return eTag;
+    }
+
   }
 
   @Override
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java
index 7bb35682d8..b472320b7f 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java
@@ -21,6 +21,7 @@
 package org.apache.hadoop.ozone.client;
 
 import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 
@@ -65,6 +66,7 @@ public class OzoneDataStreamOutputStub extends 
OzoneDataStreamOutput {
 
   @Override
   public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    return closed ? new OmMultipartCommitUploadPartInfo(partName) : null;
+    return closed ? new OmMultipartCommitUploadPartInfo(partName,
+        getMetadata().get(OzoneConsts.ETAG)) : null;
   }
 }
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
index 9835160029..da2fb26ec8 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
@@ -22,6 +22,8 @@ package org.apache.hadoop.ozone.client;
 
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.io.KeyMetadataAware;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.StreamBufferArgs;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
@@ -93,7 +95,8 @@ public class OzoneOutputStreamStub extends OzoneOutputStream {
 
   @Override
   public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    return closed ? new OmMultipartCommitUploadPartInfo(partName) : null;
+    return closed ? new OmMultipartCommitUploadPartInfo(partName,
+        
((KeyMetadataAware)getOutputStream()).getMetadata().get(OzoneConsts.ETAG)) : 
null;
   }
 
 }
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java
index ab87f9c98e..cd0fbfed4e 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java
@@ -80,8 +80,8 @@ public class TestCompleteMultipartUploadRequestUnmarshaller {
     List<CompleteMultipartUploadRequest.Part> parts =
         request.getPartList();
 
-    assertEquals(part1, parts.get(0).geteTag());
-    assertEquals(part2, parts.get(1).geteTag());
+    assertEquals(part1, parts.get(0).getETag());
+    assertEquals(part2, parts.get(1).getETag());
   }
 
   private CompleteMultipartUploadRequest unmarshall(
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
index 3e8beb2c3a..677367e6d8 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
@@ -79,17 +79,17 @@ public class TestListParts {
     response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
         content.length(), 1, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
     response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
         content.length(), 2, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
     response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
         content.length(), 3, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
   }
 
   @Test
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
index eedee2855e..3c0c87a177 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
@@ -93,9 +93,9 @@ public class TestMultipartUploadComplete {
     Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(),
         partNumber, uploadID, body);
     assertEquals(200, response.getStatus());
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
     Part part = new Part();
-    part.seteTag(response.getHeaderString("ETag"));
+    part.setETag(response.getHeaderString(OzoneConsts.ETAG));
     part.setPartNumber(partNumber);
 
     return part;
@@ -202,7 +202,7 @@ public class TestMultipartUploadComplete {
 
     Part part1 = uploadPart(key, uploadID, partNumber, content);
     // Change part name.
-    part1.seteTag("random");
+    part1.setETag("random");
     partsList.add(part1);
 
     content = "Multipart Upload 2";
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
index a773b87579..d9595aeff7 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Scanner;
 
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -91,7 +92,11 @@ public class TestMultipartUploadWithCopy {
     try (OutputStream stream = bucket
         .createKey(EXISTING_KEY, keyContent.length,
             ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
-            ReplicationFactor.THREE), new HashMap<>())) {
+            ReplicationFactor.THREE),
+            new HashMap<String, String>() {{
+              put(OzoneConsts.ETAG, DigestUtils.md5Hex(EXISTING_KEY_CONTENT));
+            }}
+        )) {
       stream.write(keyContent);
     }
 
@@ -327,9 +332,9 @@ public class TestMultipartUploadWithCopy {
     Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(),
         partNumber, uploadID, body);
     assertEquals(200, response.getStatus());
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
     Part part = new Part();
-    part.seteTag(response.getHeaderString("ETag"));
+    part.setETag(response.getHeaderString(OzoneConsts.ETAG));
     part.setPartNumber(partNumber);
 
     return part;
@@ -377,7 +382,7 @@ public class TestMultipartUploadWithCopy {
     assertNotNull(result.getETag());
     assertNotNull(result.getLastModified());
     Part part = new Part();
-    part.seteTag(result.getETag());
+    part.setETag(result.getETag());
     part.setPartNumber(partNumber);
 
     return part;
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
index 90d490dea0..bb1b7037bd 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
@@ -90,7 +90,7 @@ public class TestPartUpload {
     response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
         content.length(), 1, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
   }
 
@@ -112,16 +112,16 @@ public class TestPartUpload {
     response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
         content.length(), 1, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
-    String eTag = response.getHeaderString("ETag");
+    String eTag = response.getHeaderString(OzoneConsts.ETAG);
 
     // Upload part again with same part Number, the ETag should be changed.
     content = "Multipart Upload Changed";
     response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
         content.length(), 1, uploadID, body);
-    assertNotNull(response.getHeaderString("ETag"));
-    assertNotEquals(eTag, response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
+    assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG));
 
   }
 
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
index 787aa6e877..775d5a1976 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.ozone.s3.endpoint;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
@@ -95,7 +96,7 @@ public class TestPartUploadWithStream {
     response = REST.put(S3BUCKET, S3KEY,
         content.length(), 1, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
   }
 
@@ -116,16 +117,16 @@ public class TestPartUploadWithStream {
     response = REST.put(S3BUCKET, S3KEY,
         content.length(), 1, uploadID, body);
 
-    assertNotNull(response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
-    String eTag = response.getHeaderString("ETag");
+    String eTag = response.getHeaderString(OzoneConsts.ETAG);
 
     // Upload part again with same part Number, the ETag should be changed.
     content = "Multipart Upload Changed";
     response = REST.put(S3BUCKET, S3KEY,
         content.length(), 1, uploadID, body);
-    assertNotNull(response.getHeaderString("ETag"));
-    assertNotEquals(eTag, response.getHeaderString("ETag"));
+    assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
+    assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG));
 
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to