[ 
https://issues.apache.org/jira/browse/HDDS-1849?focusedWorklogId=284602&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-284602
 ]

ASF GitHub Bot logged work on HDDS-1849:
----------------------------------------

                Author: ASF GitHub Bot
            Created on: 29/Jul/19 23:04
            Start Date: 29/Jul/19 23:04
    Worklog Time Spent: 10m 
      Work Description: bharatviswa504 commented on pull request #1181: 
HDDS-1849. Implement S3 Complete MPU request to use Cache and DoubleBuffer.
URL: https://github.com/apache/hadoop/pull/1181#discussion_r308471450
 
 

 ##########
 File path: 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
 ##########
 @@ -0,0 +1,314 @@
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import com.google.common.base.Optional;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart
+    .S3MultipartUploadCompleteResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .MultipartUploadCompleteResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .PartKeyInfo;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock
+    .Resource.BUCKET_LOCK;
+
+/**
+ * Handle Multipart upload complete request.
+ */
+public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class);
+
+  public S3MultipartUploadCompleteRequest(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
+    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
+        getOmRequest().getCompleteMultiPartUploadRequest();
+
+    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
+
+    return getOmRequest().toBuilder()
+        .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest
+            .toBuilder().setKeyArgs(keyArgs.toBuilder()
+                .setModificationTime(Time.now())))
+        .setUserInfo(getUserInfo()).build();
+
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long transactionLogIndex,
+      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
+    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
+        getOmRequest().getCompleteMultiPartUploadRequest();
+
+    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
+
+    List<OzoneManagerProtocolProtos.Part> partsList =
+        multipartUploadCompleteRequest.getPartsListList();
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    String uploadID = keyArgs.getMultipartUploadID();
+
+    ozoneManager.getMetrics().incNumCompleteMultipartUploads();
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+    OMResponse.Builder omResponse = OMResponse.newBuilder()
+        .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
+        .setStatus(OzoneManagerProtocolProtos.Status.OK)
+        .setSuccess(true);
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+    OmMultipartUploadList multipartUploadList = null;
+    try {
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.KEY,
+            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
+            volumeName, bucketName, keyName);
+      }
+
+      TreeMap<Integer, String> partsMap = new TreeMap<>();
 
 Review comment:
   Yes. It is done this way because, per S3 semantics, if the client sends a 
list of parts whose part numbers are not in monotonically increasing order 
(e.g. 1, 3, 2), we should throw a MISMATCH_MULTIPART_LIST error. Keeping the 
parts in a sorted map makes that check easy.
 


Issue Time Tracking
-------------------

    Worklog Id:     (was: 284602)
    Time Spent: 50m  (was: 40m)

> Implement S3 Complete MPU request to use Cache and DoubleBuffer
> ---------------------------------------------------------------
>
>                 Key: HDDS-1849
>                 URL: https://issues.apache.org/jira/browse/HDDS-1849
>             Project: Hadoop Distributed Data Store
>          Issue Type: Sub-task
>          Components: Ozone Manager
>            Reporter: Bharat Viswanadham
>            Assignee: Bharat Viswanadham
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 50m
>  Remaining Estimate: 0h
>
> Implement S3 Complete MPU request to use OM Cache, double buffer.
>  
> This Jira will add the changes to implement S3 bucket operations. HA and 
> non-HA will have different code paths for now, but once all requests are 
> implemented there will be a single code path (see the sketch below).
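 
For context, the cache/double-buffer pattern the description refers to can be 
sketched as follows. This is a simplified, self-contained illustration with 
hypothetical names (TableCacheSketch, addCacheEntry, flushUpTo), not the 
Ozone implementation: the request handler stages its result in an in-memory 
cache keyed by the transaction index, and the double buffer later flushes 
staged entries to the backing store in batches.
 
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 
 public class TableCacheSketch {
 
   // Durable table (RocksDB in Ozone; a plain map here).
   private final Map<String, String> backingStore = new ConcurrentHashMap<>();
 
   // Entries staged by validateAndUpdateCache, keyed by transaction index.
   private final ConcurrentSkipListMap<Long, Map.Entry<String, String>> cache =
       new ConcurrentSkipListMap<>();
 
   /** Stage a result at the given transaction index (the "cache" step). */
   void addCacheEntry(long transactionIndex, String key, String value) {
     cache.put(transactionIndex, Map.entry(key, value));
   }
 
   /** Reads must consult staged entries before the backing store. */
   String get(String key) {
     for (Map.Entry<String, String> e : cache.descendingMap().values()) {
       if (e.getKey().equals(key)) {
         return e.getValue();
       }
     }
     return backingStore.get(key);
   }
 
   /** The "double buffer" step: flush everything up to an index in a batch. */
   void flushUpTo(long transactionIndex) {
     Map<Long, Map.Entry<String, String>> flushed =
         cache.headMap(transactionIndex, true);
     flushed.values().forEach(e -> backingStore.put(e.getKey(), e.getValue()));
     flushed.clear(); // evict flushed entries; later reads hit the store
   }
 }
 
The point of the pattern is that the request path returns as soon as its 
entry is staged in the cache; durability is handled asynchronously by the 
batched flush.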



