ashishkumar50 commented on code in PR #6496:
URL: https://github.com/apache/ozone/pull/6496#discussion_r1558837776


##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java:
##########
@@ -74,6 +77,70 @@ protected void checkDirectoryAlreadyExists(OzoneManager 
ozoneManager,
     }
   }
 
+  /**
+   * Creates the missing parent directories for the multipart key being
+   * completed: validates the bucket namespace quota, bumps the used
+   * namespace, populates the directory-table cache, and persists the new
+   * directory entries plus the updated bucket info in one batch write.
+   *
+   * NOTE(review): the putWithBatch/commitBatchOperation calls below write
+   * directly to the DB from the request path. In the OM HA flow, DB writes
+   * are normally applied through the response (double-buffer) so followers
+   * replay them too — confirm whether this belongs in
+   * S3MultipartUploadCompleteResponseWithFSO instead.
+   *
+   * @param omBucketInfo        bucket whose namespace quota is checked and
+   *                            incremented by the number of new directories
+   * @param missingParentInfos  parent directories that do not yet exist
+   * @param omMetadataManager   accessor for the directory/bucket tables and
+   *                            the underlying store
+   * @param volumeId            volume object id used to build path keys
+   * @param bucketId            bucket object id used to build path keys
+   * @param transactionLogIndex transaction index used for the cache entries
+   * @throws IOException if quota validation fails or the batch write fails
+   */
+  @Override
+  protected void addMissingParentsToTable(OmBucketInfo omBucketInfo,
+      List<OmDirectoryInfo> missingParentInfos,
+      OMMetadataManager omMetadataManager, long volumeId, long bucketId,
+      long transactionLogIndex) throws IOException {
+
+    // validate and update namespace for missing parent directory.
+    checkBucketQuotaInNamespace(omBucketInfo, missingParentInfos.size());
+    omBucketInfo.incrUsedNamespace(missingParentInfos.size());
+
+    // Add cache entries for the missing parent directories.
+    OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+        volumeId, bucketId, transactionLogIndex,
+        missingParentInfos, null);
+
+    // Create missing parent directory entries.
+    // All puts below share one batch so the directory entries and the
+    // bucket quota update land atomically.
+    try (BatchOperation batchOperation = omMetadataManager.getStore()
+        .initBatchOperation()) {
+      for (OmDirectoryInfo parentDirInfo : missingParentInfos) {
+        // Path key is (volumeId, bucketId, parentObjectID, name) — the
+        // FSO-layout addressing scheme for directory-table rows.
+        final String parentKey = omMetadataManager.getOzonePathKey(
+            volumeId, bucketId, parentDirInfo.getParentObjectID(),
+            parentDirInfo.getName());
+        omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+            parentKey, parentDirInfo);
+      }
+
+      // namespace quota changes for parent directory
+      String bucketKey = omMetadataManager.getBucketKey(
+          omBucketInfo.getVolumeName(),
+          omBucketInfo.getBucketName());
+      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+          bucketKey, omBucketInfo);
+
+      omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    }
+  }
+
+  @Override
+  protected void addMultiParttoOpenTable(
+      OMMetadataManager omMetadataManager, String multipartOpenKey,
+      OmMultipartKeyInfo multipartKeyInfo,
+      OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo,
+      long volumeId, long bucketId, long transactionLogIndex
+  ) throws IOException {
+
+    // Add multi part to cache
+    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+        multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
+        transactionLogIndex);
+
+    // Add multi part to open key table.
+    try (BatchOperation batchOperation = omMetadataManager.getStore()
+        .initBatchOperation()) {
+
+      OMFileRequest.addToOpenFileTableForMultipart(omMetadataManager,

Review Comment:
   Even this should be done in S3MultipartUploadCompleteResponseWithFSO instead
of here, for the same reason: otherwise the open-key DB write happens only on
the leader OM.



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java:
##########
@@ -74,6 +77,70 @@ protected void checkDirectoryAlreadyExists(OzoneManager 
ozoneManager,
     }
   }
 
+  @Override
+  protected void addMissingParentsToTable(OmBucketInfo omBucketInfo,
+      List<OmDirectoryInfo> missingParentInfos,
+      OMMetadataManager omMetadataManager, long volumeId, long bucketId,
+      long transactionLogIndex) throws IOException {
+
+    // validate and update namespace for missing parent directory.
+    checkBucketQuotaInNamespace(omBucketInfo, missingParentInfos.size());
+    omBucketInfo.incrUsedNamespace(missingParentInfos.size());
+
+    // Add cache entries for the missing parent directories.
+    OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+        volumeId, bucketId, transactionLogIndex,
+        missingParentInfos, null);
+
+    // Create missing parent directory entries.
+    try (BatchOperation batchOperation = omMetadataManager.getStore()
+        .initBatchOperation()) {
+      for (OmDirectoryInfo parentDirInfo : missingParentInfos) {
+        final String parentKey = omMetadataManager.getOzonePathKey(
+            volumeId, bucketId, parentDirInfo.getParentObjectID(),
+            parentDirInfo.getName());
+        omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,

Review Comment:
   I think this should be done in S3MultipartUploadCompleteResponseWithFSO
instead of here; otherwise the DB entry will be added only on the leader OM.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to