anujmodi2021 commented on code in PR #7265:
URL: https://github.com/apache/hadoop/pull/7265#discussion_r1933332401


##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java:
##########
@@ -415,18 +415,19 @@ public FSDataOutputStream createNonRecursive(final Path 
f, final FsPermission pe
           ERR_CREATE_ON_ROOT,
           null);
     }
-    final Path parent = f.getParent();
+
     TracingContext tracingContext = new TracingContext(clientCorrelationId,
         fileSystemId, FSOperationType.CREATE_NON_RECURSIVE, 
tracingHeaderFormat,
         listener);
-    final FileStatus parentFileStatus = tryGetFileStatus(parent, 
tracingContext);
-
-    if (parentFileStatus == null) {
-      throw new FileNotFoundException("Cannot create file "
-          + f.getName() + " because parent folder does not exist.");
+    try {
+      Path qualifiedPath = makeQualified(f);
+      getAbfsStore().createNonRecursivePreCheck(qualifiedPath, tracingContext);
+      return create(f, permission, overwrite, bufferSize, replication,
+              blockSize, progress);
+    } catch (AzureBlobFileSystemException ex) {
+      checkException(f, ex);

Review Comment:
   checkException() was not being called earlier. Might this cause a difference in 
behavior?



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java:
##########
@@ -1124,8 +1144,11 @@ public void delete(final Path path, final boolean 
recursive,
       try (AbfsPerfInfo perfInfo = startTracking("delete", "deletePath")) {
         AbfsRestOperation op = getClient().deletePath(relativePath, recursive,
             continuation, tracingContext, 
getIsNamespaceEnabled(tracingContext));
-        perfInfo.registerResult(op.getResult());
-        continuation = 
op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION);
+        if (op != null) {

Review Comment:
   Same here. Why is this change needed?



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/BlobDeleteHandler.java:
##########
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.services;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
+import 
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import 
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+
+import static java.net.HttpURLConnection.HTTP_CONFLICT;
+import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
+import static 
org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.NON_EMPTY_DIRECTORY_DELETE;
+import static 
org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.PATH_NOT_FOUND;
+
+/**
+ * Orchestrator for delete over Blob endpoint. Blob endpoint for flat-namespace
+ * account does not support directory delete. This class is responsible for
+ * deleting the blobs and creating the parent directory marker file if needed.
+ */
+public class BlobDeleteHandler extends ListActionTaker {
+
+    private final Path path;
+
+    private final boolean recursive;
+
+    private boolean nonRecursiveDeleteDirectoryFailed = false;
+
+    private final TracingContext tracingContext;
+
+    private final AtomicInteger deleteCount = new AtomicInteger(0);
+
+    /** Constructor
+     *
+     * @param path path to delete.
+     * @param recursive if true, delete the path recursively.
+     * @param abfsBlobClient client to use for blob operations.
+     * @param tracingContext tracing context.
+     */
+    public BlobDeleteHandler(final Path path,
+                             final boolean recursive,
+                             final AbfsBlobClient abfsBlobClient,
+                             final TracingContext tracingContext) {
+        super(path, abfsBlobClient, tracingContext);
+        this.path = path;
+        this.recursive = recursive;
+        this.tracingContext = tracingContext;
+    }
+
+    /**{@inheritDoc}
+     *
+     * @return the maximum number of parallelism for delete operation.
+     */
+    @Override
+    int getMaxConsumptionParallelism() {
+        return getAbfsClient().getAbfsConfiguration()
+                .getBlobDeleteDirConsumptionParallelism();
+    }
+
+    /** Delete the path.
+     *
+     * @param path path to delete.
+     * @return true if the path is deleted.
+     * @throws AzureBlobFileSystemException server error.
+     */
+    private boolean deleteInternal(final Path path)
+            throws AzureBlobFileSystemException {
+        getAbfsClient().deleteBlobPath(path, null, tracingContext);

Review Comment:
   `deleteBlobPath` returns an AbfsRestOperation, but it is never used. Maybe we 
can update it to return void.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java:
##########
@@ -1063,11 +1085,6 @@ public boolean rename(final Path source,
     long countAggregate = 0;
     boolean shouldContinue;
 
-    if (isAtomicRenameKey(source.getName())) {

Review Comment:
   Why are we removing this warning?



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsBlobClient.java:
##########
@@ -68,9 +70,14 @@
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
 import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+import org.apache.hadoop.classification.VisibleForTesting;

Review Comment:
   fix import ordering



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsBlobClient.java:
##########
@@ -909,23 +1025,46 @@ public AbfsRestOperation read(final String path,
   }
 
   /**
-   * Orchestration for delete operation to be implemented.
+   * Get Rest Operation for API <a href = 
https://learn.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/delete></a>.

Review Comment:
   This is not an API implementation, so the Javadoc should say "Orchestration" only.
   This Javadoc will be needed for the actual API implementation of delete path.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java:
##########
@@ -1089,8 +1106,11 @@ public boolean rename(final Path source,
                   isNamespaceEnabled);
 
         AbfsRestOperation op = abfsClientRenameResult.getOp();
-        perfInfo.registerResult(op.getResult());
-        continuation = 
op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION);
+        if (op != null) {

Review Comment:
   Why would op be null now?
   Why is this change needed?



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java:
##########
@@ -290,5 +290,13 @@ public static ApiVersion getCurrentVersion() {
   public static final String JDK_FALLBACK = "JDK_fallback";
   public static final String KEEP_ALIVE_CACHE_CLOSED = "KeepAliveCache is 
closed";
 
+  public static final String COPY_STATUS_SUCCESS = "success";
+  public static final String COPY_STATUS_PENDING = "pending";
+  public static final String COPY_STATUS_ABORTED = "aborted";
+  public static final String COPY_STATUS_FAILED = "failed";
+
+  public static final String ATOMIC_DIR_RENAME_RECOVERY_ON_GET_PATH_EXCEPTION =

Review Comment:
   This does not seem like an HTTP constant. It's an error message; move it to 
`AbfsErrors`.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsBlobClient.java:
##########
@@ -38,6 +38,8 @@
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;
+import java.util.Arrays;

Review Comment:
   fix import ordering



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsBlobClient.java:
##########
@@ -499,21 +578,50 @@ public AbfsRestOperation breakLease(final String path,
    * @param isMetadataIncompleteState was there a rename failure due to
    *                                  incomplete metadata state?
    * @param isNamespaceEnabled        whether namespace enabled account or not
-   * @return result of rename operation
-   * @throws IOException if rename operation fails.
+   *
+   * @return AbfsClientRenameResult result of rename operation indicating the
+   * AbfsRest operation, rename recovery and incomplete metadata state failure.
+   *
+   * @throws IOException failure, excluding any recovery from overload 
failures.
    */
   @Override
   public AbfsClientRenameResult renamePath(final String source,
-      final String destination,
-      final String continuation,
-      final TracingContext tracingContext,
-      final String sourceEtag,
-      final boolean isMetadataIncompleteState,
-      final boolean isNamespaceEnabled) throws IOException {
-    /**
-     * TODO: [FnsOverBlob] To be implemented as part of rename-delete over 
blob endpoint work. <a 
href="https://issues.apache.org/jira/browse/HADOOP-19233";>HADOOP-19233</a>.
-     */
-    throw new NotImplementedException("Rename operation on Blob endpoint yet 
to be implemented.");
+                                           final String destination,

Review Comment:
   We should keep the same indentation format throughout the file.
   Please fix it for all of the methods added.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java:
##########
@@ -45,10 +46,10 @@
  * AbfsLease manages an Azure blob lease. It acquires an infinite lease on 
instantiation and
  * releases the lease when free() is called. Use it to prevent writes to the 
blob by other
  * processes that don't have the lease.
- *
+ * <p>

Review Comment:
   Don't we need a closing `</p>` tag?



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java:
##########
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.UnsupportedEncodingException;
+import java.io.FileNotFoundException;

Review Comment:
   fix import ordering



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to