[
https://issues.apache.org/jira/browse/HADOOP-19233?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17913257#comment-17913257
]
ASF GitHub Bot commented on HADOOP-19233:
-----------------------------------------
anmolanmol1234 commented on code in PR #7265:
URL: https://github.com/apache/hadoop/pull/7265#discussion_r1916358749
##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/BlobRenameHandler.java:
##########
@@ -0,0 +1,652 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.services;
+
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
+import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode;
+import org.apache.hadoop.fs.azurebfs.enums.BlobCopyProgress;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+
+import static org.apache.hadoop.fs.azurebfs.AzureBlobFileSystemStore.extractEtagHeader;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.COPY_STATUS_ABORTED;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.COPY_STATUS_FAILED;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.COPY_STATUS_SUCCESS;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_COPY_ID;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_COPY_SOURCE;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_COPY_STATUS;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_COPY_STATUS_DESCRIPTION;
+import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.COPY_BLOB_ABORTED;
+import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.COPY_BLOB_FAILED;
+import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.RENAME_DESTINATION_PARENT_PATH_NOT_FOUND;
+
+/**
+ * Orchestrator for rename over Blob endpoint. Handles both directory and file
+ * renames. Blob Endpoint does not expose rename API, this class is responsible
+ * for copying the blobs and deleting the source blobs.
+ * <p>
+ * For directory rename, it recursively lists the blobs in the source directory
+ * and copies them to the destination directory.
+ */
+public class BlobRenameHandler extends ListActionTaker {
+
+ public static final Logger LOG = LoggerFactory.getLogger(AbfsClient.class);
+
+ private final String srcEtag;
+
+ private final Path src, dst;
+
+ private final boolean isAtomicRename, isAtomicRenameRecovery;
+
+ private final TracingContext tracingContext;
+
+ private AbfsLease srcAbfsLease;
+
+ private String srcLeaseId;
+
+ private final List<AbfsLease> leases = new ArrayList<>();
+
+ private final AtomicInteger operatedBlobCount = new AtomicInteger(0);
+
+ /** Constructor.
+ *
+ * @param src source path
+ * @param dst destination path
+ * @param abfsClient AbfsBlobClient to use for the rename operation
+ * @param srcEtag eTag of the source path
+ * @param isAtomicRename true if the rename operation is atomic
+   * @param isAtomicRenameRecovery true if the rename operation is a recovery
+   * of a previous failed atomic rename operation
+   * @param tracingContext object of tracingContext used for the tracing of
+   * the server calls.
+ */
+ public BlobRenameHandler(final String src,
+ final String dst,
+ final AbfsBlobClient abfsClient,
+ final String srcEtag,
+ final boolean isAtomicRename,
+ final boolean isAtomicRenameRecovery,
+ final TracingContext tracingContext) {
+ super(new Path(src), abfsClient, tracingContext);
+ this.srcEtag = srcEtag;
+ this.tracingContext = tracingContext;
+ this.src = new Path(src);
+ this.dst = new Path(dst);
+ this.isAtomicRename = isAtomicRename;
+ this.isAtomicRenameRecovery = isAtomicRenameRecovery;
+ }
+
+ public BlobRenameHandler(final String src,
Review Comment:
javadocs
> ABFS: [FnsOverBlob] Implementing Rename and Delete APIs over Blob Endpoint
> --------------------------------------------------------------------------
>
> Key: HADOOP-19233
> URL: https://issues.apache.org/jira/browse/HADOOP-19233
> Project: Hadoop Common
> Issue Type: Sub-task
> Components: fs/azure
> Affects Versions: 3.4.0
> Reporter: Anuj Modi
> Assignee: Manish Bhatt
> Priority: Major
> Labels: pull-request-available
>
> Currently, we only support rename and delete operations on the DFS endpoint.
> The reason for supporting rename and delete operations on the Blob endpoint
> is that the Blob endpoint does not account for hierarchy. We need to ensure
> that the HDFS contracts are maintained when performing rename and delete
> operations. Renaming or deleting a directory over the Blob endpoint requires
> the client to handle the orchestration and rename or delete all the blobs
> within the specified directory.
>
> The task outlines the considerations for implementing rename and delete
> operations for the FNS-blob endpoint to ensure compatibility with HDFS
> contracts.
> * {*}Blob Endpoint Usage{*}: The task addresses the need for abstraction in
> the code to maintain HDFS contracts while performing rename and delete
> operations on the blob endpoint, which does not support hierarchy.
> * {*}Rename Operations{*}: The {{AzureBlobFileSystem#rename()}} method will
> use a {{RenameHandler}} instance to handle rename operations, with separate
> handlers for the DFS and blob endpoints. This method includes prechecks,
> destination adjustments, and orchestration of directory renaming for blobs.
> * {*}Atomic Rename{*}: Atomic renaming is essential for blob endpoints, as
> it requires orchestration to copy or delete each blob within the directory. A
> configuration will allow developers to specify directories for atomic
> renaming, with a JSON file to track the status of renames.
> * {*}Delete Operations{*}: Delete operations are simpler than renames,
> requiring fewer HDFS contract checks. For blob endpoints, the client must
> handle orchestration, including managing orphaned directories created by
> Az-copy.
> * {*}Orchestration for Rename/Delete{*}: Orchestration for rename and delete
> operations over blob endpoints involves listing blobs and performing actions
> on each blob. The process must be optimized to handle large numbers of blobs
> efficiently.
> * {*}Need for Optimization{*}: Optimization is crucial because the
> {{ListBlob}} API can return a maximum of 5000 blobs at once, necessitating
> multiple calls for large directories. The task proposes a producer-consumer
> model to handle blobs in parallel, thereby reducing processing time and
> memory usage.
> * {*}Producer-Consumer Design{*}: The proposed design includes a producer to
> list blobs, a queue to store the blobs, and a consumer to process them in
> parallel. This approach aims to improve efficiency and mitigate memory issues.
> More details will follow
> Prerequisites for this Patch:
> 1. HADOOP-19187 ABFS: [FnsOverBlob]Making AbfsClient Abstract for supporting
> both DFS and Blob Endpoint - ASF JIRA (apache.org)
> 2. HADOOP-19226 ABFS: [FnsOverBlob]Implementing Azure Rest APIs on Blob
> Endpoint for AbfsBlobClient - ASF JIRA (apache.org)
> 3. HADOOP-19207 ABFS: [FnsOverBlob]Response Handling of Blob Endpoint APIs
> and Metadata APIs - ASF JIRA (apache.org)
--
This message was sent by Atlassian Jira
(v8.20.10#820010)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]