This is an automated email from the ASF dual-hosted git repository.

zanderxu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 3bd6a751edc HDFS-17177. ErasureCodingWork should ignore the deleted block while reconstructing blocks (#6024)
3bd6a751edc is described below

commit 3bd6a751edcb099cbd354b240684d4467209069d
Author: huhaiyang <huhaiyang...@126.com>
AuthorDate: Mon Sep 11 11:57:52 2023 +0800

    HDFS-17177. ErasureCodingWork should ignore the deleted block while reconstructing blocks (#6024)
---
 .../hdfs/server/blockmanagement/ErasureCodingWork.java | 18 +++++++++++++-----
 .../hdfs/server/blockmanagement/ReplicationWork.java   |  5 +++--
 2 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index 147f4c3fd62..b8c396696ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
@@ -48,7 +48,7 @@ class ErasureCodingWork extends BlockReconstructionWork {
     this.blockPoolId = blockPoolId;
     this.liveBlockIndices = liveBlockIndices;
     this.liveBusyBlockIndices = liveBusyBlockIndices;
-    this.excludeReconstructedIndices=excludeReconstrutedIndices;
+    this.excludeReconstructedIndices = excludeReconstrutedIndices;
     LOG.debug("Creating an ErasureCodingWork to {} reconstruct ",
         block);
   }
@@ -62,10 +62,18 @@ class ErasureCodingWork extends BlockReconstructionWork {
       BlockStoragePolicySuite storagePolicySuite,
       Set<Node> excludedNodes) {
     // TODO: new placement policy for EC considering multiple writers
-    DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
-        getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-        getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
-        storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+    DatanodeStorageInfo[] chosenTargets = null;
+    // HDFS-14720. If the block is deleted, the block size will become
+    // BlockCommand.NO_ACK (Long.MAX_VALUE). Such a block does not need to be
+    // sent for replication or reconstruction.
+    if (!getBlock().isDeleted()) {
+      chosenTargets = blockplacement.chooseTarget(
+          getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
+          getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
+          storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+    } else {
+      LOG.warn("ErasureCodingWork could not need choose targets for {}", 
getBlock());
+    }
     setTargets(chosenTargets);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 771751f21e0..15e5d5cdc27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.net.Node;
 
 import java.util.List;
@@ -47,11 +46,13 @@ class ReplicationWork extends BlockReconstructionWork {
       // HDFS-14720 If the block is deleted, the block size will become
      // BlockCommand.NO_ACK (LONG.MAX_VALUE) . This kind of block we don't need
       // to send for replication or reconstruction
-      if (getBlock().getNumBytes() != BlockCommand.NO_ACK) {
+      if (!getBlock().isDeleted()) {
         chosenTargets = blockplacement.chooseTarget(getSrcPath(),
             getAdditionalReplRequired(), getSrcNodes()[0],
             getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
             storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+      } else {
+        LOG.warn("ReplicationWork could not need choose targets for {}", 
getBlock());
       }
       setTargets(chosenTargets);
     } finally {

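The substance of the change above is a single guard: both ErasureCodingWork and ReplicationWork now ask the block whether it has been deleted before calling chooseTarget, instead of ReplicationWork alone comparing the block size to BlockCommand.NO_ACK. The following is a minimal, self-contained sketch of that guard pattern under stated assumptions; Block, chooseTargets, and chooseTargetsUnlessDeleted are illustrative stand-ins, not the real HDFS block or placement-policy types.

    // Illustrative sketch only: Block and chooseTargets are stand-ins for the
    // HDFS types used in the diff above, not the actual classes.
    import java.util.Collections;
    import java.util.List;

    public class DeletedBlockGuardSketch {

      /** Stand-in for a block; the real code calls getBlock().isDeleted(). */
      static final class Block {
        private final boolean deleted;

        Block(boolean deleted) {
          this.deleted = deleted;
        }

        boolean isDeleted() {
          return deleted;
        }
      }

      /** Stand-in for target selection: accept every live node as a target. */
      static List<String> chooseTargets(Block block, List<String> liveNodes) {
        return liveNodes;
      }

      /**
       * Mirrors the guard added in this commit: a deleted block gets no
       * targets, so no replication or reconstruction work is scheduled for it.
       */
      static List<String> chooseTargetsUnlessDeleted(Block block, List<String> liveNodes) {
        if (block.isDeleted()) {
          System.out.println("Skipping target selection for a deleted block");
          return Collections.emptyList();
        }
        return chooseTargets(block, liveNodes);
      }

      public static void main(String[] args) {
        List<String> liveNodes = List.of("dn1", "dn2", "dn3");
        System.out.println(chooseTargetsUnlessDeleted(new Block(false), liveNodes)); // [dn1, dn2, dn3]
        System.out.println(chooseTargetsUnlessDeleted(new Block(true), liveNodes));  // []
      }
    }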

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
