HDFS-6945. BlockManager should remove a block from excessReplicateMap and 
decrement ExcessBlocks metric when the block is removed. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71b43741
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71b43741
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71b43741

Branch: refs/heads/YARN-2928
Commit: 71b43741dde686725ac188ae91b14185989944a1
Parents: 40a1282
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Wed Apr 1 09:07:28 2015 +0900
Committer: Zhijie Shen <zjs...@apache.org>
Committed: Mon Apr 6 12:08:09 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../server/blockmanagement/BlockManager.java    | 22 ++++++++++++++++++--
 .../namenode/metrics/TestNameNodeMetrics.java   |  9 ++++++--
 3 files changed, 30 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b43741/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f3537b0..4247ea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -398,6 +398,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7997. The first non-existing xattr should also throw IOException.
     (zhouyingchao via yliu)
 
+    HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+    decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b43741/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f6e15a3..d9aee62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3351,8 +3351,7 @@ public class BlockManager {
     // file already removes them from the block map below.
     block.setNumBytes(BlockCommand.NO_ACK);
     addToInvalidates(block);
-    corruptReplicas.removeFromCorruptReplicasMap(block);
-    blocksMap.removeBlock(block);
+    removeBlockFromMap(block);
     // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
     neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3528,11 +3527,30 @@ public class BlockManager {
   }
 
   public void removeBlockFromMap(Block block) {
+    removeFromExcessReplicateMap(block);
     blocksMap.removeBlock(block);
     // If block is removed from blocksMap remove it from corruptReplicasMap
     corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+    for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+      String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+      LightWeightLinkedSet<Block> excessReplicas = excessReplicateMap.get(uuid);
+      if (excessReplicas != null) {
+        if (excessReplicas.remove(block)) {
+          excessBlocksCount.decrementAndGet();
+          if (excessReplicas.isEmpty()) {
+            excessReplicateMap.remove(uuid);
+          }
+        }
+      }
+    }
+  }
+
   public int getCapacity() {
     return blocksMap.getCapacity();
   }

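For context, the new removeFromExcessReplicateMap hunk above follows a common
remove-and-prune pattern: walk every datanode storage that holds the block,
drop the block from that node's excess set, decrement the shared metric, and
discard the per-node set once it is empty. Below is a minimal, self-contained
sketch of that pattern; it uses plain java.util collections in place of HDFS's
LightWeightLinkedSet and DatanodeStorageInfo, a long block ID in place of
Block, and the class and method names are illustrative only, not part of the
patch.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-in for BlockManager's excess-replica bookkeeping.
public class ExcessReplicaSketch {
  // datanode UUID -> IDs of blocks with excess replicas on that node
  private final Map<String, Set<Long>> excessReplicateMap = new HashMap<>();
  // counterpart of the ExcessBlocks gauge
  private final AtomicLong excessBlocksCount = new AtomicLong();

  // Record one excess replica of blockId on the given datanode.
  public void addExcess(String datanodeUuid, long blockId) {
    Set<Long> excess =
        excessReplicateMap.computeIfAbsent(datanodeUuid, k -> new HashSet<>());
    if (excess.add(blockId)) {
      excessBlocksCount.incrementAndGet();
    }
  }

  // Mirrors removeFromExcessReplicateMap: for each datanode that held the
  // block, remove it from that node's excess set, decrement the metric,
  // and prune the per-node set once empty so the outer map shrinks too.
  public void removeBlock(Iterable<String> datanodeUuids, long blockId) {
    for (String uuid : datanodeUuids) {
      Set<Long> excessReplicas = excessReplicateMap.get(uuid);
      if (excessReplicas != null && excessReplicas.remove(blockId)) {
        excessBlocksCount.decrementAndGet();
        if (excessReplicas.isEmpty()) {
          excessReplicateMap.remove(uuid);
        }
      }
    }
  }

  public long getExcessBlocksCount() {
    return excessBlocksCount.get();
  }
}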
http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b43741/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 64ea1e4..b390391 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -280,11 +280,16 @@ public class TestNameNodeMetrics {
   public void testExcessBlocks() throws Exception {
     Path file = getTestPath("testExcessBlocks");
     createFile(file, 100, (short)2);
-    long totalBlocks = 1;
     NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
-    assertGauge("ExcessBlocks", totalBlocks, rb);
+    assertGauge("ExcessBlocks", 1L, rb);
+
+    // verify ExcessBlocks metric is decremented and
+    // excessReplicateMap is cleared after deleting a file
     fs.delete(file, true);
+    rb = getMetrics(NS_METRICS);
+    assertGauge("ExcessBlocks", 0L, rb);
+    assertTrue(bm.excessReplicateMap.isEmpty());
   }
   
   /** Test to ensure metrics reflects missing blocks */
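
The updated test exercises that lifecycle end to end: lower the replication
factor to create a surplus, assert ExcessBlocks is 1, delete the file, then
assert the metric returns to 0 and excessReplicateMap is empty. The same flow
against the sketch above (again illustrative only, not MiniDFSCluster code):

import java.util.Collections;

public class ExcessReplicaSketchDemo {
  public static void main(String[] args) {
    ExcessReplicaSketch sketch = new ExcessReplicaSketch();
    // Replication lowered from 2 to 1: one replica of block 42 is excess.
    sketch.addExcess("dn-1", 42L);
    System.out.println(sketch.getExcessBlocksCount()); // 1

    // File deleted: the block is removed everywhere, metric drops to 0
    // and the map no longer retains an entry for the datanode.
    sketch.removeBlock(Collections.singletonList("dn-1"), 42L);
    System.out.println(sketch.getExcessBlocksCount()); // 0
  }
}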
