Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c7f87dc2d -> 75bc53a86


HDFS-9279. Decommissioned capacity should not be considered for configured/used
capacity. Contributed by Kuhu Shukla.
(cherry picked from commit 19a77f546657b086af8f41fa631099bdde7e010c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75bc53a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75bc53a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75bc53a8

Branch: refs/heads/branch-2
Commit: 75bc53a86a846b3c528164105b91604a9da9c543
Parents: c7f87dc
Author: Kihwal Lee <kih...@apache.org>
Authored: Wed Oct 28 11:59:36 2015 -0500
Committer: Kihwal Lee <kih...@apache.org>
Committed: Wed Oct 28 11:59:36 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++++++-----
 .../apache/hadoop/hdfs/TestDecommission.java    | 47 +++++++++++++++++---
 3 files changed, 58 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bfba5d4..cdb10f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1344,6 +1344,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9302. WebHDFS throws NullPointerException if newLength is not
     provided. (Jagadesh Kiran N via yliu)
 
+    HDFS-9279. Decommissioned capacity should not be considered for
+    configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5c..4c39c41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-    capacityUsed += node.getDfsUsed();
-    blockPoolUsed += node.getBlockPoolUsed();
     xceiverCount += node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
       nodesInServiceXceiverCount += node.getXceiverCount();
       capacityTotal += node.getCapacity();
       capacityRemaining += node.getRemaining();
-    } else {
-      capacityTotal += node.getDfsUsed();
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
-    cacheCapacity += node.getCacheCapacity();
-    cacheUsed += node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-    capacityUsed -= node.getDfsUsed();
-    blockPoolUsed -= node.getBlockPoolUsed();
     xceiverCount -= node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
       nodesInServiceXceiverCount -= node.getXceiverCount();
       capacityTotal -= node.getCapacity();
       capacityRemaining -= node.getRemaining();
-    } else {
-      capacityTotal -= node.getDfsUsed();
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
-    cacheCapacity -= node.getCacheCapacity();
-    cacheUsed -= node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.subtractStorage(storageInfo, node);

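An aside on the new accounting rule (not spelled out in the patch itself): after this change, capacityUsed and blockPoolUsed join capacityTotal and capacityRemaining in counting only in-service datanodes, while the cache stats are still counted for decommission-in-progress nodes (which presumably can still serve cached reads) but not for fully decommissioned ones; subtract() mirrors add() with -= updates. A minimal sketch of that rule, using simplified illustrative names rather than the real DatanodeDescriptor API:

// Simplified model of the post-patch DatanodeStats.add() logic; field and
// parameter names are illustrative, not the actual HDFS types.
class StatsSketch {
  long capacityTotal, capacityUsed, capacityRemaining, blockPoolUsed;
  long cacheCapacity, cacheUsed;
  int nodesInService;

  void add(long capacity, long dfsUsed, long remaining, long bpUsed,
           long nodeCacheCapacity, long nodeCacheUsed,
           boolean decommissionInProgress, boolean decommissioned) {
    if (!(decommissionInProgress || decommissioned)) {
      // In-service nodes contribute to every aggregate.
      nodesInService++;
      capacityTotal += capacity;
      capacityUsed += dfsUsed;
      capacityRemaining += remaining;
      blockPoolUsed += bpUsed;
      cacheCapacity += nodeCacheCapacity;
      cacheUsed += nodeCacheUsed;
    } else if (!decommissioned) {
      // Decommission-in-progress nodes count only toward the cache
      // aggregates; fully decommissioned nodes contribute nothing.
      cacheCapacity += nodeCacheCapacity;
      cacheUsed += nodeCacheUsed;
    }
  }
}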
http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index a2ba8a9..0dce5d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -350,14 +351,13 @@ public class TestDecommission {
     for (int i = 0; i < 10; i++) {
       long[] newStats = namenode.getRpcServer().getStats();
 
-      // For decommissioning nodes, ensure capacity of the DN is no longer
-      // counted. Only used space of the DN is counted in cluster capacity
+      // For decommissioning nodes, ensure capacity and dfsUsed of the DN
+      // are no longer counted towards the total
       assertEquals(newStats[0],
-          decommissioning ? info.getDfsUsed() : info.getCapacity());
+          decommissioning ? 0 : info.getCapacity());
 
-      // Ensure cluster used capacity is counted for both normal and
-      // decommissioning nodes
-      assertEquals(newStats[1], info.getDfsUsed());
+      // Ensure cluster used capacity is counted for normal nodes only
+      assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
 
       // For decommissioning nodes, remaining space from the DN is not counted
       assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
@@ -1291,4 +1291,39 @@ public class TestDecommission {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUsedCapacity() throws Exception {
+    int numNamenodes = 1;
+    int numDatanodes = 2;
+
+    startCluster(numNamenodes, numDatanodes, conf);
+    cluster.waitActive();
+    FSNamesystem ns = cluster.getNamesystem(0);
+    BlockManager blockManager = ns.getBlockManager();
+    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager()
+        .getDatanodeStatistics();
+
+    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+        new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
+    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
+    // decommission one node
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
+        AdminStates.DECOMMISSIONED);
+    decommissionedNodes.add(decomNode);
+    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+
+    assertTrue("DfsUsedCapacity should not be the same after a node has " +
+        "been decommissioned!", initialUsedCapacity != newUsedCapacity);
+    assertTrue("TotalCapacity should not be the same after a node has " +
+        "been decommissioned!", initialTotalCapacity != newTotalCapacity);
+    assertTrue("BlockPoolUsed should not be the same after a node has " +
+        "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
+  }
 }
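A note on the assertions in the first TestDecommission hunk above: the long[] returned by namenode.getRpcServer().getStats() is indexed by the GET_STATS_* constants in org.apache.hadoop.hdfs.protocol.ClientProtocol, so newStats[0], newStats[1] and newStats[2] are capacity, used and remaining. A small reference sketch, assuming the branch-2 ClientProtocol constants are on the classpath:

import org.apache.hadoop.hdfs.protocol.ClientProtocol;

// Maps the newStats[] slots asserted above to their named indices.
class GetStatsIndexSketch {
  static void print(long[] stats) {
    System.out.println("capacity  = " + stats[ClientProtocol.GET_STATS_CAPACITY_IDX]);   // slot 0
    System.out.println("used      = " + stats[ClientProtocol.GET_STATS_USED_IDX]);       // slot 1
    System.out.println("remaining = " + stats[ClientProtocol.GET_STATS_REMAINING_IDX]);  // slot 2
  }
}

To exercise just the new test, something like "mvn test -Dtest=TestDecommission#testUsedCapacity -pl hadoop-hdfs-project/hadoop-hdfs" should work, assuming a surefire version that supports method-level test filters.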
