Author: szetszwo
Date: Thu Mar 24 20:27:23 2011
New Revision: 1085116

URL: http://svn.apache.org/viewvc?rev=1085116&view=rev
Log:
HDFS-1773. Do not show decommissioned datanodes on web and JMX interfaces.  
Contributed by Tanping Wang

Modified:
    hadoop/common/branches/branch-0.20-security/CHANGES.txt
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java

Modified: hadoop/common/branches/branch-0.20-security/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/CHANGES.txt?rev=1085116&r1=1085115&r2=1085116&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security/CHANGES.txt Thu Mar 24 20:27:23 2011
@@ -2,14 +2,23 @@ Hadoop Change Log
 
 Release 0.20.204.0 - unreleased
 
+  BUG FIXES
+
     HDFS-1592. At Startup, Valid volumes required in FSDataset doesn't
     handle consistently with volumes tolerated. (Bharath Mundlapudi)
 
+    HDFS-1598. Directory listing on hftp:// does not show
+    .*.crc files.  (szetszwo)
+
     HDFS-1750. ListPathsServlet should not use HdfsFileStatus.getLocalName()
     to get file name since it may return an empty string.  (szetszwo)
 
     HDFS-1758. Make Web UI JSP pages thread safe. (Tanping via suresh)
 
+    HDFS-1773. Do not show decommissioned datanodes, which are in neither the
+    include nor the exclude list, on web and JMX interfaces.
+    (Tanping Wang via szetszwo)
+
 Release 0.20.203.0 - unreleased
 
     HADOOP-7190. Add metrics v1 back for backwards compatibility. (omalley)
@@ -40,9 +49,6 @@ Release 0.20.203.0 - unreleased
     MAPREDUCE-2364. Don't hold the rjob lock while localizing resources. (ddas
     via omalley)
 
-    HDFS:1598. Directory listing on hftp:// does not show
-    .*.crc files.  (szetszwo)
-
     MAPREDUCE-2365. New counters for FileInputFormat (BYTES_READ) and 
     FileOutputFormat (BYTES_WRITTEN). 
     New counter MAP_OUTPUT_MATERIALIZED_BYTES for compressed MapOutputSize.

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1085116&r1=1085115&r2=1085116&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Mar 24 20:27:23 2011
@@ -5346,6 +5346,7 @@ public class FSNamesystem implements FSC
     final Map<String, Object> info = new HashMap<String, Object>();
     final ArrayList<DatanodeDescriptor> deadNodeList =
       this.getDatanodeListForReport(DatanodeReportType.DEAD); 
+    removeDecomNodeFromDeadList(deadNodeList);
     for (DatanodeDescriptor node : deadNodeList) {
       final Map<String, Object> innerinfo = new HashMap<String, Object>();
       innerinfo.put("lastContact", getLastContact(node));
@@ -5435,4 +5436,29 @@ public class FSNamesystem implements FSC
                     "fsck", src, null, null);
     }
   }
+
+
+  /**
+   * Remove decommissioned datanodes that appear in neither the include nor
+   * the exclude list from the dead node list.
+   * @param dead list of dead datanodes
+   */
+  void removeDecomNodeFromDeadList(ArrayList<DatanodeDescriptor> dead) {
+    // If the include list is empty, every node is welcome and it does not
+    // make sense to exclude any node from the cluster, so remove nothing.
+    if (hostsReader.getHosts().isEmpty()) {
+      return;
+    }
+    for (Iterator<DatanodeDescriptor> it = dead.iterator(); it.hasNext();) {
+      DatanodeDescriptor node = it.next();
+      if (!inHostsList(node, null)
+          && !inExcludedHostsList(node, null)
+          && node.isDecommissioned()) {
+        // The include list is not empty, yet this datanode appears in neither
+        // the include nor the exclude list and has been decommissioned.
+        // Remove it from the dead node list.
+        it.remove();
+      }
+    }
+  }
 }
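
For readers following the patch, the rule that the new FSNamesystem.removeDecomNodeFromDeadList() applies can be exercised on its own.  The sketch below is not part of the patch: the DeadNode class and the includes/excludes sets are hypothetical stand-ins for DatanodeDescriptor and the hosts files read through hostsReader, used only to mirror the same three checks.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

public class DecomFilterSketch {
  /** Hypothetical stand-in for DatanodeDescriptor: just a host name and a flag. */
  static final class DeadNode {
    final String host;
    final boolean decommissioned;
    DeadNode(String host, boolean decommissioned) {
      this.host = host;
      this.decommissioned = decommissioned;
    }
  }

  /** Drop decommissioned nodes that appear in neither the include nor the exclude list. */
  static void removeDecomNodesFromDeadList(List<DeadNode> dead,
      Set<String> includes, Set<String> excludes) {
    if (includes.isEmpty()) {
      return;  // an empty include list admits every node, so nothing is filtered
    }
    for (Iterator<DeadNode> it = dead.iterator(); it.hasNext();) {
      DeadNode n = it.next();
      if (!includes.contains(n.host) && !excludes.contains(n.host)
          && n.decommissioned) {
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    List<DeadNode> dead = new ArrayList<DeadNode>(Arrays.asList(
        new DeadNode("dn1.example.com", true),     // decommissioned, in neither list: removed
        new DeadNode("dn2.example.com", true),     // decommissioned but still excluded: kept
        new DeadNode("dn3.example.com", false)));  // included but not decommissioned: kept
    Set<String> includes = new HashSet<String>(Arrays.asList("dn3.example.com"));
    Set<String> excludes = new HashSet<String>(Arrays.asList("dn2.example.com"));
    removeDecomNodesFromDeadList(dead, includes, excludes);
    for (DeadNode n : dead) {
      System.out.println(n.host);  // prints dn2.example.com and dn3.example.com
    }
  }
}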

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java?rev=1085116&r1=1085115&r2=1085116&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java Thu Mar 24 20:27:23 2011
@@ -170,9 +170,12 @@ public class JspHelper {
   }
   public void DFSNodesStatus(ArrayList<DatanodeDescriptor> live,
                              ArrayList<DatanodeDescriptor> dead) {
-    if (fsn != null)
+    if (fsn != null) {
       fsn.DFSNodesStatus(live, dead);
+      fsn.removeDecomNodeFromDeadList(dead);  
+    }
   }
+
   public void addTableHeader(JspWriter out) throws IOException {
     out.print("<table border=\"1\""+
               " cellpadding=\"2\" cellspacing=\"2\">");

