Author: rangadi
Date: Mon Jul 14 11:20:16 2008
New Revision: 676671
URL: http://svn.apache.org/viewvc?rev=676671&view=rev
Log:
HADOOP-1627. Various small improvements to 'dfsadmin -report' output.
(rangadi)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
Modified: hadoop/core/trunk/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=676671&r1=676670&r2=676671&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Jul 14 11:20:16 2008
@@ -54,6 +54,9 @@
IMPROVEMENTS
+ HADOOP-1627. Various small improvements to 'dfsadmin -report' output.
+ (rangadi)
+
HADOOP-3577. Tools to inject blocks into name node and simulated
data nodes for testing. (Sanjay Radia via hairong)
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=676671&r1=676670&r2=676671&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
Mon Jul 14 11:20:16 2008
@@ -243,7 +243,7 @@
return "DFS[" + dfs + "]";
}
- DFSClient getClient() {
+ public DFSClient getClient() {
return dfs;
}
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=676671&r1=676670&r2=676671&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
Mon Jul 14 11:20:16 2008
@@ -150,17 +150,18 @@
!NetworkTopology.DEFAULT_RACK.equals(location)) {
buffer.append("Rack: "+location+"\n");
}
+ buffer.append("Decommission Status : ");
if (isDecommissioned()) {
- buffer.append("State : Decommissioned\n");
+ buffer.append("Decommissioned\n");
} else if (isDecommissionInProgress()) {
- buffer.append("State : Decommission in progress\n");
+ buffer.append("Decommission in progress\n");
} else {
- buffer.append("State : In Service\n");
+ buffer.append("Normal\n");
}
buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
buffer.append("Remaining raw bytes: " +r+
"("+FsShell.byteDesc(r)+")"+"\n");
buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
- buffer.append("% used: "+FsShell.limitDecimalTo2(((1.0*u)/c)*100)+"%"+"\n");
+ buffer.append("% used: "+FsShell.limitDecimalTo2(100.0*u/(c+1e-10))+"%\n");
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
return buffer.toString();
}
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=676671&r1=676670&r2=676671&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
Mon Jul 14 11:20:16 2008
@@ -200,6 +200,7 @@
void resetBlocks() {
this.capacity = 0;
this.remaining = 0;
+ this.dfsUsed = 0;
this.xceiverCount = 0;
this.blockList = null;
}
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=676671&r1=676670&r2=676671&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
(original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Mon Jul 14 11:20:16 2008
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@
import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.fs.FileSystem;
@@ -166,7 +168,6 @@
long raw = ds.getCapacity();
long rawUsed = ds.getDfsUsed();
long remaining = ds.getRemaining();
- long used = dfs.getUsed();
boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
UpgradeStatusReport status =
dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
@@ -187,19 +188,25 @@
+ limitDecimalTo2(((1.0 * rawUsed) / raw) * 100)
+ "%");
System.out.println();
- System.out.println("Total effective bytes: " + used
- + " (" + byteDesc(used) + ")");
- System.out.println("Effective replication multiplier: "
- + (1.0 * rawUsed / used));
System.out.println("-------------------------------------------------");
- DatanodeInfo[] info = dfs.getDataNodeStats();
- System.out.println("Datanodes available: " + info.length);
- System.out.println();
- for (int i = 0; i < info.length; i++) {
- System.out.println(info[i].getDatanodeReport());
+
+ DatanodeInfo[] live = dfs.getClient().datanodeReport(
+ DatanodeReportType.LIVE);
+ DatanodeInfo[] dead = dfs.getClient().datanodeReport(
+ DatanodeReportType.DEAD);
+ System.out.println("Datanodes available: " + live.length +
+ " (" + (live.length + dead.length) + " total, " +
+ dead.length + " dead)\n");
+
+ for (DatanodeInfo dn : live) {
+ System.out.println(dn.getDatanodeReport());
System.out.println();
}
+ for (DatanodeInfo dn : dead) {
+ System.out.println(dn.getDatanodeReport());
+ System.out.println();
+ }
}
}