Author: rangadi
Date: Tue May 12 23:03:13 2009
New Revision: 774125
URL: http://svn.apache.org/viewvc?rev=774125&view=rev
Log:
HADOOP-5780. Improve per block message printed by -metaSave in HDFS.
(Raghu Angadi)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
Modified: hadoop/core/trunk/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=774125&r1=774124&r2=774125&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Tue May 12 23:03:13 2009
@@ -338,6 +338,9 @@
current user.
(Rahul Kumar Singh via yhemanth)
+ HADOOP-5780. Improve per block message printed by "-metaSave" in HDFS.
+ (Raghu Angadi)
+
OPTIMIZATIONS
HADOOP-5595. NameNode does not need to run a replicator to choose a
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=774125&r1=774124&r2=774125&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
Tue May 12 23:03:13 2009
@@ -145,23 +145,35 @@
out.println("Metasave: Blocks waiting for replication: "
+ neededReplications.size());
for (Block block : neededReplications) {
- List<DatanodeDescriptor> containingNodes = new
ArrayList<DatanodeDescriptor>();
+ List<DatanodeDescriptor> containingNodes =
+ new ArrayList<DatanodeDescriptor>();
NumberReplicas numReplicas = new NumberReplicas();
// source node returned is not used
chooseSourceDatanode(block, containingNodes, numReplicas);
- int usableReplicas = numReplicas.liveReplicas()
- + numReplicas.decommissionedReplicas();
+ int usableReplicas = numReplicas.liveReplicas() +
+ numReplicas.decommissionedReplicas();
// l: == live:, d: == decommissioned c: == corrupt e: == excess
- out.print(block + " (replicas:" + " l: " + numReplicas.liveReplicas()
- + " d: " + numReplicas.decommissionedReplicas() + " c: "
- + numReplicas.corruptReplicas() + " e: "
- + numReplicas.excessReplicas()
- + ((usableReplicas > 0) ? "" : " MISSING") + ")");
-
- for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block);
jt
- .hasNext();) {
+ out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
+ " (replicas:" +
+ " l: " + numReplicas.liveReplicas() +
+ " d: " + numReplicas.decommissionedReplicas() +
+ " c: " + numReplicas.corruptReplicas() +
+ " e: " + numReplicas.excessReplicas() + ") ");
+
+ Collection<DatanodeDescriptor> corruptNodes =
+ corruptReplicas.getNodes(block);
+
+ for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block);
+ jt.hasNext();) {
DatanodeDescriptor node = jt.next();
- out.print(" " + node + " : ");
+ String state = "";
+ if (corruptNodes != null && corruptNodes.contains(node)) {
+ state = "(corrupt)";
+ } else if (node.isDecommissioned() ||
+ node.isDecommissionInProgress()) {
+ state = "(decommissioned)";
+ }
+ out.print(" " + node + state + " : ");
}
out.println("");
}