Author: hairong
Date: Mon Nov 10 16:15:34 2008
New Revision: 712884
URL: http://svn.apache.org/viewvc?rev=712884&view=rev
Log:
Merge -r 712880:712881 from trunk to main to move the change of HADOOP-4556
into release 0.18.3.
Modified:
hadoop/core/branches/branch-0.18/CHANGES.txt
hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java
hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java
Modified: hadoop/core/branches/branch-0.18/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/CHANGES.txt?rev=712884&r1=712883&r2=712884&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.18/CHANGES.txt Mon Nov 10 16:15:34 2008
@@ -18,6 +18,8 @@
HADOOP-3883. Limit namenode to assign at most one generation stamp for
a particular block within a short period. (szetszwo)
+ HADOOP-4556. Block went missing. (hairong)
+
Release 0.18.2 - 2008-11-03
BUG FIXES
Modified:
hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java
URL:
http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java?rev=712884&r1=712883&r2=712884&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DatanodeDescriptor.java Mon Nov 10 16:15:34 2008
@@ -206,6 +206,7 @@
this.remaining = 0;
this.xceiverCount = 0;
this.blockList = null;
+ this.invalidateBlocks.clear();
}
int numBlocks() {
Modified:
hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java?rev=712884&r1=712883&r2=712884&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java Mon Nov 10 16:15:34 2008
@@ -858,7 +858,7 @@
LOG.info("Reducing replication for file " + src
+ ". New replication is " + replication);
for(int idx = 0; idx < fileBlocks.length; idx++)
- proccessOverReplicatedBlock(fileBlocks[idx], replication, null, null);
+ processOverReplicatedBlock(fileBlocks[idx], replication, null, null);
}
return true;
}
@@ -1301,20 +1301,40 @@
}
/**
+ * Remove a datanode from the invalidatesSet
+ * @param n datanode
+ */
+ private void removeFromInvalidates(DatanodeInfo n) {
+ recentInvalidateSets.remove(n.getStorageID());
+ }
+
+ /**
* Adds block to list of blocks which will be invalidated on
- * specified datanode.
+ * specified datanode and log the move
+ * @param b block
+ * @param n datanode
*/
private void addToInvalidates(Block b, DatanodeInfo n) {
+ addToInvalidatesNoLog(b, n);
+ NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
+ + b.getBlockName() + " is added to invalidSet of " + n.getName());
+ }
+
+ /**
+ * Adds block to list of blocks which will be invalidated on
+ * specified datanode
+ * @param b block
+ * @param n datanode
+ */
+ private void addToInvalidatesNoLog(Block b, DatanodeInfo n) {
Collection<Block> invalidateSet =
recentInvalidateSets.get(n.getStorageID());
if (invalidateSet == null) {
invalidateSet = new HashSet<Block>();
recentInvalidateSets.put(n.getStorageID(), invalidateSet);
}
invalidateSet.add(b);
- NameNode.stateChangeLog.info("BLOCK* NameSystem.delete: "
- + b.getBlockName() + " is added to invalidSet of " + n.getName());
}
-
+
/**
* Adds block to list of blocks which will be invalidated on
* all its datanodes.
@@ -2559,6 +2579,7 @@
void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) {
nodeDescr.resetBlocks();
+ removeFromInvalidates(nodeDescr);
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.unprotectedRemoveDatanode: "
+ nodeDescr.getName() + " is out of service now.");
@@ -2839,7 +2860,7 @@
updateNeededReplications(block, curReplicaDelta, 0);
}
if (numCurrentReplica > fileReplication) {
- proccessOverReplicatedBlock(block, fileReplication, node, delNodeHint);
+ processOverReplicatedBlock(block, fileReplication, node, delNodeHint);
}
// If the file replication has reached desired value
// we can remove any corrupt replicas the block may have
@@ -2920,7 +2941,7 @@
if (numCurrentReplica > expectedReplication) {
// over-replicated block
nrOverReplicated++;
- proccessOverReplicatedBlock(block, expectedReplication, null, null);
+ processOverReplicatedBlock(block, expectedReplication, null, null);
}
}
LOG.info("Total number of blocks = " + blocksMap.size());
@@ -2934,7 +2955,7 @@
* If there are any extras, call chooseExcessReplicates() to
* mark them in the excessReplicateMap.
*/
- private void proccessOverReplicatedBlock(Block block, short replication,
+ private void processOverReplicatedBlock(Block block, short replication,
DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
if(addedNode == delNodeHint) {
delNodeHint = null;
@@ -3066,14 +3087,9 @@
// should be deleted. Items are removed from the invalidate list
// upon giving instructions to the namenode.
//
- Collection<Block> invalidateSet = recentInvalidateSets.get(cur.getStorageID());
- if (invalidateSet == null) {
- invalidateSet = new ArrayList<Block>();
- recentInvalidateSets.put(cur.getStorageID(), invalidateSet);
- }
- invalidateSet.add(b);
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: "
- +"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
+ addToInvalidatesNoLog(b, cur);
+ NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: "
+ +"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
}
}
@@ -3115,6 +3131,7 @@
excessReplicateMap.remove(node.getStorageID());
}
}
+
// Remove the replica from corruptReplicas
corruptReplicas.removeFromCorruptReplicasMap(block, node);
}