Author: kihwal
Date: Fri Feb 1 22:58:11 2013
New Revision: 1441656

URL: http://svn.apache.org/viewvc?rev=1441656&view=rev
Log:
merge -r 1311379:1311380 Merging from trunk to branch-0.23 to fix HDFS-3119
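In short, the bug: a file written with replication 2, hsync'ed, then set to replication 1 before close kept its extra replica indefinitely, because the close-time block check only looked for under-replication. The sketch below is an illustrative reproduction, mirroring the regression test added in this change; the class name Hdfs3119Repro is hypothetical, and the cluster setup assumes the MiniDFSCluster test harness:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Reproduction sketch for HDFS-3119 (illustrative; mirrors the new
    // regression test below, not code from this commit).
    public class Hdfs3119Repro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path p = new Path("/foo1");
          FSDataOutputStream out = fs.create(p, (short) 2); // ask for 2 replicas
          out.writeBytes("HDFS-3119: " + p); // write part of a block
          out.hsync();                       // sync, ...
          fs.setReplication(p, (short) 1);   // ... reduce the replication factor, ...
          out.close();                       // ... then close the file
          // Before this fix, the NameNode never invalidated the now-excess
          // replica: checkReplicationFactor() only handled under-replication.
        } finally {
          cluster.shutdown();
        }
      }
    }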
Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb 1 22:58:11 2013
@@ -30,6 +30,10 @@ Release 0.23.7 - UNRELEASED
     HDFS-4444. Add space between total transaction time and number of
     transactions in FSEditLog#printStatistics. (Stephen Chu via tgraves)
 
+    HDFS-3119. Overreplicated block is not deleted even after the replication
+    factor is reduced after sync followed by closing that file. (Ashish Singhi
+    via umamahesh)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Feb 1 22:58:11 2013
@@ -2468,7 +2468,7 @@ public class BlockManager {
     }
   }
 
-  public void checkReplication(Block block, int numExpectedReplicas) {
+  public void checkReplication(Block block, short numExpectedReplicas) {
     // filter out containingNodes that are marked for decommission.
     NumberReplicas number = countNodes(block);
     if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
@@ -2476,6 +2476,10 @@ public class BlockManager {
                              number.liveReplicas(),
                              number.decommissionedReplicas(),
                              numExpectedReplicas);
+      return;
+    }
+    if (number.liveReplicas() > numExpectedReplicas) {
+      processOverReplicatedBlock(block, numExpectedReplicas, null, null);
     }
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Feb 1 22:58:11 2013
@@ -1727,10 +1727,12 @@ public class FSNamesystem implements Nam
   /**
    * Check all blocks of a file. If any blocks are lower than their intended
-   * replication factor, then insert them into neededReplication
+   * replication factor, then insert them into neededReplication, and if
+   * any blocks exceed the intended replication factor, then insert them
+   * into invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    int numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Fri Feb 1 22:58:11 2013
@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
 
-import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,13 +37,15 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestOverReplicatedBlocks extends TestCase {
+public class TestOverReplicatedBlocks {
   /** Test processOverReplicatedBlock can handle corrupt replicas fine.
    * It makes sure that it won't treat corrupt replicas as valid ones,
    * thus preventing the NN from deleting valid replicas while keeping
    * corrupt ones.
    */
+  @Test
   public void testProcesOverReplicateBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
@@ -113,4 +116,30 @@ public class TestOverReplicatedBlocks ex
       cluster.shutdown();
     }
   }
+  /**
+   * Test that an over-replicated block gets invalidated when decreasing the
+   * replication factor for a partial block.
+   */
+  @Test
+  public void testInvalidateOverReplicatedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = namesystem.getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
+      FSDataOutputStream out = fs.create(p, (short) 2);
+      out.writeBytes("HDFS-3119: " + p);
+      out.hsync();
+      fs.setReplication(p, (short) 1);
+      out.close();
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
+      assertEquals("Expected only one live replica for the block", 1, bm
+          .countNodes(block.getLocalBlock()).liveReplicas());
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
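For readability, here is BlockManager.checkReplication as it reads after this change, pieced together from the two hunks above; note the opening line of the neededReplications.add(...) call sits outside the hunk context, so its exact form here is an assumption:

    // BlockManager.checkReplication after this change, assembled from the
    // two hunks above; the first line of the neededReplications.add(...)
    // call is not visible in the diff and is reconstructed here.
    public void checkReplication(Block block, short numExpectedReplicas) {
      // filter out containingNodes that are marked for decommission.
      NumberReplicas number = countNodes(block);
      if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
        neededReplications.add(block,
                               number.liveReplicas(),
                               number.decommissionedReplicas(),
                               numExpectedReplicas);
        return;  // under-replication takes precedence: queue and stop
      }
      if (number.liveReplicas() > numExpectedReplicas) {
        // new in this change: schedule excess replicas for invalidation
        processOverReplicatedBlock(block, numExpectedReplicas, null, null);
      }
    }

The early return keeps the two cases mutually exclusive: in a single pass a block is either queued for re-replication or checked for excess replicas, never both.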