Revert "HDFS-12886. Ignore minReplication for block recovery. Contributed by Lukas Majercak."
This reverts commit d0f7050b9cbcba3d90d5a1fc7d46cac9ddc909e4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b7101a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b7101a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b7101a7

Branch: refs/heads/branch-2.9
Commit: 7b7101a72617d2f515116462dbb998b4b50ac2a8
Parents: d0f7050
Author: Inigo Goiri <[email protected]>
Authored: Fri Mar 16 14:26:32 2018 -0700
Committer: Inigo Goiri <[email protected]>
Committed: Fri Mar 16 14:26:32 2018 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  7 --
 .../hdfs/server/datanode/TestBlockRecovery.java | 83 --------------------
 2 files changed, 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b7101a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9319633..4a8c8c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -847,13 +847,6 @@ public class BlockManager implements BlockStatsMXBean {
         addExpectedReplicasToPending(lastBlock);
       }
       completeBlock(lastBlock, iip, false);
-    } else if (pendingRecoveryBlocks.isUnderRecovery(lastBlock)) {
-      // We've just finished recovery for this block, complete
-      // the block forcibly disregarding number of replicas.
-      // This is to ignore minReplication, the block will be closed
-      // and then replicated out.
-      completeBlock(lastBlock, iip, true);
-      updateNeededReconstructions(lastBlock, 1, 0);
     }
     return committed;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b7101a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 15a416d..1aa538d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -19,14 +19,9 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import org.apache.hadoop.hdfs.AppendTestUtil;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
@@ -47,7 +42,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
@@ -1093,81 +1087,4 @@ public class TestBlockRecovery {
       }
     }
   }
-
-  /**
-   * Test that block will be recovered even if there are less than the
-   * specified minReplication datanodes involved in its recovery.
-   *
-   * Check that, after recovering, the block will be successfully replicated.
-   */
-  @Test(timeout = 300000L)
-  public void testRecoveryWillIgnoreMinReplication() throws Exception {
-    tearDown(); // Stop the Mocked DN started in startup()
-
-    final int blockSize = 4096;
-    final int numReplicas = 3;
-    final String filename = "/testIgnoreMinReplication";
-    final Path filePath = new Path(filename);
-    Configuration configuration = new HdfsConfiguration();
-    configuration.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
-    configuration.setInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
-    configuration.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
-    MiniDFSCluster cluster = null;
-
-    try {
-      cluster = new MiniDFSCluster.Builder(configuration).numDataNodes(5)
-          .build();
-      cluster.waitActive();
-      final DistributedFileSystem dfs = cluster.getFileSystem();
-      final FSNamesystem fsn = cluster.getNamesystem();
-
-      // Create a file and never close the output stream to trigger recovery
-      FSDataOutputStream out = dfs.create(filePath, (short) numReplicas);
-      out.write(AppendTestUtil.randomBytes(0, blockSize));
-      out.hsync();
-
-      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
-          cluster.getNameNodePort()), configuration);
-      LocatedBlock blk = dfsClient.getNamenode().
-          getBlockLocations(filename, 0, blockSize).
-          getLastLocatedBlock();
-
-      // Kill 2 out of 3 datanodes so that only 1 alive, thus < minReplication
-      List<DatanodeInfo> dataNodes = Arrays.asList(blk.getLocations());
-      assertEquals(dataNodes.size(), numReplicas);
-      for (DatanodeInfo dataNode : dataNodes.subList(0, numReplicas - 1)) {
-        cluster.stopDataNode(dataNode.getName());
-      }
-
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          return fsn.getNumDeadDataNodes() == 2;
-        }
-      }, 300, 300000);
-
-      // Make sure hard lease expires to trigger replica recovery
-      cluster.setLeasePeriod(100L, 100L);
-
-      // Wait for recovery to succeed
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          try {
-            return dfs.isFileClosed(filePath);
-          } catch (IOException e) {}
-          return false;
-        }
-      }, 300, 300000);
-
-      // Wait for the block to be replicated
-      DFSTestUtil.waitForReplication(cluster, DFSTestUtil.getFirstBlock(
-          dfs, filePath), 1, numReplicas, 0);
-
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
