Repository: hadoop Updated Branches: refs/heads/branch-3.1 97b75f47f -> 02f8b5da4 refs/heads/trunk 6275b4287 -> e0ff8e2c1
HDFS-13840. RBW Blocks which are having less GS should be added to Corrupt. Contributed by Brahma Reddy Battula Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0ff8e2c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0ff8e2c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0ff8e2c Branch: refs/heads/trunk Commit: e0ff8e2c10f38298b81624df1e62743ac171f18d Parents: 6275b42 Author: Brahma Reddy Battula <[email protected]> Authored: Wed Sep 26 23:44:16 2018 +0530 Committer: Brahma Reddy Battula <[email protected]> Committed: Wed Sep 26 23:44:16 2018 +0530 ---------------------------------------------------------------------- .../server/blockmanagement/BlockManager.java | 19 ++++++++-- .../org/apache/hadoop/hdfs/MiniDFSCluster.java | 5 +++ .../TestRBWBlockInvalidation.java | 38 ++++++++++++++++++++ .../server/namenode/TestAddStripedBlocks.java | 4 +-- 4 files changed, 61 insertions(+), 5 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ff8e2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 675221a..5e14247 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -3086,10 +3086,20 @@ public class BlockManager implements BlockStatsMXBean { } case RBW: case RWR: + final long reportedGS = reported.getGenerationStamp(); if (!storedBlock.isComplete()) 
{ + //When the DN reports a lesser GS than the storedBlock, mark it as corrupt, + //as a valid replica will already be present. + if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) { + return new BlockToMarkCorrupt(new Block(reported), storedBlock, + reportedGS, + "reported " + reportedState + " replica with genstamp " + + reportedGS + + " does not match Stored block's genstamp in block map " + + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); + } return null; // not corrupt } else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) { - final long reportedGS = reported.getGenerationStamp(); return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS, "reported " + reportedState + " replica with genstamp " + reportedGS + " does not match COMPLETE block's genstamp in block map " @@ -3149,8 +3159,11 @@ public class BlockManager implements BlockStatsMXBean { block.getUnderConstructionFeature().addReplicaIfNotPresent( storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); - if (ucBlock.reportedState == ReplicaState.FINALIZED && - (block.findStorageInfo(storageInfo) < 0)) { + // Add replica if appropriate. If the replica was previously corrupt + // but now okay, it might need to be updated. 
+ if (ucBlock.reportedState == ReplicaState.FINALIZED && ( + block.findStorageInfo(storageInfo) < 0) || corruptReplicas + .isReplicaCorrupt(block, storageInfo.getDatanodeDescriptor())) { addStoredBlock(block, ucBlock.reportedBlock, storageInfo, null, true); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ff8e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 11265b8..29807ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -560,6 +560,11 @@ public class MiniDFSCluster implements AutoCloseable { public void setDnArgs(String ... 
args) { dnArgs = args; } + + public DataNode getDatanode() { + return datanode; + } + } private Configuration conf; http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ff8e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index 40f54cb..9c48eeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.io.Closeable; import java.io.IOException; @@ -27,6 +28,7 @@ import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -245,6 +247,42 @@ public class TestRBWBlockInvalidation { } + @Test + public void testRWRShouldNotAddedOnDNRestart() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", + "false"); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(2).build()) { + Path path = new Path("/testRBW"); + FSDataOutputStream out = cluster.getFileSystem().create(path, (short) 2); + 
out.writeBytes("old gs data\n"); + out.hflush(); + // stop one datanode + DataNodeProperties dnProp = cluster.stopDataNode(0); + String dnAddress = dnProp.getDatanode().getXferAddress().toString(); + if (dnAddress.startsWith("/")) { + dnAddress = dnAddress.substring(1); + } + //Write some more data after DN stopped. + out.writeBytes("old gs data\n"); + out.hflush(); + cluster.restartDataNode(dnProp, true); + // wait till the block report comes + Thread.sleep(3000); + // check the block locations, this should not contain restarted datanode + BlockLocation[] locations = cluster.getFileSystem() + .getFileBlockLocations(path, 0, Long.MAX_VALUE); + String[] names = locations[0].getNames(); + for (String node : names) { + if (node.equals(dnAddress)) { + fail("Old GS DN should not be present in latest block locations."); + } + } + out.close(); + } + } + private void waitForNumTotalBlocks(final MiniDFSCluster cluster, final int numTotalBlocks) throws Exception { GenericTestUtils.waitFor(new Supplier<Boolean>() { http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ff8e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index ec13b44..9374c04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -337,8 +337,8 @@ public class TestAddStripedBlocks { int i = groupSize - 1; for (DataNode dn : cluster.getDataNodes()) { String storageID = storageIDs.get(i); - final Block block = new Block(lastBlock.getBlockId() + i--, - 
lastBlock.getGenerationStamp(), 0); + final Block block = new Block(lastBlock.getBlockId() + i--, 0, + lastBlock.getGenerationStamp()); DatanodeStorage storage = new DatanodeStorage(storageID); List<ReplicaBeingWritten> blocks = new ArrayList<>(); ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
