Hexiaoqiao commented on code in PR #5815:
URL: https://github.com/apache/hadoop/pull/5815#discussion_r1255363924
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java:
##########
@@ -1681,6 +1682,66 @@ public Boolean get() {
assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
}
+ /**
+ * Test for blockIdCK with datanode staleness.
+ */
+ @Test
+ public void testBlockIdCKStaleness() throws Exception {
+ final short replFactor = 1;
+ short numDn = 1;
+ final long blockSize = 512;
+ Configuration conf = new Configuration();
+
+ // Shorten dfs.namenode.stale.datanode.interval for easier testing.
+ conf.set(DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, String.valueOf(5000));
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+ conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
+
+ String[] racks = {"/rack1", "/rack2"};
+ String[] hosts = {"host1", "host2"};
+
+ File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+ cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+ .numDataNodes(numDn).hosts(hosts).racks(racks).build();
+ assertNotNull("Failed Cluster Creation", cluster);
+ cluster.waitClusterUp();
+ FileSystem fs = FileSystem.get(conf);
+ assertNotNull("Failed to get FileSystem", fs);
+
+ try {
+ DFSTestUtil util = new DFSTestUtil.Builder().
+ setName(getClass().getSimpleName()).setNumFiles(1).build();
+
+ // Create one file.
+ final String pathString = new String("/testfile");
+ final Path path = new Path(pathString);
+ util.createFile(fs, path, 1024L, replFactor, 1000L);
+ util.waitReplication(fs, path, replFactor);
+ StringBuilder sb = new StringBuilder();
+ for (LocatedBlock lb: util.getAllBlocks(fs, path)){
+ sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
+ }
+ String[] bIds = sb.toString().split(" ");
+
+ // Make sure datanode is HEALTHY before down.
+ String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
+ assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+ // Make the block on datanode go into stale.
+ cluster.stopDataNode(0);
+ Thread.sleep(7000);
Review Comment:
Please try to use `GenericTestUtils.waitFor` instead of `Thread.sleep`.
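For example, something along these lines might work (untested sketch; the `getNumStaleNodes` check below is just one possible way to detect staleness, any condition that reflects the stale state would do):

```java
// Stop the datanode (as in the current patch), then poll until the NameNode
// actually marks it stale instead of sleeping a fixed 7 seconds.
cluster.stopDataNode(0);
GenericTestUtils.waitFor(
    () -> cluster.getNameNode().getNamesystem().getBlockManager()
        .getDatanodeManager().getNumStaleNodes() > 0,
    500, 30000);
```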
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java:
##########
@@ -1681,6 +1682,66 @@ public Boolean get() {
assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
}
+ /**
+ * Test for blockIdCK with datanode staleness.
+ */
+ @Test
+ public void testBlockIdCKStaleness() throws Exception {
+ final short replFactor = 1;
+ short numDn = 1;
+ final long blockSize = 512;
+ Configuration conf = new Configuration();
+
+ // Shorten dfs.namenode.stale.datanode.interval for easier testing.
+ conf.set(DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, String.valueOf(5000));
Review Comment:
This should use `setLong` directly here.
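i.e., something like (sketch):

```java
// Set the stale interval as a long directly rather than via String.valueOf.
conf.setLong(DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 5000);
```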
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]