haiyang1987 commented on code in PR #6464:
URL: https://github.com/apache/hadoop/pull/6464#discussion_r1460160197


##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java:
##########
@@ -2011,4 +2011,83 @@ public void tesInvalidateMissingBlock() throws Exception {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testCheckFilesWhenInvalidateMissingBlock() throws Exception {
+    long blockSize = 1024;
+    int heartbeatInterval = 1;
+    HdfsConfiguration c = new HdfsConfiguration();
+    c.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, heartbeatInterval);
+    c.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(c).
+        numDataNodes(1).build();
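+    // Save the current injector so it can be restored after the test.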
+    DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
+    try {
+      cluster.waitActive();
+      BlockReaderTestUtil util = new BlockReaderTestUtil(cluster, new
+          HdfsConfiguration(conf));
+      Path path = new Path("/testFile");
+      util.writeFile(path, 1);
+      String bpid = cluster.getNameNode().getNamesystem().getBlockPoolId();
+      DataNode dn = cluster.getDataNodes().get(0);
+      FsDatasetImpl dnFSDataset = (FsDatasetImpl) dn.getFSDataset();
+      List<ReplicaInfo> replicaInfos = dnFSDataset.getFinalizedBlocks(bpid);
+      assertEquals(1, replicaInfos.size());
+      DFSTestUtil.readFile(cluster.getFileSystem(), path);
+      LocatedBlock blk = util.getFileBlocks(path, 512).get(0);
+      ExtendedBlock block = blk.getBlock();
+
+      // Append to the block with an incremented generation stamp.
+      long newGS = block.getGenerationStamp() + 1;
+      dnFSDataset.append(block, newGS, 1024);
+      block.setGenerationStamp(newGS);
+
+      DataNodeFaultInjector injector = new DataNodeFaultInjector() {
+        @Override
+        public void delayGetMetaDataInputStream() {
+          try {
+            Thread.sleep(8000);
+          } catch (InterruptedException e) {
+            // Ignore exception.
+          }
+        }
+      };
+      // Delay getMetaDataInputStream() via the fault injector.
+      DataNodeFaultInjector.set(injector);
+
+      ExecutorService executorService = Executors.newFixedThreadPool(2);
+      try {
+        Future<?> blockReaderFuture = executorService.submit(() -> {
+          try {
+            // Read the block; this hits the delayed getMetaDataInputStream.
+            BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), blk,
+                0, 512);

Review Comment:
   Thanks @smarthanwang for your review.
   `java.io.FileNotFoundException` will not be thrown here, because the exception is caught and written to the exception stack while the `DataXceiver` processes the read.

   If we want to assert on `FileNotFoundException`, we would probably need to invoke the `BlockSender` initialization directly.
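
   For reference, a rough sketch of that direct check (not part of this PR). It assumes the test can reach the package-private `BlockSender` constructor, i.e. it lives in `org.apache.hadoop.hdfs.server.datanode`, and it reuses the `block` and `dn` variables from the test above:

   ```java
   // Hypothetical sketch: bypass DataXceiver and construct BlockSender directly,
   // so a missing meta file surfaces as a FileNotFoundException in the test
   // instead of being swallowed into the DataXceiver exception stack.
   import java.io.FileNotFoundException;

   import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
   import org.apache.hadoop.test.LambdaTestUtils;

   // ... inside the test, after the block's files have been invalidated:
   LambdaTestUtils.intercept(FileNotFoundException.class, () ->
       new BlockSender(block, 0, 512,
           false,  // corruptChecksumOk
           true,   // verifyChecksum
           true,   // sendChecksum
           dn,     // the DataNode under test
           null,   // clientTraceFmt
           CachingStrategy.newDefaultStrategy()));
   ```
   The argument list follows the current `BlockSender` constructor signature; if that signature changes, the sketch needs the same update.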



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

