Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a1a723fdf -> 0852d3562


HDFS-9281. Change TestDeleteBlockPool to not explicitly use File to check block pool existence. (lei)

(cherry picked from commit db975af5fae506eb3d586bd9201f76adb3fd1281)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0852d356
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0852d356
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0852d356

Branch: refs/heads/branch-2
Commit: 0852d3562a420375c704fbd2d60eb3c5007ebc5e
Parents: a1a723f
Author: Lei Xu <l...@apache.org>
Authored: Mon Dec 14 10:59:55 2015 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Mon Dec 14 11:01:48 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../server/datanode/FsDatasetTestUtils.java     | 16 +++++
 .../server/datanode/TestDeleteBlockPool.java    | 69 ++++----------------
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 39 +++++++++++
 4 files changed, 72 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0852d356/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be0ff3a..22a30aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -23,6 +23,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
     via jing9)
 
+    HDFS-9281. Change TestDeleteBlockPool to not explicitly use File to check
+    block pool existence. (lei)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0852d356/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 2a8119f..f5bf4e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -260,4 +260,20 @@ public interface FsDatasetTestUtils {
    * Get the number of pending async deletions.
    */
   long getPendingAsyncDeletions();
+
+  /**
+   * Verify the existence of the block pool.
+   *
+   * @param bpid block pool ID
+   * @throws IOException if the block pool does not exist.
+   */
+  void verifyBlockPoolExists(String bpid) throws IOException;
+
+  /**
+   * Verify that the block pool does not exist.
+   *
+   * @param bpid block pool ID
+   * @throws IOException if the block pool does exist.
+   */
+  void verifyBlockPoolMissing(String bpid) throws IOException;
 }
\ No newline at end of file
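
For readers skimming the interface change: a minimal sketch of how a test is
expected to drive these two methods (hypothetical test body; `cluster`, `dn1`
and `bpid1` stand in for a running MiniDFSCluster, one of its DataNodes, and
a block pool ID, mirroring the TestDeleteBlockPool changes below):

    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
    utils.verifyBlockPoolExists(bpid1);   // throws IOException if the pool is absent
    dn1.deleteBlockPool(bpid1, true);     // force-delete the block pool
    utils.verifyBlockPoolMissing(bpid1);  // throws IOException if it still exists

Both methods report failure by throwing, so the test no longer asserts on raw
File paths under the storage directories.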

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0852d356/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index bc15f51..e3b4267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -20,10 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -68,11 +66,6 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
-      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
-      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
-      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
-
       // Although namenode is shutdown, the bp offerservice is still running
       try {
         dn1.deleteBlockPool(bpid1, true);
@@ -92,21 +85,17 @@ public class TestDeleteBlockPool {
       } catch (IOException expected) {
       }
 
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
 
       dn1.deleteBlockPool(bpid1, true);
 
-      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid1);
      
       fs1.delete(new Path("/alpha"), true);
       
       // Wait till all blocks are deleted from the dn2 for bpid1.
-      File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
-      File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2);
-      while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
-          (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
+      while (cluster.getFsDatasetTestUtils(1).getStoredReplicas(bpid1)
+          .hasNext()) {
         try {
           Thread.sleep(3000);
         } catch (Exception ignored) {
@@ -124,22 +113,18 @@ public class TestDeleteBlockPool {
       
       dn2.refreshNamenodes(nn1Conf);
       assertEquals(1, dn2.getAllBpOs().size());
-      
-      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);
+
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolExists(bpid1);
       
       // Now deleteBlockPool must succeed with force as false, because no 
       // blocks exist for bpid1 and bpOfferService is also stopped for bpid1.
       dn2.deleteBlockPool(bpid1, false);
-      
-      verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
-      verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);
+
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolMissing(bpid1);
       
       //bpid2 must not be impacted
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
-      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolExists(bpid2);
       //make sure second block pool is running all fine
       Path gammaFile = new Path("/gamma");
       DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
@@ -178,9 +163,6 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
       
-      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
-      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
-      
       Configuration nn1Conf = cluster.getConfiguration(0);
       nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
       dn1.refreshNamenodes(nn1Conf);
@@ -193,19 +175,16 @@ public class TestDeleteBlockPool {
       int ret = admin.run(args);
       assertFalse(0 == ret);
 
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
       
       String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
       ret = admin.run(forceArgs);
       assertEquals(0, ret);
-      
-      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
+
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid2);
       
       //bpid1 remains good
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
       
     } finally {
       if (cluster != null) {
@@ -213,24 +192,4 @@ public class TestDeleteBlockPool {
       }
     }
   }
-  
-  private void verifyBlockPoolDirectories(boolean shouldExist,
-      File storageDir, String bpid) throws IOException {
-    File bpDir = new File(storageDir, DataStorage.STORAGE_DIR_CURRENT + "/"
-        + bpid);
-
-    if (shouldExist == false) {
-      assertFalse(bpDir.exists());
-    } else {
-      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
-      File finalizedDir = new File(bpCurrentDir,
-          DataStorage.STORAGE_DIR_FINALIZED);
-      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
-      File versionFile = new File(bpCurrentDir, "VERSION");
-
-      assertTrue(finalizedDir.isDirectory());
-      assertTrue(rbwDir.isDirectory());
-      assertTrue(versionFile.exists());
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0852d356/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index c85ca2b..f3c740a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
@@ -405,4 +406,42 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   public long getPendingAsyncDeletions() {
     return dataset.asyncDiskService.countPendingDeletions();
   }
+
+  @Override
+  public void verifyBlockPoolExists(String bpid) throws IOException {
+    FsVolumeImpl volume;
+    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
+      volume = (FsVolumeImpl) references.get(0);
+    }
+    File bpDir = new File(volume.getCurrentDir(), bpid);
+    File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
+    File finalizedDir = new File(bpCurrentDir,
+        DataStorage.STORAGE_DIR_FINALIZED);
+    File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
+    File versionFile = new File(bpCurrentDir, "VERSION");
+
+    if (!finalizedDir.isDirectory()) {
+      throw new IOException(finalizedDir.getPath() + " is not a directory.");
+    }
+    if (!rbwDir.isDirectory()) {
+      throw new IOException(rbwDir.getPath() + " is not a directory.");
+    }
+    if (!versionFile.exists()) {
+      throw new IOException(
+          "Version file: " + versionFile.getPath() + " does not exist.");
+    }
+  }
+
+  @Override
+  public void verifyBlockPoolMissing(String bpid) throws IOException {
+    FsVolumeImpl volume;
+    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
+      volume = (FsVolumeImpl) references.get(0);
+    }
+    File bpDir = new File(volume.getCurrentDir(), bpid);
+    if (bpDir.exists()) {
+      throw new IOException(
+          String.format("Block pool directory %s exists", bpDir));
+    }
+  }
 }
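
Note that both helpers inspect only the first volume (references.get(0)),
which is sufficient for these tests since every volume of the DataNode joins
or leaves the block pool together. If a test ever needed to verify every
volume, a variation along these lines would work (a sketch only, not part of
this patch; it assumes FsVolumeReferences' size()/get() accessors, of which
get() is already used above):

    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
      for (int i = 0; i < references.size(); i++) {
        FsVolumeImpl volume = (FsVolumeImpl) references.get(i);
        File bpDir = new File(volume.getCurrentDir(), bpid);
        // "missing" check shown here; invert the condition for "exists".
        if (bpDir.exists()) {
          throw new IOException(
              String.format("Block pool directory %s exists", bpDir));
        }
      }
    }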
