HDFS-8975. Erasure coding : Fix random failure in TestSafeModeWithStripedFile (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce02b553
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce02b553
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce02b553

Branch: refs/heads/HDFS-7240
Commit: ce02b5532c3d506f8eee7af268216804815fa055
Parents: 96d6b51
Author: Vinayakumar B <vinayakum...@apache.org>
Authored: Sat Sep 12 13:29:25 2015 +0530
Committer: Vinayakumar B <vinayakum...@apache.org>
Committed: Sat Sep 12 13:29:25 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt        | 4 ++++
 .../org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java     | 5 +++++
 2 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce02b553/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index f49a974..47bab0b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -418,3 +418,7 @@
 
     HDFS-8853. Erasure Coding: Provide ECSchema validation when setting EC
     policy. (andreina via zhz)
+
+    HDFS-8975. Erasure coding : Fix random failure in TestSafeModeWithStripedFile
+    (J.Andreina via vinayakumarb)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce02b553/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 318eb9f..9ab0834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -53,6 +53,7 @@ public class TestSafeModeWithStripedFile {
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
     cluster.waitActive();
@@ -124,6 +125,7 @@ public class TestSafeModeWithStripedFile {
     // so the safe blocks count doesn't increment.
     for (int i = 0; i < minStorages - 1; i++) {
       cluster.restartDataNode(dnprops.remove(0));
+      cluster.waitActive();
       cluster.triggerBlockReports();
       assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
     }
@@ -131,17 +133,20 @@ public class TestSafeModeWithStripedFile {
     // the block of smallFile reaches minStorages,
     // so the safe blocks count increment.
     cluster.restartDataNode(dnprops.remove(0));
+    cluster.waitActive();
     cluster.triggerBlockReports();
     assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));
 
     // the 2 blocks of bigFile need DATA_BLK_NUM storages to be safe
     for (int i = minStorages; i < DATA_BLK_NUM - 1; i++) {
       cluster.restartDataNode(dnprops.remove(0));
+      cluster.waitActive();
       cluster.triggerBlockReports();
       assertTrue(nn.isInSafeMode());
     }
 
     cluster.restartDataNode(dnprops.remove(0));
+    cluster.waitActive();
     cluster.triggerBlockReports();
     assertFalse(nn.isInSafeMode());
   }
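
----------------------------------------------------------------------
The gist of the change, for reference: after each MiniDFSCluster.restartDataNode()
the test now calls waitActive() before triggerBlockReports(), so the restarted
DataNode has re-registered with the NameNode before a block report is forced, and
the lowered DFS_BLOCKREPORT_INTERVAL_MSEC_KEY keeps reports arriving promptly.
The sketch below only illustrates that restart-then-wait pattern; it is not part
of the patch, and the class and helper names (RestartThenWaitSketch,
restartAndReport) are illustrative, assuming the hadoop-hdfs test artifacts
(MiniDFSCluster) are on the classpath.

// Illustrative sketch of the restart-then-wait pattern added by HDFS-8975.
// Not the actual test; names below are hypothetical.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

public class RestartThenWaitSketch {

  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // As in the patch: report blocks frequently so the NameNode learns about
    // replicas soon after a DataNode comes back.
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);

    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();

      // Stop one DataNode, keeping its properties so it can be restarted later.
      DataNodeProperties dnProps = cluster.stopDataNode(0);

      restartAndReport(cluster, dnProps);
    } finally {
      cluster.shutdown();
    }
  }

  /**
   * The pattern the patch introduces: restart a DataNode, wait for the cluster
   * to become active again (i.e. the DataNode has re-registered), and only then
   * force block reports. Without waitActive(), the triggered report can race
   * with re-registration, which made the safe-block assertions flaky.
   */
  static void restartAndReport(MiniDFSCluster cluster, DataNodeProperties dnProps)
      throws IOException {
    cluster.restartDataNode(dnProps);
    cluster.waitActive();               // added by the patch before each report
    cluster.triggerBlockReports();
  }
}
----------------------------------------------------------------------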
