Repository: hadoop
Updated Branches:
  refs/heads/trunk 0be0f1ce1 -> f83716b7f


HDFS-13251. Avoid using hard coded datanode data dirs in unit tests. 
Contributed by Ajay Kumar.
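
All eight test files below make the same substitution: a data-directory name
computed by hand from MiniDFSCluster's on-disk layout is replaced with the
cluster's own accessor. As a minimal sketch of the equivalence (assuming the
default of two storage directories per datanode; dnIndex, volIndex, and the
class name are illustrative, not from the patch):

    import java.io.File;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class StorageDirSketch {
      // Old style: tests re-derived the name from the layout convention
      // that datanode i's volumes are data(2*i+1), data(2*i+2), ...
      static File oldStyle(MiniDFSCluster cluster, int dnIndex, int volIndex) {
        return new File(cluster.getDataDirectory(),
            "data" + (2 * dnIndex + volIndex + 1));
      }

      // New style: the same directory, resolved by the cluster itself, so
      // the test no longer breaks if the naming or dir count changes.
      static File newStyle(MiniDFSCluster cluster, int dnIndex, int volIndex) {
        return cluster.getInstanceStorageDir(dnIndex, volIndex);
      }
    }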


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f83716b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f83716b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f83716b7

Branch: refs/heads/trunk
Commit: f83716b7f2e5b63e4c2302c374982755233d4dd6
Parents: 0be0f1c
Author: Xiaoyu Yao <[email protected]>
Authored: Wed Mar 14 11:26:58 2018 -0700
Committer: Xiaoyu Yao <[email protected]>
Committed: Wed Mar 14 11:26:58 2018 -0700

----------------------------------------------------------------------
 .../blockmanagement/TestBlockStatsMXBean.java   |  7 ++--
 .../datanode/TestDataNodeHotSwapVolumes.java    |  9 +++---
 .../datanode/TestDataNodeVolumeFailure.java     | 20 ++++++------
 .../TestDataNodeVolumeFailureReporting.java     | 34 +++++++++-----------
 .../TestDataNodeVolumeFailureToleration.java    |  4 +--
 .../datanode/TestDataNodeVolumeMetrics.java     |  4 +--
 .../command/TestDiskBalancerCommand.java        |  6 ++--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 11 ++++---
 8 files changed, 46 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 11bfff8..81549a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -179,10 +179,9 @@ public class TestBlockStatsMXBean {
 
     storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
     assertEquals(3, storageTypeStats.getNodesInService());
-    String dataDir = cluster.getDataDirectory();
-    File dn1ArcVol1 = new File(dataDir, "data" + (3 * 0 + 2));
-    File dn2ArcVol1 = new File(dataDir, "data" + (3 * 1 + 2));
-    File dn3ArcVol1 = new File(dataDir, "data" + (3 * 2 + 2));
+    File dn1ArcVol1 = cluster.getInstanceStorageDir(0, 1);
+    File dn2ArcVol1 = cluster.getInstanceStorageDir(1, 1);
+    File dn3ArcVol1 = cluster.getInstanceStorageDir(2, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn2ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn3ArcVol1);
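
(A note on the arithmetic above: TestBlockStatsMXBean evidently runs three
storage directories per datanode, so the old expression "data" + (3 * i + 2)
named datanode i's second volume, the ARCHIVE one; getInstanceStorageDir(i, 1)
now resolves that same directory without encoding the layout.)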

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index df5e297..6530720 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -305,7 +305,6 @@ public class TestDataNodeHotSwapVolumes {
 
   private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
       throws ReconfigurationException, IOException, InterruptedException {
-    File dataDir = new File(cluster.getDataDirectory());
     DataNode dn = cluster.getDataNodes().get(0);  // First DataNode.
     Configuration conf = dn.getConf();
     String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
@@ -315,14 +314,14 @@ public class TestDataNodeHotSwapVolumes {
     int startIdx = oldDataDir.split(",").length + 1;
     // Find the first available (non-taken) directory name for data volume.
     while (true) {
-      File volumeDir = new File(dataDir, "data" + startIdx);
+      File volumeDir = cluster.getInstanceStorageDir(0, startIdx);
       if (!volumeDir.exists()) {
         break;
       }
       startIdx++;
     }
     for (int i = startIdx; i < startIdx + numNewVolumes; i++) {
-      File volumeDir = new File(dataDir, "data" + String.valueOf(i));
+      File volumeDir = cluster.getInstanceStorageDir(0, i);
       newVolumeDirs.add(volumeDir);
       volumeDir.mkdirs();
       newDataDirBuf.append(",");
@@ -985,7 +984,7 @@ public class TestDataNodeHotSwapVolumes {
 
     DataNode dn = cluster.getDataNodes().get(0);
     final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
-    File dirToFail = new File(cluster.getDataDirectory(), "data1");
+    File dirToFail = cluster.getInstanceStorageDir(0, 0);
 
     FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
     assertTrue("No FsVolume was found for " + dirToFail,
@@ -1037,7 +1036,7 @@ public class TestDataNodeHotSwapVolumes {
         InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
 
     // Remove a data dir from datanode
-    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    File dataDirToKeep = cluster.getInstanceStorageDir(0, 0);
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 6385367..c116ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -168,7 +168,7 @@ public class TestDataNodeVolumeFailure {
    
     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
-    data_fail = new File(dataDir, "data3");
+    data_fail = cluster.getInstanceStorageDir(1, 0);
     failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
         cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
@@ -235,7 +235,7 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short) 2);
 
-    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -298,8 +298,8 @@ public class TestDataNodeVolumeFailure {
     assumeNotWindows();
 
     // make both data directories to fail on dn0
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -322,8 +322,8 @@ public class TestDataNodeVolumeFailure {
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
 
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -366,8 +366,8 @@ public class TestDataNodeVolumeFailure {
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
 
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final File dn0VolNew = new File(dataDir, "data_new");
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
@@ -413,8 +413,8 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.waitReplication(fs, file1, (short)3);
 
     // Fail the first volume on both datanodes
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file2 = new Path("/test2");
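
(dn0VolNew in the hunk above is still built as new File(dataDir, "data_new"),
presumably because it is a fresh directory the test creates itself rather than
one of the cluster's managed instance storage dirs.)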

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index a3850ef..f7c716d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -78,7 +78,6 @@ public class TestDataNodeVolumeFailureReporting {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;
   private long volumeCapacity;
 
   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
@@ -134,10 +133,10 @@ public class TestDataNodeVolumeFailureReporting {
     final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn3Vol1 = new File(dataDir, "data"+(2*2+1));
-    File dn3Vol2 = new File(dataDir, "data"+(2*2+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn3Vol1 = cluster.getInstanceStorageDir(2, 0);
+    File dn3Vol2 = cluster.getInstanceStorageDir(2, 1);
 
     /*
      * Make the 1st volume directories on the first two datanodes
@@ -275,8 +274,8 @@ public class TestDataNodeVolumeFailureReporting {
 
     // Fail the first volume on both datanodes (we have to keep the 
     // third healthy so one node in the pipeline will not fail). 
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file1 = new Path("/test1");
@@ -317,10 +316,10 @@ public class TestDataNodeVolumeFailureReporting {
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
-    File dn1Vol1 = new File(dataDir, "data"+(4*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(4*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(4*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(4*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);
 
     // Make the first two volume directories on the first two datanodes
     // non-accessible.
@@ -376,10 +375,10 @@ public class TestDataNodeVolumeFailureReporting {
 
     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(2*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(2*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
@@ -528,8 +527,8 @@ public class TestDataNodeVolumeFailureReporting {
   @Test
   public void testHotSwapOutFailedVolumeAndReporting()
           throws Exception {
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
             DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -777,7 +776,6 @@ public class TestDataNodeVolumeFailureReporting {
         .storagesPerDatanode(storagesPerDatanode).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(
         cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
     volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 46f9bf7..9a09570 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -50,7 +50,6 @@ public class TestDataNodeVolumeFailureToleration {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;
 
   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
   // for heartbeats to propagate from the datanodes to the namenode.
@@ -80,7 +79,6 @@ public class TestDataNodeVolumeFailureToleration {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
   }
 
   @After
@@ -161,7 +159,7 @@ public class TestDataNodeVolumeFailureToleration {
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
     // Fail a volume on the 2nd DN
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn2Vol1 = cluster.getStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
     // Should only get two replicas (the first DN and the 3rd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 0f41d23..aa9a707 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -102,9 +102,7 @@ public class TestDataNodeVolumeMetrics {
 
       ArrayList<DataNode> dns = cluster.getDataNodes();
       assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-
-      final String dataDir = cluster.getDataDirectory();
-      final File dn1Vol2 = new File(dataDir, "data2");
+      final File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
 
       DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
       verifyDataNodeVolumeMetrics(fs, cluster, fileName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index f3100ea..8266c1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -615,13 +615,15 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString("/dfs/data/data1"),
+            containsString(cluster.getInstanceStorageDir(0, 0)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
-            containsString("/dfs/data/data2"),
+            containsString(cluster.getInstanceStorageDir(0, 1)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83716b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 1365b1a..34d34b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -349,11 +349,14 @@ public class TestDFSAdmin {
           containsString("FAILED: Change property " +
               DFS_DATANODE_DATA_DIR_KEY));
     }
+    File dnDir0 = cluster.getInstanceStorageDir(0, 0);
+    File dnDir1 = cluster.getInstanceStorageDir(0, 1);
     assertThat(outs.get(offset + 1),
-        is(allOf(containsString("From:"), containsString("data1"),
-            containsString("data2"))));
-    assertThat(outs.get(offset + 2),
-        is(not(anyOf(containsString("data1"), containsString("data2")))));
+        is(allOf(containsString("From:"), containsString(dnDir0.getName()),
+            containsString(dnDir1.getName()))));
+    assertThat(outs.get(offset + 2), is(not(
+        anyOf(containsString(dnDir0.getName()),
+            containsString(dnDir1.getName())))));
     assertThat(outs.get(offset + 2),
         is(allOf(containsString("To"), containsString("data_new"))));
   }

