[34/50] [abbrv] hadoop git commit: HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its UUID when a storage directory is cleared (Harsh J via cmccabe)

2016-03-21 Thread wangda
HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its 
UUID when a storage directory is cleared (Harsh J via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc951e60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc951e60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc951e60

Branch: refs/heads/YARN-3368
Commit: dc951e606f40bb779632a8a3e3a46aeccc4a446a
Parents: ca8106d
Author: Colin Patrick Mccabe 
Authored: Thu Mar 17 10:37:42 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 17 10:37:42 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/TestDataNodeUUID.java  | 52 
 1 file changed, 52 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc951e60/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
index 34e53a3..ebf7c35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
@@ -19,17 +19,21 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 
+import java.io.File;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
 
 public class TestDataNodeUUID {
 
@@ -62,4 +66,52 @@ public class TestDataNodeUUID {
     // Make sure that we have a valid DataNodeUUID at that point of time.
     assertNotEquals(dn.getDatanodeUuid(), nullString);
   }
+
+  @Test(timeout = 10000)
+  public void testUUIDRegeneration() throws Exception {
+    File baseDir = new File(System.getProperty("test.build.data"));
+    File disk1 = new File(baseDir, "disk1");
+    File disk2 = new File(baseDir, "disk2");
+
+    // Ensure the configured disks do not pre-exist
+    FileUtils.deleteDirectory(disk1);
+    FileUtils.deleteDirectory(disk2);
+
+    MiniDFSCluster cluster = null;
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        disk1.toURI().toString(),
+        disk2.toURI().toString());
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .manageDataDfsDirs(false)
+          .build();
+      cluster.waitActive();
+
+      // Grab the new-cluster UUID as the original one to test against
+      String originalUUID = cluster.getDataNodes().get(0).getDatanodeUuid();
+      // Stop and simulate a DN wipe or unmount-but-root-path condition
+      // on the second disk
+      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
+      FileUtils.deleteDirectory(disk2);
+      assertTrue("Failed to recreate the data directory: " + disk2,
+          disk2.mkdirs());
+
+      // Restart and check if the UUID changed
+      assertTrue("DataNode failed to start up: " + dn,
+          cluster.restartDataNode(dn));
+      // We need to wait until the DN has completed registration
+      while (!cluster.getDataNodes().get(0).isDatanodeFullyStarted()) {
+        Thread.sleep(50);
+      }
+      assertEquals(
+          "DN generated a new UUID despite disk1 having it intact",
+          originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
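
For context on what this test protects: a DataNode persists its generated
UUID in the VERSION properties file kept under each configured storage
directory (conventionally <dataDir>/current/VERSION, alongside keys such as
clusterID and layoutVersion). Because every storage directory carries a copy,
wiping one of them, as the test does to disk2, must not cause the DataNode to
mint a new identity; the surviving copy on disk1 should win. The following
stand-alone helper is a minimal sketch, not part of this commit, assuming
that layout and the datanodeUuid property key; it can be used to eyeball the
on-disk state directly:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

// Hypothetical inspection helper (not from HDFS-9949): reads the persisted
// datanodeUuid from a storage directory's current/VERSION file.
public class DatanodeUuidReader {
  public static String readDatanodeUuid(File storageDir) throws IOException {
    File versionFile = new File(storageDir, "current" + File.separator + "VERSION");
    if (!versionFile.isFile()) {
      // The directory has been cleared (the condition simulated on disk2).
      return null;
    }
    Properties props = new Properties();
    try (InputStream in = new FileInputStream(versionFile)) {
      props.load(in);
    }
    return props.getProperty("datanodeUuid");
  }

  public static void main(String[] args) throws IOException {
    // Usage sketch: java DatanodeUuidReader /path/to/disk1
    System.out.println(readDatanodeUuid(new File(args[0])));
  }
}

To run just this test class, the usual Surefire selection applies, for
example: mvn test -Dtest=TestDataNodeUUID, from hadoop-hdfs-project/hadoop-hdfs.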



hadoop git commit: HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its UUID when a storage directory is cleared (Harsh J via cmccabe)

2016-03-19 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 66257613b -> f69a6c363


HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its 
UUID when a storage directory is cleared (Harsh J via cmccabe)

(cherry picked from commit dc951e606f40bb779632a8a3e3a46aeccc4a446a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f69a6c36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f69a6c36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f69a6c36

Branch: refs/heads/branch-2
Commit: f69a6c36325f53d5a1b201c8357d94e4139b9a93
Parents: 6625761
Author: Colin Patrick Mccabe 
Authored: Thu Mar 17 10:37:42 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 17 10:42:37 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/TestDataNodeUUID.java  | 52 
 1 file changed, 52 insertions(+)
----------------------------------------------------------------------





hadoop git commit: HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its UUID when a storage directory is cleared (Harsh J via cmccabe)

2016-03-19 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 52ce763a5 -> 73b5a44b0


HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its 
UUID when a storage directory is cleared (Harsh J via cmccabe)

(cherry picked from commit dc951e606f40bb779632a8a3e3a46aeccc4a446a)
(cherry picked from commit f69a6c36325f53d5a1b201c8357d94e4139b9a93)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73b5a44b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73b5a44b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73b5a44b

Branch: refs/heads/branch-2.8
Commit: 73b5a44b0ee6ef5674cf7bd181456b1e9589b8c7
Parents: 52ce763
Author: Colin Patrick Mccabe 
Authored: Thu Mar 17 10:37:42 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 17 10:43:07 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/TestDataNodeUUID.java  | 52 
 1 file changed, 52 insertions(+)
----------------------------------------------------------------------





[42/46] hadoop git commit: HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its UUID when a storage directory is cleared (Harsh J via cmccabe)

2016-03-19 Thread aengineer
HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its 
UUID when a storage directory is cleared (Harsh J via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc951e60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc951e60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc951e60

Branch: refs/heads/HDFS-7240
Commit: dc951e606f40bb779632a8a3e3a46aeccc4a446a
Parents: ca8106d
Author: Colin Patrick Mccabe 
Authored: Thu Mar 17 10:37:42 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 17 10:37:42 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/TestDataNodeUUID.java  | 52 
 1 file changed, 52 insertions(+)
----------------------------------------------------------------------





hadoop git commit: HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its UUID when a storage directory is cleared (Harsh J via cmccabe)

2016-03-19 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca8106d2d -> dc951e606


HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its 
UUID when a storage directory is cleared (Harsh J via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc951e60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc951e60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc951e60

Branch: refs/heads/trunk
Commit: dc951e606f40bb779632a8a3e3a46aeccc4a446a
Parents: ca8106d
Author: Colin Patrick Mccabe 
Authored: Thu Mar 17 10:37:42 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 17 10:37:42 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/TestDataNodeUUID.java  | 52 
 1 file changed, 52 insertions(+)
----------------------------------------------------------------------

