HDFS-9715. Check storage ID uniqueness on datanode startup (Contributed by Lei (Eddy) Xu)

(cherry picked from commit 04375756a5ed6e907ee7548469c2c508aebbafb7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8577c0b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8577c0b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8577c0b6

Branch: refs/heads/branch-2
Commit: 8577c0b6a2bc7e4966866eac2bd1f2a112d0a7c1
Parents: 3583c77
Author: Vinayakumar B <vinayakum...@apache.org>
Authored: Wed Feb 3 07:35:01 2016 +0530
Committer: Vinayakumar B <vinayakum...@apache.org>
Committed: Wed Feb 3 07:35:47 2016 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 46 ++++++++++++--------
 .../apache/hadoop/hdfs/UpgradeUtilities.java    | 10 +++++
 .../fsdataset/impl/TestFsDatasetImpl.java       | 26 +++++++++++
 .../hdfs/server/namenode/FSImageTestUtil.java   | 33 ++++++++++++--
 5 files changed, 95 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8577c0b6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0b3db22..2a14a29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1035,6 +1035,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8999. Allow a file to be closed with COMMITTED but not yet COMPLETE
     blocks.  (szetszwo)
 
+    HDFS-9715. Check storage ID uniqueness on datanode startup
+    (Lei (Eddy) Xu via vinayakumarb)
+
   BUG FIXES
 
     HDFS-8091: ACLStatus and XAttributes should be presented to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8577c0b6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index fe957e7..36b76ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -379,6 +379,31 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     return volumeFailureInfos;
   }
 
+  /**
+   * Activate a volume to serve requests.
+   * @throws IOException if the storage UUID already exists.
+   */
+  private synchronized void activateVolume(
+      ReplicaMap replicaMap,
+      Storage.StorageDirectory sd, StorageType storageType,
+      FsVolumeReference ref) throws IOException {
+    DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
+    if (dnStorage != null) {
+      final String errorMsg = String.format(
+          "Found duplicated storage UUID: %s in %s.",
+          sd.getStorageUuid(), sd.getVersionFile());
+      LOG.error(errorMsg);
+      throw new IOException(errorMsg);
+    }
+    volumeMap.addAll(replicaMap);
+    storageMap.put(sd.getStorageUuid(),
+        new DatanodeStorage(sd.getStorageUuid(),
+            DatanodeStorage.State.NORMAL,
+            storageType));
+    asyncDiskService.addVolume(sd.getCurrentDir());
+    volumes.addVolume(ref);
+  }
+
   private void addVolume(Collection<StorageLocation> dataLocations,
       Storage.StorageDirectory sd) throws IOException {
     final File dir = sd.getCurrentDir();
@@ -394,16 +419,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     ReplicaMap tempVolumeMap = new ReplicaMap(this);
     fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
 
-    synchronized (this) {
-      volumeMap.addAll(tempVolumeMap);
-      storageMap.put(sd.getStorageUuid(),
-          new DatanodeStorage(sd.getStorageUuid(),
-              DatanodeStorage.State.NORMAL,
-              storageType));
-      asyncDiskService.addVolume(sd.getCurrentDir());
-      volumes.addVolume(ref);
-    }
-
+    activateVolume(tempVolumeMap, sd, storageType, ref);
     LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 
@@ -461,15 +477,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     setupAsyncLazyPersistThread(fsVolume);
 
     builder.build();
-    synchronized (this) {
-      volumeMap.addAll(tempVolumeMap);
-      storageMap.put(sd.getStorageUuid(),
-          new DatanodeStorage(sd.getStorageUuid(),
-              DatanodeStorage.State.NORMAL,
-              storageType));
-      asyncDiskService.addVolume(sd.getCurrentDir());
-      volumes.addVolume(ref);
-    }
+    activateVolume(tempVolumeMap, sd, storageType, ref);
     LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 

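For readers skimming the hunk above: the refactoring replaces two identical
synchronized blocks with a single activateVolume() that additionally rejects a
volume whose storage UUID is already registered. The check reduces to a guarded
insert into a map keyed by storage UUID; the standalone sketch below
illustrates that pattern (VolumeRegistry and registerVolume are illustrative
names, not part of the patch):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    class VolumeRegistry {
      // Maps storage UUID -> volume directory; plays the role of
      // FsDatasetImpl#storageMap in the patch above.
      private final Map<String, String> volumesByUuid = new HashMap<>();

      // Fails fast when a second volume reports an already-registered UUID,
      // the condition activateVolume() now guards against.
      synchronized void registerVolume(String storageUuid, String dir)
          throws IOException {
        if (volumesByUuid.containsKey(storageUuid)) {
          throw new IOException(String.format(
              "Found duplicated storage UUID: %s in %s.", storageUuid, dir));
        }
        volumesByUuid.put(storageUuid, dir);
      }
    }
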
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8577c0b6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 4e4ed13..527d24a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -30,6 +30,7 @@ import java.io.OutputStream;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.zip.CRC32;
 
 import org.apache.hadoop.conf.Configuration;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 
 import com.google.common.base.Preconditions;
@@ -379,6 +381,14 @@ public class UpgradeUtilities {
       localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                               new Path(newDir.toString()),
                               false);
+      // Change the storage UUID to avoid conflicts when DN starts up.
+      StorageDirectory sd = new StorageDirectory(
+          new File(datanodeStorage.toString()));
+      sd.setStorageUuid(DatanodeStorage.generateUuid());
+      Properties properties = Storage.readPropertiesFile(sd.getVersionFile());
+      properties.setProperty("storageID", sd.getStorageUuid());
+      Storage.writeProperties(sd.getVersionFile(), properties);
+
       retVal[i] = newDir;
     }
     return retVal;

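The hunk above gives each copied storage directory a fresh identity by
generating a new UUID and rewriting the storageID key of its VERSION file
through the Storage helpers. Since VERSION is an ordinary Java properties
file, the same edit can be approximated with java.util.Properties alone; the
path below is a placeholder, and the "DS-" prefix mirrors
DatanodeStorage.generateUuid():

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.Properties;
    import java.util.UUID;

    public class RewriteStorageId {
      public static void main(String[] args) throws IOException {
        // Placeholder path; a datanode VERSION file normally lives under
        // <dfs.datanode.data.dir>/current/VERSION.
        File versionFile = new File("/tmp/dn-data/current/VERSION");

        // VERSION is a key=value properties file (storageID, clusterID, ...).
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(versionFile)) {
          props.load(in);
        }

        // Give the copy its own storage ID so it no longer collides with
        // the directory it was cloned from.
        props.setProperty("storageID", "DS-" + UUID.randomUUID());

        try (FileOutputStream out = new FileOutputStream(versionFile)) {
          props.store(out, null);
        }
      }
    }
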
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8577c0b6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 22a0073..b2cfe89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -213,6 +213,32 @@ public class TestFsDatasetImpl {
     assertTrue(actualVolumes.containsAll(expectedVolumes));
   }
 
+  @Test
+  public void testAddVolumeWithSameStorageUuid() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      cluster.waitActive();
+      assertTrue(cluster.getDataNodes().get(0).isConnectedToNN(
+          cluster.getNameNode().getServiceRpcAddress()));
+
+      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
+      File vol0 = cluster.getStorageDir(0, 0);
+      File vol1 = cluster.getStorageDir(0, 1);
+      Storage.StorageDirectory sd0 = new Storage.StorageDirectory(vol0);
+      Storage.StorageDirectory sd1 = new Storage.StorageDirectory(vol1);
+      FileUtils.copyFile(sd0.getVersionFile(), sd1.getVersionFile());
+
+      cluster.restartDataNode(dn, true);
+      cluster.waitActive();
+      assertFalse(cluster.getDataNodes().get(0).isConnectedToNN(
+          cluster.getNameNode().getServiceRpcAddress()));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test(timeout = 30000)
   public void testRemoveVolumes() throws IOException {
     // Feed FsDataset with block metadata.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8577c0b6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index fafeae2..12b5180 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -34,6 +34,8 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.EnumMap;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -313,21 +315,24 @@ public abstract class FSImageTestUtil {
         fileList.add(f);
       }
     }
-    
+
+    Set<String> ignoredProperties = new HashSet<>();
+    ignoredProperties.add("storageID");
     for (List<File> sameNameList : groupedByName.values()) {
       if (sameNameList.get(0).isDirectory()) {
         // recurse
         assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
       } else {
         if ("VERSION".equals(sameNameList.get(0).getName())) {
-          assertPropertiesFilesSame(sameNameList.toArray(new File[0]));
+          assertPropertiesFilesSame(sameNameList.toArray(new File[0]),
+              ignoredProperties);
         } else {
           assertFileContentsSame(sameNameList.toArray(new File[0]));
         }
       }
     }  
   }
-  
+
   /**
    * Assert that a set of properties files all contain the same data.
    * We cannot simply check the md5sums here, since Properties files
@@ -339,6 +344,20 @@ public abstract class FSImageTestUtil {
    */
   public static void assertPropertiesFilesSame(File[] propFiles)
       throws IOException {
+    assertPropertiesFilesSame(propFiles, null);
+  }
+
+  /**
+   * Assert that a set of properties files all contain the same data.
+   *
+   * @param propFiles the files to compare.
+   * @param ignoredProperties the property names to be ignored during
+   *                          comparison.
+   * @throws IOException if the files cannot be opened or read
+   * @throws AssertionError if the files differ
+   */
+  public static void assertPropertiesFilesSame(
+      File[] propFiles, Set<String> ignoredProperties) throws IOException {
     Set<Map.Entry<Object, Object>> prevProps = null;
     
     for (File f : propFiles) {
@@ -355,7 +374,13 @@ public abstract class FSImageTestUtil {
       } else {
         Set<Entry<Object,Object>> diff =
           Sets.symmetricDifference(prevProps, props.entrySet());
-        if (!diff.isEmpty()) {
+        Iterator<Entry<Object, Object>> it = diff.iterator();
+        while (it.hasNext()) {
+          Entry<Object, Object> entry = it.next();
+          if (ignoredProperties != null &&
+              ignoredProperties.contains(entry.getKey())) {
+            continue;
+          }
           fail("Properties file " + f + " differs from " + propFiles[0]);
         }
       }

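The new assertPropertiesFilesSame overload lets the VERSION comparison tolerate
keys that are now expected to differ between otherwise identical storage
directories (storageID). Below is a minimal, self-contained illustration of
that filtering, using the same Guava Sets.symmetricDifference call; the
property values are invented for the example:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import com.google.common.collect.Sets;

    public class IgnoredPropertyDiff {
      public static void main(String[] args) {
        Map<Object, Object> a = new HashMap<>();
        a.put("layoutVersion", "-56");
        a.put("storageID", "DS-1111");

        Map<Object, Object> b = new HashMap<>(a);
        b.put("storageID", "DS-2222");

        Set<String> ignored = Collections.singleton("storageID");

        // Entries present in exactly one of the two property sets.
        for (Map.Entry<Object, Object> e :
            Sets.symmetricDifference(a.entrySet(), b.entrySet())) {
          if (ignored.contains(e.getKey())) {
            continue;  // storageID is allowed to differ
          }
          throw new AssertionError("VERSION files differ on " + e.getKey());
        }
        System.out.println("VERSION files match (ignoring storageID)");
      }
    }
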