Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5f6806d90 -> 7aec9fac4


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)
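
Root cause, in brief: in the concat cleanup loop in FSDirConcatOp, each
source inode was removed from its parent without snapshot context, so a
stale reference could survive in the parent directory's snapshot diff list
even though the inode's blocks had been cleared. The fix records the
removal against the latest snapshot of the concat target. The changed call,
using the names from the diff below:

    // before: removal ignores snapshots, leaving a dangling entry in the
    // snapshot diff list of the parent directory
    nodeToRemove.getParent().removeChild(nodeToRemove);

    // after: removal is recorded against the latest snapshot id of the
    // concat target, keeping the diff list consistent
    nodeToRemove.getParent().removeChild(nodeToRemove,
        targetIIP.getLatestSnapshotId());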


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7aec9fac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7aec9fac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7aec9fac

Branch: refs/heads/branch-2.8
Commit: 7aec9fac4a72e7c868c9c1c7b4252a746694c936
Parents: 5f6806d
Author: Xiaoyu Yao <x...@apache.org>
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao <x...@apache.org>
Committed: Thu Feb 8 10:23:35 2018 -0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirConcatOp.java     |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++++++++++++++++++
 2 files changed, 129 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7aec9fac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c51b178..14df725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -246,7 +246,9 @@ class FSDirConcatOp {
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        nodeToRemove.getParent().removeChild(nodeToRemove);
+        // Ensure the nodeToRemove is cleared from snapshot diff list
+        nodeToRemove.getParent().removeChild(nodeToRemove,
+            targetIIP.getLatestSnapshotId());
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }
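
The tests added below exercise this end to end through the public API. A
rough sketch of the core reproduction, assuming a MiniDFSCluster-backed
DistributedFileSystem `hdfs` as set up in TestSnapshotDeletion:

    // sketch only: snapshot a directory, concat into a file under it,
    // snapshot again, then diff across the concat
    Path st = new Path("/st");
    hdfs.mkdirs(st);
    hdfs.allowSnapshot(st);
    Path dest = new Path(st, "dest.txt");
    hdfs.createNewFile(dest);
    hdfs.createSnapshot(st, "ss");
    // ... write the source files, then:
    hdfs.concat(dest, files);      // source inodes leave the namespace
    hdfs.createSnapshot(st, "s0");
    // with the fix, the only change relative to "ss" is a MODIFY of
    // dest.txt; without it the diff list could hold a corrupted entry
    SnapshotDiffReport sdr = hdfs.getSnapshotDiffReport(st, "s0", "ss");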

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7aec9fac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
     // make sure bar has been cleaned from inodeMap
     Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i + ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int j = 0; j < 3; j++) {
+      FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, files[j], false, 1024,
+          1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+    }
+
+    hdfs.createSnapshot(st, "s0");
+
+    // Verify the SnapshotException is thrown as expected for HDFS-4529
+    exception.expect(RemoteException.class);
+    String error = "Concat: the source file /st/0.txt is in snapshot";
+    exception.expectMessage(error);
+    hdfs.concat(dest, files);
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDeleteWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i + ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+    }
+
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDiffReportWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i + ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+
+      SnapshotDiffReport sdr = hdfs.getSnapshotDiffReport(st, "s" + i, "ss");
+      LOG.info("Snapshot Diff s{} to ss : {}", i, sdr);
+      Assert.assertEquals(1, sdr.getDiffList().size());
+      Assert.assertEquals(SnapshotDiffReport.DiffType.MODIFY,
+          sdr.getDiffList().get(0).getType());
+      Assert.assertEquals(dest, new Path(st, DFSUtilClient.bytes2String(
+          sdr.getDiffList().get(0).getSourcePath())));
+    }
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
 }

