Repository: hadoop
Updated Branches:
refs/heads/branch-2.7 6b6167d40 -> 3d88293dd
HDFS-9406. FSImage may get corrupted after deleting snapshot. (Contributed by
Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)
(cherry picked from commit 34ab50ea92370cc7440a8f7649286b148c2fde65)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
(cherry picked from commit fc8d9cc758d4119064d67008432f63a590b5f67a)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d88293d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d88293d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d88293d
Branch: refs/heads/branch-2.7
Commit: 3d88293dda031f43b635a367a64153fe2139de00
Parents: 6b6167d
Author: Yongjun Zhang <[email protected]>
Authored: Mon Feb 1 11:23:44 2016 -0800
Committer: Yongjun Zhang <[email protected]>
Committed: Mon Feb 1 15:29:03 2016 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hadoop/hdfs/server/namenode/INodeFile.java | 2 +-
.../hdfs/server/namenode/INodeReference.java | 5 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 11 ++-
.../namenode/snapshot/TestSnapshotDeletion.java | 95 ++++++++++++++++++--
5 files changed, 106 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d88293d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d16bccf..1110eab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -73,6 +73,9 @@ Release 2.7.3 - UNRELEASED
HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
(szetszwo)
+ HDFS-9406. FSImage may get corrupted after deleting snapshot.
+ (Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)
+
Release 2.7.2 - 2016-01-25
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d88293d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 08b7c73..1da6ecd 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -531,7 +531,7 @@ public class INodeFile extends INodeWithAdditionalFields
blk.setBlockCollection(null);
}
}
- setBlocks(null);
+ setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
if (getAclFeature() != null) {
AclStorage.removeAclFeature(getAclFeature());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d88293d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index b33a93c..d2cca0e 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -428,8 +428,9 @@ public abstract class INodeReference extends INode {
setParent(null);
}
}
-
- WithName getLastWithName() {
+
+ /** Return the last WithName reference if there is any, null otherwise. */
+ public WithName getLastWithName() {
return withNameList.size() > 0 ?
withNameList.get(withNameList.size() - 1) : null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d88293d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 95f9d8a..ec16bd8 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -481,7 +481,16 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post) {
- wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
+ INodeReference.WithCount wc =
+ (INodeReference.WithCount) wn.getReferredINode();
+ if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
+ // this wn is the last wn inside of the wc, also the dstRef node has
+ // been deleted. In this case, we should treat the referred file/dir
+ // as normal case
+ queue.add(wc.getReferredINode());
+ } else {
+ wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
+ }
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d88293d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index a679183..fc6c610 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -17,12 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
@@ -60,6 +55,7 @@ import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -1134,4 +1130,91 @@ public class TestSnapshotDeletion {
cluster.restartNameNode(0);
assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
}
+
+ /*
+ * Test fsimage corruption reported in HDFS-9697.
+ */
+ @Test
+ public void testFsImageCorruption() throws Exception {
+ final Path st = new Path("/st");
+ final Path nonst = new Path("/nonst");
+ final Path stY = new Path(st, "y");
+ final Path nonstTrash = new Path(nonst, "trash");
+
+ hdfs.mkdirs(stY);
+
+ hdfs.allowSnapshot(st);
+ hdfs.createSnapshot(st, "s0");
+
+ Path f = new Path(stY, "nn.log");
+ hdfs.createNewFile(f);
+ hdfs.createSnapshot(st, "s1");
+
+ Path f2 = new Path(stY, "nn2.log");
+ hdfs.rename(f, f2);
+ hdfs.createSnapshot(st, "s2");
+
+ Path trashSt = new Path(nonstTrash, "st");
+ hdfs.mkdirs(trashSt);
+ hdfs.rename(stY, trashSt);
+ hdfs.delete(nonstTrash, true);
+
+ hdfs.deleteSnapshot(st, "s1");
+ hdfs.deleteSnapshot(st, "s2");
+
+ hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ hdfs.saveNamespace();
+ hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ }
+
+ /*
+ * Test renaming file to outside of snapshottable dir then deleting it.
+ * Ensure it's deleted from both its parent INodeDirectory and InodeMap,
+ * after the last snapshot containing it is deleted.
+ */
+ @Test
+ public void testRenameAndDelete() throws IOException {
+ final Path foo = new Path("/foo");
+ final Path x = new Path(foo, "x");
+ final Path y = new Path(foo, "y");
+ final Path trash = new Path("/trash");
+ hdfs.mkdirs(x);
+ hdfs.mkdirs(y);
+ final long parentId = fsdir.getINode4Write(y.toString()).getId();
+
+ hdfs.mkdirs(trash);
+ hdfs.allowSnapshot(foo);
+ // 1. create snapshot s0
+ hdfs.createSnapshot(foo, "s0");
+ // 2. create file /foo/x/bar
+ final Path file = new Path(x, "bar");
+ DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, (short) 1, 0L);
+ final long fileId = fsdir.getINode4Write(file.toString()).getId();
+ // 3. move file into /foo/y
+ final Path newFile = new Path(y, "bar");
+ hdfs.rename(file, newFile);
+ // 4. create snapshot s1
+ hdfs.createSnapshot(foo, "s1");
+ // 5. move /foo/y to /trash
+ final Path deletedY = new Path(trash, "y");
+ hdfs.rename(y, deletedY);
+ // 6. create snapshot s2
+ hdfs.createSnapshot(foo, "s2");
+ // 7. delete /trash/y
+ hdfs.delete(deletedY, true);
+ // 8. delete snapshot s1
+ hdfs.deleteSnapshot(foo, "s1");
+
+ // make sure bar has been removed from its parent
+ INode p = fsdir.getInode(parentId);
+ Assert.assertNotNull(p);
+ INodeDirectory pd = p.asDirectory();
+ Assert.assertNotNull(pd);
+ Assert.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));
+
+ // make sure bar has been cleaned from inodeMap
+ Assert.assertNull(fsdir.getInode(fileId));
+ }
}