Repository: hadoop
Updated Branches:
refs/heads/branch-2.8 b8360139a -> d7b01ebcb
HDFS-9406. FSImage may get corrupted after deleting snapshot. (Contributed by
Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)
(cherry picked from commit 34ab50ea92370cc7440a8f7649286b148c2fde65)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
(cherry picked from commit fc8d9cc758d4119064d67008432f63a590b5f67a)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7b01ebc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7b01ebc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7b01ebc
Branch: refs/heads/branch-2.8
Commit: d7b01ebcbe514c34336c2d1432c8af6176c5d773
Parents: b836013
Author: Yongjun Zhang <[email protected]>
Authored: Mon Feb 1 11:23:44 2016 -0800
Committer: Yongjun Zhang <[email protected]>
Committed: Mon Feb 1 14:22:35 2016 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hadoop/hdfs/server/namenode/INodeFile.java | 2 +-
.../hdfs/server/namenode/INodeReference.java | 5 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 11 ++-
.../hdfs/server/namenode/TestINodeFile.java | 3 +-
.../namenode/snapshot/TestSnapshotDeletion.java | 95 ++++++++++++++++++--
6 files changed, 107 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7b01ebc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4252cc4..c31de2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1758,6 +1758,9 @@ Release 2.7.3 - UNRELEASED
HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
(szetszwo)
+ HDFS-9406. FSImage may get corrupted after deleting snapshot.
+ (Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)
+
Release 2.7.2 - 2016-01-25
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7b01ebc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 99faa19..2eb9a80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -491,7 +491,7 @@ public class INodeFile extends INodeWithAdditionalFields
/** Clear all blocks of the file. */
public void clearBlocks() {
- setBlocks(null);
+ setBlocks(BlockInfo.EMPTY_ARRAY);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7b01ebc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 8734956..1b85237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -421,8 +421,9 @@ public abstract class INodeReference extends INode {
setParent(null);
}
}
-
- WithName getLastWithName() {
+
+ /** Return the last WithName reference if there is any, null otherwise. */
+ public WithName getLastWithName() {
return withNameList.size() > 0 ?
withNameList.get(withNameList.size() - 1) : null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7b01ebc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 5c5b259..0111b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -452,7 +452,16 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post) {
- wn.cleanSubtree(reclaimContext, post, prior);
+ INodeReference.WithCount wc =
+ (INodeReference.WithCount) wn.getReferredINode();
+ if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
+ // this wn is the last wn inside of the wc, also the dstRef node has
+ // been deleted. In this case, we should treat the referred file/dir
+ // as normal case
+ queue.add(wc.getReferredINode());
+ } else {
+ wn.cleanSubtree(reclaimContext, post, prior);
+ }
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7b01ebc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 33a547a..4d93a32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
@@ -1117,6 +1116,6 @@ public class TestINodeFile {
INodeFile toBeCleared = createINodeFiles(1, "toBeCleared")[0];
assertEquals(1, toBeCleared.getBlocks().length);
toBeCleared.clearBlocks();
- assertNull(toBeCleared.getBlocks());
+ assertTrue(toBeCleared.getBlocks().length == 0);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7b01ebc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index bf462da..ca53788 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -18,12 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
@@ -61,6 +56,7 @@ import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -1149,4 +1145,91 @@ public class TestSnapshotDeletion {
cluster.restartNameNode(0);
assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
}
+
+ /*
+ * Test fsimage corruption reported in HDFS-9697.
+ */
+ @Test
+ public void testFsImageCorruption() throws Exception {
+ final Path st = new Path("/st");
+ final Path nonst = new Path("/nonst");
+ final Path stY = new Path(st, "y");
+ final Path nonstTrash = new Path(nonst, "trash");
+
+ hdfs.mkdirs(stY);
+
+ hdfs.allowSnapshot(st);
+ hdfs.createSnapshot(st, "s0");
+
+ Path f = new Path(stY, "nn.log");
+ hdfs.createNewFile(f);
+ hdfs.createSnapshot(st, "s1");
+
+ Path f2 = new Path(stY, "nn2.log");
+ hdfs.rename(f, f2);
+ hdfs.createSnapshot(st, "s2");
+
+ Path trashSt = new Path(nonstTrash, "st");
+ hdfs.mkdirs(trashSt);
+ hdfs.rename(stY, trashSt);
+ hdfs.delete(nonstTrash, true);
+
+ hdfs.deleteSnapshot(st, "s1");
+ hdfs.deleteSnapshot(st, "s2");
+
+ hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ hdfs.saveNamespace();
+ hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ }
+
+ /*
+ * Test renaming file to outside of snapshottable dir then deleting it.
+ * Ensure it's deleted from both its parent INodeDirectory and InodeMap,
+ * after the last snapshot containing it is deleted.
+ */
+ @Test
+ public void testRenameAndDelete() throws IOException {
+ final Path foo = new Path("/foo");
+ final Path x = new Path(foo, "x");
+ final Path y = new Path(foo, "y");
+ final Path trash = new Path("/trash");
+ hdfs.mkdirs(x);
+ hdfs.mkdirs(y);
+ final long parentId = fsdir.getINode4Write(y.toString()).getId();
+
+ hdfs.mkdirs(trash);
+ hdfs.allowSnapshot(foo);
+ // 1. create snapshot s0
+ hdfs.createSnapshot(foo, "s0");
+ // 2. create file /foo/x/bar
+ final Path file = new Path(x, "bar");
+ DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, (short) 1, 0L);
+ final long fileId = fsdir.getINode4Write(file.toString()).getId();
+ // 3. move file into /foo/y
+ final Path newFile = new Path(y, "bar");
+ hdfs.rename(file, newFile);
+ // 4. create snapshot s1
+ hdfs.createSnapshot(foo, "s1");
+ // 5. move /foo/y to /trash
+ final Path deletedY = new Path(trash, "y");
+ hdfs.rename(y, deletedY);
+ // 6. create snapshot s2
+ hdfs.createSnapshot(foo, "s2");
+ // 7. delete /trash/y
+ hdfs.delete(deletedY, true);
+ // 8. delete snapshot s1
+ hdfs.deleteSnapshot(foo, "s1");
+
+ // make sure bar has been removed from its parent
+ INode p = fsdir.getInode(parentId);
+ Assert.assertNotNull(p);
+ INodeDirectory pd = p.asDirectory();
+ Assert.assertNotNull(pd);
+ Assert.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));
+
+ // make sure bar has been cleaned from inodeMap
+ Assert.assertNull(fsdir.getInode(fileId));
+ }
}