This is an automated email from the ASF dual-hosted git repository.
shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new fdd96e4 HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.
fdd96e4 is described below
commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd
Author: Shashikant Banerjee <[email protected]>
AuthorDate: Wed Dec 18 22:50:46 2019 +0530
HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101.
Contributed by Shashikant Banerjee.
---
.../snapshot/DirectoryWithSnapshotFeature.java | 8 ++-
.../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++++++++++++++++++++++
2 files changed, 70 insertions(+), 2 deletions(-)
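A short reading of the diff: after HDFS-13101, the created list of a prior snapshot diff could be destroyed whenever the directory inode was the last reference, even though later snapshot diffs might still reach those children. With the double-rename sequence exercised by the new test below, this reportedly leaves the edit log unreplayable on NameNode restart. The fix adds a second condition: the created list is destroyed only when the prior diff is also the latest snapshot diff.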
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7fb639c..4e756c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -739,8 +739,12 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         // were created before "prior" will be covered by the later
         // cleanSubtreeRecursively call.
         if (priorCreated != null) {
-          if (currentINode.isLastReference()) {
-            // if this is the last reference, the created list can be
+          if (currentINode.isLastReference() &&
+              currentINode.getDiffs().getLastSnapshotId() == prior) {
+            // If this is the last reference to the directory inode, and
+            // the prior diff is the latest snapshot diff (so the created
+            // children cannot be reached from any later snapshot), the
+            // created list can be
             // destroyed.
             priorDiff.getChildrenDiff().destroyCreatedList(
                 reclaimContext, currentINode);
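For readers skimming the hunk, the tightened guard can be read in isolation as the sketch below. The wrapper method and its signature are hypothetical; only the two calls on currentINode come from the patch itself.

    // Hypothetical helper isolating the patched guard; the method and its
    // parameters are illustrative, the two calls mirror the hunk above.
    private static boolean canDestroyCreatedList(INodeDirectory currentINode,
        int prior) {
      // Destroy the prior diff's created list only when this inode is the
      // last reference to the directory AND "prior" is the latest snapshot,
      // i.e. no later snapshot diff can still reach the created children.
      return currentINode.isLastReference()
          && currentINode.getDiffs().getLastSnapshotId() == prior;
    }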
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index f5b5345..128e3ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -49,6 +49,7 @@ import org.mockito.Mockito;
import java.io.File;
import java.io.IOException;
+import java.io.PrintWriter;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
@@ -2412,4 +2413,71 @@ public class TestRenameWithSnapshots {
assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2",
"newDir/file2"));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3",
"newDir/file1"));
}
+
+ @Test (timeout=60000)
+ public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+ hdfs.mkdirs(sub1);
+ hdfs.allowSnapshot(sub1);
+ final Path dir1 = new Path(sub1, "dir1");
+ final Path dir2 = new Path(sub1, "dir2");
+ final Path dir3 = new Path(sub1, "dir3");
+ final String snap3 = "snap3";
+ final String snap4 = "snap4";
+ final String snap5 = "snap5";
+ final String snap6 = "snap6";
+ final Path foo = new Path(dir2, "foo");
+ final Path bar = new Path(dir2, "bar");
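+ // snap1 is taken before dir1 exists; dir1 is then created and renamed to dir2 before snap2.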
+ hdfs.createSnapshot(sub1, snap1);
+ hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+ rename(dir1, dir2);
+ hdfs.createSnapshot(sub1, snap2);
+ DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+ DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+ hdfs.createSnapshot(sub1, snap3);
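+ // Delete and recreate foo twice, taking snap4 and snap5 after each round.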
+ hdfs.delete(foo, false);
+ DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+ hdfs.createSnapshot(sub1, snap4);
+ hdfs.delete(foo, false);
+ DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+ hdfs.createSnapshot(sub1, snap5);
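+ // Second rename: dir2 becomes dir3, then snap6 is taken.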
+ rename(dir2, dir3);
+ hdfs.createSnapshot(sub1, snap6);
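+ // Delete the renamed tree, then the snapshots that still reference it.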
+ hdfs.delete(dir3, true);
+ deleteSnapshot(sub1, snap6);
+ deleteSnapshot(sub1, snap3);
+ // save namespace and restart the NameNode
+ hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ hdfs.saveNamespace();
+ hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+ cluster.restartNameNode(true);
+ }
+
+
+ void rename(Path src, Path dst) throws Exception {
+ printTree("Before rename " + src + " -> " + dst);
+ hdfs.rename(src, dst);
+ printTree("After rename " + src + " -> " + dst);
+ }
+
+ void deleteSnapshot(Path directory, String snapshotName) throws Exception {
+ hdfs.deleteSnapshot(directory, snapshotName);
+ printTree("deleted snapshot " + snapshotName);
+ }
+
+ private final PrintWriter output = new PrintWriter(System.out, true);
+ private int printTreeCount = 0;
+
+ String printTree(String label) throws Exception {
+ output.println();
+ output.println();
+ output.println("***** " + printTreeCount++ + ": " + label);
+ final String b =
+ fsn.getFSDirectory().getINode("/").dumpTreeRecursively().toString();
+ output.println(b);
+ return b;
+ }
}
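The new regression test can be run on its own with standard Maven/Surefire usage (this command is not part of the commit):

    mvn test -pl hadoop-hdfs-project/hadoop-hdfs -Dtest=TestRenameWithSnapshots#testDoubleRenamesWithSnapshotDelete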