[ 
https://issues.apache.org/jira/browse/HDFS-15568?focusedWorklogId=481660&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-481660
 ]

ASF GitHub Bot logged work on HDFS-15568:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 10/Sep/20 19:21
            Start Date: 10/Sep/20 19:21
    Worklog Time Spent: 10m 
      Work Description: goiri commented on a change in pull request #2296:
URL: https://github.com/apache/hadoop/pull/2296#discussion_r486579348



##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
##########
@@ -508,6 +508,10 @@ FSNamesystem getFSNamesystem() {
     return namesystem;
   }
 
+  public boolean isImageLoaded() {

Review comment:
       Add javadoc
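
For instance, something along these lines; the wording is only a guess at the method's intent (its body is cut off in this diff), inferred from the startup failure this patch addresses:

```java
/**
 * @return true once the FSImage has finished loading, false while the
 *         NameNode is still loading the image and replaying edits at
 *         startup (when snapshot limit checks should not be enforced).
 */
```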

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -368,6 +368,13 @@ void assertFirstSnapshot(INodeDirectory dir,
     }
   }
 
+  boolean captureOpenFiles() {

Review comment:
       Add javadoc here as well.
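
Something like the following might do; the description is an assumption tied to the field name (presumably driven by the capture-open-files snapshot setting):

```java
/**
 * @return true if snapshots should also capture the state of files
 *         that are currently open for write.
 */
```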

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -368,6 +368,13 @@ void assertFirstSnapshot(INodeDirectory dir,
     }
   }
 
+  boolean captureOpenFiles() {
+    return captureOpenFiles;
+  }
+
+  int getMaxSnapshotLimit() {

Review comment:
       Mark this as @VisibleForTesting.
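
Roughly as below; the returned field name is assumed (the diff is truncated here), and the import should match whichever VisibleForTesting annotation this module already uses:

```java
@VisibleForTesting
int getMaxSnapshotLimit() {
  return maxSnapshotLimit; // assumed field name; not visible in this diff
}
```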

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,68 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void SnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    try {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+      Assert.fail("Expected SnapshotException not thrown");
+    } catch (SnapshotException se) {
+      Assert.assertTrue(
+          StringUtils.toLowerCase(se.getMessage()).contains(
+              "max snapshot limit"));
+    }
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, cluster.getNamesystem().
+        getSnapshotManager().getNumSnapshots());
+
+    // make sure namenode has the new snapshot limit configured as 2
+    Assert.assertEquals(2,
+        cluster.getNamesystem().getSnapshotManager().getMaxSnapshotLimit());
+
+    // Any new snapshot creation should still fail
+    try {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+      Assert.fail("Expected SnapshotException not thrown");
+    } catch (SnapshotException se) {
+      Assert.assertTrue(

Review comment:
       Use LambdaTestUtils#intercept here.
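
One possible shape, using org.apache.hadoop.test.LambdaTestUtils#intercept with the substring taken from the existing assertion (note that i is reassigned in the loop above, so it has to be copied into an effectively final local before it can be used in the lambda):

```java
final String snapName = "s" + i;
LambdaTestUtils.intercept(SnapshotException.class, "max snapshot limit",
    () -> hdfs.createSnapshot(snapshottableDir, snapName));
```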

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,68 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void SnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    try {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+      Assert.fail("Expected SnapshotException not thrown");
+    } catch (SnapshotException se) {
+      Assert.assertTrue(
+          StringUtils.toLowerCase(se.getMessage()).contains(
+              "max snapshot limit"));
+    }
+
+    // now change max snapshot directory limit to 2 and restart namenode
+    cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+    cluster.restartNameNodes();
+
+    // make sure edits of all previous 5 create snapshots are replayed
+    Assert.assertEquals(numSnapshots, cluster.getNamesystem().

Review comment:
       Extract getSnapshotManager() into a local variable instead of repeating the chain.
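
For instance:

```java
SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
// edits of all 5 earlier createSnapshot ops should have been replayed
Assert.assertEquals(numSnapshots, sm.getNumSnapshots());
// and the restarted NameNode should carry the new limit of 2
Assert.assertEquals(2, sm.getMaxSnapshotLimit());
```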

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,68 @@ public void testValidateSnapshotIDWidth() throws Exception {
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
+  @Test
+  public void SnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+            DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    int i = 0;
+    for (; i < numSnapshots; i++) {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);
+    }
+    try {
+      hdfs.createSnapshot(snapshottableDir, "s" + i);

Review comment:
       Use LambdaTestUtils#intercept
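
Same pattern as suggested above for the later check, e.g.:

```java
final String snapName = "s" + i;
LambdaTestUtils.intercept(SnapshotException.class, "max snapshot limit",
    () -> hdfs.createSnapshot(snapshottableDir, snapName));
```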




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 481660)
    Time Spent: 0.5h  (was: 20m)

> namenode failed to start when dfs.namenode.snapshot.max.limit is set
> -----------------------------------------------------------------------
>
>                 Key: HDFS-15568
>                 URL: https://issues.apache.org/jira/browse/HDFS-15568
>             Project: Hadoop HDFS
>          Issue Type: Sub-task
>          Components: snapshots
>            Reporter: Nilotpal Nandi
>            Assignee: Shashikant Banerjee
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 0.5h
>  Remaining Estimate: 0h
>
> {code:java}
> 11:35:05.872 AM       ERROR   NameNode        
> Failed to start namenode.
> org.apache.hadoop.hdfs.protocol.SnapshotException: Failed to add snapshot: 
> there are already 20 snapshot(s) and the max snapshot limit is 20
>       at 
> org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.addSnapshot(DirectorySnapshottableFeature.java:181)
>       at 
> org.apache.hadoop.hdfs.server.namenode.INodeDirectory.addSnapshot(INodeDirectory.java:285)
>       at 
> org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.createSnapshot(SnapshotManager.java:447)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.applyEditLogOp(FSEditLogLoader.java:802)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:287)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:182)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:912)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:760)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:337)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1164)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:755)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:646)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:717)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:960)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:933)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1670)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1737)
> {code}
> Steps to reproduce:
> ----------------------
> 1. Set the directory-level snapshot limit to 100.
> 2. Create 100 snapshots.
> 3. Delete all 100 snapshots (in order).
> 4. No snapshots exist anymore.
> 5. Set the directory-level snapshot limit to 20.
> 6. Restart HDFS.
> 7. The NameNode fails to start.
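> The same sequence, sketched with the MiniDFSCluster APIs used in the test above (names and values are illustrative):
> {code:java}
> Configuration conf = new Configuration();
> conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 100);
> MiniDFSCluster cluster =
>     new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
> cluster.waitActive();
> DistributedFileSystem hdfs = cluster.getFileSystem();
> Path dir = new Path("/repro");
> hdfs.mkdirs(dir);
> hdfs.allowSnapshot(dir);
> for (int i = 0; i < 100; i++) {   // create 100 snapshots
>   hdfs.createSnapshot(dir, "s" + i);
> }
> for (int i = 0; i < 100; i++) {   // delete them all, in order
>   hdfs.deleteSnapshot(dir, "s" + i);
> }
> // Lower the limit to 20 and restart: replaying the 100 earlier
> // createSnapshot edits trips the new limit and the NameNode fails
> // to start with the SnapshotException shown above.
> cluster.getNameNode().getConf().setInt(
>     DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 20);
> cluster.restartNameNodes();
> {code}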


