[ 
https://issues.apache.org/jira/browse/HDFS-15568?focusedWorklogId=481668&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-481668
 ]

ASF GitHub Bot logged work on HDFS-15568:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 10/Sep/20 19:49
            Start Date: 10/Sep/20 19:49
    Worklog Time Spent: 10m 
      Work Description: szetszwo commented on a change in pull request #2296:
URL: https://github.com/apache/hadoop/pull/2296#discussion_r486594478



##########
File path: 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -448,22 +455,31 @@ public String createSnapshot(final LeaseManager 
leaseManager,
           "snapshot IDs and ID rollover is not supported.");
     }
     int n = numSnapshots.get();
-    if (n >= maxSnapshotFSLimit) {
-      // We have reached the maximum snapshot limit
-      throw new SnapshotException(
-          "Failed to create snapshot: there are already " + (n + 1)
-              + " snapshot(s) and the max snapshot limit is "
-              + maxSnapshotFSLimit);
-    }
-
-    srcRoot.addSnapshot(snapshotCounter, snapshotName, leaseManager,
-        this.captureOpenFiles, maxSnapshotLimit, mtime);
+    checkSnapshotLimit(maxSnapshotFSLimit, n);
+    srcRoot.addSnapshot(this, snapshotName, leaseManager, mtime);
       
     //create success, update id
     snapshotCounter++;
     numSnapshots.getAndIncrement();
     return Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
   }
+
+  void checkSnapshotLimit(int limit, int numSnapshots)

Review comment:
       I suggest adding the limit type to the error message as below.
   
   ```
   diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
   index 266c0a71241..7a47ab4000d 100644
   --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
   +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
   @@ -190,8 +190,7 @@ public Snapshot addSnapshot(INodeDirectory snapshotRoot,
              + n + " snapshot(s) and the snapshot quota is "
              + snapshotQuota);
        }
   -    snapshotManager.checkSnapshotLimit(snapshotManager.
   -        getMaxSnapshotLimit(), n);
   +    snapshotManager.checkPerDirectorySnapshotLimit(n);
        final Snapshot s = new Snapshot(id, name, snapshotRoot);
        final byte[] nameBytes = s.getRoot().getLocalNameBytes();
        final int i = searchSnapshot(nameBytes);
   diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
   index 0a2e18c3dc3..7c482074486 100644
   --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
   +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
   @@ -455,7 +455,7 @@ public String createSnapshot(final LeaseManager 
leaseManager,
              "snapshot IDs and ID rollover is not supported.");
        }
        int n = numSnapshots.get();
   -    checkSnapshotLimit(maxSnapshotFSLimit, n);
   +    checkFileSystemSnapshotLimit(n);
        srcRoot.addSnapshot(this, snapshotName, leaseManager, mtime);
          
        //create success, update id
   @@ -464,12 +464,19 @@ public String createSnapshot(final LeaseManager 
leaseManager,
        return Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
      }
    
   -  void checkSnapshotLimit(int limit, int numSnapshots)
   -      throws SnapshotException {
   +  void checkFileSystemSnapshotLimit(int n) throws SnapshotException {
   +    checkSnapshotLimit(maxSnapshotFSLimit, n, "file system");
   +  }
   +
   +  void checkPerDirectorySnapshotLimit(int n) throws SnapshotException {
   +    checkSnapshotLimit(maxSnapshotLimit, n, "per directory");
   +  }
   +
   +  private void checkSnapshotLimit(int limit, int numSnapshots,
   +      String type) throws SnapshotException {
        if (numSnapshots >= limit) {
   -      String msg = "there are already " + (numSnapshots + 1)
   -          + " snapshot(s) and the max snapshot limit is "
   -          + limit;
   +      String msg = "There are already " + (numSnapshots + 1)
   +          + " snapshot(s) and the " + type + " snapshot limit is " + limit;
          if (fsdir.isImageLoaded()) {
            // We have reached the maximum snapshot limit
            throw new SnapshotException(
   
   ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 481668)
    Time Spent: 40m  (was: 0.5h)

> namenode failed to start when dfs.namenode.snapshot.max.limit set
> -----------------------------------------------------------------------
>
>                 Key: HDFS-15568
>                 URL: https://issues.apache.org/jira/browse/HDFS-15568
>             Project: Hadoop HDFS
>          Issue Type: Sub-task
>          Components: snapshots
>            Reporter: Nilotpal Nandi
>            Assignee: Shashikant Banerjee
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 40m
>  Remaining Estimate: 0h
>
> {code:java}
> 11:35:05.872 AM       ERROR   NameNode        
> Failed to start namenode.
> org.apache.hadoop.hdfs.protocol.SnapshotException: Failed to add snapshot: 
> there are already 20 snapshot(s) and the max snapshot limit is 20
>       at 
> org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.addSnapshot(DirectorySnapshottableFeature.java:181)
>       at 
> org.apache.hadoop.hdfs.server.namenode.INodeDirectory.addSnapshot(INodeDirectory.java:285)
>       at 
> org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.createSnapshot(SnapshotManager.java:447)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.applyEditLogOp(FSEditLogLoader.java:802)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:287)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:182)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:912)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:760)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:337)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:1164)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:755)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:646)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:717)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:960)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:933)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1670)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1737)
> {code}
> Steps to reproduce:
> ----------------------
> directory level snapshot limit set - 100
> Created 100 snapshots
deleted all 100 snapshots (in-order)
> No snapshot exist
> Then, directory level snapshot limit set - 20
> HDFS restart
> Namenode start failed.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to