goiri commented on a change in pull request #2296:
URL: https://github.com/apache/hadoop/pull/2296#discussion_r488109432
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +138,57 @@ public void testValidateSnapshotIDWidth() throws Exception
{
getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
}
+  /**
+   * Verify that snapshot limits are enforced across NameNode restarts:
+   * snapshots created under an older, higher limit must still be replayed
+   * from the edit log, while new snapshot creation honors the lowered limit.
+   */
+  @Test
+  public void testSnapshotLimitOnRestart() throws Exception {
+    final Configuration conf = new Configuration();
+    final Path snapshottableDir
+        = new Path("/" + getClass().getSimpleName());
+    int numSnapshots = 5;
+    conf.setInt(DFSConfigKeys.
+        DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+        numSnapshots * 2);
+    // Declare outside the try so the finally block can always shut the
+    // cluster down, even when an assertion or snapshot call fails midway.
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).
+          numDataNodes(0).build();
+      cluster.waitActive();
+      final DistributedFileSystem hdfs = cluster.getFileSystem();
+      hdfs.mkdirs(snapshottableDir);
+      hdfs.allowSnapshot(snapshottableDir);
+      for (int i = 0; i < numSnapshots; i++) {
+        hdfs.createSnapshot(snapshottableDir, "s" + i);
+      }
+      // One snapshot past the per-directory limit must be rejected.
+      LambdaTestUtils.intercept(SnapshotException.class,
+          "snapshot limit",
+          () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+      // now change max snapshot directory limit to 2 and restart namenode
+      cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+          DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+      cluster.restartNameNodes();
+      SnapshotManager snapshotManager = cluster.getNamesystem().
+          getSnapshotManager();
+
+      // make sure edits of all previous 5 create snapshots are replayed
+      Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+      // make sure namenode has the new snapshot limit configured as 2
+      Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+
+      // Any new snapshot creation should still fail
+      LambdaTestUtils.intercept(SnapshotException.class,
+          "snapshot limit",
+          () -> hdfs.createSnapshot(snapshottableDir, "s5"));
+
+      // now change max snapshot FS limit to 2 and restart namenode
+      cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+          DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 2);
+      cluster.restartNameNodes();
+      snapshotManager = cluster.getNamesystem().getSnapshotManager();
+      // make sure edits of all previous 5 create snapshots are replayed
+      Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+
+      // make sure namenode has the new snapshot limit configured as 2
+      Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+    } finally {
+      // Null check guards against a failed MiniDFSCluster build().
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
Review comment:
We may need to do the shutdown in a finally block to make sure we always
clean the cluster up even when the test fails partway. Also check the
cluster for null before calling shutdown(), in case build() itself threw.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]