goiri commented on a change in pull request #2296:
URL: https://github.com/apache/hadoop/pull/2296#discussion_r486579348
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
##########
@@ -508,6 +508,10 @@ FSNamesystem getFSNamesystem() {
return namesystem;
}
+ public boolean isImageLoaded() {
Review comment:
Add javadoc for this new public getter.
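Something along these lines (adjust the wording to the actual semantics):
```java
  /**
   * @return true if the FS image has been loaded, false otherwise.
   */
```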
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -368,6 +368,13 @@ void assertFirstSnapshot(INodeDirectory dir,
}
}
+ boolean captureOpenFiles() {
Review comment:
Add javadoc here as well.
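For example, something like:
```java
  /**
   * @return true if open files should be captured in snapshots.
   */
```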
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -368,6 +368,13 @@ void assertFirstSnapshot(INodeDirectory dir,
}
}
+ boolean captureOpenFiles() {
+ return captureOpenFiles;
+ }
+
+ int getMaxSnapshotLimit() {
Review comment:
Annotate this with @VisibleForTesting since it is only exposed for the test.
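Roughly (assuming the VisibleForTesting annotation is already imported in this class):
```java
  @VisibleForTesting
  int getMaxSnapshotLimit() {
    return maxSnapshotLimit; // assuming that is the backing field's name
  }
```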
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,68 @@ public void testValidateSnapshotIDWidth() throws Exception
{
getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
}
+ @Test
+ public void SnapshotLimitOnRestart() throws Exception {
+ final Configuration conf = new Configuration();
+ final Path snapshottableDir
+ = new Path("/" + getClass().getSimpleName());
+ int numSnapshots = 5;
+ conf.setInt(DFSConfigKeys.
+ DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+ numSnapshots * 2);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+ numDataNodes(0).build();
+ cluster.waitActive();
+ DistributedFileSystem hdfs = cluster.getFileSystem();
+ hdfs.mkdirs(snapshottableDir);
+ hdfs.allowSnapshot(snapshottableDir);
+ int i = 0;
+ for (; i < numSnapshots; i++) {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
+ }
+ try {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
+ Assert.fail("Expected SnapshotException not thrown");
+ } catch (SnapshotException se) {
+ Assert.assertTrue(
+ StringUtils.toLowerCase(se.getMessage()).contains(
+ "max snapshot limit"));
+ }
+
+ // now change max snapshot directory limit to 2 and restart namenode
+ cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+ DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+ cluster.restartNameNodes();
+
+ // make sure edits of all previous 5 create snapshots are replayed
+ Assert.assertEquals(numSnapshots, cluster.getNamesystem().
+ getSnapshotManager().getNumSnapshots());
+
+ // make sure namenode has the new snapshot limit configured as 2
+ Assert.assertEquals(2,
+ cluster.getNamesystem().getSnapshotManager().getMaxSnapshotLimit());
+
+ // Any new snapshot creation should still fail
+ try {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
+ Assert.fail("Expected SnapshotException not thrown");
+ } catch (SnapshotException se) {
+ Assert.assertTrue(
Review comment:
Use LambdaTestUtils#intercept for this check too, instead of try/catch plus Assert.fail.
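A sketch of what I mean (assumes the exception message contains "max snapshot limit" verbatim, which the case-insensitive check above suggests):
```java
    // i == numSnapshots here, and numSnapshots is effectively final,
    // so it can be captured directly by the lambda
    LambdaTestUtils.intercept(SnapshotException.class, "max snapshot limit",
        () -> hdfs.createSnapshot(snapshottableDir, "s" + numSnapshots));
```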
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,68 @@ public void testValidateSnapshotIDWidth() throws Exception
{
getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
}
+ @Test
+ public void SnapshotLimitOnRestart() throws Exception {
+ final Configuration conf = new Configuration();
+ final Path snapshottableDir
+ = new Path("/" + getClass().getSimpleName());
+ int numSnapshots = 5;
+ conf.setInt(DFSConfigKeys.
+ DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+ numSnapshots * 2);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+ numDataNodes(0).build();
+ cluster.waitActive();
+ DistributedFileSystem hdfs = cluster.getFileSystem();
+ hdfs.mkdirs(snapshottableDir);
+ hdfs.allowSnapshot(snapshottableDir);
+ int i = 0;
+ for (; i < numSnapshots; i++) {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
+ }
+ try {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
+ Assert.fail("Expected SnapshotException not thrown");
+ } catch (SnapshotException se) {
+ Assert.assertTrue(
+ StringUtils.toLowerCase(se.getMessage()).contains(
+ "max snapshot limit"));
+ }
+
+ // now change max snapshot directory limit to 2 and restart namenode
+ cluster.getNameNode().getConf().setInt(DFSConfigKeys.
+ DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 2);
+ cluster.restartNameNodes();
+
+ // make sure edits of all previous 5 create snapshots are replayed
+ Assert.assertEquals(numSnapshots, cluster.getNamesystem().
Review comment:
Extract cluster.getNamesystem().getSnapshotManager() into a local variable; it is used by both assertions below.
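Roughly:
```java
    SnapshotManager snapshotManager =
        cluster.getNamesystem().getSnapshotManager();
    // edits for all 5 createSnapshot calls should have been replayed
    Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
    // the NameNode should now be running with the new limit of 2
    Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
```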
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
##########
@@ -133,4 +137,68 @@ public void testValidateSnapshotIDWidth() throws Exception
{
getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
}
+ @Test
+ public void SnapshotLimitOnRestart() throws Exception {
+ final Configuration conf = new Configuration();
+ final Path snapshottableDir
+ = new Path("/" + getClass().getSimpleName());
+ int numSnapshots = 5;
+ conf.setInt(DFSConfigKeys.
+ DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, numSnapshots);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+ numSnapshots * 2);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+ numDataNodes(0).build();
+ cluster.waitActive();
+ DistributedFileSystem hdfs = cluster.getFileSystem();
+ hdfs.mkdirs(snapshottableDir);
+ hdfs.allowSnapshot(snapshottableDir);
+ int i = 0;
+ for (; i < numSnapshots; i++) {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
+ }
+ try {
+ hdfs.createSnapshot(snapshottableDir, "s" + i);
Review comment:
Use LambdaTestUtils#intercept here rather than try/catch plus Assert.fail.
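Same pattern as above; something like (needs an import of org.apache.hadoop.test.LambdaTestUtils, and since i is reassigned in the loop, capture the effectively final numSnapshots instead):
```java
    LambdaTestUtils.intercept(SnapshotException.class, "max snapshot limit",
        () -> hdfs.createSnapshot(snapshottableDir, "s" + numSnapshots));
```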
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]