ayushtkn commented on code in PR #8064:
URL: https://github.com/apache/hadoop/pull/8064#discussion_r2532117035
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java:
##########
@@ -580,6 +581,18 @@ String setProtectedDirectories(String protectedDirsString) {
return Joiner.on(",").skipNulls().join(protectedDirectories);
}
+ public void setMaxDirItems(int newVal) {
+ com.google.common.base.Preconditions.checkArgument(
Review Comment:
I don't think we need the prefix "com.google.common.base.Preconditions."
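With a plain import this could just be (a sketch; the actual check, message, and field assignment in the PR may differ):

```java
import com.google.common.base.Preconditions;

public void setMaxDirItems(int newVal) {
  // Hypothetical validation for illustration; keep whatever check the PR already has.
  Preconditions.checkArgument(newVal > 0,
      "Cannot set dfs.namenode.fs-limits.max-directory-items to %s, it must be positive",
      newVal);
  this.maxDirItems = newVal;
}
```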
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:
##########
@@ -2805,6 +2810,23 @@ private String reconfigureFSNamesystemLockMetricsParameters(final String propert
}
}
+ private String reconfMaxDirItems(String newVal) throws ReconfigurationException {
Review Comment:
Can you rename this to `reconfigureMaxDirItems`, for consistency with the other reconfigure* methods here?
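i.e. something along these lines (only the name is the point; the body is an illustrative sketch, not the PR's actual code):

```java
private String reconfigureMaxDirItems(String newVal)
    throws ReconfigurationException {
  try {
    final int value = Integer.parseInt(newVal);
    // Delegate to FSDirectory, which is assumed to validate the new value.
    namesystem.getFSDirectory().setMaxDirItems(value);
    return Integer.toString(value);
  } catch (IllegalArgumentException e) {
    // NumberFormatException is an IllegalArgumentException, so unparsable values land here too.
    throw new ReconfigurationException(
        DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, newVal, null, e);
  }
}
```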
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java:
##########
@@ -865,6 +866,23 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception {
assertEquals(600000, datanodeManager.getSlowPeerCollectionInterval());
}
+ @Test
+ public void testReconfigureMaxDirItems()
+ throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final FSDirectory fsd = nameNode.namesystem.getFSDirectory();
+
+ // By default, DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is 1024*1024.
+ assertEquals(1024*1024, fsd.getMaxDirItems());
+
+ // Reconfigure.
+ nameNode.reconfigureProperty(
+ DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, Integer.toString(1024*1024*2));
+
+ // Assert DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is now 1024*1024*2.
+ assertEquals(1024*1024*2, fsd.getMaxDirItems());
Review Comment:
nit: this doesn't look properly formatted, otherwise there would have been some spacing here.
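For reference, I'd expect roughly this layout (blank line before the new test, spaces around the `*` operators; this is only a formatting sketch of the same code):

```java
  }

  @Test
  public void testReconfigureMaxDirItems() throws ReconfigurationException {
    final NameNode nameNode = cluster.getNameNode();
    final FSDirectory fsd = nameNode.namesystem.getFSDirectory();

    // By default, DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is 1024 * 1024.
    assertEquals(1024 * 1024, fsd.getMaxDirItems());

    // Reconfigure to twice the default.
    nameNode.reconfigureProperty(
        DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, Integer.toString(1024 * 1024 * 2));

    // Assert the new value took effect.
    assertEquals(1024 * 1024 * 2, fsd.getMaxDirItems());
  }
```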
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java:
##########
@@ -217,6 +217,8 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
// authorizeWithContext() API or not.
private boolean useAuthorizationWithContextAPI = false;
+ private static final int maxDirItemsLimit = 64 * 100 * 1000;
Review Comment:
I don't quite follow this change: why did we drop ``MAX_DIR_ITEMS`` in favour of this?
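If the goal is just a named upper bound for the new setter to validate against, keeping the existing constant (and the usual constant naming) seems simpler, e.g. a sketch, with modifiers and placement as in the current code:

```java
  // Hard upper bound for dfs.namenode.fs-limits.max-directory-items.
  static final int MAX_DIR_ITEMS = 64 * 100 * 1000;
```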
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java:
##########
@@ -398,11 +400,10 @@ public enum DirOp {
// We need a maximum maximum because by default, PB limits message sizes
// to 64MB. This means we can only store approximately 6.7 million entries
// per directory, but let's use 6.4 million for some safety.
Review Comment:
the comment above becomes orphaned now that the constant it described has moved
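If the constant does move, the comment should probably move with it, e.g. (a sketch using the PR's current field name and value):

```java
  // We need a maximum maximum because by default, PB limits message sizes
  // to 64MB. This means we can only store approximately 6.7 million entries
  // per directory, but let's use 6.4 million for some safety.
  private static final int maxDirItemsLimit = 64 * 100 * 1000;
```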
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java:
##########
@@ -865,6 +866,23 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception {
assertEquals(600000, datanodeManager.getSlowPeerCollectionInterval());
}
+ @Test
+ public void testReconfigureMaxDirItems()
+ throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final FSDirectory fsd = nameNode.namesystem.getFSDirectory();
+
+ // By default, DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is 1024*1024.
+ assertEquals(1024*1024, fsd.getMaxDirItems());
+
+ // Reconfigure.
+ nameNode.reconfigureProperty(
+ DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, Integer.toString(1024*1024*2));
+
+ // Assert DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is now 1024*1024*2.
+ assertEquals(1024*1024*2, fsd.getMaxDirItems());
+ }
Review Comment:
Can you add the negative cases as well?
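e.g. something like this (a sketch; the rejected values and the behaviour on failure depend on the validation the PR adds, and it assumes the JUnit imports already used in this test class):

```java
  @Test
  public void testReconfigureMaxDirItemsInvalidValues() throws Exception {
    final NameNode nameNode = cluster.getNameNode();
    final FSDirectory fsd = nameNode.namesystem.getFSDirectory();
    final int oldValue = fsd.getMaxDirItems();

    // Values the new validation is expected to reject: negative, non-numeric,
    // and above the hard upper bound.
    for (String badValue : new String[] {"-1", "notAnInt",
        Integer.toString(64 * 100 * 1000 + 1)}) {
      try {
        nameNode.reconfigureProperty(
            DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, badValue);
        fail("Reconfiguring to " + badValue + " should have been rejected");
      } catch (ReconfigurationException expected) {
        // expected
      }
      // A failed reconfiguration should leave the old value in place.
      assertEquals(oldValue, fsd.getMaxDirItems());
    }
  }
```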
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]