This is an automated email from the ASF dual-hosted git repository.
tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
new 8559ba4 HDFS-16327. Make DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY reconfigurable (#3716)
8559ba4 is described below
commit 8559ba46c2d47cb0df1f51ecfbc72ecb6f1e75fe
Author: litao <[email protected]>
AuthorDate: Tue Dec 14 10:19:06 2021 +0800
HDFS-16327. Make DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY reconfigurable (#3716)
(cherry picked from commit c56a07f36b561c9aacb5e74bd20195a817d5fd8d)
---
.../server/blockmanagement/DatanodeManager.java | 11 +++++++++-
.../hadoop/hdfs/server/namenode/NameNode.java | 24 ++++++++++++++++------
.../server/namenode/TestNameNodeReconfigure.java | 19 +++++++++++++++++
.../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 4 +++-
4 files changed, 50 insertions(+), 8 deletions(-)
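For context, this change lets the slow-peer collection limit be updated on a running NameNode through the standard reconfiguration workflow instead of requiring a restart. A minimal operator sketch, not part of this commit: the RPC address nn1.example.com:8020 is a placeholder, and the property name dfs.namenode.max.slowpeer.collect.nodes is assumed to be the value behind DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY.

    # 1. Edit hdfs-site.xml on the NameNode host, e.g. raise the limit
    #    from its default of 5 to 10:
    #    dfs.namenode.max.slowpeer.collect.nodes = 10
    # 2. Ask the NameNode to pick up the change without a restart:
    hdfs dfsadmin -reconfig namenode nn1.example.com:8020 start
    # 3. Check whether the reconfiguration task has finished:
    hdfs dfsadmin -reconfig namenode nn1.example.com:8020 status
    # 4. List the properties the NameNode accepts for reconfiguration
    #    (the list TestDFSAdmin asserts on below):
    hdfs dfsadmin -reconfig namenode nn1.example.com:8020 properties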
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 66bdf7a..44dffcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -214,7 +214,7 @@ public class DatanodeManager {
private static Set<String> slowNodesUuidSet = Sets.newConcurrentHashSet();
private Daemon slowPeerCollectorDaemon;
private final long slowPeerCollectionInterval;
- private final int maxSlowPeerReportNodes;
+ private volatile int maxSlowPeerReportNodes;
@Nullable
private final SlowDiskTracker slowDiskTracker;
@@ -515,6 +515,15 @@ public class DatanodeManager {
return this.avoidSlowDataNodesForRead;
}
+ public void setMaxSlowpeerCollectNodes(int maxNodes) {
+ this.maxSlowPeerReportNodes = maxNodes;
+ }
+
+ @VisibleForTesting
+ public int getMaxSlowpeerCollectNodes() {
+ return this.maxSlowPeerReportNodes;
+ }
+
/**
* Sort the non-striped located blocks by the distance to the target host.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 9bb6cd8..79074a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -190,6 +190,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATAN
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -333,7 +335,8 @@ public class NameNode extends ReconfigurableBase implements
DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY,
DFS_IMAGE_PARALLEL_LOAD_KEY,
DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY,
- DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY));
+ DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY,
+ DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2200,7 +2203,8 @@ public class NameNode extends ReconfigurableBase implements
} else if (property.equals(DFS_IMAGE_PARALLEL_LOAD_KEY)) {
return reconfigureParallelLoad(newVal);
} else if (property.equals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY)
- || (property.equals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY))) {
+ || (property.equals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY))
+ || (property.equals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY))) {
return reconfigureSlowNodesParameters(datanodeManager, property, newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
@@ -2392,24 +2396,32 @@ public class NameNode extends ReconfigurableBase implements
final String property, final String newVal) throws ReconfigurationException {
BlockManager bm = namesystem.getBlockManager();
namesystem.writeLock();
- boolean enable;
+ String result;
try {
if (property.equals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY)) {
- enable = (newVal == null ? DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_DEFAULT :
+ boolean enable = (newVal == null ? DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_DEFAULT :
Boolean.parseBoolean(newVal));
+ result = Boolean.toString(enable);
datanodeManager.setAvoidSlowDataNodesForReadEnabled(enable);
} else if (property.equals(
DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY)) {
- enable = (newVal == null ?
+ boolean enable = (newVal == null ?
DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT :
Boolean.parseBoolean(newVal));
+ result = Boolean.toString(enable);
bm.setExcludeSlowNodesEnabled(enable);
+ } else if (property.equals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY)) {
+ int maxSlowpeerCollectNodes = (newVal == null ?
+ DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT :
+ Integer.parseInt(newVal));
+ result = Integer.toString(maxSlowpeerCollectNodes);
+ datanodeManager.setMaxSlowpeerCollectNodes(maxSlowpeerCollectNodes);
} else {
throw new IllegalArgumentException("Unexpected property " +
property + " in reconfigureSlowNodesParameters");
}
LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
- return Boolean.toString(enable);
+ return result;
} catch (IllegalArgumentException e) {
throw new ReconfigurationException(property, newVal, getConf().get(
property), e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index da9b447..fe55553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -55,6 +55,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
public class TestNameNodeReconfigure {
@@ -430,6 +431,24 @@ public class TestNameNodeReconfigure {
getExcludeSlowNodesEnabled(BlockType.STRIPED));
}
+ @Test
+ public void testReconfigureMaxSlowpeerCollectNodes()
+ throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final DatanodeManager datanodeManager = nameNode.namesystem
+ .getBlockManager().getDatanodeManager();
+
+ // By default, DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY is 5.
+ assertEquals(5, datanodeManager.getMaxSlowpeerCollectNodes());
+
+ // Reconfigure.
+ nameNode.reconfigureProperty(
+ DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, Integer.toString(10));
+
+ // Assert DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY is 10.
+ assertEquals(10, datanodeManager.getMaxSlowpeerCollectNodes());
+ }
+
@After
public void shutDown() throws IOException {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b719dd6..25a46ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -87,6 +87,7 @@ import java.util.List;
import java.util.Scanner;
import java.util.concurrent.TimeoutException;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.is;
@@ -424,7 +425,7 @@ public class TestDFSAdmin {
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
- assertEquals(15, outs.size());
+ assertEquals(16, outs.size());
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(1));
assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(2));
assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(3));
@@ -432,6 +433,7 @@ public class TestDFSAdmin {
assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(5));
assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY,
outs.get(6));
assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(7));
+ assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(8));
assertEquals(errs.size(), 0);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]