ayushtkn commented on code in PR #5597:
URL: https://github.com/apache/hadoop/pull/5597#discussion_r1203482790
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:
##########
@@ -2630,6 +2632,31 @@ public int getActiveTransferThreadCount() {
@Override // DataNodeMXBean
public Map<String, Map<String, Long>> getDatanodeNetworkCounts() {
+ int maxDisplay =
getConf().getInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT,
+ DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT_DEFAULT);
+ if (maxDisplay >= 0) {
+ ConcurrentMap<String, Map<String, Long>> map =
datanodeNetworkCounts.asMap();
+ Set<Map.Entry<String, Map<String, Long>>> entries = map.entrySet();
+ List<Map.Entry<String, Map<String, Long>>> list = new
ArrayList<>(entries);
+ Collections.sort(list, new Comparator<Entry<String, Map<String,
Long>>>() {
+ @Override
+ public int compare(Map.Entry<String, Map<String, Long>> o1,
+ Map.Entry<String, Map<String, Long>> o2) {
+ Map<String, Long> value1Map = o1.getValue();
+ Map<String, Long> value2Map = o2.getValue();
+ long compared = value2Map.getOrDefault(DataNode.NETWORK_ERRORS, 0L) -
+ value1Map.getOrDefault(DataNode.NETWORK_ERRORS, 0L);
+ return (int)compared;
+ }
+ });
Review Comment:
Can use a lambda; also prefer `Long.compare` over subtracting the two longs and casting to `int` — the cast can overflow and invert the ordering:
```
list.sort((o1, o2) -> Long.compare(
    o2.getValue().getOrDefault(DataNode.NETWORK_ERRORS, 0L),
    o1.getValue().getOrDefault(DataNode.NETWORK_ERRORS, 0L)));
```
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:
##########
@@ -2630,6 +2632,31 @@ public int getActiveTransferThreadCount() {
@Override // DataNodeMXBean
public Map<String, Map<String, Long>> getDatanodeNetworkCounts() {
+ int maxDisplay =
getConf().getInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT,
+ DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT_DEFAULT);
+ if (maxDisplay >= 0) {
+ ConcurrentMap<String, Map<String, Long>> map =
datanodeNetworkCounts.asMap();
+ Set<Map.Entry<String, Map<String, Long>>> entries = map.entrySet();
+ List<Map.Entry<String, Map<String, Long>>> list = new
ArrayList<>(entries);
+ Collections.sort(list, new Comparator<Entry<String, Map<String,
Long>>>() {
Review Comment:
list.sort would have worked
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:
##########
@@ -2630,6 +2632,31 @@ public int getActiveTransferThreadCount() {
@Override // DataNodeMXBean
public Map<String, Map<String, Long>> getDatanodeNetworkCounts() {
+ int maxDisplay =
getConf().getInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT,
+ DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT_DEFAULT);
+ if (maxDisplay >= 0) {
+ ConcurrentMap<String, Map<String, Long>> map =
datanodeNetworkCounts.asMap();
+ Set<Map.Entry<String, Map<String, Long>>> entries = map.entrySet();
+ List<Map.Entry<String, Map<String, Long>>> list = new
ArrayList<>(entries);
+ Collections.sort(list, new Comparator<Entry<String, Map<String,
Long>>>() {
+ @Override
+ public int compare(Map.Entry<String, Map<String, Long>> o1,
+ Map.Entry<String, Map<String, Long>> o2) {
+ Map<String, Long> value1Map = o1.getValue();
+ Map<String, Long> value2Map = o2.getValue();
+ long compared = value2Map.getOrDefault(DataNode.NETWORK_ERRORS, 0L) -
+ value1Map.getOrDefault(DataNode.NETWORK_ERRORS, 0L);
+ return (int)compared;
+ }
+ });
+ Map<String, Map<String, Long>> resultMap = new ConcurrentHashMap<>();
+ maxDisplay = list.size() > maxDisplay ? maxDisplay : list.size();
Review Comment:
```
maxDisplay = Math.min(list.size(), maxDisplay);
```
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:
##########
@@ -2630,6 +2632,31 @@ public int getActiveTransferThreadCount() {
@Override // DataNodeMXBean
public Map<String, Map<String, Long>> getDatanodeNetworkCounts() {
+ int maxDisplay =
getConf().getInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT,
+ DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT_DEFAULT);
+ if (maxDisplay >= 0) {
+ ConcurrentMap<String, Map<String, Long>> map =
datanodeNetworkCounts.asMap();
+ Set<Map.Entry<String, Map<String, Long>>> entries = map.entrySet();
+ List<Map.Entry<String, Map<String, Long>>> list = new
ArrayList<>(entries);
+ Collections.sort(list, new Comparator<Entry<String, Map<String,
Long>>>() {
+ @Override
+ public int compare(Map.Entry<String, Map<String, Long>> o1,
+ Map.Entry<String, Map<String, Long>> o2) {
+ Map<String, Long> value1Map = o1.getValue();
+ Map<String, Long> value2Map = o2.getValue();
+ long compared = value2Map.getOrDefault(DataNode.NETWORK_ERRORS, 0L) -
+ value1Map.getOrDefault(DataNode.NETWORK_ERRORS, 0L);
+ return (int)compared;
+ }
+ });
+ Map<String, Map<String, Long>> resultMap = new ConcurrentHashMap<>();
+ maxDisplay = list.size() > maxDisplay ? maxDisplay : list.size();
+ for (int i = 0; i < maxDisplay; i++) {
+ resultMap.put(list.get(i).getKey(), list.get(i).getValue());
+ }
+ list.clear();
Review Comment:
is this required?
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml:
##########
@@ -6419,6 +6419,16 @@
problem. In production the default is set to false, because it has little
performance impact.
</description>
</property>
+
+ <property>
+ <name>dfs.datanode.networkerrors.display.topcount</name>
+ <value>-1</value>
+ <description>
The maximum number of DatanodeNetworkErrors metric entries displayed
per datanode. The default of -1 means no limit.
Review Comment:
A negative number represents having no limit.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]