[ https://issues.apache.org/jira/browse/HDFS-16974?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17710052#comment-17710052 ]

ASF GitHub Bot commented on HDFS-16974:
---------------------------------------

Hexiaoqiao commented on code in PR #5541:
URL: https://github.com/apache/hadoop/pull/5541#discussion_r1161395510


##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java:
##########
@@ -271,6 +271,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.redundancy.considerLoad.factor";
   public static final double
       DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
+

Review Comment:
   Please remove the redundant space.



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java:
##########
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestReplicationPolicyRatioConsiderLoadWithStorage
+    extends BaseReplicationPolicyTest {
+
+  public TestReplicationPolicyRatioConsiderLoadWithStorage() {
+    this.blockPlacementPolicy = BlockPlacementPolicyDefault.class.getName();
+  }
+
+  @Override
+  DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        true);
+    conf.setDouble(DFSConfigKeys
+        .DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 2);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYVOLUME_KEY, true);
+
+    final String[] racks = {
+        "/rack1",
+        "/rack2",
+        "/rack3",
+        "/rack4",
+        "/rack5"};
+    storages = DFSTestUtil.createDatanodeStorageInfos(racks);
+    DatanodeDescriptor[] descriptors =
+        DFSTestUtil.toDatanodeDescriptor(storages);
+    long storageCapacity =
+        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE;
+    // Each datanode has 6 storages, but the number of available storages
+    // varies.
+    for (int i = 0; i < descriptors.length; i++) {
+      for (int j = 0; j < 5; j++) {

Review Comment:
   It seems the number of storages differs from the comment, 5 vs 6?
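   (One way to reconcile the two, assuming DFSTestUtil.createDatanodeStorageInfos registers one storage per node, which is worth confirming:)

      // createDatanodeStorageInfos() already registered 1 storage per node;
      // add 5 more so each datanode ends up with 6 storages in total.
      for (int j = 0; j < 5; j++) {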



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java:
##########
@@ -271,6 +271,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.redundancy.considerLoad.factor";
   public static final double
       DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
+
+  public static final String DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYVOLUME_KEY =
+      "dfs.namenode.redundancy.considerLoadByVolume";
+  public static final boolean
+      DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYVOLUME_DEFAULT
+      = false;
+

Review Comment:
   +1 as mentioned above.



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java:
##########
@@ -0,0 +1,167 @@
(license header and imports identical to the first hunk, omitted)
+public class TestReplicationPolicyRatioConsiderLoadWithStorage
+    extends BaseReplicationPolicyTest {
+
+  public TestReplicationPolicyRatioConsiderLoadWithStorage() {
+    this.blockPlacementPolicy = BlockPlacementPolicyDefault.class.getName();
+  }
+
+  @Override
+  DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        true);
+    conf.setDouble(DFSConfigKeys
+        .DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 2);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYVOLUME_KEY, true);
+
+    final String[] racks = {
+        "/rack1",
+        "/rack2",
+        "/rack3",
+        "/rack4",
+        "/rack5"};
+    storages = DFSTestUtil.createDatanodeStorageInfos(racks);
+    DatanodeDescriptor[] descriptors =
+        DFSTestUtil.toDatanodeDescriptor(storages);
+    long storageCapacity =
+        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE;
+    // Each datanode has 6 storages, but the number of available storages
+    // varies.
+    for (int i = 0; i < descriptors.length; i++) {
+      for (int j = 0; j < 5; j++) {
+        DatanodeStorage s =
+            new DatanodeStorage("s" + i + j);
+        descriptors[i].updateStorage(s);
+
+      }
+      for (int j = 0; j < descriptors[i].getStorageInfos().length; j++) {
+        DatanodeStorageInfo dsInfo = descriptors[i].getStorageInfos()[j];
+        if (j > i + 1) {
+          dsInfo.setUtilizationForTesting(storageCapacity, storageCapacity, 0,
+              storageCapacity);
+        } else {
+          dsInfo.setUtilizationForTesting(storageCapacity, 0, storageCapacity,
+              0);
+        }
+      }
+    }
+    return descriptors;
+  }
+
+

Review Comment:
   Please remove the redundant blank line.



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java:
##########
@@ -0,0 +1,167 @@
(license header and imports identical to the first hunk, omitted)
+public class TestReplicationPolicyRatioConsiderLoadWithStorage
+    extends BaseReplicationPolicyTest {
+
+  public TestReplicationPolicyRatioConsiderLoadWithStorage() {
+    this.blockPlacementPolicy = BlockPlacementPolicyDefault.class.getName();
+  }
+
+  @Override
+  DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        true);
+    conf.setDouble(DFSConfigKeys
+        .DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 2);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYVOLUME_KEY, true);
+
+    final String[] racks = {
+        "/rack1",
+        "/rack2",
+        "/rack3",
+        "/rack4",
+        "/rack5"};
+    storages = DFSTestUtil.createDatanodeStorageInfos(racks);
+    DatanodeDescriptor[] descriptors =
+        DFSTestUtil.toDatanodeDescriptor(storages);
+    long storageCapacity =
+        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE;
+    // Each datanode has 6 storages, but the number of available storages
+    // varies.
+    for (int i = 0; i < descriptors.length; i++) {
+      for (int j = 0; j < 5; j++) {
+        DatanodeStorage s =
+            new DatanodeStorage("s" + i + j);

Review Comment:
   Are the expected storage names 's00', 's01', and so on?
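   (Worth noting: since "s" is a String, "s" + i + j is string concatenation, not numeric addition, so with five datanodes and j in 0..4 the generated IDs would indeed be "s00" through "s44".)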



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java:
##########
@@ -0,0 +1,167 @@
(license header and imports identical to the first hunk, omitted)
+public class TestReplicationPolicyRatioConsiderLoadWithStorage

Review Comment:
   Just a suggestion: add some javadoc to this class noting what scenario it covers.
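   For example, a minimal sketch (the wording is only a suggestion, not part of the patch):

      /**
       * Verifies that the default block placement policy, with
       * dfs.namenode.redundancy.considerLoadByVolume enabled, avoids
       * targets whose load per available volume is too high, using
       * datanodes that expose different numbers of available storages.
       */
      public class TestReplicationPolicyRatioConsiderLoadWithStorage
          extends BaseReplicationPolicyTest {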



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java:
##########
@@ -981,6 +988,10 @@ public VolumeFailureSummary getVolumeFailureSummary() {
     return volumeFailureSummary;
   }
 
+  public int getNumVolumesAvailable() {

Review Comment:
   Suggest adding some javadoc.
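   For example (exact wording should be checked against the real semantics of the field):

      /**
       * @return the number of volumes on this datanode that are still
       *         available for writing new blocks.
       */
      public int getNumVolumesAvailable() {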



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java:
##########
@@ -233,6 +233,8 @@ public Type getType() {
   // HB processing can use it to tell if it is the first HB since DN restarted
   private boolean heartbeatedSinceRegistration = false;
 
+  private int numVolumesAvailable = 0;

Review Comment:
   Suggest adding some javadoc for this new attribute.
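   For example, something like (again, adjust to the real semantics):

      /** The number of volumes on this datanode available for writing. */
      private int numVolumesAvailable = 0;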





> Consider load of every volume when choosing target
> --------------------------------------------------
>
>                 Key: HDFS-16974
>                 URL: https://issues.apache.org/jira/browse/HDFS-16974
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>            Reporter: Shuyan Zhang
>            Assignee: Shuyan Zhang
>            Priority: Major
>              Labels: pull-request-available
>
> The current target-choosing policy only considers the load of the entire 
> datanode. If DN1 and DN2 both have an `xceiverCount` of 100, but DN1 has 10 
> volumes to write to while DN2 has only 1, then the pressure on DN2 is actually 
> much greater than on DN1. This patch adds a configuration that lets us skip 
> nodes with too much pressure on a single volume when choosing targets, so 
> that datanodes with few volumes are not overloaded and writes are not slowed 
> down.
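For reference, the new behavior would presumably be enabled by setting the key added in DFSConfigKeys above (it defaults to false) in hdfs-site.xml:

    <property>
      <name>dfs.namenode.redundancy.considerLoadByVolume</name>
      <value>true</value>
    </property>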


