This is an automated email from the ASF dual-hosted git repository.

myskov pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 1c0b445a5d HDDS-12426. SCM replication should check double of container size. (#8080)
1c0b445a5d is described below

commit 1c0b445a5d4078e3bc8b5e6f1f912e78f8384686
Author: Ashish Kumar <[email protected]>
AuthorDate: Thu Mar 20 13:51:32 2025 +0530

    HDDS-12426. SCM replication should check double of container size. (#8080)
    
    Co-authored-by: ashishk <[email protected]>
---
 .../ozone/container/replication/ContainerImporter.java      |  3 ++-
 .../java/org/apache/hadoop/hdds/utils/HddsServerUtil.java   |  5 +++++
 .../scm/container/replication/ReplicationManagerUtil.java   |  3 ++-
 .../hdds/scm/container/replication/ReplicationTestUtil.java | 13 +++++++++++++
 4 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index 46bbb66620..90fc50f84a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -149,7 +150,7 @@ HddsVolume chooseNextVolume() throws IOException {
     // Choose volume that can hold both container in tmp and dest directory
     return volumeChoosingPolicy.chooseVolume(
         StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
-        containerSize * 2);
+        HddsServerUtil.requiredReplicationSpace(containerSize));
   }
 
   public static Path getUntarDirectory(HddsVolume hddsVolume)
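For context, chooseNextVolume() above now delegates the doubling to the shared helper instead of hard-coding containerSize * 2. A minimal sketch of the arithmetic it relies on, using a hypothetical hasRoomForImport() helper that is not part of this commit:

    // Sketch only. During import the container tarball is unpacked under the
    // volume's tmp directory and then moved to its destination, so both
    // copies can exist at once and the volume needs room for two containers.
    static boolean hasRoomForImport(long availableBytes, long containerSize) {
      return availableBytes >= HddsServerUtil.requiredReplicationSpace(containerSize);
    }
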
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 5d04f2060f..f554990fc9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -403,6 +403,11 @@ public static Collection<String> getOzoneDatanodeRatisDirectory(
     return rawLocations;
   }
 
+  public static long requiredReplicationSpace(long defaultContainerSize) {
+    // During container import, double the container size is required to hold the container in both the tmp and dest directories
+    return 2 * defaultContainerSize;
+  }
+
   public static Collection<String> getDatanodeStorageDirs(ConfigurationSource conf) {
     Collection<String> rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
     if (rawLocations.isEmpty()) {
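For illustration, applying the new helper to a 5 GB container size (5 GB is assumed here as the configured container size; substitute the cluster's actual value):

    long containerSize = 5L * 1024 * 1024 * 1024;  // assumed 5 GB
    long required = HddsServerUtil.requiredReplicationSpace(containerSize);
    // required == 10 GB: one container's worth of space for the tmp copy
    // plus one for the destination copy
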
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
index f0be5b231d..503126198a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -78,7 +79,7 @@ public static List<DatanodeDetails> getTargetDatanodes(PlacementPolicy policy,
     // Ensure that target datanodes have enough space to hold a complete
     // container.
     final long dataSizeRequired =
-        Math.max(container.getUsedBytes(), defaultContainerSize);
+        HddsServerUtil.requiredReplicationSpace(Math.max(container.getUsedBytes(), defaultContainerSize));
 
     int mutableRequiredNodes = requiredNodes;
     while (mutableRequiredNodes > 0) {
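The effect of the change above, sketched with illustrative numbers (a 5 GB default and a 3 GB used size are assumptions, not values from the commit):

    long usedBytes = 3L * 1024 * 1024 * 1024;             // assumed 3 GB
    long defaultContainerSize = 5L * 1024 * 1024 * 1024;  // assumed 5 GB
    long base = Math.max(usedBytes, defaultContainerSize);  // 5 GB
    // before this commit: dataSizeRequired == base      (5 GB)
    // after this commit:  dataSizeRequired == 2 * base  (10 GB)
    long dataSizeRequired = HddsServerUtil.requiredReplicationSpace(base);
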
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java
index 444affca7d..d8949df818 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyInt;
@@ -36,6 +37,7 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -51,6 +54,7 @@
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.net.Node;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
@@ -328,6 +332,9 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
               List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
               long metadataSizeRequired, long dataSizeRequired)
               throws SCMException {
+        long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+            ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+        assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired);
         if (nodesRequiredToChoose > 1) {
           throw new IllegalArgumentException("Only one node is allowed");
         }
@@ -356,6 +363,9 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
               List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
               long metadataSizeRequired, long dataSizeRequired)
               throws SCMException {
+        long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+            ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+        assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired);
         throw new SCMException("No nodes available",
                 FAILED_TO_FIND_SUITABLE_NODE);
       }
@@ -383,6 +393,9 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
           List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
           long metadataSizeRequired, long dataSizeRequired)
           throws SCMException {
+        long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+            ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+        assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired);
         if (nodesRequiredToChoose >= throwWhenThisOrMoreNodesRequested) {
           throw new SCMException("No nodes available",
               FAILED_TO_FIND_SUITABLE_NODE);
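
A self-contained JUnit 5 sketch of the property the mock policies above now assert; the class and test names are hypothetical:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.apache.hadoop.hdds.utils.HddsServerUtil;
    import org.junit.jupiter.api.Test;

    class RequiredReplicationSpaceSketch {
      @Test
      void reservesDoubleTheContainerSize() {
        long containerSize = 5L * 1024 * 1024 * 1024;  // illustrative 5 GB
        // Room is needed for both the tmp copy and the destination copy.
        assertEquals(2 * containerSize,
            HddsServerUtil.requiredReplicationSpace(containerSize));
      }
    }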

