This is an automated email from the ASF dual-hosted git repository.
siddhant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7a237fe7fbb HDDS-12468. Check for space availability for all dns while
container creation in pipeline (#8663)
7a237fe7fbb is described below
commit 7a237fe7fbba084d01325627e7910a1e07088f21
Author: Siddhant Sangwan <[email protected]>
AuthorDate: Thu Jun 26 19:27:08 2025 +0530
HDDS-12468. Check for space availability for all dns while container
creation in pipeline (#8663)
---
.../hdds/scm/container/ContainerManager.java | 5 +-
.../hdds/scm/container/ContainerManagerImpl.java | 22 ++++++-
.../apache/hadoop/hdds/scm/node/NodeManager.java | 3 +
.../hadoop/hdds/scm/node/SCMNodeManager.java | 10 ++++
.../hadoop/hdds/scm/pipeline/PipelineManager.java | 9 +++
.../hdds/scm/pipeline/PipelineManagerImpl.java | 16 +++++
.../scm/pipeline/WritableECContainerProvider.java | 6 ++
.../org/apache/hadoop/hdds/scm/HddsTestUtils.java | 14 +++++
.../hadoop/hdds/scm/container/MockNodeManager.java | 16 +++++
.../hdds/scm/container/SimpleMockNodeManager.java | 7 +++
.../scm/container/TestContainerManagerImpl.java | 63 +++++++++++++++++++-
.../hdds/scm/pipeline/MockPipelineManager.java | 5 ++
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 69 ++++++++++++++++++++++
13 files changed, 239 insertions(+), 6 deletions(-)
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 179ff82a812..fb349720d23 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.scm.container;
+import jakarta.annotation.Nullable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
@@ -189,8 +190,10 @@ default ContainerInfo getMatchingContainer(long size,
String owner,
* @param owner - the user which requires space in its owned container
* @param pipeline - pipeline to which the container should belong.
* @param excludedContainerIDS - containerIds to be excluded.
- * @return ContainerInfo for the matching container.
+ * @return ContainerInfo for the matching container, or null if a container
could not be found and could not be
+ * allocated
*/
+ @Nullable
ContainerInfo getMatchingContainer(long size, String owner,
Pipeline pipeline,
Set<ContainerID> excludedContainerIDS);
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
index 31652191a05..d255bc9a672 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -31,6 +31,7 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -80,6 +81,8 @@ public class ContainerManagerImpl implements ContainerManager
{
@SuppressWarnings("java:S2245") // no need for secure random
private final Random random = new Random();
+ private final long maxContainerSize;
+
/**
*
*/
@@ -109,6 +112,9 @@ public ContainerManagerImpl(
.getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
+ maxContainerSize = (long)
conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+ ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+
this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create();
}
@@ -346,14 +352,24 @@ public ContainerInfo getMatchingContainer(final long
size, final String owner,
synchronized (pipeline.getId()) {
containerIDs = getContainersForOwner(pipeline, owner);
if (containerIDs.size() < getOpenContainerCountPerPipeline(pipeline)) {
- allocateContainer(pipeline, owner);
- containerIDs = getContainersForOwner(pipeline, owner);
+ if (pipelineManager.hasEnoughSpace(pipeline, maxContainerSize)) {
+ allocateContainer(pipeline, owner);
+ containerIDs = getContainersForOwner(pipeline, owner);
+ } else {
+ LOG.debug("Cannot allocate a new container because pipeline {}
does not have the required space {}.",
+ pipeline, maxContainerSize);
+ }
}
containerIDs.removeAll(excludedContainerIDs);
containerInfo = containerStateManager.getMatchingContainer(
size, owner, pipeline.getId(), containerIDs);
if (containerInfo == null) {
- containerInfo = allocateContainer(pipeline, owner);
+ if (pipelineManager.hasEnoughSpace(pipeline, maxContainerSize)) {
+ containerInfo = allocateContainer(pipeline, owner);
+ } else {
+ LOG.debug("Cannot allocate a new container because pipeline {}
does not have the required space {}.",
+ pipeline, maxContainerSize);
+ }
}
return containerInfo;
}
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index cb8093ff1bc..db4b8daf0ea 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -361,6 +361,9 @@ Map<SCMCommandProto.Type, Integer>
getTotalDatanodeCommandCounts(
/** @return the datanode of the given id if it exists; otherwise, return
null. */
@Nullable DatanodeDetails getNode(@Nullable DatanodeID id);
+ @Nullable
+ DatanodeInfo getDatanodeInfo(DatanodeDetails datanodeDetails);
+
/**
* Given datanode address(Ipaddress or hostname), returns a list of
* DatanodeDetails for the datanodes running at that address.
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index d45046b3191..8c62e26a837 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -26,6 +26,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
+import jakarta.annotation.Nullable;
import java.io.IOException;
import java.math.RoundingMode;
import java.net.InetAddress;
@@ -1712,6 +1713,15 @@ public DatanodeInfo getNode(DatanodeID id) {
}
}
+ @Override
+ @Nullable
+ public DatanodeInfo getDatanodeInfo(DatanodeDetails datanodeDetails) {
+ if (datanodeDetails == null) {
+ return null;
+ }
+ return getNode(datanodeDetails.getID());
+ }
+
/**
* Given datanode address(Ipaddress or hostname), return a list of
* DatanodeDetails for the datanodes registered on that address.
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index de588c712e0..db43a4747c4 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -216,4 +216,13 @@ void reinitialize(Table<PipelineID, Pipeline>
pipelineStore)
* Release write lock.
*/
void releaseWriteLock();
+
+ /**
+ * Checks whether all Datanodes in the specified pipeline have more than
the specified amount of space, containerSize.
+ * @param pipeline pipeline to check
+ * @param containerSize the required amount of space
+ * @return false if all the volumes on any Datanode in the pipeline have
space less than or equal to the specified
+ * containerSize, otherwise true
+ */
+ boolean hasEnoughSpace(Pipeline pipeline, long containerSize);
}
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index 2b7b0823505..45d85a1a3ae 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -53,6 +54,7 @@
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.server.upgrade.FinalizationManager;
import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -633,6 +635,20 @@ private boolean isOpenWithUnregisteredNodes(Pipeline
pipeline) {
return false;
}
+ @Override
+ public boolean hasEnoughSpace(Pipeline pipeline, long containerSize) {
+ for (DatanodeDetails node : pipeline.getNodes()) {
+ if (!(node instanceof DatanodeInfo)) {
+ node = nodeManager.getDatanodeInfo(node);
+ }
+ if (!SCMCommonPlacementPolicy.hasEnoughSpace(node, 0, containerSize,
null)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
/**
* Schedules a fixed interval job to create pipelines.
*/
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
index 89a36f9b92d..68cd1cec512 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
@@ -203,8 +203,14 @@ private ContainerInfo allocateContainer(ReplicationConfig
repConfig,
Pipeline newPipeline = pipelineManager.createPipeline(repConfig,
excludedNodes, Collections.emptyList());
+ // the returned ContainerInfo should not be null (due to not enough space
in the Datanodes specifically) because
+ // this is a new pipeline and pipeline creation checks for sufficient
space in the Datanodes
ContainerInfo container =
containerManager.getMatchingContainer(size, owner, newPipeline);
+ if (container == null) {
+ // defensive null handling
+ throw new IOException("Could not allocate a new container");
+ }
pipelineManager.openPipeline(newPipeline.getId());
LOG.info("Created and opened new pipeline {}", newPipeline);
return container;
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 25c3ba35068..eed37f4fad9 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -26,6 +26,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.UUID;
@@ -253,6 +254,19 @@ public static StorageReportProto
createStorageReport(DatanodeID nodeId, String p
StorageTypeProto.DISK);
}
+ public static List<StorageReportProto> createStorageReports(DatanodeID
nodeID, long capacity, long remaining,
+ long committed) {
+ return Collections.singletonList(
+ StorageReportProto.newBuilder()
+ .setStorageUuid(nodeID.toString())
+ .setStorageLocation("test")
+ .setCapacity(capacity)
+ .setRemaining(remaining)
+ .setCommitted(committed)
+ .setScmUsed(200L - remaining)
+ .build());
+ }
+
public static StorageReportProto createStorageReport(DatanodeID nodeId,
String path,
long capacity, long used, long remaining, StorageTypeProto type) {
return createStorageReport(nodeId, path, capacity, used, remaining,
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 5c2dafb2719..34500919c44 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -21,6 +21,7 @@
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
+import jakarta.annotation.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
@@ -833,6 +834,21 @@ public DatanodeDetails getNode(DatanodeID id) {
return node == null ? null : (DatanodeDetails)node;
}
+ @Nullable
+ @Override
+ public DatanodeInfo getDatanodeInfo(DatanodeDetails datanodeDetails) {
+ DatanodeDetails node = getNode(datanodeDetails.getID());
+ if (node == null) {
+ return null;
+ }
+
+ DatanodeInfo datanodeInfo = new DatanodeInfo(datanodeDetails,
NodeStatus.inServiceHealthy(), null);
+ long capacity = 50L * 1024 * 1024 * 1024;
+
datanodeInfo.updateStorageReports(HddsTestUtils.createStorageReports(datanodeInfo.getID(),
capacity, capacity,
+ 0L));
+ return datanodeInfo;
+ }
+
@Override
public List<DatanodeDetails> getNodesByAddress(String address) {
List<DatanodeDetails> results = new LinkedList<>();
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 592c94f250b..2e6076b7c90 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.scm.container;
+import jakarta.annotation.Nullable;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
@@ -347,6 +348,12 @@ public DatanodeDetails getNode(DatanodeID id) {
return null;
}
+ @Nullable
+ @Override
+ public DatanodeInfo getDatanodeInfo(DatanodeDetails datanodeDetails) {
+ return null;
+ }
+
@Override
public List<DatanodeDetails> getNodesByAddress(String address) {
return null;
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index e0c1a05d2f8..826bc9055f6 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -22,15 +22,21 @@
import static
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
@@ -49,6 +55,7 @@
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.MockPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
@@ -77,6 +84,7 @@ public class TestContainerManagerImpl {
private SequenceIdGenerator sequenceIdGen;
private NodeManager nodeManager;
private ContainerReplicaPendingOps pendingOpsMock;
+ private PipelineManager pipelineManager;
@BeforeAll
static void init() {
@@ -92,8 +100,7 @@ void setUp() throws Exception {
nodeManager = new MockNodeManager(true, 10);
sequenceIdGen = new SequenceIdGenerator(
conf, scmhaManager, SCMDBDefinition.SEQUENCE_ID.getTable(dbStore));
- final PipelineManager pipelineManager =
- new MockPipelineManager(dbStore, scmhaManager, nodeManager);
+ pipelineManager = new MockPipelineManager(dbStore, scmhaManager,
nodeManager);
pipelineManager.createPipeline(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE));
pendingOpsMock = mock(ContainerReplicaPendingOps.class);
@@ -121,6 +128,58 @@ void testAllocateContainer() throws Exception {
container.containerID()));
}
+ /**
+ * getMatchingContainer allocates a new container in some cases. This test
verifies that a container is not
+ * allocated when nodes in that pipeline don't have enough space for a new
container.
+ */
+ @Test
+ public void
testGetMatchingContainerReturnsNullWhenNotEnoughSpaceInDatanodes() throws
IOException {
+ long sizeRequired = 256 * 1024 * 1024; // 256 MB
+ Pipeline pipeline = pipelineManager.getPipelines().iterator().next();
+ // MockPipelineManager#hasEnoughSpace always returns false
+ // the pipeline has no existing containers, so a new container gets
allocated in getMatchingContainer
+ ContainerInfo container = containerManager
+ .getMatchingContainer(sizeRequired, "test", pipeline,
Collections.emptySet());
+ assertNull(container);
+
+ // create an EC pipeline to test for EC containers
+ ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2);
+ pipelineManager.createPipeline(ecReplicationConfig);
+ pipeline =
pipelineManager.getPipelines(ecReplicationConfig).iterator().next();
+ container = containerManager.getMatchingContainer(sizeRequired, "test",
pipeline, Collections.emptySet());
+ assertNull(container);
+ }
+
+ @Test
+ public void
testGetMatchingContainerReturnsContainerWhenEnoughSpaceInDatanodes() throws
IOException {
+ long sizeRequired = 256 * 1024 * 1024; // 256 MB
+
+ // create a spy to mock hasEnoughSpace to always return true
+ PipelineManager spyPipelineManager = spy(pipelineManager);
+ doReturn(true).when(spyPipelineManager)
+ .hasEnoughSpace(any(Pipeline.class), anyLong());
+
+ // create a new ContainerManager using the spy
+ File tempDir = new File(testDir, "tempDir");
+ OzoneConfiguration conf = SCMTestUtils.getConf(tempDir);
+ ContainerManager manager = new ContainerManagerImpl(conf,
+ scmhaManager, sequenceIdGen, spyPipelineManager,
+ SCMDBDefinition.CONTAINERS.getTable(dbStore), pendingOpsMock);
+
+ Pipeline pipeline = spyPipelineManager.getPipelines().iterator().next();
+ // the pipeline has no existing containers, so a new container gets
allocated in getMatchingContainer
+ ContainerInfo container = manager
+ .getMatchingContainer(sizeRequired, "test", pipeline,
Collections.emptySet());
+ assertNotNull(container);
+
+ // create an EC pipeline to test for EC containers
+ ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2);
+ spyPipelineManager.createPipeline(ecReplicationConfig);
+ pipeline =
spyPipelineManager.getPipelines(ecReplicationConfig).iterator().next();
+ container = manager.getMatchingContainer(sizeRequired, "test", pipeline,
Collections.emptySet());
+ assertNotNull(container);
+ }
+
@Test
void testUpdateContainerState() throws Exception {
final ContainerInfo container = containerManager.allocateContainer(
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
index cc4292b82b4..78f1865d2b9 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
@@ -349,4 +349,9 @@ public void releaseWriteLock() {
public boolean isPipelineCreationFrozen() {
return false;
}
+
+ @Override
+ public boolean hasEnoughSpace(Pipeline pipeline, long containerSize) {
+ return false;
+ }
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index c7b9aac971e..f3c6a077cf5 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.scm.pipeline;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
@@ -49,6 +50,7 @@
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
+import com.google.common.collect.ImmutableList;
import java.io.File;
import java.io.IOException;
import java.time.Instant;
@@ -64,9 +66,11 @@
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeID;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -88,6 +92,7 @@
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import
org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.HealthyPipelineChoosePolicy;
@@ -111,6 +116,7 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
/**
* Tests for PipelineManagerImpl.
@@ -933,6 +939,69 @@ public void testCreatePipelineForRead() throws IOException
{
}
}
+ /**
+ * {@link PipelineManager#hasEnoughSpace(Pipeline, long)} should return
false if all the
+ * volumes on any Datanode in the pipeline have less than or equal to the space
required for creating a new container.
+ */
+ @Test
+ public void testHasEnoughSpace() throws IOException {
+ // create a Mock NodeManager, the MockNodeManager class doesn't work for
this test
+ NodeManager mockedNodeManager = Mockito.mock(NodeManager.class);
+ PipelineManagerImpl pipelineManager =
PipelineManagerImpl.newPipelineManager(conf,
+ SCMHAManagerStub.getInstance(true),
+ mockedNodeManager,
+ SCMDBDefinition.PIPELINES.getTable(dbStore),
+ new EventQueue(),
+ scmContext,
+ serviceManager,
+ testClock);
+
+ Pipeline pipeline = Pipeline.newBuilder()
+ .setId(PipelineID.randomId())
+ .setNodes(ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails(),
+ MockDatanodeDetails.randomDatanodeDetails(),
+ MockDatanodeDetails.randomDatanodeDetails()))
+ .setState(OPEN)
+
.setReplicationConfig(ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
THREE))
+ .build();
+ List<DatanodeDetails> nodes = pipeline.getNodes();
+ assertEquals(3, nodes.size());
+
+ long containerSize = 100L;
+
+ // Case 1: All nodes have enough space.
+ List<DatanodeInfo> datanodeInfoList = new ArrayList<>();
+ for (DatanodeDetails dn : nodes) {
+ // the method being tested needs NodeManager to return DatanodeInfo
because DatanodeInfo has storage
+ // information (it extends DatanodeDetails)
+ DatanodeInfo info = new DatanodeInfo(dn, null, null);
+ info.updateStorageReports(HddsTestUtils.createStorageReports(dn.getID(),
200L, 200L, 10L));
+ doReturn(info).when(mockedNodeManager).getDatanodeInfo(dn);
+ datanodeInfoList.add(info);
+ }
+ assertTrue(pipelineManager.hasEnoughSpace(pipeline, containerSize));
+
+ // Case 2: One node does not have enough space.
+ /*
+ Interestingly, SCMCommonPlacementPolicy#hasEnoughSpace returns false if
exactly the required amount of space
+ is available. This means it won't allow creating a pipeline on a node
if all volumes have exactly 5 GB
+ available. We follow the same behavior here in the case of a new replica.
+
+ So here, remaining - committed == containerSize, and hasEnoughSpace
returns false.
+ TODO should this return true instead?
+ */
+ DatanodeInfo datanodeInfo = datanodeInfoList.get(0);
+
datanodeInfo.updateStorageReports(HddsTestUtils.createStorageReports(datanodeInfo.getID(),
200L, 120L,
+ 20L));
+ assertFalse(pipelineManager.hasEnoughSpace(pipeline, containerSize));
+
+ // Case 3: All nodes do not have enough space.
+ for (DatanodeInfo info : datanodeInfoList) {
+
info.updateStorageReports(HddsTestUtils.createStorageReports(info.getID(),
200L, 100L, 20L));
+ }
+ assertFalse(pipelineManager.hasEnoughSpace(pipeline, containerSize));
+ }
+
private Set<ContainerReplica> createContainerReplicasList(
List <DatanodeDetails> dns) {
Set<ContainerReplica> replicas = new HashSet<>();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]