This is an automated email from the ASF dual-hosted git repository.

ritesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new b8b226c3c8 HDDS-12928. datanode min free space configuration (#8388)
b8b226c3c8 is described below

commit b8b226c3c85edef881c11d03b88b9157199e86fa
Author: Sumit Agrawal <[email protected]>
AuthorDate: Wed May 28 10:07:28 2025 +0530

    HDDS-12928. datanode min free space configuration (#8388)
---
 .../common/statemachine/DatanodeConfiguration.java |  49 +++-------
 .../statemachine/TestDatanodeConfiguration.java    |  25 +++--
 .../common/volume/TestReservedVolumeSpace.java     |   9 +-
 .../replication/TestReplicationSupervisor.java     |   4 +-
 .../content/design/dn-min-space-configuration.md   | 108 +++++++++++++++++++++
 .../src/main/k8s/definitions/ozone/config.yaml     |   1 +
 .../examples/getting-started/config-configmap.yaml |   1 +
 .../k8s/examples/minikube/config-configmap.yaml    |   1 +
 .../k8s/examples/ozone-dev/config-configmap.yaml   |   1 +
 .../k8s/examples/ozone-ha/config-configmap.yaml    |   1 +
 .../main/k8s/examples/ozone/config-configmap.yaml  |   1 +
 .../src/test/resources/ozone-site.xml              |   4 +
 .../src/test/resources/ozone-site.xml              |   5 +-
 .../TestRefreshVolumeUsageHandler.java             |   2 +
 14 files changed, 161 insertions(+), 51 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 371aed1ae0..4e346f5969 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -66,13 +66,13 @@ public class DatanodeConfiguration extends 
ReconfigurableConfig {
   // Ex: If volume has 1000GB and minFreeSpace is configured as 10GB,
   // In this case when availableSpace is 10GB or below, volume is assumed as 
full
   public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE = 
"hdds.datanode.volume.min.free.space";
-  public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT = 
"5GB";
+  public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT = 
"20GB";
   // Minimum percent of space should be left on volume.
   // Ex: If volume has 1000GB and minFreeSpacePercent is configured as 2%,
   // In this case when availableSpace is 20GB(2% of 1000) or below, volume is 
assumed as full
   public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT =
       "hdds.datanode.volume.min.free.space.percent";
-  static final byte MIN_FREE_SPACE_UNSET = -1;
+  public static final float 
HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT = 0.001f;
 
   public static final String WAIT_ON_ALL_FOLLOWERS = 
"hdds.datanode.wait.on.all.followers";
   public static final String CONTAINER_SCHEMA_V3_ENABLED = 
"hdds.datanode.container.schema.v3.enabled";
@@ -280,10 +280,9 @@ public class DatanodeConfiguration extends 
ReconfigurableConfig {
           " When the difference between volume capacity and used reaches this 
number," +
           " containers that reside on this volume will be closed and no new 
containers" +
           " would be allocated on this volume." +
-          " Either of min.free.space or min.free.space.percent should be 
configured, when both are set then" +
-          " min.free.space will be used."
+          " Max of min.free.space and min.free.space.percent will be used as 
final value."
   )
-  private long minFreeSpace = MIN_FREE_SPACE_UNSET;
+  private long minFreeSpace = getDefaultFreeSpace();
 
   @Config(key = "hdds.datanode.volume.min.free.space.percent",
       defaultValue = "-1",
@@ -293,10 +292,9 @@ public class DatanodeConfiguration extends 
ReconfigurableConfig {
           " When the difference between volume capacity and used reaches 
(free.space.percent of volume capacity)," +
           " containers that reside on this volume will be closed and no new 
containers" +
           " would be allocated on this volume." +
-          " Either of min.free.space or min.free.space.percent should be 
configured, when both are set then" +
-          " min.free.space will be used."
+          " Max of min.free.space or min.free.space.percent will be used as 
final value."
   )
-  private float minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
+  private float minFreeSpaceRatio = 
HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT;
 
   @Config(key = "periodic.disk.check.interval.minutes",
       defaultValue = "60",
@@ -683,39 +681,18 @@ public void validate() {
   }
 
   /**
-   * If 'hdds.datanode.volume.min.free.space' is defined,
-   * it will be honored first. If it is not defined and
-   * 'hdds.datanode.volume.min.free.space.percent' is defined, it will honor 
this
-   * else it will fall back to 'hdds.datanode.volume.min.free.space.default'
+   * validate value of 'hdds.datanode.volume.min.free.space' and 
'hdds.datanode.volume.min.free.space.percent'
+   * and update with default value if not within range.
    */
   private void validateMinFreeSpace() {
-    if (minFreeSpaceRatio > 1) {
+    if (minFreeSpaceRatio > 1 || minFreeSpaceRatio < 0) {
       LOG.warn("{} = {} is invalid, should be between 0 and 1",
           HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
           minFreeSpaceRatio);
-
-      minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
-    }
-
-    final boolean minFreeSpaceConfigured = minFreeSpace >= 0;
-    final boolean minFreeSpaceRatioConfigured = minFreeSpaceRatio >= 0;
-
-    if (minFreeSpaceConfigured && minFreeSpaceRatioConfigured) {
-      // Only one property should be configured.
-      // Since both properties are configured, 
HDDS_DATANODE_VOLUME_MIN_FREE_SPACE is used to determine minFreeSpace
-      LOG.warn("Only one of {}={} and {}={} should be set. With both set, {} 
value will be used.",
-          HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
-          minFreeSpace,
-          HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
-          minFreeSpaceRatio,
-          HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
-
-      minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
+      minFreeSpaceRatio = HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT;
     }
 
-    if (!minFreeSpaceConfigured && !minFreeSpaceRatioConfigured) {
-      // If both are not configured use defaultFreeSpace
-      minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
+    if (minFreeSpace < 0) {
       minFreeSpace = getDefaultFreeSpace();
     }
   }
@@ -781,9 +758,7 @@ public void setContainerCloseThreads(int 
containerCloseThreads) {
   }
 
   public long getMinFreeSpace(long capacity) {
-    return minFreeSpaceRatio >= 0
-        ? ((long) (capacity * minFreeSpaceRatio))
-        : minFreeSpace;
+    return Math.max((long) (capacity * minFreeSpaceRatio), minFreeSpace);
   }
 
   public long getMinFreeSpace() {
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
index 69906308f0..857bcc4a6a 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
@@ -29,6 +29,7 @@
 import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_DB_VOLUMES_TOLERATED_KEY;
 import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_METADATA_VOLUMES_TOLERATED_KEY;
 import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_VOLUMES_TOLERATED_DEFAULT;
+import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT;
 import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT;
 import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -153,6 +154,7 @@ public void overridesInvalidValues() {
   public void isCreatedWitDefaultValues() {
     // GIVEN
     OzoneConfiguration conf = new OzoneConfiguration();
+    // unset overriding configuration from ozone-site.xml defined for the 
test module
     conf.unset(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE); // 
set in ozone-site.xml
 
     // WHEN
@@ -176,7 +178,13 @@ public void isCreatedWitDefaultValues() {
     assertEquals(BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT,
         subject.getBlockDeleteCommandWorkerInterval());
     assertEquals(DatanodeConfiguration.getDefaultFreeSpace(), 
subject.getMinFreeSpace());
-    assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, 
subject.getMinFreeSpaceRatio());
+    assertEquals(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT, 
subject.getMinFreeSpaceRatio());
+    final long oneGB = 1024 * 1024 * 1024;
+    // capacity is less, consider default min_free_space
+    assertEquals(DatanodeConfiguration.getDefaultFreeSpace(), 
subject.getMinFreeSpace(oneGB));
+    // capacity is large, consider min_free_space_percent, max(min_free_space, 
min_free_space_percent * capacity)
+    assertEquals(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT * oneGB * 
oneGB,
+        subject.getMinFreeSpace(oneGB * oneGB));
   }
 
   @Test
@@ -186,11 +194,11 @@ void rejectsInvalidMinFreeSpaceRatio() {
 
     DatanodeConfiguration subject = 
conf.getObject(DatanodeConfiguration.class);
 
-    assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, 
subject.getMinFreeSpaceRatio());
+    assertEquals(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT_DEFAULT, 
subject.getMinFreeSpaceRatio());
   }
 
   @Test
-  void useMinFreeSpaceIfBothMinFreeSpacePropertiesSet() {
+  void useMaxIfBothMinFreeSpacePropertiesSet() {
     OzoneConfiguration conf = new OzoneConfiguration();
     int minFreeSpace = 10000;
     conf.setLong(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, 
minFreeSpace);
@@ -199,10 +207,11 @@ void useMinFreeSpaceIfBothMinFreeSpacePropertiesSet() {
     DatanodeConfiguration subject = 
conf.getObject(DatanodeConfiguration.class);
 
     assertEquals(minFreeSpace, subject.getMinFreeSpace());
-    assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, 
subject.getMinFreeSpaceRatio());
+    assertEquals(.5f, subject.getMinFreeSpaceRatio());
 
     for (long capacity : CAPACITIES) {
-      assertEquals(minFreeSpace, subject.getMinFreeSpace(capacity));
+      // disk percent is higher than minFreeSpace configured 10000 bytes
+      assertEquals((long)(capacity * 0.5f), subject.getMinFreeSpace(capacity));
     }
   }
 
@@ -211,11 +220,12 @@ void useMinFreeSpaceIfBothMinFreeSpacePropertiesSet() {
   void usesFixedMinFreeSpace(long bytes) {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setLong(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, 
bytes);
+    // keeping %cent low so that min free space is picked up
+    
conf.setFloat(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
 0.00001f);
 
     DatanodeConfiguration subject = 
conf.getObject(DatanodeConfiguration.class);
 
     assertEquals(bytes, subject.getMinFreeSpace());
-    assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, 
subject.getMinFreeSpaceRatio());
 
     for (long capacity : CAPACITIES) {
       assertEquals(bytes, subject.getMinFreeSpace(capacity));
@@ -226,7 +236,8 @@ void usesFixedMinFreeSpace(long bytes) {
   @ValueSource(ints = {1, 10, 100})
   void calculatesMinFreeSpaceRatio(int percent) {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.unset(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE); // 
set in ozone-site.xml
+    // keeping min free space low so that %cent is picked up after calculation
+    conf.set(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, 
"1000"); // set in ozone-site.xml
     
conf.setFloat(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
 percent / 100.0f);
 
     DatanodeConfiguration subject = 
conf.getObject(DatanodeConfiguration.class);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
index ef5279a168..e2a9851b1c 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
@@ -19,7 +19,6 @@
 
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT;
-import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE;
 import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -202,12 +201,12 @@ public void testMinFreeSpaceCalculator() throws Exception 
{
     assertEquals(minSpace, 
conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
 
     conf.setFloat(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, 0.01f);
-    // When both are set, minSpace will be used
+    // When both are set, max(minSpace, %cent), minSpace will be used
     assertEquals(minSpace, 
conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
 
-    // capacity * 1% = 10
-    conf.unset(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
-    assertEquals(10, 
conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
+    conf.setFloat(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, 1f);
+    // When both are set, max(minSpace, %cent), hence %cent will be used
+    assertEquals(1000, 
conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
   }
 
   private long getExpectedDefaultReserved(HddsVolume volume) {
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 1e69eac2ea..08e18cab3b 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -407,7 +407,9 @@ public void 
testReplicationImportReserveSpace(ContainerLayoutVersion layout)
     // Initially volume has 0 used space
     assertEquals(0, usedSpace);
     // Increase committed bytes so that volume has only remaining 3 times 
container size space
-    long initialCommittedBytes = vol1.getCurrentUsage().getCapacity() - 
containerMaxSize * 3;
+    long minFreeSpace =
+        
conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(vol1.getCurrentUsage().getCapacity());
+    long initialCommittedBytes = vol1.getCurrentUsage().getCapacity() - 
containerMaxSize * 3 - minFreeSpace;
     vol1.incCommittedBytes(initialCommittedBytes);
     ContainerReplicator replicator =
         new DownloadAndImportReplicator(conf, set, importer, moc);
diff --git a/hadoop-hdds/docs/content/design/dn-min-space-configuration.md 
b/hadoop-hdds/docs/content/design/dn-min-space-configuration.md
new file mode 100644
index 0000000000..ab62e51428
--- /dev/null
+++ b/hadoop-hdds/docs/content/design/dn-min-space-configuration.md
@@ -0,0 +1,108 @@
+---
+title: Minimum free space configuration for datanode volumes
+summary: Describes the proposal for the minimum free space configuration which 
a volume must have to function correctly.
+date: 2025-05-05
+jira: HDDS-12928
+status: implemented
+author: Sumit Agrawal
+---
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Abstract
+A volume in the datanode stores the container data and metadata (a RocksDB 
co-located on the volume).
+There are various parallel operations going on, such as importing containers, 
exporting containers, writing and deleting data blocks,
+repairing containers, and creating and deleting containers. Space is also 
required for the volume DB to perform compaction at regular intervals.
+It is hard to capture the exact usage and the available free space. So, it is 
necessary to configure a minimum free space
+so that datanode operations can run without corruption or the environment 
getting stuck, and reads of data remain supported.
+
+This free space is used to ensure volume allocation if `required space < 
(volume available space - free space - reserved space - committed space)`.
+Any container creation or container import needs to ensure that this 
constraint is met, and block byte writes need to ensure that `free space` is 
available.
+Note: Any issue related to ensuring free space is tracked with separate JIRA.
+
+# Existing configuration (before HDDS-12928)
+Two configurations are provided,
+- hdds.datanode.volume.min.free.space  (default: 5GB)
+- hdds.datanode.volume.min.free.space.percent
+
+1. If nothing is configured, takes default value as 5GB
+2. if both are configured, priority to hdds.datanode.volume.min.free.space
+3. else respective configuration is used.
+
+# Problem Statement
+
+- With the 5GB default configuration, the full-disk scenario is not avoided, 
due to errors in ensuring free space availability.
+This is because the container size being imported is 5GB, which is near the 
boundary, and because of other parallel operations.
+- Volume DB size can increase with an increase in disk space, as the number of 
containers and blocks it can hold grows, and hence so does the metadata.
+- Volume DB size can also vary due to small files and big files combination, 
as more small files can lead to more metadata.
+
+Solution involves
+- appropriate default min free space
+- depends on disk size variation
+
+# Approach 1 Combination of minimum free space and percent increase on disk 
size
+
+Configuration:
+1. Minimum free space: hdds.datanode.volume.min.free.space: default value 
`20GB`
+2. disk size variation: hdds.datanode.volume.min.free.space.percent: default 
0.1% or 0.001 ratio
+
+Minimum free space = Max (`<Min free space>`, `<percent disk space>`)
+
+| Disk space | Min Free Space (percent: 1%) | Min Free Space ( percent: 0.1%) |
+| -- |------------------------------|---------------------------------|
+| 100 GB | 20 GB                        | 20 GB (min space default)       |
+| 1 TB | 20 GB                        | 20 GB (min space default)       |
+| 10 TB | 100 GB                       | 20 GB  (min space default) |
+| 100 TB | 1 TB                         | 100 GB                          |
+
+Considering the above table with this solution,
+- 0.1% is expected to be sufficient for almost all cases, as no DN volume DB 
has been observed to be more than 1-2 GB
+
+# Approach 2 Only minimum free space configuration
+
+Considering the above approach, 20 GB as default should be sufficient for most 
disks, as disk sizes of 10-15 TB are commonly seen.
+Larger disks are rarely used; instead, multiple volumes with multiple disks 
are attached to the same DN.
+
+Considering this scenario, Minimum free space: 
`hdds.datanode.volume.min.free.space` itself is enough and
+percent based configuration can be removed.
+
+### Compatibility
+If `hdds.datanode.volume.min.free.space.percent` is configured, this should 
not have any impact
+as default value is increased to 20GB which will consider most of the use case.
+
+# Approach 3 Combination of maximum free space and percent configuration on 
disk size
+
+Configuration:
+1. Maximum free space: hdds.datanode.volume.min.free.space: default value 
`20GB`
+2. disk size variation: hdds.datanode.volume.min.free.space.percent: default 
10% or 0.1 ratio
+
+Minimum free space = **Min** (`<Max free space>`, `<percent disk space>`)
+> The difference from approach `one` is that the Min function is applied over 
the two configurations above
+
+| Disk space | Min Free Space (20GB, 10% of disk) |
+| -- |------------------------------------|
+| 10 GB | 1 GB (=Min(20GB, 1GB)              |
+| 100 GB | 10 GB (=Min(20GB, 10GB)            |
+| 1 TB | 20 GB   (=Min(20GB, 100GB)         |
+| 10 TB | 20 GB (=Min(20GB, 1TB)             |
+| 100 TB | 20GB  (=Min(20GB, 10TB)            |
+
+This case is more useful for test environments where disk space is limited and 
no additional configuration is needed.
+
+# Conclusion
+1. Going with Approach 1
+- Approach 2 is simple, setting only min-free-space, but it does not scale 
with larger disk sizes.
+- Approach 3 is more applicable for test environment where disk space is less, 
else same as Approach 2.
+- So Approach 1 is selected considering advantage where higher free space can 
be configured by default.
+2. Min Space will be 20GB as default
+
+
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml 
b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml
index 2767faaee6..5e762d06c6 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml
@@ -28,6 +28,7 @@ data:
   OZONE-SITE.XML_hdds.datanode.dir: "/data/storage"
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
   OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  OZONE-SITE.XML_hdds.datanode.volume.min.free.space: "1GB"
   OZONE-SITE.XML_ozone.metadata.dirs: "/data/metadata"
   OZONE-SITE.XML_ozone.om.address: "om-0.om"
   OZONE-SITE.XML_ozone.recon.address: "recon-0.recon"
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
index 94811f70b0..9771fb6764 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
+++ 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
@@ -28,6 +28,7 @@ data:
   OZONE-SITE.XML_hdds.datanode.dir: /data/storage
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
   OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  OZONE-SITE.XML_hdds.datanode.volume.min.free.space: "1GB"
   OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
   OZONE-SITE.XML_ozone.om.address: om-0.om
   OZONE-SITE.XML_ozone.recon.address: recon-0.recon
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
index 94811f70b0..9771fb6764 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
@@ -28,6 +28,7 @@ data:
   OZONE-SITE.XML_hdds.datanode.dir: /data/storage
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
   OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  OZONE-SITE.XML_hdds.datanode.volume.min.free.space: "1GB"
   OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
   OZONE-SITE.XML_ozone.om.address: om-0.om
   OZONE-SITE.XML_ozone.recon.address: recon-0.recon
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
index 62a34179ab..abd37af871 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
@@ -28,6 +28,7 @@ data:
   OZONE-SITE.XML_hdds.datanode.dir: /data/storage
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
   OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  OZONE-SITE.XML_hdds.datanode.volume.min.free.space: "1GB"
   OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
   OZONE-SITE.XML_ozone.om.address: om-0.om
   OZONE-SITE.XML_ozone.recon.address: recon-0.recon
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml
index 78cbe31619..c7adbc8025 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml
@@ -28,6 +28,7 @@ data:
   OZONE-SITE.XML_hdds.datanode.dir: /data/storage
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
   OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  OZONE-SITE.XML_hdds.datanode.volume.min.free.space: "1GB"
   OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
   OZONE-SITE.XML_ozone.om.address: om-0.om
   OZONE-SITE.XML_ozone.recon.address: recon-0.recon
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
index 561b5f5a8b..91597a0923 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
@@ -28,6 +28,7 @@ data:
   OZONE-SITE.XML_hdds.datanode.dir: /data/storage
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
   OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  OZONE-SITE.XML_hdds.datanode.volume.min.free.space: "1GB"
   OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
   OZONE-SITE.XML_ozone.om.address: om-0.om
   OZONE-SITE.XML_ozone.recon.address: recon-0.recon
diff --git 
a/hadoop-ozone/integration-test-recon/src/test/resources/ozone-site.xml 
b/hadoop-ozone/integration-test-recon/src/test/resources/ozone-site.xml
index a9c6511de7..95a78dd5f3 100644
--- a/hadoop-ozone/integration-test-recon/src/test/resources/ozone-site.xml
+++ b/hadoop-ozone/integration-test-recon/src/test/resources/ozone-site.xml
@@ -110,4 +110,8 @@
     <name>ozone.client.datastream.window.size</name>
     <value>8MB</value>
   </property>
+  <property>
+    <name>hdds.datanode.volume.min.free.space</name>
+    <value>5GB</value>
+  </property>
 </configuration>
diff --git a/hadoop-ozone/integration-test-s3/src/test/resources/ozone-site.xml 
b/hadoop-ozone/integration-test-s3/src/test/resources/ozone-site.xml
index 916d7beeb7..da0ea9ab8c 100644
--- a/hadoop-ozone/integration-test-s3/src/test/resources/ozone-site.xml
+++ b/hadoop-ozone/integration-test-s3/src/test/resources/ozone-site.xml
@@ -125,5 +125,8 @@
     <name>ozone.client.datastream.window.size</name>
     <value>8MB</value>
   </property>
-
+  <property>
+    <name>hdds.datanode.volume.min.free.space</name>
+    <value>5GB</value>
+  </property>
 </configuration>
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
index 748577766f..2cd1090251 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
@@ -22,6 +22,7 @@
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.HashMap;
@@ -56,6 +57,7 @@ public void setup() throws Exception {
     //setup a cluster (1G free space is enough for a unit test)
     conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
+    conf.set(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, "5GB");
     conf.set(HDDS_NODE_REPORT_INTERVAL, "1s");
     conf.set("hdds.datanode.du.factory.classname",
         "org.apache.hadoop.ozone.container.common.volume.HddsVolumeFactory");


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to