xichen01 commented on code in PR #5721:
URL: https://github.com/apache/ozone/pull/5721#discussion_r1420709857


##########
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java:
##########
@@ -448,11 +461,68 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
           }
         };
     dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1);
-    Assertions.assertFalse(usedNodesIdentity.get());
+    assertFalse(usedNodesIdentity.get());
     dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1);
     Assertions.assertTrue(usedNodesIdentity.get());
   }
 
+  @Test
+  public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() {
+    NodeManager nodeMngr = mock(NodeManager.class);
+    ConfigurationSource confing = mock(ConfigurationSource.class);
+    when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE)))
+        .thenReturn(true);
+    when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE),
+            eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT),
+            eq(StorageUnit.BYTES))).thenReturn(100000.0);
+    UUID datanodeUuid = spy(UUID.randomUUID());
+    DummyPlacementPolicy placementPolicy =
+        new DummyPlacementPolicy(nodeMngr, confing, 1);
+    DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
+    when(datanodeDetails.getUuid()).thenReturn(datanodeUuid);
+
+    DatanodeInfo datanodeInfo = mock(DatanodeInfo.class);
+    NodeStatus nodeStatus = mock(NodeStatus.class);
+    when(nodeStatus.isNodeWritable()).thenReturn(true);
+    when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus);
+    when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo);
+
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =

Review Comment:
   nit: consider using 
`org.apache.hadoop.hdds.scm.HddsTestUtils#createStorageReport` to build these reports.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java:
##########
@@ -70,6 +70,17 @@ public String toString() {
         ", volumes: " + fullVolumes;
   }
 
+  public static boolean hasVolumeEnoughSpace(long volumeCapacity,

Review Comment:
   This could be moved into `VolumeUsage`, alongside `getMinVolumeFreeSpace`.



##########
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java:
##########
@@ -448,11 +461,68 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
           }
         };
     dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1);
-    Assertions.assertFalse(usedNodesIdentity.get());
+    assertFalse(usedNodesIdentity.get());
     dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1);
     Assertions.assertTrue(usedNodesIdentity.get());
   }
 
+  @Test
+  public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() {
+    NodeManager nodeMngr = mock(NodeManager.class);
+    ConfigurationSource confing = mock(ConfigurationSource.class);
+    when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE)))
+        .thenReturn(true);
+    when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE),
+            eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT),
+            eq(StorageUnit.BYTES))).thenReturn(100000.0);
+    UUID datanodeUuid = spy(UUID.randomUUID());
+    DummyPlacementPolicy placementPolicy =
+        new DummyPlacementPolicy(nodeMngr, confing, 1);
+    DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
+    when(datanodeDetails.getUuid()).thenReturn(datanodeUuid);
+
+    DatanodeInfo datanodeInfo = mock(DatanodeInfo.class);
+    NodeStatus nodeStatus = mock(NodeStatus.class);
+    when(nodeStatus.isNodeWritable()).thenReturn(true);
+    when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus);
+    when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo);
+
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =
+        StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder()
+            .setCommitted(500)
+            .setCapacity(200000)
+            .setRemaining(101000)
+            .setStorageUuid(UUID.randomUUID().toString())
+            .setStorageLocation("/data/hdds")
+            .build();
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 =
+        StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder()
+            .setCommitted(1000)
+            .setCapacity(200000)
+            .setRemaining(101000)
+            .setStorageUuid(UUID.randomUUID().toString())
+            .setStorageLocation("/data/hdds")
+            .build();
+    StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto
+        metaReport =
+        StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto
+            .newBuilder()
+            .setRemaining(200)
+            .setStorageLocation("/data/metadata")
+            .build();
+    when(datanodeInfo.getStorageReports())
+        .thenReturn(Collections.singletonList(storageReport1))
+        .thenReturn(Collections.singletonList(storageReport2));
+    when(datanodeInfo.getMetadataStorageReports())
+        .thenReturn(Collections.singletonList(metaReport));
+
+
+    // 500 committed bytes

Review Comment:
   nit: It would be good to add a comment explaining how the expected values are calculated.



##########
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java:
##########
@@ -448,11 +461,68 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
           }
         };
     dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1);
-    Assertions.assertFalse(usedNodesIdentity.get());
+    assertFalse(usedNodesIdentity.get());
     dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1);
     Assertions.assertTrue(usedNodesIdentity.get());
   }
 
+  @Test
+  public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() {
+    NodeManager nodeMngr = mock(NodeManager.class);
+    ConfigurationSource confing = mock(ConfigurationSource.class);
+    when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE)))
+        .thenReturn(true);
+    when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE),
+            eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT),
+            eq(StorageUnit.BYTES))).thenReturn(100000.0);
+    UUID datanodeUuid = spy(UUID.randomUUID());

Review Comment:
   Is the `spy` on the UUID actually necessary here?



##########
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java:
##########
@@ -448,11 +461,68 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
           }
         };
     dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1);
-    Assertions.assertFalse(usedNodesIdentity.get());
+    assertFalse(usedNodesIdentity.get());
     dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1);
     Assertions.assertTrue(usedNodesIdentity.get());
   }
 
+  @Test
+  public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() {
+    NodeManager nodeMngr = mock(NodeManager.class);
+    ConfigurationSource confing = mock(ConfigurationSource.class);
+    when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE)))
+        .thenReturn(true);
+    when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE),
+            eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT),
+            eq(StorageUnit.BYTES))).thenReturn(100000.0);
+    UUID datanodeUuid = spy(UUID.randomUUID());
+    DummyPlacementPolicy placementPolicy =
+        new DummyPlacementPolicy(nodeMngr, confing, 1);
+    DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
+    when(datanodeDetails.getUuid()).thenReturn(datanodeUuid);
+
+    DatanodeInfo datanodeInfo = mock(DatanodeInfo.class);
+    NodeStatus nodeStatus = mock(NodeStatus.class);
+    when(nodeStatus.isNodeWritable()).thenReturn(true);
+    when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus);
+    when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo);
+
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =
+        StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder()
+            .setCommitted(500)
+            .setCapacity(200000)
+            .setRemaining(101000)
+            .setStorageUuid(UUID.randomUUID().toString())
+            .setStorageLocation("/data/hdds")
+            .build();
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 =
+        StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder()
+            .setCommitted(1000)
+            .setCapacity(200000)
+            .setRemaining(101000)
+            .setStorageUuid(UUID.randomUUID().toString())
+            .setStorageLocation("/data/hdds")
+            .build();
+    StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto
+        metaReport =
+        StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto
+            .newBuilder()
+            .setRemaining(200)
+            .setStorageLocation("/data/metadata")
+            .build();
+    when(datanodeInfo.getStorageReports())
+        .thenReturn(Collections.singletonList(storageReport1))
+        .thenReturn(Collections.singletonList(storageReport2));
+    when(datanodeInfo.getMetadataStorageReports())
+        .thenReturn(Collections.singletonList(metaReport));
+
+
+    // 500 committed bytes
+    assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000));

Review Comment:
   nit: `hasEnoughSpace` could be called directly in this assertion.



##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java:
##########
@@ -308,7 +312,9 @@ public static boolean hasEnoughSpace(DatanodeDetails 
datanodeDetails,
 
     if (dataSizeRequired > 0) {
       for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
-        if (reportProto.getRemaining() > dataSizeRequired) {
+        if (hasVolumeEnoughSpace(reportProto.getCapacity(),
+            reportProto.getRemaining(), reportProto.getCommitted(),
+            dataSizeRequired, conf)) {

Review Comment:
   The configuration used in `getMinVolumeFreeSpace` is the Datanode 
configuration; what happens if the configuration differs between Datanodes, 
or is not available on the SCM?
   Maybe `volumeFreeSpaceToSpare` should also be included in the Datanode's report.



##########
hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java:
##########
@@ -155,6 +155,8 @@ private void printInfo(DatanodeUsage info) {
         + " B", StringUtils.byteDesc(info.getRemaining()));
     System.out.printf("%-13s: %s %n", "Remaining %",
         PERCENT_FORMAT.format(info.getRemainingRatio()));
+    System.out.printf("%-13s: %s (%s) %n", "Committed", info.getCommitted()

Review Comment:
   `Committed` doesn't seem easy to understand outside of the code 
context; suggestion: `Container Pre-allocated`. 
   Also consider adding a `Remaining Allocatable` field (remaining - committed).



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to