adoroszlai commented on code in PR #4798:
URL: https://github.com/apache/ozone/pull/4798#discussion_r1267541253


##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java:
##########
@@ -18,21 +18,94 @@
 package org.apache.hadoop.ozone.reconfig;
 
 import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler;
 import org.junit.jupiter.api.Test;
 
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS;
+import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX;
+import static 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETING_LIMIT_PER_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 /**
  * Tests for Datanode reconfiguration.
  */
 class TestDatanodeReconfiguration extends ReconfigurationTestBase {
   @Override
   ReconfigurationHandler getSubject() {
-    return getCluster().getHddsDatanodes().get(0).getReconfigurationHandler();
+    return getFirstDatanode().getReconfigurationHandler();
   }
 
   @Test
   void reconfigurableProperties() {
-    assertProperties(getSubject(), ImmutableSet.of());
+    assertProperties(getSubject(), ImmutableSet.of(
+        HDDS_DATANODE_BLOCK_DELETING_LIMIT_PER_INTERVAL,
+        HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX,
+        OZONE_BLOCK_DELETING_SERVICE_WORKERS));
+  }
+
+  @Test
+  public void blockDeletingLimitPerInterval() throws ReconfigurationException {
+    getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+        HDDS_DATANODE_BLOCK_DELETING_LIMIT_PER_INTERVAL, "1");
+
+    getFirstDatanode().getDatanodeStateMachine().getContainer()
+        .getBlockDeletingService().getBlockLimitPerInterval();
+
+    assertEquals(1, getFirstDatanode().getDatanodeStateMachine().getContainer()
+        .getBlockDeletingService().getBlockLimitPerInterval());
+  }
+
+  @Test
+  public void blockDeleteThreadMax() throws ReconfigurationException {
+    // Start the service and get the original pool size
+    ThreadPoolExecutor executor = ((DeleteBlocksCommandHandler)
+        getFirstDatanode().getDatanodeStateMachine().getCommandDispatcher()
+            .getDeleteBlocksCommandHandler()).getExecutor();
+    int originPoolSize = executor.getMaximumPoolSize();
+
+    // Attempt to increase the pool size by 1 and verify if it's successful
+    getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+        HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX,
+        String.valueOf(originPoolSize + 1));
+    assertEquals(originPoolSize + 1, executor.getMaximumPoolSize());
+    assertEquals(originPoolSize + 1, executor.getCorePoolSize());
+
+    // Attempt to decrease the pool size by 1 and verify if it's successful
+    getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+        HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX,
+        String.valueOf(originPoolSize - 1));
+    assertEquals(originPoolSize - 1, executor.getMaximumPoolSize());
+    assertEquals(originPoolSize - 1,  executor.getCorePoolSize());
+  }
+
+  @Test
+  public void blockDeletingServiceWorkers() throws ReconfigurationException {
+    ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor)
+        getFirstDatanode().getDatanodeStateMachine().getContainer()
+            .getBlockDeletingService().getExecutorService();
+    int originPoolSize = executor.getCorePoolSize();
+
+    // Attempt to increase the pool size by 1 and verify if it's successful
+    getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+        OZONE_BLOCK_DELETING_SERVICE_WORKERS,
+        String.valueOf(originPoolSize + 1));
+    assertEquals(originPoolSize + 1, executor.getCorePoolSize());
+
+    // Attempt to decrease the pool size by 1 and verify if it's successful
+    getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+        OZONE_BLOCK_DELETING_SERVICE_WORKERS,
+        String.valueOf(originPoolSize - 1));
+    assertEquals(originPoolSize - 1, executor.getCorePoolSize());
+  }

Review Comment:
   We can avoid duplication by converting to `@ParameterizedTest`: 
https://github.com/adoroszlai/hadoop-ozone/commit/5aeb768560b36bf182dcc96bc50a0c71625f279d
   
   Also fixes findbugs error:
   
   ```
   M D RV: Return value of 
org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService.getBlockLimitPerInterval()
 ignored, but method has no side effect  At 
TestDatanodeReconfiguration.java:[line 58]
   ```
   
   
https://github.com/xichen01/ozone/actions/runs/5577408467/jobs/10190274215#step:6:2367



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java:
##########
@@ -26,6 +26,7 @@
 import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS;
+import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;

Review Comment:
   `reconfigurableProperties()` test case needs to be updated: 
https://github.com/adoroszlai/hadoop-ozone/commit/9d9226e0afe713e70418874d26a156d2ea862688
   
   Fixes 
https://github.com/xichen01/ozone/actions/runs/5577408467/jobs/10190272872#step:5:4011



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java:
##########
@@ -669,4 +668,12 @@ public long getBytesReleased() {
       return bytesReleased;
     }
   }
+
+  public int getBlockLimitPerInterval() {
+    return dfsConf.getBlockDeletionLimit();
+  }
+
+  public DatanodeConfiguration getDfsConf() {
+    return dfsConf;

Review Comment:
   Nit: I think `dnConf` / `getDnConf` would be better.



##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java:
##########
@@ -2125,6 +2128,14 @@ private String reconfOzoneReadOnlyAdmins(String newVal) {
     return String.valueOf(newVal);
   }
 
+  private String reconfHddsScmBlockDeletionPerIntervalMax(String newVal) {
+    getConfiguration().set(HDDS_SCM_BLOCK_DELETION_PER_INTERVAL_MAX, newVal);
+
+    getScmBlockManager().getSCMBlockDeletingService()
+        .setBlockDeleteTXNum(Integer.parseInt(newVal));
+    return newVal;
+  }

Review Comment:
   We can simplify reconfiguration of this property by using `reconfigurable = 
true` and by making sure the same instance of `scmConfig` is used in 
`SCMBlockDeletingService` as the one being reconfigured in SCM.
   
   Full change (also fixes `TestScmReconfiguration` failure):
   
https://github.com/adoroszlai/hadoop-ozone/commit/1ce698fab5f9b6a55dab6e28f382bb29f6d65cc0



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to