This is an automated email from the ASF dual-hosted git repository.

tanxinyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/master by this push:
     new 4165e4c95f5 [IOTDB-6307] GCR algorithm for multi-database (#12183)
4165e4c95f5 is described below

commit 4165e4c95f5a8bc8c7d269d21767773a14bf4cd3
Author: Yongzao <[email protected]>
AuthorDate: Sat Mar 30 11:00:22 2024 +0800

    [IOTDB-6307] GCR algorithm for multi-database (#12183)
---
 .../partition/IoTDBAutoRegionGroupExtensionIT.java | 119 ++++++++++++---------
 .../iotdb/confignode/conf/ConfigNodeConfig.java    |   2 +-
 .../manager/load/balancer/RegionBalancer.java      |   9 +-
 .../region/GreedyCopySetRegionGroupAllocator.java  | 100 ++++++++++++-----
 .../region/GreedyRegionGroupAllocator.java         |  40 +++----
 .../balancer/region/IRegionGroupAllocator.java     |   3 +
 .../manager/partition/PartitionManager.java        |  24 +++++
 .../persistence/partition/PartitionInfo.java       |  40 ++++++-
 .../region/AllocatorScatterWidthManualTest.java    |   5 +
 .../GreedyCopySetRegionGroupAllocatorTest.java     | 115 +++++++++++++++-----
 .../region/GreedyRegionGroupAllocatorTest.java     |   4 +
 11 files changed, 321 insertions(+), 140 deletions(-)

diff --git 
a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT.java
 
b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT.java
index 8d0f0f2c03e..4c48c004624 100644
--- 
a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT.java
+++ 
b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT.java
@@ -51,9 +51,8 @@ import org.junit.runner.RunWith;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import static 
org.apache.iotdb.confignode.it.utils.ConfigNodeTestUtils.generatePatternTreeBuffer;
 
@@ -61,29 +60,30 @@ import static 
org.apache.iotdb.confignode.it.utils.ConfigNodeTestUtils.generateP
 @Category({ClusterIT.class})
 public class IoTDBAutoRegionGroupExtensionIT {
 
-  private static final String testDataRegionGroupExtensionPolicy = "AUTO";
-  private static final String testConsensusProtocolClass = 
ConsensusFactory.RATIS_CONSENSUS;
-  private static final int testReplicationFactor = 1;
+  private static final int TEST_DATA_NODE_NUM = 3;
+  private static final String TEST_DATA_REGION_GROUP_EXTENSION_POLICY = "AUTO";
+  private static final String TEST_CONSENSUS_PROTOCOL_CLASS = 
ConsensusFactory.RATIS_CONSENSUS;
+  private static final int TEST_REPLICATION_FACTOR = 2;
 
-  private static final String sg = "root.sg";
-  private static final int testSgNum = 2;
-  private static final long testTimePartitionInterval = 604800000;
-  private static final int testMinSchemaRegionGroupNum = 2;
-  private static final int testMinDataRegionGroupNum = 2;
+  private static final String DATABASE = "root.db";
+  private static final int TEST_DATABASE_NUM = 2;
+  private static final long TEST_TIME_PARTITION_INTERVAL = 604800000;
+  private static final int TEST_MIN_SCHEMA_REGION_GROUP_NUM = 2;
+  private static final int TEST_MIN_DATA_REGION_GROUP_NUM = 2;
 
   @Before
   public void setUp() throws Exception {
     EnvFactory.getEnv()
         .getConfig()
         .getCommonConfig()
-        .setSchemaRegionConsensusProtocolClass(testConsensusProtocolClass)
-        .setDataRegionConsensusProtocolClass(testConsensusProtocolClass)
-        .setSchemaReplicationFactor(testReplicationFactor)
-        .setDataReplicationFactor(testReplicationFactor)
-        .setDataRegionGroupExtensionPolicy(testDataRegionGroupExtensionPolicy)
-        .setTimePartitionInterval(testTimePartitionInterval);
+        .setSchemaRegionConsensusProtocolClass(TEST_CONSENSUS_PROTOCOL_CLASS)
+        .setDataRegionConsensusProtocolClass(TEST_CONSENSUS_PROTOCOL_CLASS)
+        .setSchemaReplicationFactor(TEST_REPLICATION_FACTOR)
+        .setDataReplicationFactor(TEST_REPLICATION_FACTOR)
+        
.setDataRegionGroupExtensionPolicy(TEST_DATA_REGION_GROUP_EXTENSION_POLICY)
+        .setTimePartitionInterval(TEST_TIME_PARTITION_INTERVAL);
     // Init 1C3D environment
-    EnvFactory.getEnv().initClusterEnvironment(1, 3);
+    EnvFactory.getEnv().initClusterEnvironment(1, TEST_DATA_NODE_NUM);
   }
 
   @After
@@ -102,8 +102,8 @@ public class IoTDBAutoRegionGroupExtensionIT {
       setStorageGroupAndCheckRegionGroupDistribution(client);
 
       // Delete all StorageGroups
-      for (int i = 0; i < testSgNum; i++) {
-        String curSg = sg + i;
+      for (int i = 0; i < TEST_DATABASE_NUM; i++) {
+        String curSg = DATABASE + i;
         client.deleteDatabase(new TDeleteDatabaseReq(curSg));
       }
       boolean isAllRegionGroupDeleted = false;
@@ -126,13 +126,13 @@ public class IoTDBAutoRegionGroupExtensionIT {
   private void 
setStorageGroupAndCheckRegionGroupDistribution(SyncConfigNodeIServiceClient 
client)
       throws TException, IllegalPathException, IOException {
 
-    for (int i = 0; i < testSgNum; i++) {
-      String curSg = sg + i;
+    for (int i = 0; i < TEST_DATABASE_NUM; i++) {
+      String curSg = DATABASE + i;
       TSStatus status =
           client.setDatabase(
               new TDatabaseSchema(curSg)
-                  .setMinSchemaRegionGroupNum(testMinSchemaRegionGroupNum)
-                  .setMinDataRegionGroupNum(testMinDataRegionGroupNum));
+                  .setMinSchemaRegionGroupNum(TEST_MIN_SCHEMA_REGION_GROUP_NUM)
+                  .setMinDataRegionGroupNum(TEST_MIN_DATA_REGION_GROUP_NUM));
       Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), 
status.getCode());
 
       // Insert SchemaPartitions to create SchemaRegionGroups
@@ -152,7 +152,7 @@ public class IoTDBAutoRegionGroupExtensionIT {
       // Insert DataPartitions to create DataRegionGroups
       Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap =
           ConfigNodeTestUtils.constructPartitionSlotsMap(
-              curSg, 0, 10, 0, 10, testTimePartitionInterval);
+              curSg, 0, 10, 0, 10, TEST_TIME_PARTITION_INTERVAL);
       TDataPartitionTableResp dataPartitionTableResp =
           client.getOrCreateDataPartitionTable(new 
TDataPartitionReq(partitionSlotsMap));
       Assert.assertEquals(
@@ -160,36 +160,51 @@ public class IoTDBAutoRegionGroupExtensionIT {
           dataPartitionTableResp.getStatus().getCode());
     }
 
-    // The number of SchemaRegionGroups should not less than the 
testMinSchemaRegionGroupNum
-    TShowRegionResp resp =
-        client.showRegion(
-            new 
TShowRegionReq().setConsensusGroupType(TConsensusGroupType.SchemaRegion));
-    Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), 
resp.getStatus().getCode());
-    Map<String, AtomicInteger> regionCounter = new ConcurrentHashMap<>();
-    resp.getRegionInfoList()
-        .forEach(
-            regionInfo ->
-                regionCounter
-                    .computeIfAbsent(regionInfo.getDatabase(), empty -> new 
AtomicInteger(0))
-                    .getAndIncrement());
-    Assert.assertEquals(testSgNum, regionCounter.size());
-    regionCounter.forEach(
-        (sg, regionCount) -> Assert.assertTrue(regionCount.get() >= 
testMinSchemaRegionGroupNum));
-
-    // The number of DataRegionGroups should not less than the 
testMinDataRegionGroupNum
-    resp =
-        client.showRegion(
-            new 
TShowRegionReq().setConsensusGroupType(TConsensusGroupType.DataRegion));
+    /* Check Region distribution */
+    checkRegionDistribution(TConsensusGroupType.SchemaRegion, client);
+    checkRegionDistribution(TConsensusGroupType.DataRegion, client);
+  }
+
+  private void checkRegionDistribution(
+      TConsensusGroupType type, SyncConfigNodeIServiceClient client) throws 
TException {
+    TShowRegionResp resp = client.showRegion(new 
TShowRegionReq().setConsensusGroupType(type));
     Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), 
resp.getStatus().getCode());
-    regionCounter.clear();
+    Map<String, Integer> databaseRegionCounter = new TreeMap<>();
+    Map<Integer, Integer> dataNodeRegionCounter = new TreeMap<>();
+    Map<String, Map<Integer, Integer>> databaseDataNodeRegionCounter = new 
TreeMap<>();
     resp.getRegionInfoList()
         .forEach(
-            regionInfo ->
-                regionCounter
-                    .computeIfAbsent(regionInfo.getDatabase(), empty -> new 
AtomicInteger(0))
-                    .getAndIncrement());
-    Assert.assertEquals(testSgNum, regionCounter.size());
-    regionCounter.forEach(
-        (sg, regionCount) -> Assert.assertTrue(regionCount.get() >= 
testMinDataRegionGroupNum));
+            regionInfo -> {
+              databaseRegionCounter.merge(regionInfo.getDatabase(), 1, 
Integer::sum);
+              dataNodeRegionCounter.merge(regionInfo.getDataNodeId(), 1, 
Integer::sum);
+              databaseDataNodeRegionCounter
+                  .computeIfAbsent(regionInfo.getDatabase(), empty -> new 
TreeMap<>())
+                  .merge(regionInfo.getDataNodeId(), 1, Integer::sum);
+            });
+            // The number of RegionGroups should be no less than the 
testMinRegionGroupNum for each database
+    Assert.assertEquals(TEST_DATABASE_NUM, databaseRegionCounter.size());
+    databaseRegionCounter.forEach(
+        (database, regionCount) ->
+            Assert.assertTrue(
+                regionCount
+                    >= (type == TConsensusGroupType.SchemaRegion
+                        ? TEST_MIN_SCHEMA_REGION_GROUP_NUM
+                        : TEST_MIN_DATA_REGION_GROUP_NUM)));
+    // The maximal Region count - minimal Region count should be less than or 
equal to 1 for each
+    // DataNode
+    Assert.assertEquals(TEST_DATA_NODE_NUM, dataNodeRegionCounter.size());
+    Assert.assertTrue(
+        
dataNodeRegionCounter.values().stream().max(Integer::compareTo).orElse(0)
+                - 
dataNodeRegionCounter.values().stream().min(Integer::compareTo).orElse(0)
+            <= 1);
+    // The maximal Region count - minimal Region count should be less than or 
equal to 1 for each
+    // Database
+    Assert.assertEquals(TEST_DATABASE_NUM, 
databaseDataNodeRegionCounter.size());
+    databaseDataNodeRegionCounter.forEach(
+        (database, dataNodeRegionCount) ->
+            Assert.assertTrue(
+                
dataNodeRegionCount.values().stream().max(Integer::compareTo).orElse(0)
+                        - 
dataNodeRegionCount.values().stream().min(Integer::compareTo).orElse(0)
+                    <= 1));
   }
 }
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
index 6ed563ec137..678b7c868a5 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
@@ -108,7 +108,7 @@ public class ConfigNodeConfig {
 
   /** RegionGroup allocate policy. */
   private RegionBalancer.RegionGroupAllocatePolicy regionGroupAllocatePolicy =
-      RegionBalancer.RegionGroupAllocatePolicy.GREEDY_COPY_SET;
+      RegionBalancer.RegionGroupAllocatePolicy.GCR;
 
   /** Max concurrent client number. */
   private int rpcMaxConcurrentClientNum = 65535;
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java
index 32f47a81812..9dc3bba9f6e 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java
@@ -57,7 +57,7 @@ public class RegionBalancer {
       case GREEDY:
         this.regionGroupAllocator = new GreedyRegionGroupAllocator();
         break;
-      case GREEDY_COPY_SET:
+      case GCR:
       default:
         this.regionGroupAllocator = new GreedyCopySetRegionGroupAllocator();
     }
@@ -100,6 +100,9 @@ public class RegionBalancer {
       int allotment = entry.getValue();
       int replicationFactor =
           getClusterSchemaManager().getReplicationFactor(database, 
consensusGroupType);
+      // Only consider the specified Database when doing allocation
+      List<TRegionReplicaSet> databaseAllocatedRegionGroups =
+          getPartitionManager().getAllReplicaSets(database, 
consensusGroupType);
 
       for (int i = 0; i < allotment; i++) {
         // Prepare input data
@@ -119,6 +122,7 @@ public class RegionBalancer {
                 availableDataNodeMap,
                 freeDiskSpaceMap,
                 allocatedRegionGroups,
+                databaseAllocatedRegionGroups,
                 replicationFactor,
                 new TConsensusGroupId(
                     consensusGroupType, 
getPartitionManager().generateNextRegionGroupId()));
@@ -126,6 +130,7 @@ public class RegionBalancer {
 
         // Mark the new RegionGroup as allocated
         allocatedRegionGroups.add(newRegionGroup);
+        databaseAllocatedRegionGroups.add(newRegionGroup);
       }
     }
 
@@ -150,6 +155,6 @@ public class RegionBalancer {
 
   public enum RegionGroupAllocatePolicy {
     GREEDY,
-    GREEDY_COPY_SET
+    GCR
   }
 }
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocator.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocator.java
index 0fbb7dea071..2577455d41c 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocator.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocator.java
@@ -39,51 +39,54 @@ import static java.util.Map.Entry.comparingByValue;
 public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator {
 
   private static final Random RANDOM = new Random();
+  private static final int GCR_MAX_OPTIMAL_PLAN_NUM = 100;
 
   private int replicationFactor;
   // Sorted available DataNodeIds
   private int[] dataNodeIds;
   // The number of allocated Regions in each DataNode
   private int[] regionCounter;
+  // The number of allocated Regions in each DataNode within the same Database
+  private int[] databaseRegionCounter;
   // The number of 2-Region combinations in current cluster
   private int[][] combinationCounter;
 
   // First Key: the sum of Regions at the DataNodes in the allocation result 
is minimal
   int optimalRegionSum;
-  // Second Key: the sum of overlapped 2-Region combination Regions with other 
allocated
-  // RegionGroups is minimal
+  // Second Key: the sum of Regions at the DataNodes within the same Database
+  // in the allocation result is minimal
+  int optimalDatabaseRegionSum;
+  // Third Key: the sum of overlapped 2-Region combination Regions with
+  // other allocated RegionGroups is minimal
   int optimalCombinationSum;
   List<int[]> optimalReplicaSets;
-  private static final int MAX_OPTIMAL_PLAN_NUM = 10;
 
   private static class DataNodeEntry {
 
-    private final int dataNodeId;
-
-    // First key: the number of Regions in the DataNode
+    // First key: the number of Regions in the DataNode, ascending order
     private final int regionCount;
-    // Second key: the scatter width of the DataNode
+    // Second key: the number of Regions in the DataNode within the same 
Database, ascending order
+    private final int databaseRegionCount;
+    // Third key: the scatter width of the DataNode, ascending order
     private final int scatterWidth;
-    // Third key: a random weight
+    // Fourth key: a random weight, ascending order
     private final int randomWeight;
 
-    public DataNodeEntry(int dataNodeId, int regionCount, int scatterWidth) {
-      this.dataNodeId = dataNodeId;
+    public DataNodeEntry(int databaseRegionCount, int regionCount, int 
scatterWidth) {
+      this.databaseRegionCount = databaseRegionCount;
       this.regionCount = regionCount;
       this.scatterWidth = scatterWidth;
       this.randomWeight = RANDOM.nextInt();
     }
 
-    public int getDataNodeId() {
-      return dataNodeId;
-    }
-
     public int compare(DataNodeEntry e) {
       return regionCount != e.regionCount
           ? Integer.compare(regionCount, e.regionCount)
-          : scatterWidth != e.scatterWidth
-              ? Integer.compare(scatterWidth, e.scatterWidth)
-              : Integer.compare(randomWeight, e.randomWeight);
+          : databaseRegionCount != e.databaseRegionCount
+              ? Integer.compare(databaseRegionCount, e.databaseRegionCount)
+              : scatterWidth != e.scatterWidth
+                  ? Integer.compare(scatterWidth, e.scatterWidth)
+                  : Integer.compare(randomWeight, e.randomWeight);
     }
   }
 
@@ -96,11 +99,16 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
       Map<Integer, TDataNodeConfiguration> availableDataNodeMap,
       Map<Integer, Double> freeDiskSpaceMap,
       List<TRegionReplicaSet> allocatedRegionGroups,
+      List<TRegionReplicaSet> databaseAllocatedRegionGroups,
       int replicationFactor,
       TConsensusGroupId consensusGroupId) {
     try {
-      prepare(replicationFactor, availableDataNodeMap, allocatedRegionGroups);
-      dfs(-1, 0, new int[replicationFactor], 0);
+      prepare(
+          replicationFactor,
+          availableDataNodeMap,
+          allocatedRegionGroups,
+          databaseAllocatedRegionGroups);
+      dfs(-1, 0, new int[replicationFactor], 0, 0);
 
       // Randomly pick one optimal plan as result
       Collections.shuffle(optimalReplicaSets);
@@ -110,6 +118,7 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
       for (int i = 0; i < replicationFactor; i++) {
         
result.addToDataNodeLocations(availableDataNodeMap.get(optimalReplicaSet[i]).getLocation());
       }
+
       return result;
     } finally {
       clear();
@@ -122,11 +131,13 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
    * @param replicationFactor replication factor in the cluster
    * @param availableDataNodeMap currently available DataNodes, ensure size() 
>= replicationFactor
    * @param allocatedRegionGroups already allocated RegionGroups in the cluster
+   * @param databaseAllocatedRegionGroups already allocated RegionGroups in 
the same Database
    */
   private void prepare(
       int replicationFactor,
       Map<Integer, TDataNodeConfiguration> availableDataNodeMap,
-      List<TRegionReplicaSet> allocatedRegionGroups) {
+      List<TRegionReplicaSet> allocatedRegionGroups,
+      List<TRegionReplicaSet> databaseAllocatedRegionGroups) {
 
     this.replicationFactor = replicationFactor;
     // Store the maximum DataNodeId
@@ -139,9 +150,11 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
                 .max()
                 .orElse(0));
 
-    // Compute regionCounter and combinationCounter
+    // Compute regionCounter, databaseRegionCounter and combinationCounter
     regionCounter = new int[maxDataNodeId + 1];
     Arrays.fill(regionCounter, 0);
+    databaseRegionCounter = new int[maxDataNodeId + 1];
+    Arrays.fill(databaseRegionCounter, 0);
     combinationCounter = new int[maxDataNodeId + 1][maxDataNodeId + 1];
     for (int i = 0; i <= maxDataNodeId; i++) {
       Arrays.fill(combinationCounter[i], 0);
@@ -158,6 +171,12 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
         }
       }
     }
+    for (TRegionReplicaSet regionReplicaSet : databaseAllocatedRegionGroups) {
+      List<TDataNodeLocation> dataNodeLocations = 
regionReplicaSet.getDataNodeLocations();
+      for (TDataNodeLocation dataNodeLocation : dataNodeLocations) {
+        databaseRegionCounter[dataNodeLocation.getDataNodeId()]++;
+      }
+    }
 
     // Compute the DataNodeIds through sorting the DataNodeEntryMap
     Map<Integer, DataNodeEntry> dataNodeEntryMap = new HashMap<>(maxDataNodeId 
+ 1);
@@ -175,7 +194,8 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
               }
               dataNodeEntryMap.put(
                   dataNodeId,
-                  new DataNodeEntry(dataNodeId, regionCounter[dataNodeId], 
scatterWidth));
+                  new DataNodeEntry(
+                      databaseRegionCounter[dataNodeId], 
regionCounter[dataNodeId], scatterWidth));
             });
     dataNodeIds =
         dataNodeEntryMap.entrySet().stream()
@@ -187,6 +207,7 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
             .toArray();
 
     // Reset the optimal result
+    optimalDatabaseRegionSum = Integer.MAX_VALUE;
     optimalRegionSum = Integer.MAX_VALUE;
     optimalCombinationSum = Integer.MAX_VALUE;
     optimalReplicaSets = new ArrayList<>();
@@ -200,14 +221,26 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
    * @param lastIndex last decided index in dataNodeIds
    * @param currentReplica current replica index
    * @param currentReplicaSet current allocation plan
+   * @param databaseRegionSum the sum of Regions at the DataNodes within the 
same Database in the
+   *     current allocation plan
    * @param regionSum the sum of Regions at the DataNodes in the current 
allocation plan
    */
-  private void dfs(int lastIndex, int currentReplica, int[] currentReplicaSet, 
int regionSum) {
+  private void dfs(
+      int lastIndex,
+      int currentReplica,
+      int[] currentReplicaSet,
+      int databaseRegionSum,
+      int regionSum) {
     if (regionSum > optimalRegionSum) {
       // Pruning: no needs for further searching when the first key
       // is bigger than the historical optimal result
       return;
     }
+    if (regionSum == optimalRegionSum && databaseRegionSum > 
optimalDatabaseRegionSum) {
+      // Pruning: no needs for further searching when the second key
+      // is bigger than the historical optimal result
+      return;
+    }
 
     if (currentReplica == replicationFactor) {
       // A complete allocation plan is found
@@ -217,9 +250,19 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
           combinationSum += 
combinationCounter[currentReplicaSet[i]][currentReplicaSet[j]];
         }
       }
+      if (regionSum == optimalRegionSum
+          && databaseRegionSum == optimalDatabaseRegionSum
+          && combinationSum > optimalCombinationSum) {
+        // Pruning: no needs for further searching when the third key
+        // is bigger than the historical optimal result
+        return;
+      }
 
-      if (regionSum < optimalRegionSum || combinationSum < 
optimalCombinationSum) {
+      if (regionSum < optimalRegionSum
+          || databaseRegionSum < optimalDatabaseRegionSum
+          || combinationSum < optimalCombinationSum) {
         // Reset the optimal result when a better one is found
+        optimalDatabaseRegionSum = databaseRegionSum;
         optimalRegionSum = regionSum;
         optimalCombinationSum = combinationSum;
         optimalReplicaSets.clear();
@@ -231,8 +274,13 @@ public class GreedyCopySetRegionGroupAllocator implements 
IRegionGroupAllocator
     for (int i = lastIndex + 1; i < dataNodeIds.length; i++) {
       // Decide the next DataNodeId in the allocation plan
       currentReplicaSet[currentReplica] = dataNodeIds[i];
-      dfs(i, currentReplica + 1, currentReplicaSet, regionSum + 
regionCounter[dataNodeIds[i]]);
-      if (optimalReplicaSets.size() == MAX_OPTIMAL_PLAN_NUM) {
+      dfs(
+          i,
+          currentReplica + 1,
+          currentReplicaSet,
+          databaseRegionSum + databaseRegionCounter[dataNodeIds[i]],
+          regionSum + regionCounter[dataNodeIds[i]]);
+      if (optimalReplicaSets.size() == GCR_MAX_OPTIMAL_PLAN_NUM) {
         // Pruning: no needs for further searching when
         // the number of optimal plans reaches the limitation
         return;
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
index c01c94a4e60..65388b9e998 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
@@ -25,9 +25,6 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
 import org.apache.iotdb.tsfile.utils.Pair;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -39,8 +36,6 @@ import static java.util.Map.Entry.comparingByValue;
 /** Allocate Region Greedily */
 public class GreedyRegionGroupAllocator implements IRegionGroupAllocator {
 
-  private static final Logger LOGGER = 
LoggerFactory.getLogger(GreedyRegionGroupAllocator.class);
-
   public GreedyRegionGroupAllocator() {
     // Empty constructor
   }
@@ -50,6 +45,7 @@ public class GreedyRegionGroupAllocator implements 
IRegionGroupAllocator {
       Map<Integer, TDataNodeConfiguration> availableDataNodeMap,
       Map<Integer, Double> freeDiskSpaceMap,
       List<TRegionReplicaSet> allocatedRegionGroups,
+      List<TRegionReplicaSet> databaseAllocatedRegionGroups,
       int replicationFactor,
       TConsensusGroupId consensusGroupId) {
     // Build weightList order by number of regions allocated asc
@@ -87,28 +83,16 @@ public class GreedyRegionGroupAllocator implements 
IRegionGroupAllocator {
                     freeDiskSpaceMap.getOrDefault(datanodeId, 0d))));
 
     // Sort weightList
-    List<TDataNodeLocation> result =
-        priorityMap.entrySet().stream()
-            .sorted(
-                comparingByValue(
-                    (o1, o2) ->
-                        !Objects.equals(o1.getLeft(), o2.getLeft())
-                            // Compare the first key(The number of Regions) by 
ascending order
-                            ? o1.getLeft() - o2.getLeft()
-                            // Compare the second key(The free disk space) by 
descending order
-                            : (int) (o2.getRight() - o1.getRight())))
-            .map(entry -> entry.getKey().deepCopy())
-            .collect(Collectors.toList());
-
-    // Record weightList
-    for (TDataNodeLocation dataNodeLocation : result) {
-      LOGGER.info(
-          "[RegionGroupWeightList] DataNodeId: {}, RegionCount: {}, 
FreeDiskSpace: {}",
-          dataNodeLocation.getDataNodeId(),
-          priorityMap.get(dataNodeLocation).getLeft(),
-          priorityMap.get(dataNodeLocation).getRight());
-    }
-
-    return result;
+    return priorityMap.entrySet().stream()
+        .sorted(
+            comparingByValue(
+                (o1, o2) ->
+                    !Objects.equals(o1.getLeft(), o2.getLeft())
+                        // Compare the first key(The number of Regions) by 
ascending order
+                        ? o1.getLeft() - o2.getLeft()
+                        // Compare the second key(The free disk space) by 
descending order
+                        : (int) (o2.getRight() - o1.getRight())))
+        .map(entry -> entry.getKey().deepCopy())
+        .collect(Collectors.toList());
   }
 }
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionGroupAllocator.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionGroupAllocator.java
index 25a61b00ea0..554168d8497 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionGroupAllocator.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionGroupAllocator.java
@@ -34,6 +34,8 @@ public interface IRegionGroupAllocator {
    * @param availableDataNodeMap DataNodes that can be used for allocation
    * @param freeDiskSpaceMap The free disk space of the DataNodes
    * @param allocatedRegionGroups Allocated RegionGroups
+   * @param databaseAllocatedRegionGroups Allocated RegionGroups within the 
same Database as the
+   *     result
    * @param replicationFactor Replication factor of TRegionReplicaSet
    * @param consensusGroupId TConsensusGroupId of result TRegionReplicaSet
    * @return The optimal TRegionReplicaSet derived by the specified algorithm
@@ -42,6 +44,7 @@ public interface IRegionGroupAllocator {
       Map<Integer, TDataNodeConfiguration> availableDataNodeMap,
       Map<Integer, Double> freeDiskSpaceMap,
       List<TRegionReplicaSet> allocatedRegionGroups,
+      List<TRegionReplicaSet> databaseAllocatedRegionGroups,
       int replicationFactor,
       TConsensusGroupId consensusGroupId);
 }
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
index 2172087be0d..60db21bc2bc 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
@@ -711,6 +711,18 @@ public class PartitionManager {
     return partitionInfo.getAllReplicaSets(database);
   }
 
+  /**
+   * Only leader use this interface.
+   *
+   * @param database The specified Database
+   * @param type SchemaRegion or DataRegion
+   * @return Deep copy of all Regions' RegionReplicaSet with the specified 
Database and
+   *     TConsensusGroupType
+   */
+  public List<TRegionReplicaSet> getAllReplicaSets(String database, 
TConsensusGroupType type) {
+    return partitionInfo.getAllReplicaSets(database, type);
+  }
+
   /**
    * Get all RegionGroups currently owned by the specified Database.
    *
@@ -782,6 +794,18 @@ public class PartitionManager {
     return partitionInfo.getRegionGroupCount(database, type);
   }
 
+  /**
+   * Only leader use this interface.
+   *
+   * <p>Get all the RegionGroups currently in the cluster
+   *
+   * @param type SchemaRegion or DataRegion
+   * @return Map<Database, List<RegionGroupIds>>
+   */
+  public Map<String, List<TConsensusGroupId>> 
getAllRegionGroupIdMap(TConsensusGroupType type) {
+    return partitionInfo.getAllRegionGroupIdMap(type);
+  }
+
   /**
    * Only leader use this interface.
    *
diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
index e26082401fc..7480d194064 100644
--- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
+++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
@@ -90,6 +90,7 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.UUID;
 import java.util.Vector;
 import java.util.concurrent.ConcurrentHashMap;
@@ -702,7 +703,23 @@ public class PartitionInfo implements SnapshotProcessor {
     if (databasePartitionTables.containsKey(database)) {
       return databasePartitionTables.get(database).getAllReplicaSets();
     } else {
-      return new ArrayList<>();
+      return Collections.emptyList();
+    }
+  }
+
+  /**
+   * Only leader use this interface.
+   *
+   * @param database The specified Database
+   * @param type SchemaRegion or DataRegion
+   * @return Deep copy of all Regions' RegionReplicaSet with the specified 
Database and
+   *     TConsensusGroupType
+   */
+  public List<TRegionReplicaSet> getAllReplicaSets(String database, 
TConsensusGroupType type) {
+    if (databasePartitionTables.containsKey(database)) {
+      return databasePartitionTables.get(database).getAllReplicaSets(type);
+    } else {
+      return Collections.emptyList();
     }
   }
 
@@ -734,7 +751,7 @@ public class PartitionInfo implements SnapshotProcessor {
     if (databasePartitionTables.containsKey(database)) {
       return 
databasePartitionTables.get(database).getReplicaSets(regionGroupIds);
     } else {
-      return new ArrayList<>();
+      return Collections.emptyList();
     }
   }
 
@@ -800,6 +817,25 @@ public class PartitionInfo implements SnapshotProcessor {
     return databasePartitionTables.get(database).getRegionGroupCount(type);
   }
 
+  /**
+   * Only leader use this interface.
+   *
+   * <p>Get all RegionGroups currently in the cluster
+   *
+   * @param type SchemaRegion or DataRegion
+   * @return {@code Map<Database, List<RegionGroupIds>>}
+   */
+  public Map<String, List<TConsensusGroupId>> 
getAllRegionGroupIdMap(TConsensusGroupType type) {
+    Map<String, List<TConsensusGroupId>> result = new TreeMap<>();
+    databasePartitionTables.forEach(
+        (database, databasePartitionTable) -> {
+          if (databasePartitionTable.isNotPreDeleted()) {
+            result.put(database, 
databasePartitionTable.getAllRegionGroupIds(type));
+          }
+        });
+    return result;
+  }
+
   /**
    * Only leader use this interface.
    *
diff --git 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java
 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java
index d211b979034..b159525268f 100644
--- 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java
+++ 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java
@@ -39,6 +39,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
+/**
+ * Assign an allocator, then run this test manually. This test will show the
scatter width
+ * distribution of the specified allocator
+ */
 public class AllocatorScatterWidthManualTest {
 
   private static final Logger LOGGER =
@@ -80,6 +84,7 @@ public class AllocatorScatterWidthManualTest {
               AVAILABLE_DATA_NODE_MAP,
               FREE_SPACE_MAP,
               allocateResult,
+              allocateResult,
               DATA_REPLICATION_FACTOR,
               new TConsensusGroupId(TConsensusGroupType.DataRegion, index)));
     }
diff --git 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
index 623f8c2b2c3..95a23ab3e64 100644
--- 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
+++ 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
@@ -38,6 +38,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
 
 public class GreedyCopySetRegionGroupAllocatorTest {
 
@@ -49,6 +51,8 @@ public class GreedyCopySetRegionGroupAllocatorTest {
   private static final GreedyCopySetRegionGroupAllocator 
GREEDY_COPY_SET_ALLOCATOR =
       new GreedyCopySetRegionGroupAllocator();
 
+  private static final Random RANDOM = new Random();
+  private static final int TEST_DATABASE_NUM = 3;
   private static final int TEST_DATA_NODE_NUM = 21;
   private static final int DATA_REGION_PER_DATA_NODE =
       (int) 
ConfigNodeDescriptor.getInstance().getConf().getDataRegionPerDataNode();
@@ -78,45 +82,81 @@ public class GreedyCopySetRegionGroupAllocatorTest {
   }
 
   private void testRegionDistributionAndScatterWidth(int replicationFactor) {
-    final int dataRegionGroupNum =
+    final int dataRegionGroupAllotment =
         DATA_REGION_PER_DATA_NODE * TEST_DATA_NODE_NUM / replicationFactor;
+    final int dataRegionGroupPerDatabase = dataRegionGroupAllotment / 
TEST_DATABASE_NUM;
 
     /* Allocate DataRegionGroups */
     List<TRegionReplicaSet> greedyResult = new ArrayList<>();
     List<TRegionReplicaSet> greedyCopySetResult = new ArrayList<>();
-    for (int index = 0; index < dataRegionGroupNum; index++) {
-      greedyResult.add(
+    Map<Integer, List<TRegionReplicaSet>> greedyCopySetDatabaseResult = new 
TreeMap<>();
+    // Map<DataNodeId, RegionGroup Count> for greedy algorithm
+    Map<Integer, Integer> greedyRegionCounter = new TreeMap<>();
+    // Map<DataNodeId, RegionGroup Count> for greedy-copy-set algorithm
+    Map<Integer, Integer> greedyCopySetRegionCounter = new TreeMap<>();
+    // Map<DatabaseId, Map<DataNodeId, RegionGroup Count>>
+    Map<Integer, Map<Integer, Integer>> greedyCopySetDatabaseRegionCounter = 
new TreeMap<>();
+    for (int i = 0; i < TEST_DATABASE_NUM; i++) {
+      greedyCopySetDatabaseResult.put(i, new ArrayList<>());
+    }
+    for (int index = 0; index < dataRegionGroupPerDatabase * 
TEST_DATABASE_NUM; index++) {
+      TRegionReplicaSet greedyRegionGroup =
           GREEDY_ALLOCATOR.generateOptimalRegionReplicasDistribution(
               AVAILABLE_DATA_NODE_MAP,
               FREE_SPACE_MAP,
               greedyResult,
+              greedyResult,
               replicationFactor,
-              new TConsensusGroupId(TConsensusGroupType.DataRegion, index)));
-      greedyCopySetResult.add(
+              new TConsensusGroupId(TConsensusGroupType.DataRegion, index));
+      greedyResult.add(greedyRegionGroup);
+      greedyRegionGroup
+          .getDataNodeLocations()
+          .forEach(
+              dataNodeLocation ->
+                  greedyRegionCounter.merge(dataNodeLocation.getDataNodeId(), 
1, Integer::sum));
+      int databaseId = RANDOM.nextInt(TEST_DATABASE_NUM);
+      TRegionReplicaSet greedyCopySetRegionGroup =
           GREEDY_COPY_SET_ALLOCATOR.generateOptimalRegionReplicasDistribution(
               AVAILABLE_DATA_NODE_MAP,
               FREE_SPACE_MAP,
               greedyCopySetResult,
+              greedyCopySetDatabaseResult.get(databaseId),
               replicationFactor,
-              new TConsensusGroupId(TConsensusGroupType.DataRegion, index)));
+              new TConsensusGroupId(TConsensusGroupType.DataRegion, index));
+      greedyCopySetResult.add(greedyCopySetRegionGroup);
+      
greedyCopySetDatabaseResult.get(databaseId).add(greedyCopySetRegionGroup);
+      greedyCopySetRegionGroup
+          .getDataNodeLocations()
+          .forEach(
+              dataNodeLocation -> {
+                
greedyCopySetRegionCounter.merge(dataNodeLocation.getDataNodeId(), 1, 
Integer::sum);
+                greedyCopySetDatabaseRegionCounter
+                    .computeIfAbsent(databaseId, empty -> new TreeMap<>())
+                    .merge(dataNodeLocation.getDataNodeId(), 1, Integer::sum);
+              });
+      LOGGER.info(
+          "After allocate RegionGroup: {}, Database: {}, plan: {}",
+          index,
+          databaseId,
+          greedyCopySetRegionGroup.getDataNodeLocations().stream()
+              .map(TDataNodeLocation::getDataNodeId)
+              .collect(Collectors.toList()));
+      for (int i = 0; i < TEST_DATABASE_NUM; i++) {
+        LOGGER.info("Database {}: {}", i, 
greedyCopySetDatabaseRegionCounter.get(i));
+      }
+      LOGGER.info("Cluster   : {}", greedyCopySetRegionCounter);
+      for (int i = 1; i <= TEST_DATA_NODE_NUM; i++) {
+        Assert.assertTrue(
+            greedyCopySetRegionCounter.getOrDefault(i, 0) <= 
DATA_REGION_PER_DATA_NODE);
+      }
     }
 
     /* Statistics result */
-    // Map<DataNodeId, RegionGroup Count> for greedy algorithm
-    Map<Integer, Integer> greedyRegionCounter = new HashMap<>();
-    greedyResult.forEach(
-        regionReplicaSet ->
-            regionReplicaSet
-                .getDataNodeLocations()
-                .forEach(
-                    dataNodeLocation ->
-                        greedyRegionCounter.merge(
-                            dataNodeLocation.getDataNodeId(), 1, 
Integer::sum)));
     // Map<DataNodeId, ScatterWidth> for greedy algorithm
     // where a true in the bitset denotes the corresponding DataNode can help 
the DataNode in
     // Map-Key to share the RegionGroup-leader and restore data when 
restarting.
     // The more true in the bitset, the more safety the cluster DataNode in 
Map-Key is.
-    Map<Integer, BitSet> greedyScatterWidth = new HashMap<>();
+    Map<Integer, BitSet> greedyScatterWidth = new TreeMap<>();
     for (TRegionReplicaSet replicaSet : greedyResult) {
       for (int i = 0; i < replicationFactor; i++) {
         for (int j = i + 1; j < replicationFactor; j++) {
@@ -127,19 +167,8 @@ public class GreedyCopySetRegionGroupAllocatorTest {
         }
       }
     }
-
-    // Map<DataNodeId, RegionGroup Count> for greedy-copy-set algorithm
-    Map<Integer, Integer> greedyCopySetRegionCounter = new HashMap<>();
-    greedyCopySetResult.forEach(
-        regionReplicaSet ->
-            regionReplicaSet
-                .getDataNodeLocations()
-                .forEach(
-                    dataNodeLocation ->
-                        greedyCopySetRegionCounter.merge(
-                            dataNodeLocation.getDataNodeId(), 1, 
Integer::sum)));
     // Map<DataNodeId, ScatterWidth> for greedy-copy-set algorithm, ditto
-    Map<Integer, BitSet> greedyCopySetScatterWidth = new HashMap<>();
+    Map<Integer, BitSet> greedyCopySetScatterWidth = new TreeMap<>();
     for (TRegionReplicaSet replicaSet : greedyCopySetResult) {
       for (int i = 0; i < replicationFactor; i++) {
         for (int j = i + 1; j < replicationFactor; j++) {
@@ -162,9 +191,15 @@ public class GreedyCopySetRegionGroupAllocatorTest {
     int greedyCopySetScatterWidthSum = 0;
     int greedyCopySetMinScatterWidth = Integer.MAX_VALUE;
     int greedyCopySetMaxScatterWidth = Integer.MIN_VALUE;
+    int greedyCopySetMaxRegionCount = 0;
+    int greedyCopySetMinRegionCount = Integer.MAX_VALUE;
     for (int i = 1; i <= TEST_DATA_NODE_NUM; i++) {
       Assert.assertTrue(greedyRegionCounter.get(i) <= 
DATA_REGION_PER_DATA_NODE);
       Assert.assertTrue(greedyCopySetRegionCounter.get(i) <= 
DATA_REGION_PER_DATA_NODE);
+      greedyCopySetMinRegionCount =
+          Math.min(greedyCopySetMinRegionCount, 
greedyCopySetRegionCounter.get(i));
+      greedyCopySetMaxRegionCount =
+          Math.max(greedyCopySetMaxRegionCount, 
greedyCopySetRegionCounter.get(i));
 
       int scatterWidth = greedyScatterWidth.get(i).cardinality();
       greedyScatterWidthSum += scatterWidth;
@@ -176,6 +211,28 @@ public class GreedyCopySetRegionGroupAllocatorTest {
       greedyCopySetMinScatterWidth = Math.min(greedyCopySetMinScatterWidth, 
scatterWidth);
       greedyCopySetMaxScatterWidth = Math.max(greedyCopySetMaxScatterWidth, 
scatterWidth);
     }
+    // The maximal Region count - minimal Region count should be less than or 
equal to 1
+    Assert.assertTrue(greedyCopySetMaxRegionCount - 
greedyCopySetMinRegionCount <= 1);
+    for (int i = 0; i < TEST_DATABASE_NUM; i++) {
+      greedyCopySetMaxRegionCount = 0;
+      greedyCopySetMinRegionCount = Integer.MAX_VALUE;
+      if (!greedyCopySetDatabaseRegionCounter.containsKey(i)) {
+        continue;
+      }
+      for (int j = 1; j <= TEST_DATA_NODE_NUM; j++) {
+        if (greedyCopySetDatabaseRegionCounter.get(i).containsKey(j)) {
+          greedyCopySetMinRegionCount =
+              Math.min(
+                  greedyCopySetMinRegionCount, 
greedyCopySetDatabaseRegionCounter.get(i).get(j));
+          greedyCopySetMaxRegionCount =
+              Math.max(
+                  greedyCopySetMaxRegionCount, 
greedyCopySetDatabaseRegionCounter.get(i).get(j));
+        }
+      }
+      // The maximal Region count - minimal Region count should be less than 
or equal to 1 for each
+      // database
+      Assert.assertTrue(greedyCopySetMaxRegionCount - 
greedyCopySetMinRegionCount <= 1);
+    }
 
     LOGGER.info(
         "replicationFactor: {}, Scatter width for greedy: avg={}, min={}, 
max={}",
diff --git 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocatorTest.java
 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocatorTest.java
index 67aebe6ba6e..b0dd6769f2e 100644
--- 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocatorTest.java
+++ 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocatorTest.java
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.apache.iotdb.confignode.manager.load.balancer.region;
 
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
@@ -62,6 +63,7 @@ public class GreedyRegionGroupAllocatorTest {
               availableDataNodeMap,
               freeSpaceMap,
               allocatedRegionGroups,
+              allocatedRegionGroups,
               TEST_REPLICATION_FACTOR,
               new TConsensusGroupId(TConsensusGroupType.DataRegion, index));
       allocatedRegionGroups.add(newRegionGroup);
@@ -107,6 +109,7 @@ public class GreedyRegionGroupAllocatorTest {
             availableDataNodeMap,
             freeSpaceMap,
             allocatedRegionGroups,
+            allocatedRegionGroups,
             TEST_REPLICATION_FACTOR,
             new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 0));
     allocatedRegionGroups.add(newRegionGroup);
@@ -126,6 +129,7 @@ public class GreedyRegionGroupAllocatorTest {
             availableDataNodeMap,
             freeSpaceMap,
             allocatedRegionGroups,
+            allocatedRegionGroups,
             TEST_REPLICATION_FACTOR,
             new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 1));
     newRegionGroup


Reply via email to