This is an automated email from the ASF dual-hosted git repository.

alberto pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/develop by this push:
     new 67ebd727be GEODE-10410: Fix bucket lost during rebalance (#7857)
67ebd727be is described below

commit 67ebd727bef5c613bfe2aaf4258a5472ac433978
Author: WeijieEST <[email protected]>
AuthorDate: Wed Sep 21 01:37:57 2022 +0800

    GEODE-10410: Fix bucket lost during rebalance (#7857)
    
    * GEODE-10410: Fix bucket lost during rebalance
    
    * improve test case name
    
    * improve test case comments and test case names
---
 .../partitioned/rebalance/model/MemberRollup.java  | 31 ++++----
 .../PartitionedRegionLoadModelJUnitTest.java       | 82 +++++++++++++++++-----
 2 files changed, 79 insertions(+), 34 deletions(-)

diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/MemberRollup.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/MemberRollup.java
index be9c4df2ed..6078cf028d 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/MemberRollup.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/MemberRollup.java
@@ -131,26 +131,23 @@ class MemberRollup extends Member {
 
   @Override
   public RefusalReason willAcceptBucket(Bucket bucket, Member source, boolean 
checkIPAddress) {
-    RefusalReason reason = super.willAcceptBucket(bucket, source, 
checkIPAddress);
-    if (reason.willAccept()) {
-      BucketRollup bucketRollup = (BucketRollup) bucket;
-      MemberRollup sourceRollup = (MemberRollup) source;
-      for (Map.Entry<String, Member> entry : getColocatedMembers().entrySet()) 
{
-        String region = entry.getKey();
-        Member member = entry.getValue();
-        Bucket colocatedBucket = 
bucketRollup.getColocatedBuckets().get(region);
-        Member colocatedSource =
-            sourceRollup == null ? null : 
sourceRollup.getColocatedMembers().get(region);
-        if (colocatedBucket != null) {
-          reason = member.willAcceptBucket(colocatedBucket, colocatedSource, 
checkIPAddress);
-          if (!reason.willAccept()) {
-            return reason;
-          }
+    RefusalReason reason;
+    BucketRollup bucketRollup = (BucketRollup) bucket;
+    MemberRollup sourceRollup = (MemberRollup) source;
+    for (Map.Entry<String, Member> entry : getColocatedMembers().entrySet()) {
+      String region = entry.getKey();
+      Member member = entry.getValue();
+      Bucket colocatedBucket = bucketRollup.getColocatedBuckets().get(region);
+      Member colocatedSource =
+          sourceRollup == null ? null : 
sourceRollup.getColocatedMembers().get(region);
+      if (colocatedBucket != null) {
+        reason = member.willAcceptBucket(colocatedBucket, colocatedSource, 
checkIPAddress);
+        if (!reason.willAccept()) {
+          return reason;
         }
       }
-      return RefusalReason.NONE;
     }
-    return reason;
+    return RefusalReason.NONE;
   }
 
   Map<String, Member> getColocatedMembers() {
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/rebalance/PartitionedRegionLoadModelJUnitTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/rebalance/PartitionedRegionLoadModelJUnitTest.java
index 26b2b98b8e..5423b08a28 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/rebalance/PartitionedRegionLoadModelJUnitTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/rebalance/PartitionedRegionLoadModelJUnitTest.java
@@ -66,7 +66,7 @@ public class PartitionedRegionLoadModelJUnitTest {
 
   private static final int MAX_MOVES = 5000;
   private static final boolean DEBUG = true;
-
+  private static final long MB = 1024 * 1024;
   private MyBucketOperator bucketOperator;
   private final PartitionedRegion partitionedRegion = 
mock(PartitionedRegion.class);
   final ClusterDistributionManager clusterDistributionManager =
@@ -443,7 +443,8 @@ public class PartitionedRegionLoadModelJUnitTest {
    * lmm, it will prevent a bucket move
    */
   @Test
-  public void testColocationEnforceLocalMaxMemory() throws 
UnknownHostException {
+  public void testColocationTwoNonEvictionRegionsEnforceLocalMaxMemory()
+      throws UnknownHostException {
     PartitionedRegionLoadModel model = new 
PartitionedRegionLoadModel(bucketOperator, 1, 4,
         getAddressComparor(false), Collections.emptySet(), partitionedRegion);
 
@@ -452,25 +453,27 @@ public class PartitionedRegionLoadModelJUnitTest {
     InternalDistributedMember member2 =
         new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
 
-    // Create some buckets with low redundancy on member 1
+    // Create some buckets with low redundancy on member 1 and enough lmm for 
region a
     PartitionMemberInfoImpl details1 =
-        buildDetails(member1, new long[] {1, 1, 1, 1}, new long[] {1, 1, 1, 
1});
+        buildDetails(member1, 500, 500 * MB, new long[] {1 * MB, 1 * MB, 1 * 
MB, 1 * MB},
+            new long[] {1, 1, 1, 1});
     PartitionMemberInfoImpl details2 =
-        buildDetails(member2, new long[] {0, 0, 0, 0}, new long[] {0, 0, 0, 
0});
+        buildDetails(member2, 500, 500 * MB, new long[] {0, 0, 0, 0}, new 
long[] {0, 0, 0, 0});
     model.addRegion("a", Arrays.asList(details1, details2), new 
FakeOfflineDetails(), true);
 
 
-    // Member 2 has a lmm of 2, so it should only accept 2 buckets
+    // Region b has a lmm of 2MB, so member2 should only accept 2 buckets
     PartitionMemberInfoImpl bDetails1 =
-        buildDetails(member1, 2, 2, new long[] {1, 1, 1, 1}, new long[] {1, 1, 
1, 1});
+        buildDetails(member1, 2, 2 * MB, new long[] {1 * MB, 1 * MB, 1 * MB, 1 
* MB},
+            new long[] {1, 1, 1, 1});
     PartitionMemberInfoImpl bDetails2 =
-        buildDetails(member2, 2, 2, new long[] {0, 0, 0, 0}, new long[] {0, 0, 
0, 0});
+        buildDetails(member2, 2, 2 * MB, new long[] {0, 0, 0, 0}, new long[] 
{0, 0, 0, 0});
     model.addRegion("b", Arrays.asList(bDetails1, bDetails2), new 
FakeOfflineDetails(), true);
 
 
     assertThat(doMoves(new CompositeDirector(true, true, false, true), 
model)).isEqualTo(4);
 
-    // Everything should be create on member2
+    // Only (2+2)MB data should be created on member2
     Set<Create> expectedCreates = new HashSet<>();
     expectedCreates.add(new Create(member2, 0));
     expectedCreates.add(new Create(member2, 1));
@@ -483,10 +486,12 @@ public class PartitionedRegionLoadModelJUnitTest {
   }
 
   /**
-   * Test that each region individually honors it's enforce local max memory 
flag.
+   * Test that a region with enforceLocalMaxMemory disabled colocated with
+   * a region with memory full and enforceLocalMaxMemory enabled will prevent 
a bucket move.
    */
   @Test
-  public void testColocationIgnoreEnforceLocalMaxMemory() throws 
UnknownHostException {
+  public void testColocationOneNonEvictionRegionReachesLocalMaxMemoryLimit()
+      throws UnknownHostException {
     PartitionedRegionLoadModel model = new 
PartitionedRegionLoadModel(bucketOperator, 1, 4,
         getAddressComparor(false), Collections.emptySet(), partitionedRegion);
     InternalDistributedMember member1 =
@@ -494,19 +499,62 @@ public class PartitionedRegionLoadModelJUnitTest {
     InternalDistributedMember member2 =
         new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
 
-    // Create some buckets with low redundancy on member 1
     PartitionMemberInfoImpl details1 =
-        buildDetails(member1, new long[] {1, 1, 1, 1}, new long[] {1, 1, 1, 
1});
+        buildDetails(member1, 1, 8 * MB, new long[] {1 * MB, 1 * MB, 1 * MB, 1 
* MB},
+            new long[] {1, 1, 1, 1});
     PartitionMemberInfoImpl details2 =
-        buildDetails(member2, new long[] {0, 0, 0, 0}, new long[] {0, 0, 0, 
0});
+        buildDetails(member2, 1, 8 * MB, new long[] {0, 0, 0, 0}, new long[] 
{0, 0, 0, 0});
+    model.addRegion("a", Arrays.asList(details1, details2), new 
FakeOfflineDetails(), false);
+
+
+    PartitionMemberInfoImpl bDetails1 =
+        buildDetails(member1, 1, 2 * MB, new long[] {1 * MB, 1 * MB, 1 * MB, 1 
* MB},
+            new long[] {1, 1, 1, 1});
+    PartitionMemberInfoImpl bDetails2 =
+        buildDetails(member2, 1, 2 * MB, new long[] {0, 0, 0, 0}, new long[] 
{0, 0, 0, 0});
+    model.addRegion("b", Arrays.asList(bDetails1, bDetails2), new 
FakeOfflineDetails(), true);
+
+
+    assertThat(doMoves(new CompositeDirector(true, true, false, true), 
model)).isEqualTo(4);
+
+    Set<Create> expectedCreates = new HashSet<>();
+    expectedCreates.add(new Create(member2, 0));
+    expectedCreates.add(new Create(member2, 1));
+    assertThat(new 
HashSet<>(bucketOperator.creates)).isEqualTo(expectedCreates);
+
+    Set<Move> expectedMoves = new HashSet<>();
+    expectedMoves.add(new Move(member1, member2));
+    expectedMoves.add(new Move(member1, member2));
+    assertThat(new 
HashSet<>(bucketOperator.primaryMoves)).isEqualTo(expectedMoves);
+  }
+
+  /**
+   * Test that a region with memory full and enforceLocalMaxMemory disabled 
will not prevent a
+   * bucket move.
+   */
+  @Test
+  public void testColocationOneEvictionRegionReachesLocalMaxMemoryLimit()
+      throws UnknownHostException {
+    PartitionedRegionLoadModel model = new 
PartitionedRegionLoadModel(bucketOperator, 1, 4,
+        getAddressComparor(false), Collections.emptySet(), partitionedRegion);
+    InternalDistributedMember member1 =
+        new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
+    InternalDistributedMember member2 =
+        new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
+
+    PartitionMemberInfoImpl details1 =
+        buildDetails(member1, 1, 4 * MB, new long[] {1 * MB, 1 * MB, 1 * MB, 1 
* MB},
+            new long[] {1, 1, 1, 1});
+    PartitionMemberInfoImpl details2 =
+        buildDetails(member2, 1, 4 * MB, new long[] {0, 0, 0, 0}, new long[] 
{0, 0, 0, 0});
     model.addRegion("a", Arrays.asList(details1, details2), new 
FakeOfflineDetails(), true);
 
 
-    // Member 2 has a lmm of 2, so it should only accept 2 buckets
     PartitionMemberInfoImpl bDetails1 =
-        buildDetails(member1, 2, 2, new long[] {1, 1, 1, 1}, new long[] {1, 1, 
1, 1});
+        buildDetails(member1, 1, 2 * MB, new long[] {1 * MB, 1 * MB, 1 * MB, 1 
* MB},
+            new long[] {1, 1, 1, 1});
     PartitionMemberInfoImpl bDetails2 =
-        buildDetails(member2, 2, 2, new long[] {0, 0, 0, 0}, new long[] {0, 0, 
0, 0});
+        buildDetails(member2, 1, 2 * MB, new long[] {0, 0, 0, 0}, new long[] 
{0, 0, 0, 0});
     model.addRegion("b", Arrays.asList(bDetails1, bDetails2), new 
FakeOfflineDetails(), false);
 
 

Reply via email to