This is an automated email from the ASF dual-hosted git repository.

sai_boorlagadda pushed a commit to branch feature/gha1
in repository https://gitbox.apache.org/repos/asf/geode.git

commit f479e58cc06ae2b3a627ae56ff8ba03dfbe4cd33
Author: Sai Boorlagadda <sai.boorlaga...@gmail.com>
AuthorDate: Sat Jan 21 19:41:42 2023 -0800

    another try to see if clear works
---
 .../apache/geode/internal/cache/DiskStoreImpl.java | 26 +++++-----------------
 .../geode/internal/cache/PersistentOplogSet.java   |  2 +-
 .../OplogEntryIdSetDrfHashSetThresholdTest.java    |  6 ++++-
 .../geode/internal/cache/OplogEntryIdSetTest.java  |  6 ++++-
 4 files changed, 17 insertions(+), 23 deletions(-)

diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java 
b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index dd2bb2c300..8415a00b34 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -71,7 +71,6 @@ import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
 import org.apache.commons.io.FileUtils;
 import org.apache.logging.log4j.Logger;
-import org.jetbrains.annotations.TestOnly;
 
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.CancelException;
@@ -199,7 +198,7 @@ public class DiskStoreImpl implements DiskStore {
       GeodeGlossary.GEMFIRE_PREFIX + "disk.recoverLruValues";
 
   static final long DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT = 805306368;
-  final long DRF_HASHMAP_OVERFLOW_THRESHOLD =
+  static final long DRF_HASHMAP_OVERFLOW_THRESHOLD =
       Long.getLong(DRF_HASHMAP_OVERFLOW_THRESHOLD_NAME, 
DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT);
 
   boolean RECOVER_VALUES = 
getBoolean(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, true);
@@ -3533,26 +3532,18 @@ public class DiskStoreImpl implements DiskStore {
     private final List<LongOpenHashSet> allLongs;
     private final AtomicReference<IntOpenHashSet> currentInts;
     private final AtomicReference<LongOpenHashSet> currentLongs;
-    private final long drfHashMapOverFlowThreashold;
 
     // For testing purposes only.
     @VisibleForTesting
-    OplogEntryIdSet(List<IntOpenHashSet> allInts, List<LongOpenHashSet> 
allLongs,
-        long drfHashMapOverflowThreshold) {
+    OplogEntryIdSet(List<IntOpenHashSet> allInts, List<LongOpenHashSet> 
allLongs) {
       this.allInts = allInts;
       currentInts = new AtomicReference<>(this.allInts.get(0));
 
       this.allLongs = allLongs;
       currentLongs = new AtomicReference<>(this.allLongs.get(0));
-      this.drfHashMapOverFlowThreashold = drfHashMapOverflowThreshold;
-    }
-
-    @TestOnly
-    OplogEntryIdSet(List<IntOpenHashSet> allInts, List<LongOpenHashSet> 
allLongs) {
-      this(allInts, allLongs, DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT);
     }
 
-    public OplogEntryIdSet(long drfHashMapOverflowThreshold) {
+    public OplogEntryIdSet() {
       IntOpenHashSet intHashSet = new IntOpenHashSet((int) INVALID_ID);
       allInts = new ArrayList<>();
       allInts.add(intHashSet);
@@ -3562,11 +3553,6 @@ public class DiskStoreImpl implements DiskStore {
       allLongs = new ArrayList<>();
       allLongs.add(longHashSet);
       currentLongs = new AtomicReference<>(longHashSet);
-      this.drfHashMapOverFlowThreashold = drfHashMapOverflowThreshold;
-    }
-
-    public OplogEntryIdSet() {
-      this(DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT);
     }
 
     public void add(long id) {
@@ -3594,14 +3580,14 @@ public class DiskStoreImpl implements DiskStore {
 
     boolean shouldOverflow(final long id) {
       if (id > 0 && id <= 0x00000000FFFFFFFFL) {
-        return currentInts.get().size() == drfHashMapOverFlowThreashold;
+        return currentInts.get().size() == DRF_HASHMAP_OVERFLOW_THRESHOLD;
       } else {
-        return currentLongs.get().size() == drfHashMapOverFlowThreashold;
+        return currentLongs.get().size() == DRF_HASHMAP_OVERFLOW_THRESHOLD;
       }
     }
 
     void overflowToNewHashMap(final long id) {
-      if (drfHashMapOverFlowThreashold == 
DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT) {
+      if (DRF_HASHMAP_OVERFLOW_THRESHOLD == 
DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT) {
         logger.warn(
             "There is a large number of deleted entries within the disk-store, 
please execute an offline compaction.");
       }
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
index 80a9378cfd..67291651a3 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
@@ -440,7 +440,7 @@ public class PersistentOplogSet implements OplogSet {
   }
 
   private long recoverOplogs(long byteCount) {
-    OplogEntryIdSet deletedIds = new 
OplogEntryIdSet(parent.DRF_HASHMAP_OVERFLOW_THRESHOLD);
+    OplogEntryIdSet deletedIds = new OplogEntryIdSet();
     TreeSet<Oplog> oplogSet = getSortedOplogs();
 
     if (!getAlreadyRecoveredOnce().get()) {
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java
index e4b3c56f36..57b08c45b8 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java
@@ -25,6 +25,8 @@ import java.util.stream.LongStream;
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
 import org.junit.jupiter.api.Test;
+import org.junitpioneer.jupiter.SetSystemProperty;
+import org.junitpioneer.jupiter.WritesSystemProperty;
 
 import org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet;
 
@@ -33,6 +35,8 @@ import 
org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet;
  */
 public class OplogEntryIdSetDrfHashSetThresholdTest {
   @Test
+  @WritesSystemProperty
+  @SetSystemProperty(key = "gemfire.disk.drfHashMapOverflowThreshold", value = 
"10")
   public void addMethodOverflowBasedOnDrfOverflowThresholdParameters() {
     int testEntries = 41;
     IntOpenHashSet intOpenHashSet = new IntOpenHashSet();
@@ -43,7 +47,7 @@ public class OplogEntryIdSetDrfHashSetThresholdTest {
     List<LongOpenHashSet> longOpenHashSets =
         new ArrayList<>(Collections.singletonList(longOpenHashSet));
 
-    OplogEntryIdSet oplogEntryIdSet = new OplogEntryIdSet(intOpenHashSets, 
longOpenHashSets, 10);
+    OplogEntryIdSet oplogEntryIdSet = new OplogEntryIdSet(intOpenHashSets, 
longOpenHashSets);
     IntStream.range(1, testEntries).forEach(oplogEntryIdSet::add);
     LongStream.range(0x00000000FFFFFFFFL + 1, 0x00000000FFFFFFFFL + 
testEntries)
         .forEach(oplogEntryIdSet::add);
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetTest.java
index 53ded131a6..100e062eec 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetTest.java
@@ -29,7 +29,9 @@ import java.util.stream.LongStream;
 
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junitpioneer.jupiter.ClearSystemProperty;
+import org.junitpioneer.jupiter.ReadsSystemProperty;
 import org.mockito.stubbing.Answer;
 
 import org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet;
@@ -72,6 +74,8 @@ public class OplogEntryIdSetTest {
   }
 
   @Test
+  @ReadsSystemProperty
+  @ClearSystemProperty(key = "gemfire.disk.drfHashMapOverflowThreshold")
   public void 
addMethodOverflowsWhenInternalAddThrowsIllegalArgumentException() {
     int testEntries = 1000;
     int magicInt = testEntries + 1;

Reply via email to the mailing list.