albertogpz commented on a change in pull request #7145:
URL: https://github.com/apache/geode/pull/7145#discussion_r765694850



##########
File path: geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest.java
##########
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.Disconnect.disconnectAllFromDS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+
+/**
+ * Verifies that the unnecessary memory is cleared when operational log is compacted.
+ */
+public class DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest {
+
+  private final Properties config = new Properties();
+  private Cache cache;
+
+  private File[] diskDirs;
+  private int[] diskDirSizes;
+
+  private String regionName;
+  private String diskStoreName;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    String uniqueName = getClass().getSimpleName() + "_" + testName.getMethodName();
+    regionName = uniqueName + "_region";
+    diskStoreName = uniqueName + "_diskStore";
+
+    config.setProperty(MCAST_PORT, "0");
+    config.setProperty(LOCATORS, "");
+
+    cache = new CacheFactory(config).create();
+
+    diskDirs = new File[1];
+    diskDirs[0] = createDirectory(temporaryFolder.getRoot(), testName.getMethodName());
+    diskDirSizes = new int[1];
+    Arrays.fill(diskDirSizes, Integer.MAX_VALUE);
+
+    DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
+    TombstoneService.EXPIRED_TOMBSTONE_LIMIT = 1;
+    TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 1;
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      cache.close();
+    } finally {
+      DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
+      disconnectAllFromDS();
+    }
+  }
+
+  /**
+   * Verifies that the unnecessary memory is cleared when operational log (.crf and .drf) is
+   * compacted.
+   * This test case covers the following scenario:
+   *
+   * 1. Create several Oplog files (.crf, .drf and .krf) by executing put operations
+   * 2. Execute destroy operation for every fifth entry, and each time add a new entry. This will
+   * result in a few additional Oplog files. Compaction threshold will not be reached.
+   * 3. Destroy all operations created in step 2. This will trigger compaction of files that
+   * were created in step 2. Compaction will delete only .crf and .krf files, but will not
+   * delete .drf files because they contain destroy operations for events located in
+   * .crf files created in step 1. Check that unnecessary objects are cleared for the
+   * Oplog that represents orphaned .drf file (no accompanying .crf and .krf file)
+   **/
+  @Test
+  public void testCompactorRegionMapDeletedForOnlyDrfOplogAfterCompactionIsPerformed()
+      throws InterruptedException {
+
+    final int ENTRY_RANGE_1 = 300;
+    final int ENTRY_RANGE_2 = 600;
+
+    createDiskStore(30, 10000);
+    Region<Object, Object> region = createRegion();
+    DiskStoreImpl diskStore = ((LocalRegion) region).getDiskStore();
+
+    // Create several oplog files (.crf and .drf) by executing put operations in defined range
+    for (int i = 0; i < ENTRY_RANGE_1; i++) {
+      region.put(i, new byte[100]);
+    }
+    await().untilAsserted(() -> assertThat(getCurrentNumberOfOplogs(diskStore)).isEqualTo(5));
+
+    // Destroy every fifth entry from previous range and each time put new entry in new range.
+    // This will create additional oplog files (.crf and .drf), but compaction will not be triggered
+    // as threshold will not be reached. Oplog files (.drf) created in this step will contain
+    // destroys for events that are located in .crf files from previous range.
+    TombstoneService tombstoneService = ((InternalCache) cache).getTombstoneService();
+    int key = 0;
+    while (key < ENTRY_RANGE_1) {
+      region.destroy(key);
+      // It is necessary to force tombstone expiration, otherwise event won't be stored in .drf file
+      // and total live count won't be decreased
+      await().untilAsserted(
+          () -> assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue());
+      region.put(key + ENTRY_RANGE_1, new byte[300]);
+      key = key + 5;
+    }
+    await().untilAsserted(() -> assertThat(getCurrentNumberOfOplogs(diskStore)).isEqualTo(7));
+
+    // Destroy all events created in previous step in order to trigger automatic compaction.
+    // This will trigger compaction for the files that were created in previous step.
+    // Compaction will delete .crf and .krf file, but will leave .drf file because it contains
+    // destroy operation for the events that are located in some older .crf files.
+    key = ENTRY_RANGE_1;
+    while (key < ENTRY_RANGE_2) {
+      region.destroy(key);
+      assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue();
+      key = key + 5;
+    }
+
+    // wait for all Oplogs to be compacted
+    await().untilAsserted(() -> assertThat(isOplogToBeCompactedAvailable(diskStore)).isFalse());

Review comment:
       It seems to me that this part of the test case is repeated in three test cases. Could this code be extracted into a method so that it is reused across the test cases?
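   For example, the shared piece could become a small private helper (just a sketch; the method name and signature here are only a suggestion, not something already in the PR):

   ```java
   // Hypothetical helper extracted from the block repeated in the three tests.
   // Destroys every fifth entry in [fromKey, toKey), forcing tombstone expiration each time,
   // and then waits until no oplog is left waiting to be compacted.
   private void destroyEveryFifthEntryAndWaitForCompaction(Region<Object, Object> region,
       DiskStoreImpl diskStore, TombstoneService tombstoneService, int fromKey, int toKey)
       throws InterruptedException {
     for (int key = fromKey; key < toKey; key += 5) {
       region.destroy(key);
       assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue();
     }
     await().untilAsserted(() -> assertThat(isOplogToBeCompactedAvailable(diskStore)).isFalse());
   }
   ```

   Each test case could then just call it, e.g. `destroyEveryFifthEntryAndWaitForCompaction(region, diskStore, tombstoneService, ENTRY_RANGE_1, ENTRY_RANGE_2);`.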

##########
File path: geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest.java
##########
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.Disconnect.disconnectAllFromDS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+
+/**
+ * Verifies that the unnecessary memory is cleared when operational log is compacted.
+ */
+public class DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest {
+
+  private final Properties config = new Properties();
+  private Cache cache;
+
+  private File[] diskDirs;
+  private int[] diskDirSizes;
+
+  private String regionName;
+  private String diskStoreName;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    String uniqueName = getClass().getSimpleName() + "_" + testName.getMethodName();
+    regionName = uniqueName + "_region";
+    diskStoreName = uniqueName + "_diskStore";
+
+    config.setProperty(MCAST_PORT, "0");
+    config.setProperty(LOCATORS, "");
+
+    cache = new CacheFactory(config).create();
+
+    diskDirs = new File[1];
+    diskDirs[0] = createDirectory(temporaryFolder.getRoot(), testName.getMethodName());
+    diskDirSizes = new int[1];
+    Arrays.fill(diskDirSizes, Integer.MAX_VALUE);
+
+    DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
+    TombstoneService.EXPIRED_TOMBSTONE_LIMIT = 1;
+    TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 1;
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      cache.close();
+    } finally {
+      DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
+      disconnectAllFromDS();
+    }
+  }
+
+  /**
+   * Verifies that the unnecessary memory is cleared when operational log (.crf and .drf) is
+   * compacted.
+   * This test case covers the following scenario:
+   *
+   * 1. Create several Oplog files (.crf, .drf and .krf) by executing put operations
+   * 2. Execute destroy operation for every fifth entry, and each time add a new entry. This will
+   * result in a few additional Oplog files. Compaction threshold will not be reached.
+   * 3. Destroy all operations created in step 2. This will trigger compaction of files that
+   * were created in step 2. Compaction will delete only .crf and .krf files, but will not
+   * delete .drf files because they contain destroy operations for events located in
+   * .crf files created in step 1. Check that unnecessary objects are cleared for the

Review comment:
       Could the check be put in a separate step (step 4), as you have done in the other test cases, so that it is clearer what the scenario consists of and what is being checked? For example:
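   Something along these lines (the exact wording is only a suggestion):

   ```java
    * 3. Destroy all entries created in step 2. This will trigger compaction of files that
    * were created in step 2. Compaction will delete only .crf and .krf files, but will not
    * delete .drf files because they contain destroy operations for events located in
    * .crf files created in step 1.
    * 4. Check that unnecessary objects are cleared for the Oplog that represents the orphaned
    * .drf file (no accompanying .crf and .krf file).
   ```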

##########
File path: geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest.java
##########
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.Disconnect.disconnectAllFromDS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+
+/**
+ * Verifies that the unnecessary memory is cleared when operational log is compacted.
+ */
+public class DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest {
+
+  private final Properties config = new Properties();
+  private Cache cache;
+
+  private File[] diskDirs;
+  private int[] diskDirSizes;
+
+  private String regionName;
+  private String diskStoreName;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    String uniqueName = getClass().getSimpleName() + "_" + testName.getMethodName();
+    regionName = uniqueName + "_region";
+    diskStoreName = uniqueName + "_diskStore";
+
+    config.setProperty(MCAST_PORT, "0");
+    config.setProperty(LOCATORS, "");
+
+    cache = new CacheFactory(config).create();
+
+    diskDirs = new File[1];
+    diskDirs[0] = createDirectory(temporaryFolder.getRoot(), testName.getMethodName());
+    diskDirSizes = new int[1];
+    Arrays.fill(diskDirSizes, Integer.MAX_VALUE);
+
+    DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
+    TombstoneService.EXPIRED_TOMBSTONE_LIMIT = 1;
+    TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 1;
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      cache.close();
+    } finally {
+      DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
+      disconnectAllFromDS();
+    }
+  }
+
+  /**
+   * Verifies that the unnecessary memory is cleared when operational log (.crf and .drf) is
+   * compacted.
+   * This test case covers the following scenario:
+   *
+   * 1. Create several Oplog files (.crf, .drf and .krf) by executing put operations
+   * 2. Execute destroy operation for every fifth entry, and each time add a new entry. This will
+   * result in a few additional Oplog files. Compaction threshold will not be reached.
+   * 3. Destroy all operations created in step 2. This will trigger compaction of files that
+   * were created in step 2. Compaction will delete only .crf and .krf files, but will not
+   * delete .drf files because they contain destroy operations for events located in
+   * .crf files created in step 1. Check that unnecessary objects are cleared for the
+   * Oplog that represents orphaned .drf file (no accompanying .crf and .krf file)
+   **/
+  @Test
+  public void testCompactorRegionMapDeletedForOnlyDrfOplogAfterCompactionIsPerformed()
+      throws InterruptedException {
+
+    final int ENTRY_RANGE_1 = 300;
+    final int ENTRY_RANGE_2 = 600;
+
+    createDiskStore(30, 10000);
+    Region<Object, Object> region = createRegion();
+    DiskStoreImpl diskStore = ((LocalRegion) region).getDiskStore();
+
+    // Create several oplog files (.crf and .drf) by executing put operations in defined range
+    for (int i = 0; i < ENTRY_RANGE_1; i++) {
+      region.put(i, new byte[100]);
+    }
+    await().untilAsserted(() -> assertThat(getCurrentNumberOfOplogs(diskStore)).isEqualTo(5));
+
+    // Destroy every fifth entry from previous range and each time put new entry in new range.
+    // This will create additional oplog files (.crf and .drf), but compaction will not be triggered
+    // as threshold will not be reached. Oplog files (.drf) created in this step will contain
+    // destroys for events that are located in .crf files from previous range.
+    TombstoneService tombstoneService = ((InternalCache) cache).getTombstoneService();
+    int key = 0;
+    while (key < ENTRY_RANGE_1) {
+      region.destroy(key);
+      // It is necessary to force tombstone expiration, otherwise event won't be stored in .drf file
+      // and total live count won't be decreased
+      await().untilAsserted(
+          () -> assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue());
+      region.put(key + ENTRY_RANGE_1, new byte[300]);
+      key = key + 5;
+    }
+    await().untilAsserted(() -> assertThat(getCurrentNumberOfOplogs(diskStore)).isEqualTo(7));
+
+    // Destroy all events created in previous step in order to trigger automatic compaction.
+    // This will trigger compaction for the files that were created in previous step.
+    // Compaction will delete .crf and .krf file, but will leave .drf file because it contains
+    // destroy operation for the events that are located in some older .crf files.
+    key = ENTRY_RANGE_1;
+    while (key < ENTRY_RANGE_2) {
+      region.destroy(key);
+      assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue();
+      key = key + 5;
+    }
+
+    // wait for all Oplogs to be compacted
+    await().untilAsserted(() -> assertThat(isOplogToBeCompactedAvailable(diskStore)).isFalse());
+
+    await().untilAsserted(
+        () -> assertThat(areAllUnnecessaryObjectClearedForOnlyDrfOplog(diskStore)).isTrue());
+  }
+
+  /**
+   * Verifies that the unnecessary memory is cleared when operational log (.crf and .drf) is
+   * compacted. This is a special scenario where creation of .krf file is cancelled by ongoing
+   * compaction. This usually happens when new oplog is rolled out and previous oplog is
+   * immediately marked as eligible for compaction. Compaction and .krf creation start at a
+   * similar time and compactor cancels creation of .krf if it is executed first.
+   *
+   * This test case covers the following scenario:
+   *
+   * 1. Create several Oplog files (.crf, .drf and .krf) by executing put operations.
+   * 2. Execute destroy operation for every fifth entry, and each time add a new entry. When
+   * it is time for oplog to roll out, the previous oplog will be immediately marked as ready
+   * for compaction because compaction threshold is set to a high value in this case. This way
+   * we force a race condition where compaction cancels creation of .krf file. Compaction will
+   * delete only .crf file (.krf was not created at all), but will not delete .drf files because
+   * they contain destroy operations for events located in .crf files created in step 1. Check

Review comment:
       Same comment as for the previous test case about putting the check in a new step.

##########
File path: geode-core/src/integrationTest/java/org/apache/geode/internal/cache/DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest.java
##########
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.apache.geode.test.dunit.Disconnect.disconnectAllFromDS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Properties;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+
+/**
+ * Verifies that the unnecessary memory is cleared when operational log is compacted.
+ */
+public class DiskRegionCompactorClearsObjectThatAreNoLongerNeededIntegrationTest {
+
+  private final Properties config = new Properties();
+  private Cache cache;
+
+  private File[] diskDirs;
+  private int[] diskDirSizes;
+
+  private String regionName;
+  private String diskStoreName;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    String uniqueName = getClass().getSimpleName() + "_" + testName.getMethodName();
+    regionName = uniqueName + "_region";
+    diskStoreName = uniqueName + "_diskStore";
+
+    config.setProperty(MCAST_PORT, "0");
+    config.setProperty(LOCATORS, "");
+
+    cache = new CacheFactory(config).create();
+
+    diskDirs = new File[1];
+    diskDirs[0] = createDirectory(temporaryFolder.getRoot(), testName.getMethodName());
+    diskDirSizes = new int[1];
+    Arrays.fill(diskDirSizes, Integer.MAX_VALUE);
+
+    DiskStoreImpl.SET_IGNORE_PREALLOCATE = true;
+    TombstoneService.EXPIRED_TOMBSTONE_LIMIT = 1;
+    TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 1;
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      cache.close();
+    } finally {
+      DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
+      disconnectAllFromDS();
+    }
+  }
+
+  /**
+   * Verifies that the unnecessary memory is cleared when operational log (.crf and .drf) is
+   * compacted.
+   * This test case covers the following scenario:
+   *
+   * 1. Create several Oplog files (.crf, .drf and .krf) by executing put operations
+   * 2. Execute destroy operation for every fifth entry, and each time add a new entry. This will
+   * result in a few additional Oplog files. Compaction threshold will not be reached.
+   * 3. Destroy all operations created in step 2. This will trigger compaction of files that
+   * were created in step 2. Compaction will delete only .crf and .krf files, but will not
+   * delete .drf files because they contain destroy operations for events located in
+   * .crf files created in step 1. Check that unnecessary objects are cleared for the
+   * Oplog that represents orphaned .drf file (no accompanying .crf and .krf file)
+   **/
+  @Test
+  public void testCompactorRegionMapDeletedForOnlyDrfOplogAfterCompactionIsPerformed()
+      throws InterruptedException {
+
+    final int ENTRY_RANGE_1 = 300;
+    final int ENTRY_RANGE_2 = 600;
+
+    createDiskStore(30, 10000);
+    Region<Object, Object> region = createRegion();
+    DiskStoreImpl diskStore = ((LocalRegion) region).getDiskStore();
+
+    // Create several oplog files (.crf and .drf) by executing put operations in defined range
+    for (int i = 0; i < ENTRY_RANGE_1; i++) {
+      region.put(i, new byte[100]);
+    }
+    await().untilAsserted(() -> assertThat(getCurrentNumberOfOplogs(diskStore)).isEqualTo(5));
+
+    // Destroy every fifth entry from previous range and each time put new entry in new range.
+    // This will create additional oplog files (.crf and .drf), but compaction will not be triggered
+    // as threshold will not be reached. Oplog files (.drf) created in this step will contain
+    // destroys for events that are located in .crf files from previous range.
+    TombstoneService tombstoneService = ((InternalCache) cache).getTombstoneService();
+    int key = 0;
+    while (key < ENTRY_RANGE_1) {
+      region.destroy(key);
+      // It is necessary to force tombstone expiration, otherwise event won't be stored in .drf file
+      // and total live count won't be decreased
+      await().untilAsserted(
+          () -> assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue());
+      region.put(key + ENTRY_RANGE_1, new byte[300]);
+      key = key + 5;
+    }
+    await().untilAsserted(() -> assertThat(getCurrentNumberOfOplogs(diskStore)).isEqualTo(7));
+
+    // Destroy all events created in previous step in order to trigger automatic compaction.
+    // This will trigger compaction for the files that were created in previous step.
+    // Compaction will delete .crf and .krf file, but will leave .drf file because it contains
+    // destroy operation for the events that are located in some older .crf files.
+    key = ENTRY_RANGE_1;
+    while (key < ENTRY_RANGE_2) {
+      region.destroy(key);
+      assertThat(tombstoneService.forceBatchExpirationForTests(1)).isTrue();
+      key = key + 5;
+    }
+
+    // wait for all Oplogs to be compacted
+    await().untilAsserted(() -> assertThat(isOplogToBeCompactedAvailable(diskStore)).isFalse());
+
+    await().untilAsserted(
+        () -> assertThat(areAllUnnecessaryObjectClearedForOnlyDrfOplog(diskStore)).isTrue());
+  }
+
+  /**
+   * Verifies that the unnecessary memory is cleared when operational log (.crf and .drf) is
+   * compacted. This is a special scenario where creation of .krf file is cancelled by ongoing
+   * compaction. This usually happens when new oplog is rolled out and previous oplog is
+   * immediately marked as eligible for compaction. Compaction and .krf creation start at a
+   * similar time and compactor cancels creation of .krf if it is executed first.
+   *
+   * This test case covers the following scenario:
+   *
+   * 1. Create several Oplog files (.crf, .drf and .krf) by executing put operations.
+   * 2. Execute destroy operation for every fifth entry, and each time add a new entry. When
+   * it is time for oplog to roll out, the previous oplog will be immediately marked as ready
+   * for compaction because compaction threshold is set to a high value in this case. This way
+   * we force a race condition where compaction cancels creation of .krf file. Compaction will
+   * delete only .crf file (.krf was not created at all), but will not delete .drf files because
+   * they contain destroy operations for events located in .crf files created in step 1. Check
+   * that unnecessary objects are cleared for the Oplog that represents orphaned .drf file
+   * (no accompanying .crf and .krf file)
+   **/
+  @Test
+  public void testCompactorRegionMapDeletedAfterCompactionForOnlyDrfOplogIsDoneRaceCondition()

Review comment:
       Instead of using `RaceCondition` in the test case name (and in the description), I would use `KrfCreationCanceledByCompaction`.
   The test case name would then be:
   `testCompactorRegionMapDeletedAfterCompactionForOnlyDrfOplogAndKrfCreationCanceledByCompactionIsPerformed()`
   Also, I would change `IsDone` to `IsPerformed` in the test name to keep it aligned with the previous test case.
   




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

