This is an automated email from the ASF dual-hosted git repository.

andor pushed a commit to branch HBASE-28957_rebase
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 616aba34715a7ea4418164828e7b2f1ae612ddd0
Author: vinayak hegde <vinayakph...@gmail.com>
AuthorDate: Tue May 20 21:39:27 2025 +0530

    HBASE-29261: Investigate flaw in backup deletion validation of PITR-critical backups and propose correct approach (#6922)
    
    * improve the logic of backup deletion validation of PITR-critical backups
    
    * add new tests
---
 .../hadoop/hbase/backup/impl/BackupCommands.java   | 169 +++++++-------
 .../hbase/backup/impl/BackupSystemTable.java       |  43 ++++
 ...estBackupDeleteWithContinuousBackupAndPITR.java | 248 ++++++++++++++-------
 3 files changed, 305 insertions(+), 155 deletions(-)
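
The core of the change replaces the old single-timestamp validity check
(isValidPITRBackup) with an explicit covered-PITR-window computation per backup
and table. A minimal standalone sketch of that rule follows; the class and method
names here (PitrWindowSketch, coveredWindow) are illustrative only and are not part
of the patch, which implements the logic in BackupCommands.getCoveredPitrWindowForTable:

    import java.util.Optional;
    import java.util.concurrent.TimeUnit;

    public class PitrWindowSketch {
      // The PITR window a FULL backup enables for a table starts at the later of the
      // backup completion time and the effective floor (max of the table's continuous
      // backup start time and the cluster-wide max allowed PITR time) and ends "now".
      static Optional<long[]> coveredWindow(long backupStartTs, long backupCompleteTs,
        long continuousBackupStartTs, long maxAllowedPitrTs, long now) {
        if (backupStartTs < continuousBackupStartTs) {
          // WAL entries between the backup start and the continuous backup start are
          // missing, so this backup cannot anchor PITR for the table.
          return Optional.empty();
        }
        long effectiveStart = Math.max(continuousBackupStartTs, maxAllowedPitrTs);
        return Optional.of(new long[] { Math.max(backupCompleteTs, effectiveStart), now });
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long cst = now - TimeUnit.DAYS.toMillis(40);  // continuous backup enabled 40 days ago
        long mapt = now - TimeUnit.DAYS.toMillis(30); // 30-day PITR window
        // A full backup taken just after continuous backup was enabled covers [mapt, now].
        System.out.println(coveredWindow(cst + 1000, cst + 2000, cst, mapt, now)
          .map(w -> "[" + w[0] + ", " + w[1] + "]").orElse("not usable for PITR"));
      }
    }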

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index e9d14d1426d..804dc7141a1 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -53,9 +53,9 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import org.agrona.collections.MutableLong;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
@@ -735,19 +736,20 @@ public final class BackupCommands {
     }
 
     /**
-     * Identifies tables that rely on the specified backup for PITR. If a table has no other valid
-     * FULL backups that can facilitate recovery to all points within the PITR retention window, it
-     * is added to the dependent list.
-     * @param backupId The backup ID being evaluated.
-     * @return List of tables dependent on the specified backup for PITR.
-     * @throws IOException If backup metadata cannot be retrieved.
+     * Identifies tables that rely on the specified backup for PITR (Point-In-Time Recovery). A
+     * table is considered dependent on the backup if it does not have any other valid full backups
+     * that can cover the PITR window enabled by the specified backup.
+     * @param backupId The ID of the backup being evaluated for PITR coverage.
+     * @return A list of tables that are dependent on the specified backup for PITR recovery.
+     * @throws IOException If there is an error retrieving the backup metadata or backup system
+     *                     table.
      */
     private List<TableName> getTablesDependentOnBackupForPITR(String backupId) throws IOException {
       List<TableName> dependentTables = new ArrayList<>();
 
       try (final BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) {
+        // Fetch the target backup's info using the backup ID
         BackupInfo targetBackup = backupSystemTable.readBackupInfo(backupId);
-
         if (targetBackup == null) {
           throw new IOException("Backup info not found for backupId: " + backupId);
         }
@@ -757,104 +759,121 @@ public final class BackupCommands {
           return List.of();
         }
 
-        // Retrieve the tables with continuous backup enabled and their start times
+        // Retrieve the tables with continuous backup enabled along with their start times
         Map<TableName, Long> continuousBackupStartTimes =
           backupSystemTable.getContinuousBackupTableSet();
 
-        // Determine the PITR time window
+        // Calculate the PITR window by fetching configuration and current time
         long pitrWindowDays = getConf().getLong(CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS,
           DEFAULT_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS);
         long currentTime = EnvironmentEdgeManager.getDelegate().currentTime();
-        final MutableLong pitrMaxStartTime =
-          new MutableLong(currentTime - TimeUnit.DAYS.toMillis(pitrWindowDays));
-
-        // For all tables, determine the earliest (minimum) continuous backup start time.
-        // This represents the actual earliest point-in-time recovery (PITR) timestamp
-        // that can be used, ensuring we do not go beyond the available backup data.
-        long minContinuousBackupStartTime = currentTime;
-        for (TableName table : targetBackup.getTableNames()) {
-          minContinuousBackupStartTime = Math.min(minContinuousBackupStartTime,
-            continuousBackupStartTimes.getOrDefault(table, currentTime));
-        }
-
-        // The PITR max start time should be the maximum of the calculated minimum continuous backup
-        // start time and the default PITR max start time (based on the configured window).
-        // This ensures that PITR does not extend beyond what is practically possible.
-        pitrMaxStartTime.set(Math.max(minContinuousBackupStartTime, pitrMaxStartTime.longValue()));
+        final long maxAllowedPITRTime = currentTime - TimeUnit.DAYS.toMillis(pitrWindowDays);
 
+        // Check each table associated with the target backup
         for (TableName table : targetBackup.getTableNames()) {
-          // This backup is not necessary for this table since it doesn't have PITR enabled
+          // Skip tables without continuous backup enabled
           if (!continuousBackupStartTimes.containsKey(table)) {
             continue;
           }
-          if (
-            !isValidPITRBackup(targetBackup, table, continuousBackupStartTimes,
-              pitrMaxStartTime.longValue())
-          ) {
-            continue; // This backup is not crucial for PITR of this table
+
+          // Calculate the PITR window this backup covers for the table
+          Optional<Pair<Long, Long>> coveredPitrWindow = getCoveredPitrWindowForTable(targetBackup,
+            continuousBackupStartTimes.get(table), maxAllowedPITRTime, currentTime);
+
+          // If this backup does not cover a valid PITR window for the table, skip
+          if (coveredPitrWindow.isEmpty()) {
+            continue;
           }
 
-          // Check if another valid full backup exists for this table
-          List<BackupInfo> backupHistory = backupSystemTable.getBackupInfos(BackupState.COMPLETE);
-          boolean hasAnotherValidBackup = backupHistory.stream()
-            .anyMatch(backup -> !backup.getBackupId().equals(backupId) && isValidPITRBackup(backup,
-              table, continuousBackupStartTimes, pitrMaxStartTime.longValue()));
+          // Check if there is any other valid backup that can cover the PITR window
+          List<BackupInfo> allBackups = backupSystemTable.getBackupInfos(BackupState.COMPLETE);
+          boolean hasAnotherValidBackup =
+            canAnyOtherBackupCover(allBackups, targetBackup, table, coveredPitrWindow.get(),
+              continuousBackupStartTimes.get(table), maxAllowedPITRTime, currentTime);
 
+          // If no other valid backup exists, add the table to the dependent list
           if (!hasAnotherValidBackup) {
             dependentTables.add(table);
           }
         }
       }
+
       return dependentTables;
     }
 
     /**
-     * Determines if a given backup is a valid candidate for Point-In-Time Recovery (PITR) for a
-     * specific table. A valid backup ensures that recovery is possible to any point within the PITR
-     * retention window. A backup qualifies if:
-     * <ul>
-     * <li>It is a FULL backup.</li>
-     * <li>It contains the specified table.</li>
-     * <li>Its completion timestamp is before the PITR retention window start time.</li>
-     * <li>Its completion timestamp is on or after the table’s continuous backup start time.</li>
-     * </ul>
-     * @param backupInfo             The backup information being evaluated.
-     * @param tableName              The table for which PITR validity is being checked.
-     * @param continuousBackupTables A map of tables to their continuous backup start time.
-     * @param pitrMaxStartTime       The maximum allowed start timestamp for PITR eligibility.
-     * @return {@code true} if the backup enables recovery to all valid points in time for the
-     *         table; {@code false} otherwise.
+     * Calculates the PITR (Point-In-Time Recovery) window that the given backup enables for a
+     * table.
+     * @param backupInfo                Metadata of the backup being evaluated.
+     * @param continuousBackupStartTime When continuous backups started for the table.
+     * @param maxAllowedPITRTime        The earliest timestamp from which PITR is supported in the
+     *                                  cluster.
+     * @param currentTime               Current time.
+     * @return Optional PITR window as a pair (start, end), or empty if backup is not useful for
+     *         PITR.
      */
-    private boolean isValidPITRBackup(BackupInfo backupInfo, TableName tableName,
-      Map<TableName, Long> continuousBackupTables, long pitrMaxStartTime) {
-      // Only FULL backups are mandatory for PITR
-      if (!BackupType.FULL.equals(backupInfo.getType())) {
-        return false;
-      }
+    private Optional<Pair<Long, Long>> getCoveredPitrWindowForTable(BackupInfo backupInfo,
+      long continuousBackupStartTime, long maxAllowedPITRTime, long currentTime) {
 
-      // The backup must include the table to be relevant for PITR
-      if (!backupInfo.getTableNames().contains(tableName)) {
-        return false;
-      }
+      long backupStartTs = backupInfo.getStartTs();
+      long backupEndTs = backupInfo.getCompleteTs();
+      long effectiveStart = Math.max(continuousBackupStartTime, maxAllowedPITRTime);
 
-      // The backup must have been completed before the PITR retention window starts,
-      // otherwise, it won't be helpful in cases where the recovery point is between
-      // pitrMaxStartTime and the backup completion time.
-      if (backupInfo.getCompleteTs() > pitrMaxStartTime) {
-        return false;
+      if (backupStartTs < continuousBackupStartTime) {
+        return Optional.empty();
       }
 
-      // Retrieve the table's continuous backup start time
-      long continuousBackupStartTime = continuousBackupTables.getOrDefault(tableName, 0L);
+      return Optional.of(Pair.newPair(Math.max(backupEndTs, effectiveStart), currentTime));
+    }
 
-      // The backup must have been started on or after the table’s continuous backup start time,
-      // otherwise, it won't be helpful in few cases because we wouldn't have the WAL entries
-      // between the backup start time and the continuous backup start time.
-      if (backupInfo.getStartTs() < continuousBackupStartTime) {
-        return false;
+    /**
+     * Checks if any backup (excluding the current backup) can cover the specified PITR window for
+     * the given table. A backup can cover the PITR window if it fully encompasses the target time
+     * range specified.
+     * @param allBackups                List of all backups available.
+     * @param currentBackup             The current backup that should not be considered for
+     *                                  coverage.
+     * @param table                     The table for which we need to check backup coverage.
+     * @param targetWindow              A pair representing the target PITR window (start and end
+     *                                  times).
+     * @param continuousBackupStartTime When continuous backups started for the table.
+     * @param maxAllowedPITRTime        The earliest timestamp from which PITR is supported in the
+     *                                  cluster.
+     * @param currentTime               Current time.
+     * @return {@code true} if any backup (excluding the current one) fully covers the target PITR
+     *         window; {@code false} otherwise.
+     */
+    private boolean canAnyOtherBackupCover(List<BackupInfo> allBackups, BackupInfo currentBackup,
+      TableName table, Pair<Long, Long> targetWindow, long continuousBackupStartTime,
+      long maxAllowedPITRTime, long currentTime) {
+
+      long targetStart = targetWindow.getFirst();
+      long targetEnd = targetWindow.getSecond();
+
+      // Iterate through all backups (including the current one)
+      for (BackupInfo backup : allBackups) {
+        // Skip if the backup is not full or doesn't contain the table
+        if (!BackupType.FULL.equals(backup.getType())) continue;
+        if (!backup.getTableNames().contains(table)) continue;
+
+        // Skip the current backup itself
+        if (backup.equals(currentBackup)) continue;
+
+        // Get the covered PITR window for this backup
+        Optional<Pair<Long, Long>> coveredWindow = getCoveredPitrWindowForTable(backup,
+          continuousBackupStartTime, maxAllowedPITRTime, currentTime);
+
+        if (coveredWindow.isPresent()) {
+          Pair<Long, Long> covered = coveredWindow.get();
+
+          // The backup must fully cover the target window
+          if (covered.getFirst() <= targetStart && covered.getSecond() >= targetEnd) {
+            return true;
+          }
+        }
       }
 
-      return true;
+      return false;
     }
 
     @Override
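
For a table whose PITR depends on the backup being deleted, the check above only
allows deletion when some other completed FULL backup of that table fully encloses
the window the deleted backup enables. A hedged sketch of that containment test
(names are illustrative, not from the patch):

    public class CoverageCheckSketch {
      // Mirrors the comparison in canAnyOtherBackupCover: a candidate window
      // [candidateStart, candidateEnd] covers the target window [targetStart, targetEnd]
      // only if it starts no later and ends no earlier.
      static boolean covers(long candidateStart, long candidateEnd, long targetStart, long targetEnd) {
        return candidateStart <= targetStart && candidateEnd >= targetEnd;
      }

      public static void main(String[] args) {
        // Candidate completes later than the target backup did, so recovery points between
        // the two completion times would be lost: the target backup stays PITR-critical.
        System.out.println(covers(200L, 1000L, 100L, 1000L)); // false
        // Candidate starts no later and reaches the same end: the target backup can be deleted.
        System.out.println(covers(100L, 1000L, 100L, 1000L)); // true
      }
    }
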
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index 58cfccda37c..fa01cd88345 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -973,6 +973,36 @@ public final class BackupSystemTable implements Closeable {
     }
   }
 
+  /**
+   * Removes tables from the global continuous backup set. Only removes entries that currently exist
+   * in the backup system table.
+   * @param tables set of tables to remove
+   * @throws IOException if an error occurs while updating the backup system table
+   */
+  public void removeContinuousBackupTableSet(Set<TableName> tables) throws IOException {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Remove continuous backup table set from backup system table. tables ["
+        + StringUtils.join(tables, " ") + "]");
+    }
+    if (LOG.isDebugEnabled()) {
+      tables.forEach(table -> LOG.debug("Removing: " + table));
+    }
+
+    Map<TableName, Long> existingTables = getContinuousBackupTableSet();
+    Set<TableName> toRemove =
+      tables.stream().filter(existingTables::containsKey).collect(Collectors.toSet());
+
+    if (toRemove.isEmpty()) {
+      LOG.debug("No matching tables found to remove from continuous backup set.");
+      return;
+    }
+
+    try (Table table = connection.getTable(tableName)) {
+      Delete delete = createDeleteForContinuousBackupTableSet(toRemove);
+      table.delete(delete);
+    }
+  }
+
   /**
    * Deletes incremental backup set for a backup destination
    * @param backupRoot backup root
@@ -1360,6 +1390,19 @@ public final class BackupSystemTable implements Closeable {
     return delete;
   }
 
+  /**
+   * Creates Delete for continuous backup table set
+   * @param tables tables to remove
+   * @return delete operation
+   */
+  private Delete createDeleteForContinuousBackupTableSet(Set<TableName> tables) {
+    Delete delete = new Delete(rowkey(CONTINUOUS_BACKUP_SET));
+    for (TableName tableName : tables) {
+      delete.addColumns(META_FAMILY, Bytes.toBytes(tableName.getNameAsString()));
+    }
+    return delete;
+  }
+
   /**
    * Creates Scan operation to load backup history
    * @return scan operation
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java
index 919d3e79f72..248e8e7b757 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java
@@ -24,21 +24,33 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.util.List;
+import java.io.IOException;
 import java.util.Set;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
+/**
+ * Tests the deletion of HBase backups under continuous backup and PITR settings.
+ * <p>
+ * Terminology:
+ * <ul>
+ * <li><b>ct (current time)</b>: Current timestamp</li>
+ * <li><b>maxAllowedPITRTime (mapt)</b>: Maximum allowed time range for PITR, typically a
+ * cluster-level config (e.g., 30 days ago)</li>
+ * <li><b>cst (continuousBackupStartTime)</b>: Earliest time from which continuous backup is
+ * available</li>
+ * <li><b>fs</b>: Full backup start time (not reliably usable)</li>
+ * <li><b>fm</b>: Time when snapshot (logical freeze) was taken (we don't have this)</li>
+ * <li><b>fe</b>: Full backup end time (used as conservative proxy for fm)</li>
+ * </ul>
+ */
 @Category(LargeTests.class)
 public class TestBackupDeleteWithContinuousBackupAndPITR extends TestBackupBase {
   @ClassRule
@@ -46,112 +58,183 @@ public class TestBackupDeleteWithContinuousBackupAndPITR extends TestBackupBase
     HBaseClassTestRule.forClass(TestBackupDeleteWithContinuousBackupAndPITR.class);
 
   private BackupSystemTable backupSystemTable;
-  private String backupId1;
-  private String backupId2;
-  private String backupId3;
-  private String backupId4;
-  private String backupId5;
 
   /**
-   * Sets up the backup environment before each test.
-   * <p>
-   * This includes:
-   * <ul>
-   * <li>Setting a 30-day PITR (Point-In-Time Recovery) window</li>
-   * <li>Registering table2 as a continuous backup table starting 40 days ago</li>
-   * <li>Creating a mix of full and incremental backups at specific time offsets (using
-   * EnvironmentEdge injection) to simulate scenarios like: - backups outside PITR window - valid
-   * PITR backups - incomplete PITR chains</li>
-   * <li>Resetting the system clock after time manipulation</li>
-   * </ul>
-   * This setup enables tests to evaluate deletion behavior of backups based on age, table type, and
-   * PITR chain requirements.
+   * Configures continuous backup with the specified CST (continuous backup start time).
    */
-  @Before
-  public void setup() throws Exception {
+  private void configureContinuousBackup(long cstTimestamp) throws IOException {
     conf1.setLong(CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS, 30);
     backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
 
-    long currentTime = System.currentTimeMillis();
-    long backupStartTime = currentTime - 40 * ONE_DAY_IN_MILLISECONDS;
-    backupSystemTable.addContinuousBackupTableSet(Set.of(table2), backupStartTime);
+    backupSystemTable.addContinuousBackupTableSet(Set.of(table1), cstTimestamp);
+  }
 
-    backupId1 = fullTableBackup(Lists.newArrayList(table1));
-    assertTrue(checkSucceeded(backupId1));
+  private void cleanupContinuousBackup() throws IOException {
+    backupSystemTable.removeContinuousBackupTableSet(Set.of(table1));
+  }
 
-    // 31 days back
-    EnvironmentEdgeManager
-      .injectEdge(() -> System.currentTimeMillis() - 31 * ONE_DAY_IN_MILLISECONDS);
-    backupId2 = fullTableBackup(Lists.newArrayList(table2));
-    assertTrue(checkSucceeded(backupId2));
+  /**
+   * Main Case: continuousBackupStartTime < maxAllowedPITRTime
+   * <p>
+   * Sub Case: fe < cst
+   */
+  @Test
+  public void testDeletionWhenBackupCompletesBeforeCST() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 40 * ONE_DAY_IN_MILLISECONDS; // CST = 40 days ago
+    configureContinuousBackup(cst);
 
-    // 32 days back
-    EnvironmentEdgeManager
-      .injectEdge(() -> System.currentTimeMillis() - 32 * ONE_DAY_IN_MILLISECONDS);
-    backupId3 = fullTableBackup(Lists.newArrayList(table2));
-    assertTrue(checkSucceeded(backupId3));
+    String backupId =
+      createAndUpdateBackup(cst - ONE_DAY_IN_MILLISECONDS, cst - ONE_DAY_IN_MILLISECONDS + 1000);
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
 
-    // 15 days back
-    EnvironmentEdgeManager
-      .injectEdge(() -> System.currentTimeMillis() - 15 * ONE_DAY_IN_MILLISECONDS);
-    backupId4 = fullTableBackup(Lists.newArrayList(table2));
-    assertTrue(checkSucceeded(backupId4));
+    cleanupContinuousBackup();
+  }
 
-    // Reset clock
-    EnvironmentEdgeManager.reset();
+  /**
+   * Main Case: continuousBackupStartTime < maxAllowedPITRTime
+   * <p>
+   * Sub Case: fs < cst < fe
+   */
+  @Test
+  public void testDeletionWhenBackupStraddlesCST() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 40 * ONE_DAY_IN_MILLISECONDS; // CST = 40 days ago
+    configureContinuousBackup(cst);
 
-    backupId5 = incrementalTableBackup(Lists.newArrayList(table1));
-    assertTrue(checkSucceeded(backupId5));
-  }
+    String backupId = createAndUpdateBackup(cst - 1000, cst + 1000);
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
 
-  @After
-  public void teardown() throws Exception {
-    EnvironmentEdgeManager.reset();
-    // Try to delete all backups forcefully if they exist
-    for (String id : List.of(backupId1, backupId2, backupId3, backupId4, backupId5)) {
-      try {
-        deleteBackup(id, true);
-      } catch (Exception ignored) {
-      }
-    }
+    cleanupContinuousBackup();
   }
 
+  /**
+   * Main Case: continuousBackupStartTime < maxAllowedPITRTime
+   * <p>
+   * Sub Case: fs >= cst && fe < mapt
+   */
   @Test
-  public void testDeleteIncrementalBackup() throws Exception {
-    assertDeletionSucceeds(backupSystemTable, backupId5, false);
+  public void testDeletionWhenBackupWithinCSTToMAPTRangeAndUncovered() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 40 * ONE_DAY_IN_MILLISECONDS;
+    long mapt = now - 30 * ONE_DAY_IN_MILLISECONDS;
+    configureContinuousBackup(cst);
+
+    String backupId = createAndUpdateBackup(cst, mapt - 1000);
+    assertDeletionFails(backupSystemTable, backupId);
+
+    // Cover the backup with another backup
+    String coverId = createAndUpdateBackup(cst, mapt - 1000);
+
+    // Now, deletion should succeed because the backup is covered by the new one
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
+    assertDeletionSucceeds(backupSystemTable, coverId, true);
+
+    cleanupContinuousBackup();
   }
 
+  /**
+   * Main Case: continuousBackupStartTime < maxAllowedPITRTime
+   * <p>
+   * Sub Case: fs >= cst && fe >= mapt
+   */
   @Test
-  public void testDeleteFullBackupNonContinuousTable() throws Exception {
-    assertDeletionSucceeds(backupSystemTable, backupId1, false);
+  public void testDeletionWhenBackupExtendsBeyondMAPTAndUncovered() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 40 * ONE_DAY_IN_MILLISECONDS;
+    long mapt = now - 30 * ONE_DAY_IN_MILLISECONDS;
+    configureContinuousBackup(cst);
+
+    String backupId = createAndUpdateBackup(cst + 1000, mapt + 1000);
+    assertDeletionFails(backupSystemTable, backupId);
+
+    // Cover the backup with another backup
+    String coverId = createAndUpdateBackup(cst + 1000, mapt + 1000);
+
+    // Now, deletion should succeed because the backup is covered by the new one
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
+    assertDeletionSucceeds(backupSystemTable, coverId, true);
+
+    cleanupContinuousBackup();
   }
 
+  /**
+   * Main Case: continuousBackupStartTime >= maxAllowedPITRTime
+   * <p>
+   * Sub Case: fs < cst
+   */
   @Test
-  public void testDeletePITRIncompleteBackup() throws Exception {
-    assertDeletionSucceeds(backupSystemTable, backupId4, false);
+  public void testDeletionWhenBackupBeforeCST_ShouldSucceed() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 20 * ONE_DAY_IN_MILLISECONDS;
+    configureContinuousBackup(cst);
+
+    String backupId = createAndUpdateBackup(cst - 1000, cst + 1000);
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
+
+    cleanupContinuousBackup();
   }
 
+  /**
+   * Main Case: continuousBackupStartTime >= maxAllowedPITRTime
+   * <p>
+   * Sub Case: fs >= cst
+   */
   @Test
-  public void testDeleteValidPITRBackupWithAnotherPresent() throws Exception {
-    assertDeletionSucceeds(backupSystemTable, backupId2, false);
+  public void testDeletionWhenBackupAfterCST_ShouldFailUnlessCovered() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 20 * ONE_DAY_IN_MILLISECONDS;
+    configureContinuousBackup(cst);
+
+    String backupId = createAndUpdateBackup(cst + 1000, cst + 2000);
+    assertDeletionFails(backupSystemTable, backupId);
+
+    // Cover the backup with another backup
+    String coverId = createAndUpdateBackup(cst + 1000, cst + 2000);
+
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
+    assertDeletionSucceeds(backupSystemTable, coverId, true);
+
+    cleanupContinuousBackup();
   }
 
   @Test
-  public void testDeleteOnlyValidPITRBackupFails() throws Exception {
-    // Delete backupId2 (31 days ago) — this should succeed
-    assertDeletionSucceeds(backupSystemTable, backupId2, false);
+  public void testDeleteIncrementalBackup() throws Exception {
+    long now = System.currentTimeMillis();
+    long cst = now - 20 * ONE_DAY_IN_MILLISECONDS;
+    configureContinuousBackup(cst);
+
+    String fullBackupId = fullTableBackup(Lists.newArrayList(table1));
+    String incrementalTableBackupId = incrementalTableBackup(Lists.newArrayList(table1));
+    assertDeletionSucceeds(backupSystemTable, incrementalTableBackupId, false);
 
-    // Now backupId3 (32 days ago) is the only remaining PITR backup — deletion should fail
-    assertDeletionFails(backupSystemTable, backupId3, false);
+    assertDeletionSucceeds(backupSystemTable, fullBackupId, true);
   }
 
   @Test
-  public void testForceDeleteOnlyValidPITRBackup() throws Exception {
-    // Delete backupId2 (31 days ago)
-    assertDeletionSucceeds(backupSystemTable, backupId2, false);
+  public void testDeleteFullBackupNonContinuousTable() throws Exception {
+    conf1.setLong(CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS, 30);
+    backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
+
+    long now = System.currentTimeMillis();
+    String backupId =
+      createAndUpdateBackup(now - ONE_DAY_IN_MILLISECONDS, now - ONE_DAY_IN_MILLISECONDS + 1000);
+    assertDeletionSucceeds(backupSystemTable, backupId, false);
+  }
 
-    // Force delete backupId3 — should succeed despite PITR constraints
-    assertDeletionSucceeds(backupSystemTable, backupId3, true);
+  /**
+   * Creates a full backup and updates its timestamps.
+   */
+  private String createAndUpdateBackup(long startTs, long completeTs) throws Exception {
+    String backupId = fullTableBackup(Lists.newArrayList(table1));
+    assertTrue(checkSucceeded(backupId));
+
+    BackupInfo backupInfo = getBackupInfoById(backupId);
+    backupInfo.setStartTs(startTs);
+    backupInfo.setCompleteTs(completeTs);
+    backupSystemTable.updateBackupInfo(backupInfo);
+
+    return backupId;
   }
 
   private void assertDeletionSucceeds(BackupSystemTable table, String backupId,
@@ -161,9 +244,8 @@ public class TestBackupDeleteWithContinuousBackupAndPITR extends TestBackupBase
     assertFalse("Backup should be deleted but still exists!", backupExists(table, backupId));
   }
 
-  private void assertDeletionFails(BackupSystemTable table, String backupId, boolean isForceDelete)
-    throws Exception {
-    int ret = deleteBackup(backupId, isForceDelete);
+  private void assertDeletionFails(BackupSystemTable table, String backupId) throws Exception {
+    int ret = deleteBackup(backupId, false);
     assertNotEquals(0, ret);
     assertTrue("Backup should still exist after failed deletion!", backupExists(table, backupId));
   }
@@ -183,4 +265,10 @@ public class TestBackupDeleteWithContinuousBackupAndPITR extends TestBackupBase
       ? new String[] { "delete", "-l", backupId, "-fd" }
       : new String[] { "delete", "-l", backupId };
   }
+
+  private BackupInfo getBackupInfoById(String backupId) throws IOException {
+    return backupSystemTable.getBackupInfos(BackupInfo.BackupState.COMPLETE).stream()
+      .filter(b -> b.getBackupId().equals(backupId)).findFirst()
+      .orElseThrow(() -> new IllegalStateException("Backup should exist: " + backupId));
+  }
 }
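
A worked classification of the sub-cases exercised above, using the same terminology
(cst, mapt, fs, fe) and assuming cst precedes mapt as in the first main case; the
helper below is a sketch for illustration, not test code from the patch:

    import java.util.concurrent.TimeUnit;

    public class PitrCaseSketch {
      // Classifies a FULL backup of a continuously backed-up table by its start (fs)
      // and completion (fe) timestamps relative to cst and mapt.
      static String classify(long fs, long fe, long cst, long mapt) {
        if (fs < cst) {
          return "started before continuous backup start - not a PITR anchor, deletable";
        }
        if (fe < mapt) {
          return "enables PITR over [mapt, now] - deletion blocked unless another full backup covers it";
        }
        return "enables PITR over [fe, now] - deletion blocked unless another full backup covers it";
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long cst = now - TimeUnit.DAYS.toMillis(40);
        long mapt = now - TimeUnit.DAYS.toMillis(30);
        System.out.println(classify(cst - 1000, cst + 1000, cst, mapt));  // straddles cst
        System.out.println(classify(cst, mapt - 1000, cst, mapt));        // within cst..mapt
        System.out.println(classify(cst + 1000, mapt + 1000, cst, mapt)); // completes after mapt
      }
    }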
