This is an automated email from the ASF dual-hosted git repository.
ndimiduk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new ed6613e1afa HBASE-28697 Don't clean bulk load system entries until backup is complete (#6089)
ed6613e1afa is described below
commit ed6613e1afa589e1d6ebcad829ae3e6d2a79651e
Author: Ray Mattingly <[email protected]>
AuthorDate: Mon Sep 2 04:38:29 2024 -0400
HBASE-28697 Don't clean bulk load system entries until backup is complete (#6089)
Co-authored-by: Ray Mattingly <[email protected]>
---
.../hbase/backup/impl/IncrementalTableBackupClient.java | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index b7d1c4a95cc..bbb39cb3a03 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -103,13 +103,14 @@ public class IncrementalTableBackupClient extends TableBackupClient {
/*
* Reads bulk load records from backup table, iterates through the records and forms the paths for
- * bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
+ * bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination. This method does NOT
+ * clean up the entries in the bulk load system table. Those entries should not be cleaned until
+ * the backup is marked as complete.
* @param sTableList list of tables to be backed up
- * @return map of table to List of files
+ * @return the rowkeys of bulk loaded files
*/
@SuppressWarnings("unchecked")
- protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList)
- throws IOException {
+ protected List<byte[]> handleBulkLoad(List<TableName> sTableList) throws IOException {
Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
List<String> activeFiles = new ArrayList<>();
List<String> archiveFiles = new ArrayList<>();
@@ -191,8 +192,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
}
copyBulkLoadedFiles(activeFiles, archiveFiles);
- backupManager.deleteBulkLoadedRows(pair.getSecond());
- return mapForSrc;
+
+ return pair.getSecond();
}
private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles)
@@ -308,10 +309,12 @@ public class IncrementalTableBackupClient extends TableBackupClient {
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);
- handleBulkLoad(backupInfo.getTableNames());
+ List<byte[]> bulkLoadedRows = handleBulkLoad(backupInfo.getTableNames());
+
// backup complete
completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf);
+ backupManager.deleteBulkLoadedRows(bulkLoadedRows);
} catch (IOException e) {
failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
BackupType.INCREMENTAL, conf);