This is an automated email from the ASF dual-hosted git repository.
rmattingly pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-3 by this push:
new 48b700cd08b HBASE-29114 Restoring to original splits fails if backups are on separate FileSystem (#6667) (#6676)
48b700cd08b is described below
commit 48b700cd08bed74c9e38e272303a4970fe597111
Author: Ray Mattingly <[email protected]>
AuthorDate: Tue Feb 11 10:40:26 2025 -0500
HBASE-29114 Restoring to original splits fails if backups are on separate FileSystem (#6667) (#6676)
Signed-off-by: Ray Mattingly <[email protected]>
Co-authored-by: Hernan Romer <[email protected]>
Co-authored-by: Hernan Gelaf-Romer <[email protected]>
---
.../org/apache/hadoop/hbase/backup/RestoreJob.java | 1 +
.../MapReduceRestoreToOriginalSplitsJob.java | 4 +-
.../hadoop/hbase/backup/util/RestoreTool.java | 9 +++-
.../hadoop/hbase/backup/TestIncrementalBackup.java | 54 +++++++++++++++++++++-
4 files changed, 64 insertions(+), 4 deletions(-)
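The root cause visible in the diff below: the restore job resolved its FileSystem from the default configuration (fs.defaultFS, i.e. the cluster's HDFS) rather than from the backup root path, so backups stored on another filesystem could not be read. A minimal standalone sketch of the distinction, assuming hadoop-common on the classpath; the file:// path is hypothetical, not from this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsResolutionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves against fs.defaultFS -- on a cluster, typically HDFS.
        FileSystem defaultFs = FileSystem.get(conf);
        // Resolves against the scheme/authority of the path itself, so a
        // backup root on a different filesystem gets the right client.
        Path backupRoot = new Path("file:///tmp/backups"); // hypothetical
        FileSystem backupFs = backupRoot.getFileSystem(conf);
        System.out.println(defaultFs.getUri() + " vs " + backupFs.getUri());
      }
    }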
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
index 207684e7588..9e483057d69 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
@@ -32,6 +32,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface RestoreJob extends Configurable {
String KEEP_ORIGINAL_SPLITS_KEY = "hbase.backup.restorejob.keep.original.splits";
+ String BACKUP_ROOT_PATH_KEY = "hbase.backup.root.path";
boolean KEEP_ORIGINAL_SPLITS_DEFAULT = false;
/**
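The new constant gives the restore job an explicit handle on the backup location. RestoreTool populates it programmatically before launching the job (see further below); seeding it by hand would look roughly like this sketch, where the root path value is illustrative only:

    // Sketch, assuming hbase-common on the classpath; the value is illustrative.
    org.apache.hadoop.conf.Configuration conf =
      org.apache.hadoop.hbase.HBaseConfiguration.create();
    conf.set("hbase.backup.root.path", "file:///mnt/backups");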
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java
index 942f69a2fb8..54859d427a8 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java
@@ -45,7 +45,9 @@ public class MapReduceRestoreToOriginalSplitsJob implements RestoreJob {
// We are using the files from the snapshot. We should copy them rather than move them over
conf.setBoolean(BulkLoadHFiles.ALWAYS_COPY_FILES, true);
- FileSystem fs = FileSystem.get(conf);
+ Path backupRootDir = new Path(conf.get(RestoreJob.BACKUP_ROOT_PATH_KEY));
+
+ FileSystem fs = backupRootDir.getFileSystem(conf);
Map<byte[], List<Path>> family2Files = buildFamily2Files(fs, dirPaths, fullBackupRestore);
BulkLoadHFiles bulkLoad = BulkLoadHFiles.create(conf);
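Note that conf.get(RestoreJob.BACKUP_ROOT_PATH_KEY) returns null when the key was never set, and new Path(null) throws IllegalArgumentException, so this change relies on RestoreTool always populating the key first. A defensive variant, shown only as a sketch and not what the commit does, could fall back to the default filesystem:

    // Sketch: fall back to fs.defaultFS when no backup root was configured.
    String root = conf.get(RestoreJob.BACKUP_ROOT_PATH_KEY);
    FileSystem fs = root != null
      ? new Path(root).getFileSystem(conf)
      : FileSystem.get(conf);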
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 6248d7932dd..7549b9a8c69 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -203,7 +203,7 @@ public class RestoreTool {
LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " +
newTableDescriptor);
}
}
- conf.setBoolean(RestoreJob.KEEP_ORIGINAL_SPLITS_KEY, keepOriginalSplits);
+ configureForRestoreJob(keepOriginalSplits);
RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
restoreService.run(logDirs, tableNames, restoreRootDir, newTableNames, false);
@@ -355,7 +355,7 @@ public class RestoreTool {
// should only try to create the table with all region informations, so we could pre-split
// the regions in fine grain
checkAndCreateTable(conn, newTableName, regionPathList, tableDescriptor, truncateIfExists);
- conf.setBoolean(RestoreJob.KEEP_ORIGINAL_SPLITS_KEY, isKeepOriginalSplits);
+ configureForRestoreJob(isKeepOriginalSplits);
RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
Path[] paths = new Path[regionPathList.size()];
regionPathList.toArray(paths);
@@ -536,4 +536,9 @@ public class RestoreTool {
}
}
}
+
+ private void configureForRestoreJob(boolean keepOriginalSplits) {
+ conf.setBoolean(RestoreJob.KEEP_ORIGINAL_SPLITS_KEY, keepOriginalSplits);
+ conf.set(RestoreJob.BACKUP_ROOT_PATH_KEY, backupRootPath.toString());
+ }
}
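Both restore paths (the WAL/incremental replay above and the full HFile restore) now funnel through configureForRestoreJob, so the split setting and the backup root can no longer drift apart. For a RestoreJob implementation, reading the two settings back would look roughly like this sketch, assuming a Configuration named conf in scope:

    // Sketch: consuming the two settings inside a RestoreJob implementation.
    boolean keepSplits = conf.getBoolean(RestoreJob.KEEP_ORIGINAL_SPLITS_KEY,
      RestoreJob.KEEP_ORIGINAL_SPLITS_DEFAULT);
    Path backupRoot = new Path(conf.get(RestoreJob.BACKUP_ROOT_PATH_KEY));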
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index d000dba9a64..eba322ea919 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
+import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -378,6 +379,51 @@ public class TestIncrementalBackup extends TestBackupBase {
}
}
+ @Test
+ public void testIncBackupRestoreWithOriginalSplitsSeparateFs() throws Exception {
+ String originalBackupRoot = BACKUP_ROOT_DIR;
+ // prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
+ try (Connection conn = ConnectionFactory.createConnection(conf1);
+ BackupAdminImpl admin = new BackupAdminImpl(conn)) {
+ String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
+ BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();
+
+ List<TableName> tables = Lists.newArrayList(table1);
+
+ insertIntoTable(conn, table1, famName, 3, 100);
+ String fullBackupId = takeFullBackup(tables, admin, true);
+ assertTrue(checkSucceeded(fullBackupId));
+
+ insertIntoTable(conn, table1, famName, 4, 100);
+ BackupRequest request =
+ createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
+ String incrementalBackupId = admin.backupTables(request);
+ assertTrue(checkSucceeded(incrementalBackupId));
+
+ TableName[] fromTable = new TableName[] { table1 };
+ TableName[] toTable = new TableName[] { table1_restore };
+
+ // Using original splits
+ admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
+ fromTable, toTable, true, true));
+
+ int actualRowCount = TEST_UTIL.countRows(table1_restore);
+ int expectedRowCount = TEST_UTIL.countRows(table1);
+ assertEquals(expectedRowCount, actualRowCount);
+
+ // Using new splits
+ admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
+ fromTable, toTable, true, false));
+
+ actualRowCount = TEST_UTIL.countRows(table1_restore);
+ assertEquals(expectedRowCount, actualRowCount);
+
+ } finally {
+ BACKUP_ROOT_DIR = originalBackupRoot;
+ }
+
+ }
+
private void checkThrowsCFMismatch(IOException ex, List<TableName> tables) {
Throwable cause = Throwables.getRootCause(ex);
assertEquals(cause.getClass(), ColumnFamilyMismatchException.class);
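The new test reproduces the bug by pointing BACKUP_ROOT_DIR at a file:// URI under the data test dir, while the mini cluster's tables stay on the test HDFS, so the restore has to cross filesystems. A hypothetical extra assertion, not part of the commit, could make that precondition explicit (conf1 and assertNotEquals come from the existing test class):

    // Hypothetical: backup root and cluster FS should use different schemes.
    org.apache.hadoop.fs.Path backupRoot = new org.apache.hadoop.fs.Path(BACKUP_ROOT_DIR);
    assertNotEquals(org.apache.hadoop.fs.FileSystem.get(conf1).getUri().getScheme(),
      backupRoot.getFileSystem(conf1).getUri().getScheme());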
@@ -387,7 +433,13 @@ public class TestIncrementalBackup extends TestBackupBase {
private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin)
throws IOException {
- BackupRequest req = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+ return takeFullBackup(tables, backupAdmin, false);
+ }
+
+ private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin,
+ boolean noChecksumVerify) throws IOException {
+ BackupRequest req =
+ createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, noChecksumVerify);
String backupId = backupAdmin.backupTables(req);
checkSucceeded(backupId);
return backupId;