This is an automated email from the ASF dual-hosted git repository.

bbeaudreault pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
     new 14b4dc3d2e9 HBASE-28412 Select correct target table for incremental backup restore (#5776)
14b4dc3d2e9 is described below

commit 14b4dc3d2e91597d82272e0f9f60297d83f8c53a
Author: Ruben Van Wanzeele <rube...@ngdata.com>
AuthorDate: Tue Mar 26 13:15:00 2024 +0100

    HBASE-28412 Select correct target table for incremental backup restore (#5776)
    
    Contributed-by: Ruben Van Wanzeele <rube...@ngdata.com>
    Signed-off-by: Bryan Beaudreault <bbeaudrea...@apache.org>
---
 .../backup/mapreduce/MapReduceRestoreJob.java      |   4 +-
 .../apache/hadoop/hbase/backup/BackupTestUtil.java |  55 +++++++
 ...va => TestBackupRestoreOnEmptyEnvironment.java} | 159 +++++++++------------
 .../backup/TestBackupRestoreWithModifications.java |  28 +---
 4 files changed, 130 insertions(+), 116 deletions(-)
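
The substance of the fix is the two-line change in MapReduceRestoreJob shown in
the first hunk: the restore player was handed the original source table name
for incremental restores, so incremental data was replayed into the source
table rather than into the requested target. A condensed before/after sketch of
that hunk (all names are taken from the hunk itself):

    // Before: only full restores targeted the requested table; incremental
    // restores fell back to the original source table.
    // String[] playerArgs = { dirs,
    //   fullBackupRestore ? newTableNames[i].getNameAsString()
    //                     : tableNames[i].getNameAsString() };

    // After: full and incremental restores both target newTableNames[i],
    // the table the caller asked to restore into.
    String[] playerArgs = { dirs, newTableNames[i].getNameAsString() };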

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index 55f6bff04cb..5d654c0d85b 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -74,9 +74,7 @@ public class MapReduceRestoreJob implements RestoreJob {
         BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
       Configuration conf = getConf();
       conf.set(bulkOutputConfKey, bulkOutputPath.toString());
-      String[] playerArgs = { dirs,
-        fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() };
-
+      String[] playerArgs = { dirs, newTableNames[i].getNameAsString() };
       int result;
       try {
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java
new file mode 100644
index 00000000000..3665eeb7a76
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class BackupTestUtil {
+  private BackupTestUtil() {
+  }
+
+  static BackupInfo verifyBackup(Configuration conf, String backupId, BackupType expectedType,
+    BackupInfo.BackupState expectedState) throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(conf);
+      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
+      BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId);
+
+      // Verify managed backup in HBase
+      assertEquals(backupId, backupInfo.getBackupId());
+      assertEquals(expectedState, backupInfo.getState());
+      assertEquals(expectedType, backupInfo.getType());
+      return backupInfo;
+    }
+  }
+
+  static void enableBackup(Configuration conf) {
+    // Enable backup
+    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+    BackupManager.decorateMasterConfiguration(conf);
+    BackupManager.decorateRegionServerConfiguration(conf);
+  }
+}
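
Both test classes below consume these helpers through static imports rather
than carrying private copies, as the import hunks that follow show:

    import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup;
    import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup;

    // used as, e.g.:
    // enableBackup(conf);
    // BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);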
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java
similarity index 63%
copy from hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
copy to hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java
index d01df687eda..300ca360a4e 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java
@@ -18,30 +18,27 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup;
 import static org.apache.hadoop.hbase.backup.BackupType.FULL;
+import static org.apache.hadoop.hbase.backup.BackupType.INCREMENTAL;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
-import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -51,13 +48,11 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testing.TestingHBaseCluster;
 import org.apache.hadoop.hbase.testing.TestingHBaseClusterOption;
-import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -71,29 +66,27 @@ import org.slf4j.LoggerFactory;
 
 @Category(MediumTests.class)
 @RunWith(Parameterized.class)
-public class TestBackupRestoreWithModifications {
+public class TestBackupRestoreOnEmptyEnvironment {
 
   private static final Logger LOG =
-    LoggerFactory.getLogger(TestBackupRestoreWithModifications.class);
+    LoggerFactory.getLogger(TestBackupRestoreOnEmptyEnvironment.class);
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestBackupRestoreWithModifications.class);
+    HBaseClassTestRule.forClass(TestBackupRestoreOnEmptyEnvironment.class);
 
-  @Parameterized.Parameters(name = "{index}: useBulkLoad={0}")
+  @Parameterized.Parameters(name = "{index}: restoreToOtherTable={0}")
   public static Iterable<Object[]> data() {
     return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED;
   }
 
   @Parameterized.Parameter(0)
-  public boolean useBulkLoad;
-
+  public boolean restoreToOtherTable;
   private TableName sourceTable;
   private TableName targetTable;
 
-  private List<TableName> allTables;
   private static TestingHBaseCluster cluster;
-  private static final Path BACKUP_ROOT_DIR = new Path("backupIT");
+  private static Path BACKUP_ROOT_DIR;
   private static final byte[] COLUMN_FAMILY = Bytes.toBytes("0");
 
   @BeforeClass
@@ -102,6 +95,7 @@ public class TestBackupRestoreWithModifications {
     enableBackup(conf);
     cluster = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().conf(conf).build());
     cluster.start();
+    BACKUP_ROOT_DIR = new Path(new Path(conf.get("fs.defaultFS")), new Path("/backupIT"));
   }
 
   @AfterClass
@@ -111,38 +105,68 @@ public class TestBackupRestoreWithModifications {
 
   @Before
   public void setUp() throws Exception {
-    sourceTable = TableName.valueOf("table-" + useBulkLoad);
-    targetTable = TableName.valueOf("another-table-" + useBulkLoad);
-    allTables = Arrays.asList(sourceTable, targetTable);
+    sourceTable = TableName.valueOf("table");
+    targetTable = TableName.valueOf("another-table");
     createTable(sourceTable);
     createTable(targetTable);
   }
 
+  @After
+  public void removeTables() throws Exception {
+    deleteTables();
+  }
+
   @Test
-  public void testModificationsOnTable() throws Exception {
-    Instant timestamp = Instant.now();
+  public void testRestoreToCorrectTable() throws Exception {
+    Instant timestamp = Instant.now().minusSeconds(10);
 
     // load some data
-    load(sourceTable, timestamp, "data");
+    putLoad(sourceTable, timestamp, "data");
 
-    String backupId = backup(FULL, allTables);
-    BackupInfo backupInfo = verifyBackup(backupId, FULL, COMPLETE);
+    String backupId = backup(FULL, Collections.singletonList(sourceTable));
+    BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
     assertTrue(backupInfo.getTables().contains(sourceTable));
 
-    restore(backupId, sourceTable, targetTable);
-    validateDataEquals(sourceTable, "data");
-    validateDataEquals(targetTable, "data");
+    LOG.info("Deleting the tables before restore ...");
+    deleteTables();
+
+    if (restoreToOtherTable) {
+      restore(backupId, sourceTable, targetTable);
+      validateDataEquals(targetTable, "data");
+    } else {
+      restore(backupId, sourceTable, sourceTable);
+      validateDataEquals(sourceTable, "data");
+    }
 
-    // load new data on the same timestamp
-    load(sourceTable, timestamp, "changed_data");
+  }
 
-    backupId = backup(FULL, allTables);
-    backupInfo = verifyBackup(backupId, FULL, COMPLETE);
-    assertTrue(backupInfo.getTables().contains(sourceTable));
+  @Test
+  public void testRestoreCorrectTableForIncremental() throws Exception {
+    Instant timestamp = Instant.now().minusSeconds(10);
+
+    // load some data
+    putLoad(sourceTable, timestamp, "data");
+
+    String backupId = backup(FULL, Collections.singletonList(sourceTable));
+    verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
+
+    // some incremental data
+    putLoad(sourceTable, timestamp.plusMillis(1), "new_data");
+
+    String backupId2 = backup(INCREMENTAL, Collections.singletonList(sourceTable));
+    verifyBackup(cluster.getConf(), backupId2, INCREMENTAL, COMPLETE);
+
+    LOG.info("Deleting the tables before restore ...");
+    deleteTables();
+
+    if (restoreToOtherTable) {
+      restore(backupId2, sourceTable, targetTable);
+      validateDataEquals(targetTable, "new_data");
+    } else {
+      restore(backupId2, sourceTable, sourceTable);
+      validateDataEquals(sourceTable, "new_data");
+    }
 
-    restore(backupId, sourceTable, targetTable);
-    validateDataEquals(sourceTable, "changed_data");
-    validateDataEquals(targetTable, "changed_data");
   }
 
   private void createTable(TableName tableName) throws IOException {
@@ -154,11 +178,15 @@ public class TestBackupRestoreWithModifications {
     }
   }
 
-  private void load(TableName tableName, Instant timestamp, String data) throws IOException {
-    if (useBulkLoad) {
-      hFileBulkLoad(tableName, timestamp, data);
-    } else {
-      putLoad(tableName, timestamp, data);
+  private void deleteTables() throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
+      Admin admin = connection.getAdmin()) {
+      for (TableName table : Arrays.asList(sourceTable, targetTable)) {
+        if (admin.tableExists(table)) {
+          admin.disableTable(table);
+          admin.deleteTable(table);
+        }
+      }
     }
   }
 
@@ -184,30 +212,6 @@ public class TestBackupRestoreWithModifications {
     }
   }
 
-  private void hFileBulkLoad(TableName tableName, Instant timestamp, String data)
-    throws IOException {
-    FileSystem fs = FileSystem.get(cluster.getConf());
-    LOG.info("Writing new data to HBase using BulkLoad: {}", data);
-    // HFiles require this strict directory structure to allow to load them
-    Path hFileRootPath = new Path("/tmp/hfiles_" + UUID.randomUUID());
-    fs.mkdirs(hFileRootPath);
-    Path hFileFamilyPath = new Path(hFileRootPath, Bytes.toString(COLUMN_FAMILY));
-    fs.mkdirs(hFileFamilyPath);
-    try (HFile.Writer writer = HFile.getWriterFactoryNoCache(cluster.getConf())
-      .withPath(fs, new Path(hFileFamilyPath, "hfile_" + UUID.randomUUID()))
-      .withFileContext(new HFileContextBuilder().withTableName(tableName.toBytes())
-        .withColumnFamily(COLUMN_FAMILY).build())
-      .create()) {
-      for (int i = 0; i < 10; i++) {
-        writer.append(new KeyValue(Bytes.toBytes(i), COLUMN_FAMILY, Bytes.toBytes("data"),
-          timestamp.toEpochMilli(), Bytes.toBytes(data)));
-      }
-    }
-    Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> result =
-      BulkLoadHFiles.create(cluster.getConf()).bulkLoad(tableName, hFileRootPath);
-    assertFalse(result.isEmpty());
-  }
-
   private String backup(BackupType backupType, List<TableName> tables) throws IOException {
     LOG.info("Creating the backup ...");
 
@@ -238,7 +242,6 @@ public class TestBackupRestoreWithModifications {
     try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
       Table table = connection.getTable(tableName)) {
       Scan scan = new Scan();
-      scan.readAllVersions();
       scan.setRaw(true);
       scan.setBatch(100);
 
@@ -251,26 +254,4 @@ public class TestBackupRestoreWithModifications {
       }
     }
   }
-
-  private BackupInfo verifyBackup(String backupId, BackupType expectedType,
-    BackupInfo.BackupState expectedState) throws IOException {
-    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
-      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
-      BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId);
-
-      // Verify managed backup in HBase
-      assertEquals(backupId, backupInfo.getBackupId());
-      assertEquals(expectedState, backupInfo.getState());
-      assertEquals(expectedType, backupInfo.getType());
-      return backupInfo;
-    }
-  }
-
-  private static void enableBackup(Configuration conf) {
-    // Enable backup
-    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
-    BackupManager.decorateMasterConfiguration(conf);
-    BackupManager.decorateRegionServerConfiguration(conf);
-  }
-
 }
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
index d01df687eda..62ba5006ac7 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup;
 import static org.apache.hadoop.hbase.backup.BackupType.FULL;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -126,7 +127,7 @@ public class TestBackupRestoreWithModifications {
     load(sourceTable, timestamp, "data");
 
     String backupId = backup(FULL, allTables);
-    BackupInfo backupInfo = verifyBackup(backupId, FULL, COMPLETE);
+    BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
     assertTrue(backupInfo.getTables().contains(sourceTable));
 
     restore(backupId, sourceTable, targetTable);
@@ -137,7 +138,7 @@ public class TestBackupRestoreWithModifications {
     load(sourceTable, timestamp, "changed_data");
 
     backupId = backup(FULL, allTables);
-    backupInfo = verifyBackup(backupId, FULL, COMPLETE);
+    backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
     assertTrue(backupInfo.getTables().contains(sourceTable));
 
     restore(backupId, sourceTable, targetTable);
@@ -252,25 +253,4 @@ public class TestBackupRestoreWithModifications {
     }
   }
 
-  private BackupInfo verifyBackup(String backupId, BackupType expectedType,
-    BackupInfo.BackupState expectedState) throws IOException {
-    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
-      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
-      BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId);
-
-      // Verify managed backup in HBase
-      assertEquals(backupId, backupInfo.getBackupId());
-      assertEquals(expectedState, backupInfo.getState());
-      assertEquals(expectedType, backupInfo.getType());
-      return backupInfo;
-    }
-  }
-
-  private static void enableBackup(Configuration conf) {
-    // Enable backup
-    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
-    BackupManager.decorateMasterConfiguration(conf);
-    BackupManager.decorateRegionServerConfiguration(conf);
-  }
-
 }
