This is an automated email from the ASF dual-hosted git repository.
rmattingly pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new 148dd9e762e HBASE-29003 Proper bulk load tracking (#6506) (#6921)
148dd9e762e is described below
commit 148dd9e762ea8d882c932ef79e85f81e15ec2f4d
Author: Ray Mattingly <[email protected]>
AuthorDate: Wed Apr 23 15:41:07 2025 -0400
HBASE-29003 Proper bulk load tracking (#6506) (#6921)
Signed-off-by: Ray Mattingly <[email protected]>
The HBase backup mechanism keeps track of which HFiles
were bulk loaded, so they can be included in incremental
backups.
Before this ticket, these bulk load records were only
deleted when an incremental backup was created. This
commit adds two more deletion points:
1) after a full backup. Since a full backup already
captures all data, the stale records caused unnecessary
HFiles to be included in the next incremental backup.
2) after a table delete/truncate/CF-deletion. Previously,
if an HFile was loaded before a table was cleared, the next
incremental backup would effectively still include the
HFile. This led to incorrect data being restored.
This commit also completely refactors & simplifies the
test for this functionality.
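To make case (2) concrete: a table modification yields an old and a
new descriptor, the families present only in the old one are the
removed families, and only bulk load records pointing at those
families are dropped. A minimal, self-contained sketch of that
selection logic (plain java.util here; TrackedBulkLoad and the class
name are hypothetical stand-ins for the BulkLoad record used in the
patch):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    public class RemovedFamilyFilterSketch {
      // Hypothetical stand-in for the tracked bulk load entries.
      record TrackedBulkLoad(String columnFamily, String rowKey) {}

      public static void main(String[] args) {
        Set<String> oldFamilies = Set.of("cf1", "cf2");
        Set<String> newFamilies = Set.of("cf1");

        // Families present before the modification but not after it.
        Set<String> removed = new HashSet<>(oldFamilies);
        removed.removeAll(newFamilies);

        Predicate<TrackedBulkLoad> filter = bl -> removed.contains(bl.columnFamily());
        List<TrackedBulkLoad> tracked = List.of(new TrackedBulkLoad("cf1", "row-a"),
          new TrackedBulkLoad("cf2", "row-b"));

        // Only the entry for the removed family cf2 is selected for deletion.
        List<String> rowsToDelete = tracked.stream().filter(filter)
          .map(TrackedBulkLoad::rowKey).collect(Collectors.toList());
        System.out.println(rowsToDelete); // prints [row-b]
      }
    }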
Co-authored-by: DieterDP <[email protected]>
---
.../hadoop/hbase/backup/BackupMasterObserver.java | 117 +++++++++++++++++++++
.../hbase/backup/BackupRestoreConstants.java | 22 ++--
.../hadoop/hbase/backup/impl/BackupManager.java | 10 +-
.../hbase/backup/impl/BackupSystemTable.java | 29 ++---
.../hbase/backup/impl/FullTableBackupClient.java | 10 ++
.../hbase/tool/TestLoadIncrementalHFiles.java | 8 --
6 files changed, 156 insertions(+), 40 deletions(-)
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java
new file mode 100644
index 00000000000..3e95e7bbcbc
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.BulkLoad;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+
+/**
+ * An Observer to facilitate backup operations
+ */
[email protected](HBaseInterfaceAudience.CONFIG)
+public class BackupMasterObserver implements MasterCoprocessor, MasterObserver {
+  private static final Logger LOG = LoggerFactory.getLogger(BackupMasterObserver.class);
+
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (!BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("Skipping postDeleteTable hook since backup is disabled");
+      return;
+    }
+    deleteBulkLoads(cfg, tableName, (ignored) -> true);
+  }
+
+  @Override
+  public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (!BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("Skipping postTruncateTable hook since backup is disabled");
+      return;
+    }
+    deleteBulkLoads(cfg, tableName, (ignored) -> true);
+  }
+
+  @Override
+  public void postModifyTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+    final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor)
+    throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (!BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("Skipping postModifyTable hook since backup is disabled");
+      return;
+    }
+
+    Set<String> oldFamilies = Arrays.stream(oldDescriptor.getColumnFamilies())
+      .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
+    Set<String> newFamilies = Arrays.stream(currentDescriptor.getColumnFamilies())
+      .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
+
+    Set<String> removedFamilies = Sets.difference(oldFamilies, newFamilies);
+    if (!removedFamilies.isEmpty()) {
+      Predicate<BulkLoad> filter =
+        bulkload -> removedFamilies.contains(bulkload.getColumnFamily());
+      deleteBulkLoads(cfg, tableName, filter);
+    }
+  }
+
+  /**
+   * Deletes all bulk load entries for the given table, matching the provided predicate.
+   */
+  private void deleteBulkLoads(Configuration config, TableName tableName,
+    Predicate<BulkLoad> filter) throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(config);
+      BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      List<BulkLoad> bulkLoads = tbl.readBulkloadRows(ImmutableList.of(tableName));
+      List<byte[]> rowsToDelete =
+        bulkLoads.stream().filter(filter).map(BulkLoad::getRowKey).collect(Collectors.toList());
+      tbl.deleteBulkLoadedRows(rowsToDelete);
+    }
+  }
+}
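The new observer only takes effect once it is registered with the
master. The BackupManager change further below wires it up
programmatically when backup is enabled; the same append-don't-overwrite
pattern, sketched standalone (key and class names taken from this
commit, wiring simplified):

    import org.apache.hadoop.conf.Configuration;

    public class RegisterObserverSketch {
      // Value of CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY in HBase.
      static final String MASTER_COPROCESSOR_CONF_KEY = "hbase.coprocessor.master.classes";

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Append to any coprocessors that are already configured,
        // rather than clobbering them.
        String existing = conf.get(MASTER_COPROCESSOR_CONF_KEY);
        conf.set(MASTER_COPROCESSOR_CONF_KEY, (existing == null ? "" : existing + ",")
          + "org.apache.hadoop.hbase.backup.BackupMasterObserver");
        System.out.println(conf.get(MASTER_COPROCESSOR_CONF_KEY));
      }
    }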
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
index 30a5674eb02..d05a421a395 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -98,16 +99,17 @@ public interface BackupRestoreConstants {
String JOB_NAME_CONF_KEY = "mapreduce.job.name";
-  String BACKUP_CONFIG_STRING =
-    BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins="
-      + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
-      + "hbase.procedure.master.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
-      + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
-      + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n"
-      + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
+  String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n"
+    + "hbase.master.logcleaner.plugins="
+    + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+    + "hbase.procedure.master.classes=YOUR_CLASSES,"
+    + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+    + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+    + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+    + CoprocessorHost.REGION_COPROCESSOR_CONF_KEY + "=YOUR_CLASSES,"
+    + BackupObserver.class.getName() + "\n" + CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY
+    + "=YOUR_CLASSES," + BackupMasterObserver.class.getName() + "\nand restart the cluster\n"
+    + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
   String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n "
     + BACKUP_CONFIG_STRING;
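For reference, the settings the updated help text asks operators to
add to hbase-site.xml render to roughly the following (assuming no
pre-existing plugins or classes; the YOUR_PLUGINS/YOUR_CLASSES
placeholders are kept from the source):

    hbase.backup.enable=true
    hbase.master.logcleaner.plugins=YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner
    hbase.procedure.master.classes=YOUR_CLASSES,org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager
    hbase.procedure.regionserver.classes=YOUR_CLASSES,org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager
    hbase.coprocessor.region.classes=YOUR_CLASSES,org.apache.hadoop.hbase.backup.BackupObserver
    hbase.coprocessor.master.classes=YOUR_CLASSES,org.apache.hadoop.hbase.backup.BackupMasterObserver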
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index 5afd580a649..e294cb887c1 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -119,11 +119,17 @@ public class BackupManager implements Closeable {
     plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
     conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
       (plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
+
+    String observerClass = BackupMasterObserver.class.getName();
+    String masterCoProc = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
+      (masterCoProc == null ? "" : masterCoProc + ",") + observerClass);
+
     if (LOG.isDebugEnabled()) {
       LOG.debug(
         "Added log cleaner: {}. Added master procedure manager: {}."
-          + "Added master procedure manager: {}",
-        cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
+          + " Added master procedure manager: {}. Added master observer: {}",
+        cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName(), observerClass);
}
}
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index 203f3f61b0f..705cac02542 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -411,25 +411,24 @@ public final class BackupSystemTable implements Closeable {
     try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
       List<Put> puts = BackupSystemTable.createPutForBulkLoad(tableName, region, cfToHfilePath);
       bufferedMutator.mutate(puts);
-      LOG.debug("Written {} rows for bulk load of {}", puts.size(), tableName);
+      LOG.debug("Written {} rows for bulk load of table {}", puts.size(), tableName);
}
}
- /*
- * Removes rows recording bulk loaded hfiles from backup table
- * @param lst list of table names
- * @param rows the rows to be deleted
+ /**
+ * Removes entries from the table that tracks all bulk loaded hfiles.
+ * @param rows the row keys of the entries to be deleted
*/
public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
     try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
- List<Delete> lstDels = new ArrayList<>();
+ List<Delete> deletes = new ArrayList<>();
for (byte[] row : rows) {
Delete del = new Delete(row);
- lstDels.add(del);
- LOG.debug("orig deleting the row: " + Bytes.toString(row));
+ deletes.add(del);
+ LOG.debug("Deleting bulk load entry with key: {}",
Bytes.toString(row));
}
- bufferedMutator.mutate(lstDels);
- LOG.debug("deleted " + rows.size() + " original bulkload rows");
+ bufferedMutator.mutate(deletes);
+ LOG.debug("Deleted {} bulk load entries.", rows.size());
}
}
@@ -1522,16 +1521,6 @@ public final class BackupSystemTable implements Closeable {
}
}
- public static List<Delete> createDeleteForOrigBulkLoad(List<TableName> lst) {
- List<Delete> lstDels = new ArrayList<>(lst.size());
- for (TableName table : lst) {
-      Delete del = new Delete(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM));
- del.addFamily(BackupSystemTable.META_FAMILY);
- lstDels.add(del);
- }
- return lstDels;
- }
-
private Put createPutForDeleteOperation(String[] backupIdList) {
byte[] value = Bytes.toBytes(StringUtils.join(backupIdList, ","));
Put put = new Put(DELETE_OP_ROW);
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index c4017e8c1a1..7fb7a576880 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -26,7 +26,9 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CON
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import java.util.stream.Collectors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo;
@@ -152,6 +154,11 @@ public class FullTableBackupClient extends TableBackupClient {
// the snapshot.
LOG.info("Execute roll log procedure for full backup ...");
+      // Gather the bulk loads being tracked by the system, which can be deleted (since their data
+      // will be part of the snapshot being taken). We gather this list before taking the actual
+      // snapshots for the same reason as the log rolls.
+      List<BulkLoad> bulkLoadsToDelete = backupManager.readBulkloadRows(tableList);
+
Map<String, String> props = new HashMap<>();
props.put("backupRoot", backupInfo.getBackupRootDir());
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
@@ -192,6 +199,9 @@ public class FullTableBackupClient extends TableBackupClient {
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);
+      backupManager.deleteBulkLoadedRows(
+        bulkLoadsToDelete.stream().map(BulkLoad::getRowKey).collect(Collectors.toList()));
+
// backup complete
completeBackup(conn, backupInfo, BackupType.FULL, conf);
} catch (Exception e) {
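The two hunks above cooperate: the bulk load list is read before the
log roll and snapshots, and the corresponding rows are deleted only
once the backup has gotten past its failure-prone phases, so a failed
backup never loses tracking data. A hypothetical, self-contained
sketch of that collect-then-delete ordering (names invented for
illustration):

    import java.util.ArrayList;
    import java.util.List;

    public class CollectThenDeleteSketch {
      public static void main(String[] args) {
        List<String> tracked = new ArrayList<>(List.of("row-1", "row-2"));

        // 1. Capture what the snapshot will cover; bulk loads arriving after
        //    this point stay tracked for the next incremental backup.
        List<String> covered = List.copyOf(tracked);

        boolean succeeded = takeSnapshot(); // placeholder for the real procedure
        if (succeeded) {
          // 2. Only now is it safe to drop the covered entries.
          tracked.removeAll(covered);
        }
        System.out.println(tracked); // [] on success; unchanged on failure
      }

      static boolean takeSnapshot() {
        return true;
      }
    }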
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
index 833ce35edd0..5c8d4b96c53 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
@@ -293,14 +293,6 @@ public class TestLoadIncrementalHFiles {
     runTest(testName, htd, preCreateTable, tableSplitKeys, hfileRanges, useMap, false, depth);
}
-  public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingUtility util,
-    byte[] fam, byte[] qual, boolean preCreateTable, byte[][] tableSplitKeys,
-    byte[][][] hfileRanges, boolean useMap, boolean deleteFile, boolean copyFiles, int initRowCount,
-    int factor) throws Exception {
-    return loadHFiles(testName, htd, util, fam, qual, preCreateTable, tableSplitKeys, hfileRanges,
-      useMap, deleteFile, copyFiles, initRowCount, factor, 2);
-  }
-
   public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingUtility util,
     byte[] fam, byte[] qual, boolean preCreateTable, byte[][] tableSplitKeys,
     byte[][][] hfileRanges, boolean useMap, boolean deleteFile, boolean copyFiles, int initRowCount,