This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new a150b90356c HBASE-29760 TestIncrementalBackup fails 100% on flaky dashboard (#7549)
a150b90356c is described below
commit a150b90356c8bd314234499f60a8205acc89df54
Author: Duo Zhang <[email protected]>
AuthorDate: Tue Dec 16 21:46:47 2025 +0800
HBASE-29760 TestIncrementalBackup fails 100% on flaky dashboard (#7549)
Signed-off-by: Nihal Jain <[email protected]>
Signed-off-by: Peng Lu <[email protected]>
(cherry picked from commit 93768907bfe4f37c93ee3d5a77252627e56bc61d)
---
.../backup/IncrementalBackupRestoreTestBase.java | 132 +++++
.../apache/hadoop/hbase/backup/TestBackupBase.java | 6 +
.../hadoop/hbase/backup/TestIncrementalBackup.java | 577 ---------------------
.../hbase/backup/TestIncrementalBackupRestore.java | 213 ++++++++
...crementalBackupRestoreHandlesArchivedFiles.java | 117 +++++
...IncrementalBackupRestoreWithOriginalSplits.java | 138 +++++
...lBackupRestoreWithOriginalSplitsSeperateFs.java | 85 +++
.../hadoop/hbase/io/hfile/bucket/BucketCache.java | 7 +-
.../hfile/TestBlockEvictionOnRegionMovement.java | 2 +-
.../io/hfile/bucket/TestVerifyBucketCacheFile.java | 63 ++-
10 files changed, 726 insertions(+), 614 deletions(-)
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/IncrementalBackupRestoreTestBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/IncrementalBackupRestoreTestBase.java
new file mode 100644
index 00000000000..3ad686514ff
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/IncrementalBackupRestoreTestBase.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.backup.impl.ColumnFamilyMismatchException;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
+import org.junit.jupiter.api.BeforeAll;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+
+public class IncrementalBackupRestoreTestBase extends TestBackupBase {
+
+ private static final byte[] BULKLOAD_START_KEY = new byte[] { 0x00 };
+ private static final byte[] BULKLOAD_END_KEY = new byte[] { Byte.MAX_VALUE };
+
+ @BeforeAll
+ public static void setUp() throws Exception {
+ provider = "multiwal";
+ TestBackupBase.setUp();
+ }
+
+ protected void checkThrowsCFMismatch(IOException ex, List<TableName> tables) {
+ Throwable cause = Throwables.getRootCause(ex);
+ assertEquals(cause.getClass(), ColumnFamilyMismatchException.class);
+ ColumnFamilyMismatchException e = (ColumnFamilyMismatchException) cause;
+ assertEquals(tables, e.getMismatchedTables());
+ }
+
+ protected String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin)
+ throws IOException {
+ return takeFullBackup(tables, backupAdmin, false);
+ }
+
+ protected String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin,
+ boolean noChecksumVerify) throws IOException {
+ BackupRequest req =
+ createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, noChecksumVerify);
+ String backupId = backupAdmin.backupTables(req);
+ checkSucceeded(backupId);
+ return backupId;
+ }
+
+ protected static Path doBulkload(TableName tn, String regionName, byte[]... fams)
+ throws IOException {
+ Path regionDir = createHFiles(tn, regionName, fams);
+ Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> results =
+ BulkLoadHFiles.create(conf1).bulkLoad(tn, regionDir);
+ assertFalse(results.isEmpty());
+ return regionDir;
+ }
+
+ private static Path createHFiles(TableName tn, String regionName, byte[]... fams)
+ throws IOException {
+ Path rootdir = CommonFSUtils.getRootDir(conf1);
+ Path regionDir = CommonFSUtils.getRegionDir(rootdir, tn, regionName);
+
+ FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+ fs.mkdirs(rootdir);
+
+ for (byte[] fam : fams) {
+ Path famDir = new Path(regionDir, Bytes.toString(fam));
+ Path hFileDir = new Path(famDir, UUID.randomUUID().toString());
+ HFileTestUtil.createHFile(conf1, fs, hFileDir, fam, qualName, BULKLOAD_START_KEY,
+ BULKLOAD_END_KEY, 1000);
+ }
+
+ return regionDir;
+ }
+
+ /**
+ * Check that backup manifest can be produced for a different root. Users may want to move
+ * existing backups to a different location.
+ */
+ protected void validateRootPathCanBeOverridden(String originalPath, String backupId)
+ throws IOException {
+ String anotherRootDir = "/some/other/root/dir";
+ Path anotherPath = new Path(anotherRootDir, backupId);
+ BackupManifest.BackupImage differentLocationImage = BackupManifest.hydrateRootDir(
+ HBackupFileSystem.getManifest(conf1, new Path(originalPath), backupId).getBackupImage(),
+ anotherPath);
+ assertEquals(differentLocationImage.getRootDir(), anotherRootDir);
+ for (BackupManifest.BackupImage ancestor : differentLocationImage.getAncestors()) {
+ assertEquals(anotherRootDir, ancestor.getRootDir());
+ }
+ }
+
+ protected List<LocatedFileStatus> getBackupFiles() throws IOException {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
+ List<LocatedFileStatus> files = new ArrayList<>();
+
+ while (iter.hasNext()) {
+ files.add(iter.next());
+ }
+
+ return files;
+ }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index feb403ae549..775b38a6171 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -71,6 +71,9 @@ import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -118,6 +121,7 @@ public class TestBackupBase {
super(conn, backupId, request);
}
+ @BeforeEach
@Before
public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
// Every operation here may not be necessary for any given test,
@@ -348,6 +352,7 @@ public class TestBackupBase {
* Setup Cluster with appropriate configurations before running tests.
   * @throws Exception if starting the mini cluster or setting up the tables fails
*/
+ @BeforeAll
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL = new HBaseTestingUtility();
@@ -365,6 +370,7 @@ public class TestBackupBase {
}
}
+ @AfterAll
@AfterClass
public static void tearDown() throws Exception {
try {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
deleted file mode 100644
index df187f75295..00000000000
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertThrows;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest;
-import org.apache.hadoop.hbase.backup.impl.ColumnFamilyMismatchException;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.LogRoller;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.hbase.util.HFileTestUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-
-@Category(LargeTests.class)
-@RunWith(Parameterized.class)
-public class TestIncrementalBackup extends TestBackupBase {
-
- @ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestIncrementalBackup.class);
-
- private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class);
- private static final byte[] BULKLOAD_START_KEY = new byte[] { 0x00 };
- private static final byte[] BULKLOAD_END_KEY = new byte[] { Byte.MAX_VALUE };
-
- @Parameterized.Parameters
- public static Collection<Object[]> data() {
- provider = "multiwal";
- List<Object[]> params = new ArrayList<>();
- params.add(new Object[] { Boolean.TRUE });
- return params;
- }
-
- public TestIncrementalBackup(Boolean b) {
- }
-
- @After
- public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
- TEST_UTIL.flush(table1);
- TEST_UTIL.flush(table2);
-
- TEST_UTIL.truncateTable(table1).close();
- TEST_UTIL.truncateTable(table2).close();
-
- if (TEST_UTIL.getAdmin().tableExists(table1_restore)) {
- TEST_UTIL.flush(table1_restore);
- TEST_UTIL.truncateTable(table1_restore).close();
- }
-
- TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> {
- try {
- LogRoller walRoller = rst.getRegionServer().getWalRoller();
- walRoller.requestRollAll();
- walRoller.waitUntilWalRollFinished();
- } catch (Exception ignored) {
- }
- });
-
- try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
- loadTable(table);
- }
-
- try (Table table = TEST_UTIL.getConnection().getTable(table2)) {
- loadTable(table);
- }
- }
-
- // implement all test cases in 1 test since incremental
- // backup/restore has dependencies
- @Test
- public void TestIncBackupRestore() throws Exception {
- int ADD_ROWS = 99;
-
- // #1 - create full backup for all tables
- LOG.info("create full backup image for all tables");
- List<TableName> tables = Lists.newArrayList(table1, table2);
- final byte[] fam3Name = Bytes.toBytes("f3");
- final byte[] mobName = Bytes.toBytes("mob");
-
- TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
- .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
- .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
- .setMobThreshold(5L).build())
- .build();
- TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
-
- try (Connection conn = ConnectionFactory.createConnection(conf1)) {
- int NB_ROWS_FAM3 = 6;
- insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
- insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
- Admin admin = conn.getAdmin();
- BackupAdminImpl client = new BackupAdminImpl(conn);
- BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
- String backupIdFull = takeFullBackup(tables, client);
- validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdFull);
- assertTrue(checkSucceeded(backupIdFull));
-
- // #2 - insert some data to table
- Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
- LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
- Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
- LOG.debug("written " + ADD_ROWS + " rows to " + table1);
- // additionally, insert rows to MOB cf
- int NB_ROWS_MOB = 111;
- insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
- LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
- t1.close();
- Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
- Table t2 = conn.getTable(table2);
- Put p2;
- for (int i = 0; i < 5; i++) {
- p2 = new Put(Bytes.toBytes("row-t2" + i));
- p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
- t2.put(p2);
- }
- Assert.assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(t2));
- t2.close();
- LOG.debug("written " + 5 + " rows to " + table2);
- // split table1
- MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
- List<HRegion> regions = cluster.getRegions(table1);
- byte[] name = regions.get(0).getRegionInfo().getRegionName();
- long startSplitTime = EnvironmentEdgeManager.currentTime();
- try {
- admin.splitRegionAsync(name).get();
- } catch (Exception e) {
- // although split fail, this may not affect following check in current API,
- // exception will be thrown.
- LOG.debug("region is not splittable, because " + e);
- }
- TEST_UTIL.waitTableAvailable(table1);
- long endSplitTime = EnvironmentEdgeManager.currentTime();
- // split finished
- LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
-
- // #3 - incremental backup for multiple tables
- tables = Lists.newArrayList(table1, table2);
- request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
- String backupIdIncMultiple = client.backupTables(request);
- assertTrue(checkSucceeded(backupIdIncMultiple));
- BackupManifest manifest =
- HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
- assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));
- validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple);
-
- // add column family f2 to table1
- // drop column family f3
- final byte[] fam2Name = Bytes.toBytes("f2");
- newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
- .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
- .build();
- TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
-
- // check that an incremental backup fails because the CFs don't match
- final List<TableName> tablesCopy = tables;
- IOException ex = assertThrows(IOException.class, () -> client
- .backupTables(createBackupRequest(BackupType.INCREMENTAL, tablesCopy, BACKUP_ROOT_DIR)));
- checkThrowsCFMismatch(ex, ImmutableList.of(table1));
- takeFullBackup(tables, client);
-
- int NB_ROWS_FAM2 = 7;
- Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
- t3.close();
-
- // Wait for 5 sec to make sure that old WALs were deleted
- Thread.sleep(5000);
-
- // #4 - additional incremental backup for multiple tables
- request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
- String backupIdIncMultiple2 = client.backupTables(request);
- assertTrue(checkSucceeded(backupIdIncMultiple2));
- validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple2);
-
- // #5 - restore full backup for all tables
- TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
- TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
-
- LOG.debug("Restoring full " + backupIdFull);
- client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
- tablesRestoreFull, tablesMapFull, true));
-
- // #6.1 - check tables for full restore
- Admin hAdmin = TEST_UTIL.getAdmin();
- assertTrue(hAdmin.tableExists(table1_restore));
- assertTrue(hAdmin.tableExists(table2_restore));
- hAdmin.close();
-
- // #6.2 - checking row count of tables for full restore
- Table hTable = conn.getTable(table1_restore);
- Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
- hTable.close();
-
- hTable = conn.getTable(table2_restore);
- Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
- hTable.close();
-
- // #7 - restore incremental backup for multiple tables, with overwrite
- TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
- TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
- client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
- tablesRestoreIncMultiple, tablesMapIncMultiple, true));
- hTable = conn.getTable(table1_restore);
-
- LOG.debug("After incremental restore: " + hTable.getDescriptor());
- int countFamName = TEST_UTIL.countRows(hTable, famName);
- LOG.debug("f1 has " + countFamName + " rows");
- Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
-
- int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
- LOG.debug("f2 has " + countFam2Name + " rows");
- Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
-
- int countMobName = TEST_UTIL.countRows(hTable, mobName);
- LOG.debug("mob has " + countMobName + " rows");
- Assert.assertEquals(countMobName, NB_ROWS_MOB);
- hTable.close();
-
- hTable = conn.getTable(table2_restore);
- Assert.assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(hTable));
- hTable.close();
- admin.close();
- }
- }
-
- @Test
- public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
- byte[] mobFam = Bytes.toBytes("mob");
-
- List<TableName> tables = Lists.newArrayList(table1);
- TableDescriptor newTable1Desc =
- TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
- .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
- TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
-
- Connection conn = TEST_UTIL.getConnection();
- BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
- BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
- String fullBackupId = backupAdmin.backupTables(request);
- assertTrue(checkSucceeded(fullBackupId));
-
- TableName[] fromTables = new TableName[] { table1 };
- TableName[] toTables = new TableName[] { table1_restore };
-
- List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
- backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
- fromTables, toTables, true, true));
- List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();
-
- // Check that the backup files are the same before and after the restore process
- Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
- Assert.assertEquals(TEST_UTIL.countRows(table1_restore), NB_ROWS_IN_BATCH);
-
- int ROWS_TO_ADD = 1_000;
- // different IDs so that rows don't overlap
- insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
- insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);
-
- try (Admin admin = conn.getAdmin()) {
- List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
- for (HRegion region : currentRegions) {
- byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
- admin.splitRegionAsync(name).get();
- }
-
- TEST_UTIL.waitTableAvailable(table1);
-
- // Make sure we've split regions
- assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));
-
- request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
- String incrementalBackupId = backupAdmin.backupTables(request);
- assertTrue(checkSucceeded(incrementalBackupId));
- preRestoreBackupFiles = getBackupFiles();
- backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
- false, fromTables, toTables, true, true));
- postRestoreBackupFiles = getBackupFiles();
- Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
- Assert.assertEquals(NB_ROWS_IN_BATCH + ROWS_TO_ADD + ROWS_TO_ADD,
- TEST_UTIL.countRows(table1_restore));
-
- // test bulkloads
- HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
- String regionName = regionToBulkload.getRegionInfo().getEncodedName();
-
- insertIntoTable(conn, table1, famName, 5, ROWS_TO_ADD);
- insertIntoTable(conn, table1, mobFam, 6, ROWS_TO_ADD);
-
- doBulkload(table1, regionName, famName, mobFam);
-
- // we need to major compact the regions to make sure there are no references
- // and the regions are once again splittable
- TEST_UTIL.compact(true);
- TEST_UTIL.flush();
- TEST_UTIL.waitTableAvailable(table1);
-
- for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(table1)) {
- if (region.isSplittable()) {
- admin.splitRegionAsync(region.getRegionInfo().getEncodedNameAsBytes()).get();
- }
- }
-
- request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
- incrementalBackupId = backupAdmin.backupTables(request);
- assertTrue(checkSucceeded(incrementalBackupId));
-
- preRestoreBackupFiles = getBackupFiles();
- backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
- false, fromTables, toTables, true, true));
- postRestoreBackupFiles = getBackupFiles();
-
- Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
-
- int rowsExpected = TEST_UTIL.countRows(table1);
- int rowsActual = TEST_UTIL.countRows(table1_restore);
-
- Assert.assertEquals(rowsExpected, rowsActual);
- }
- }
-
- @Test
- public void TestIncBackupRestoreWithOriginalSplitsSeperateFs() throws Exception {
- String originalBackupRoot = BACKUP_ROOT_DIR;
- // prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
- try (Connection conn = ConnectionFactory.createConnection(conf1);
- BackupAdminImpl admin = new BackupAdminImpl(conn)) {
- String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
- BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();
-
- List<TableName> tables = Lists.newArrayList(table1);
-
- insertIntoTable(conn, table1, famName, 3, 100);
- String fullBackupId = takeFullBackup(tables, admin, true);
- assertTrue(checkSucceeded(fullBackupId));
-
- insertIntoTable(conn, table1, famName, 4, 100);
-
- HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
- String regionName = regionToBulkload.getRegionInfo().getEncodedName();
- doBulkload(table1, regionName, famName);
-
- BackupRequest request =
- createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
- String incrementalBackupId = admin.backupTables(request);
- assertTrue(checkSucceeded(incrementalBackupId));
-
- TableName[] fromTable = new TableName[] { table1 };
- TableName[] toTable = new TableName[] { table1_restore };
-
- // Using original splits
- admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
- fromTable, toTable, true, true));
-
- int actualRowCount = TEST_UTIL.countRows(table1_restore);
- int expectedRowCount = TEST_UTIL.countRows(table1);
- assertEquals(expectedRowCount, actualRowCount);
-
- // Using new splits
- admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
- fromTable, toTable, true, false));
-
- expectedRowCount = TEST_UTIL.countRows(table1);
- assertEquals(expectedRowCount, actualRowCount);
-
- } finally {
- BACKUP_ROOT_DIR = originalBackupRoot;
- }
-
- }
-
- @Test
- public void TestIncBackupRestoreHandlesArchivedFiles() throws Exception {
- byte[] fam2 = Bytes.toBytes("f2");
- TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
- .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).build()).build();
- TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
- try (Connection conn = ConnectionFactory.createConnection(conf1);
- BackupAdminImpl admin = new BackupAdminImpl(conn)) {
- String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
- BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();
-
- List<TableName> tables = Lists.newArrayList(table1);
-
- insertIntoTable(conn, table1, famName, 3, 100);
- String fullBackupId = takeFullBackup(tables, admin, true);
- assertTrue(checkSucceeded(fullBackupId));
-
- insertIntoTable(conn, table1, famName, 4, 100);
-
- HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
- String regionName = regionToBulkload.getRegionInfo().getEncodedName();
- // Requires a mult-fam bulkload to ensure we're appropriately handling
- // multi-file bulkloads
- Path regionDir = doBulkload(table1, regionName, famName, fam2);
-
- // archive the files in the region directory
- Path archiveDir =
- HFileArchiveUtil.getStoreArchivePath(conf1, table1, regionName, Bytes.toString(famName));
- TEST_UTIL.getTestFileSystem().mkdirs(archiveDir);
- RemoteIterator<LocatedFileStatus> iter =
- TEST_UTIL.getTestFileSystem().listFiles(regionDir, true);
- List<Path> paths = new ArrayList<>();
- while (iter.hasNext()) {
- Path path = iter.next().getPath();
- if (path.toString().contains("_SeqId_")) {
- paths.add(path);
- }
- }
- assertTrue(paths.size() > 1);
- Path path = paths.get(0);
- String name = path.toString();
- int startIdx = name.lastIndexOf(Path.SEPARATOR);
- String filename = name.substring(startIdx + 1);
- Path archiveFile = new Path(archiveDir, filename);
- // archive 1 of the files
- boolean success = TEST_UTIL.getTestFileSystem().rename(path, archiveFile);
- assertTrue(success);
- assertTrue(TEST_UTIL.getTestFileSystem().exists(archiveFile));
- assertFalse(TEST_UTIL.getTestFileSystem().exists(path));
-
- BackupRequest request =
- createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
- String incrementalBackupId = admin.backupTables(request);
- assertTrue(checkSucceeded(incrementalBackupId));
-
- TableName[] fromTable = new TableName[] { table1 };
- TableName[] toTable = new TableName[] { table1_restore };
-
- admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
- fromTable, toTable, true));
-
- int actualRowCount = TEST_UTIL.countRows(table1_restore);
- int expectedRowCount = TEST_UTIL.countRows(table1);
- assertEquals(expectedRowCount, actualRowCount);
- }
- }
-
- private void checkThrowsCFMismatch(IOException ex, List<TableName> tables) {
- Throwable cause = Throwables.getRootCause(ex);
- assertEquals(cause.getClass(), ColumnFamilyMismatchException.class);
- ColumnFamilyMismatchException e = (ColumnFamilyMismatchException) cause;
- assertEquals(tables, e.getMismatchedTables());
- }
-
- private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin)
- throws IOException {
- return takeFullBackup(tables, backupAdmin, false);
- }
-
- private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin,
- boolean noChecksumVerify) throws IOException {
- BackupRequest req =
- createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR, noChecksumVerify);
- String backupId = backupAdmin.backupTables(req);
- checkSucceeded(backupId);
- return backupId;
- }
-
- private static Path doBulkload(TableName tn, String regionName, byte[]... fams)
- throws IOException {
- Path regionDir = createHFiles(tn, regionName, fams);
- Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> results =
- BulkLoadHFiles.create(conf1).bulkLoad(tn, regionDir);
- assertFalse(results.isEmpty());
- return regionDir;
- }
-
- private static Path createHFiles(TableName tn, String regionName, byte[]... fams)
- throws IOException {
- Path rootdir = CommonFSUtils.getRootDir(conf1);
- Path regionDir = CommonFSUtils.getRegionDir(rootdir, tn, regionName);
-
- FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
- fs.mkdirs(rootdir);
-
- for (byte[] fam : fams) {
- Path famDir = new Path(regionDir, Bytes.toString(fam));
- Path hFileDir = new Path(famDir, UUID.randomUUID().toString());
- HFileTestUtil.createHFile(conf1, fs, hFileDir, fam, qualName, BULKLOAD_START_KEY,
- BULKLOAD_END_KEY, 1000);
- }
-
- return regionDir;
- }
-
- /**
- * Check that backup manifest can be produced for a different root. Users may want to move
- * existing backups to a different location.
- */
- private void validateRootPathCanBeOverridden(String originalPath, String backupId)
- throws IOException {
- String anotherRootDir = "/some/other/root/dir";
- Path anotherPath = new Path(anotherRootDir, backupId);
- BackupManifest.BackupImage differentLocationImage = BackupManifest.hydrateRootDir(
- HBackupFileSystem.getManifest(conf1, new Path(originalPath), backupId).getBackupImage(),
- anotherPath);
- assertEquals(differentLocationImage.getRootDir(), anotherRootDir);
- for (BackupManifest.BackupImage ancestor : differentLocationImage.getAncestors()) {
- assertEquals(anotherRootDir, ancestor.getRootDir());
- }
- }
-
- private List<LocatedFileStatus> getBackupFiles() throws IOException {
- FileSystem fs = TEST_UTIL.getTestFileSystem();
- RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
- List<LocatedFileStatus> files = new ArrayList<>();
-
- while (iter.hasNext()) {
- files.add(iter.next());
- }
-
- return files;
- }
-}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestore.java
new file mode 100644
index 00000000000..f7a29fa5df4
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestore.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+
+@Tag(LargeTests.TAG)
+public class TestIncrementalBackupRestore extends IncrementalBackupRestoreTestBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupRestore.class);
+
+ // implement all test cases in 1 test since incremental
+ // backup/restore has dependencies
+ @Test
+ public void testIncBackupRestore() throws Exception {
+ int ADD_ROWS = 99;
+
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+ List<TableName> tables = Lists.newArrayList(table1, table2);
+ final byte[] fam3Name = Bytes.toBytes("f3");
+ final byte[] mobName = Bytes.toBytes("mob");
+
+ TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
+ .setMobThreshold(5L).build())
+ .build();
+ TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+ try (Connection conn = ConnectionFactory.createConnection(conf1);
+ Admin admin = conn.getAdmin()) {
+ int NB_ROWS_FAM3 = 6;
+ insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
+ insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
+
+ BackupAdminImpl client = new BackupAdminImpl(conn);
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+ String backupIdFull = takeFullBackup(tables, client);
+ validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdFull);
+ assertTrue(checkSucceeded(backupIdFull));
+
+ // #2 - insert some data to table
+ Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+ LOG.debug("writing {} rows to {}", ADD_ROWS, table1);
+ assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
+ LOG.debug("written {} rows to {}", ADD_ROWS, table1);
+ // additionally, insert rows to MOB cf
+ int NB_ROWS_MOB = 111;
+ insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
+ LOG.debug("written {} rows to {} to Mob enabled CF", NB_ROWS_MOB, table1);
+ t1.close();
+ assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
+ Table t2 = conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+ assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(t2));
+ t2.close();
+ LOG.debug("written 5 rows to {}", table2);
+ // split table1
+ MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+ List<HRegion> regions = cluster.getRegions(table1);
+ byte[] name = regions.get(0).getRegionInfo().getRegionName();
+ long startSplitTime = EnvironmentEdgeManager.currentTime();
+ try {
+ admin.splitRegionAsync(name).get();
+ } catch (Exception e) {
+ // although split fail, this may not affect following check in current API,
+ // exception will be thrown.
+ LOG.debug("region is not splittable, because " + e);
+ }
+ TEST_UTIL.waitTableAvailable(table1);
+ long endSplitTime = EnvironmentEdgeManager.currentTime();
+ // split finished
+ LOG.debug("split finished in = {}", endSplitTime - startSplitTime);
+
+ // #3 - incremental backup for multiple tables
+ tables = Lists.newArrayList(table1, table2);
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ String backupIdIncMultiple = client.backupTables(request);
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+ BackupManifest manifest =
+ HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
+ assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));
+ validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple);
+
+ // add column family f2 to table1
+ // drop column family f3
+ final byte[] fam2Name = Bytes.toBytes("f2");
+ newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
+ .build();
+ TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+ // check that an incremental backup fails because the CFs don't match
+ final List<TableName> tablesCopy = tables;
+ IOException ex = assertThrows(IOException.class, () -> client
+ .backupTables(createBackupRequest(BackupType.INCREMENTAL, tablesCopy, BACKUP_ROOT_DIR)));
+ checkThrowsCFMismatch(ex, ImmutableList.of(table1));
+ takeFullBackup(tables, client);
+
+ int NB_ROWS_FAM2 = 7;
+ Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
+ t3.close();
+
+ // Wait for 5 sec to make sure that old WALs were deleted
+ Thread.sleep(5000);
+
+ // #4 - additional incremental backup for multiple tables
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ String backupIdIncMultiple2 = client.backupTables(request);
+ assertTrue(checkSucceeded(backupIdIncMultiple2));
+ validateRootPathCanBeOverridden(BACKUP_ROOT_DIR, backupIdIncMultiple2);
+
+ // #5 - restore full backup for all tables
+ TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
+ TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+ LOG.debug("Restoring full {}", backupIdFull);
+ client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+ tablesRestoreFull, tablesMapFull, true));
+
+ // #6.1 - check tables for full restore
+ Admin hAdmin = TEST_UTIL.getAdmin();
+ assertTrue(hAdmin.tableExists(table1_restore));
+ assertTrue(hAdmin.tableExists(table2_restore));
+
+ // #6.2 - checking row count of tables for full restore
+ try (Table hTable = conn.getTable(table1_restore)) {
+ assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+ }
+
+ try (Table hTable = conn.getTable(table2_restore)) {
+ assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
+ }
+
+ // #7 - restore incremental backup for multiple tables, with overwrite
+ TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
+ TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+ client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
+ tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+ try (Table hTable = conn.getTable(table1_restore)) {
+ LOG.debug("After incremental restore: {}", hTable.getDescriptor());
+ int countFamName = TEST_UTIL.countRows(hTable, famName);
+ LOG.debug("f1 has " + countFamName + " rows");
+ assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+ int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+ LOG.debug("f2 has {} rows", countFam2Name);
+ assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+ int countMobName = TEST_UTIL.countRows(hTable, mobName);
+ LOG.debug("mob has {} rows", countMobName);
+ assertEquals(countMobName, NB_ROWS_MOB);
+ }
+
+ try (Table hTable = conn.getTable(table2_restore)) {
+ assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(hTable));
+ }
+ }
+ }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreHandlesArchivedFiles.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreHandlesArchivedFiles.java
new file mode 100644
index 00000000000..8e9deed2ab8
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreHandlesArchivedFiles.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Tag(LargeTests.TAG)
+public class TestIncrementalBackupRestoreHandlesArchivedFiles
+ extends IncrementalBackupRestoreTestBase {
+
+ @Test
+ public void testIncBackupRestoreHandlesArchivedFiles() throws Exception {
+ byte[] fam2 = Bytes.toBytes("f2");
+ TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).build()).build();
+ TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+ try (Connection conn = ConnectionFactory.createConnection(conf1);
+ BackupAdminImpl admin = new BackupAdminImpl(conn)) {
+ String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
+ BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();
+
+ List<TableName> tables = Lists.newArrayList(table1);
+
+ insertIntoTable(conn, table1, famName, 3, 100);
+ String fullBackupId = takeFullBackup(tables, admin, true);
+ assertTrue(checkSucceeded(fullBackupId));
+
+ insertIntoTable(conn, table1, famName, 4, 100);
+
+ HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
+ String regionName = regionToBulkload.getRegionInfo().getEncodedName();
+ // Requires a mult-fam bulkload to ensure we're appropriately handling
+ // multi-file bulkloads
+ Path regionDir = doBulkload(table1, regionName, famName, fam2);
+
+ // archive the files in the region directory
+ Path archiveDir =
+ HFileArchiveUtil.getStoreArchivePath(conf1, table1, regionName, Bytes.toString(famName));
+ TEST_UTIL.getTestFileSystem().mkdirs(archiveDir);
+ RemoteIterator<LocatedFileStatus> iter =
+ TEST_UTIL.getTestFileSystem().listFiles(regionDir, true);
+ List<Path> paths = new ArrayList<>();
+ while (iter.hasNext()) {
+ Path path = iter.next().getPath();
+ if (path.toString().contains("_SeqId_")) {
+ paths.add(path);
+ }
+ }
+ assertTrue(paths.size() > 1);
+ Path path = paths.get(0);
+ String name = path.toString();
+ int startIdx = name.lastIndexOf(Path.SEPARATOR);
+ String filename = name.substring(startIdx + 1);
+ Path archiveFile = new Path(archiveDir, filename);
+ // archive 1 of the files
+ boolean success = TEST_UTIL.getTestFileSystem().rename(path, archiveFile);
+ assertTrue(success);
+ assertTrue(TEST_UTIL.getTestFileSystem().exists(archiveFile));
+ assertFalse(TEST_UTIL.getTestFileSystem().exists(path));
+
+ BackupRequest request =
+ createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
+ String incrementalBackupId = admin.backupTables(request);
+ assertTrue(checkSucceeded(incrementalBackupId));
+
+ TableName[] fromTable = new TableName[] { table1 };
+ TableName[] toTable = new TableName[] { table1_restore };
+
+ admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
+ fromTable, toTable, true));
+
+ int actualRowCount = TEST_UTIL.countRows(table1_restore);
+ int expectedRowCount = TEST_UTIL.countRows(table1);
+ assertEquals(expectedRowCount, actualRowCount);
+ }
+ }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreWithOriginalSplits.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreWithOriginalSplits.java
new file mode 100644
index 00000000000..8bece20839c
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreWithOriginalSplits.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.List;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Tag(LargeTests.TAG)
+public class TestIncrementalBackupRestoreWithOriginalSplits
+ extends IncrementalBackupRestoreTestBase {
+
+ @Test
+ public void testIncBackupRestoreWithOriginalSplits() throws Exception {
+ byte[] mobFam = Bytes.toBytes("mob");
+
+ List<TableName> tables = Lists.newArrayList(table1);
+ TableDescriptor newTable1Desc =
+ TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
+ TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+ try (Connection conn = TEST_UTIL.getConnection();
+ BackupAdminImpl backupAdmin = new BackupAdminImpl(conn); Admin admin = conn.getAdmin()) {
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+ String fullBackupId = backupAdmin.backupTables(request);
+ assertTrue(checkSucceeded(fullBackupId));
+
+ TableName[] fromTables = new TableName[] { table1 };
+ TableName[] toTables = new TableName[] { table1_restore };
+
+ List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
+ backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
+ fromTables, toTables, true, true));
+ List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();
+
+ // Check that the backup files are the same before and after the restore process
+ assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
+ assertEquals(TEST_UTIL.countRows(table1_restore), NB_ROWS_IN_BATCH);
+
+ int ROWS_TO_ADD = 1_000;
+ // different IDs so that rows don't overlap
+ insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
+ insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);
+ List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
+ for (HRegion region : currentRegions) {
+ byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
+ admin.splitRegionAsync(name).get();
+ }
+
+ TEST_UTIL.waitTableAvailable(table1);
+
+ // Make sure we've split regions
+ assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));
+
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ String incrementalBackupId = backupAdmin.backupTables(request);
+ assertTrue(checkSucceeded(incrementalBackupId));
+ preRestoreBackupFiles = getBackupFiles();
+ backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
+ false, fromTables, toTables, true, true));
+ postRestoreBackupFiles = getBackupFiles();
+ assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
+ assertEquals(NB_ROWS_IN_BATCH + ROWS_TO_ADD + ROWS_TO_ADD,
+ TEST_UTIL.countRows(table1_restore));
+
+ // test bulkloads
+ HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
+ String regionName = regionToBulkload.getRegionInfo().getEncodedName();
+
+ insertIntoTable(conn, table1, famName, 5, ROWS_TO_ADD);
+ insertIntoTable(conn, table1, mobFam, 6, ROWS_TO_ADD);
+
+ doBulkload(table1, regionName, famName, mobFam);
+
+ // we need to major compact the regions to make sure there are no references
+ // and the regions are once again splittable
+ TEST_UTIL.compact(true);
+ TEST_UTIL.flush();
+ TEST_UTIL.waitTableAvailable(table1);
+
+ for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(table1)) {
+ if (region.isSplittable()) {
+ admin.splitRegionAsync(region.getRegionInfo().getEncodedNameAsBytes()).get();
+ }
+ }
+
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ incrementalBackupId = backupAdmin.backupTables(request);
+ assertTrue(checkSucceeded(incrementalBackupId));
+
+ preRestoreBackupFiles = getBackupFiles();
+ backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
+ false, fromTables, toTables, true, true));
+ postRestoreBackupFiles = getBackupFiles();
+
+ assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
+
+ int rowsExpected = TEST_UTIL.countRows(table1);
+ int rowsActual = TEST_UTIL.countRows(table1_restore);
+
+ assertEquals(rowsExpected, rowsActual);
+ }
+ }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreWithOriginalSplitsSeperateFs.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreWithOriginalSplitsSeperateFs.java
new file mode 100644
index 00000000000..24dd0aa4e2d
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupRestoreWithOriginalSplitsSeperateFs.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Tag(LargeTests.TAG)
+public class TestIncrementalBackupRestoreWithOriginalSplitsSeperateFs
+ extends IncrementalBackupRestoreTestBase {
+
+ @Test
+ public void testIncBackupRestoreWithOriginalSplitsSeperateFs() throws Exception {
+ // prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
+ try (Connection conn = ConnectionFactory.createConnection(conf1);
+ BackupAdminImpl admin = new BackupAdminImpl(conn)) {
+ String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
+ BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();
+
+ List<TableName> tables = Lists.newArrayList(table1);
+
+ insertIntoTable(conn, table1, famName, 3, 100);
+ String fullBackupId = takeFullBackup(tables, admin, true);
+ assertTrue(checkSucceeded(fullBackupId));
+
+ insertIntoTable(conn, table1, famName, 4, 100);
+
+ HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
+ String regionName = regionToBulkload.getRegionInfo().getEncodedName();
+ doBulkload(table1, regionName, famName);
+
+ BackupRequest request =
+ createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, true);
+ String incrementalBackupId = admin.backupTables(request);
+ assertTrue(checkSucceeded(incrementalBackupId));
+
+ TableName[] fromTable = new TableName[] { table1 };
+ TableName[] toTable = new TableName[] { table1_restore };
+
+ // Using original splits
+ admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
+ fromTable, toTable, true, true));
+
+ int actualRowCount = TEST_UTIL.countRows(table1_restore);
+ int expectedRowCount = TEST_UTIL.countRows(table1);
+ assertEquals(expectedRowCount, actualRowCount);
+
+ // Using new splits
+ admin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId, false,
+ fromTable, toTable, true, false));
+
+ expectedRowCount = TEST_UTIL.countRows(table1);
+ assertEquals(expectedRowCount, actualRowCount);
+ }
+ }
+}
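
The two restores above differ only in the final boolean passed to BackupUtils.createRestoreRequest; the inline comments suggest it switches between keeping the original region splits and letting the restore derive new ones. A small usage sketch under that assumption; the parameter name keepOriginalSplits and the wrapper class are illustrative only, not from this patch.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.RestoreRequest;
  import org.apache.hadoop.hbase.backup.util.BackupUtils;

  final class RestoreRequestSketch {
    private RestoreRequestSketch() {
    }

    /** Mirrors the two calls in the test above; only the last flag differs between them. */
    static RestoreRequest restoreRequest(String backupRootDir, String backupId, TableName[] from,
      TableName[] to, boolean keepOriginalSplits) {
      return BackupUtils.createRestoreRequest(backupRootDir, backupId, false, from, to, true,
        keepOriginalSplits);
    }
  }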
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 6667af0de25..493a8c0b01d 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -850,9 +850,10 @@ public class BucketCache implements BlockCache, HeapSize {
blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, evictedByEvictionProcess);
return true;
}
- LOG.debug("Failed to remove key {} from map. Maybe entries in the map
now differ? "
- + "Original found entry: {}, what's in the map now: {}", cacheKey,
- bucketEntryToUse, backingMap.get(cacheKey));
+ LOG.debug(
+ "Failed to remove key {} from map. Maybe entries in the map now
differ? "
+ + "Original found entry: {}, what's in the map now: {}",
+ cacheKey, bucketEntryToUse, backingMap.get(cacheKey));
return false;
});
}
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
index 5001113f248..53c47fea0a3 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
@@ -180,7 +180,7 @@ public class TestBlockEvictionOnRegionMovement {
try {
TEST_UTIL.shutdownMiniCluster();
} catch (NullPointerException e) {
- //shutdown clears the FilePathStringPool. Since it's a singleton, the second RS shutting down
+ // shutdown clears the FilePathStringPool. Since it's a singleton, the second RS shutting down
// might try to persist bucket cache after string pool is cleared and NPE is thrown. This
// won't happen in real clusters, since there will be only one BucketCache instance per JVM.
}
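
The comment in the hunk above explains why the teardown tolerates a NullPointerException: the FilePathStringPool is a JVM-wide singleton, so the second region server's bucket cache persistence can run after the pool has already been cleared. A minimal sketch of that teardown pattern, assuming the branch-2 HBaseTestingUtility harness; the helper name is hypothetical.

  import org.apache.hadoop.hbase.HBaseTestingUtility;

  final class QuietClusterShutdown {
    private QuietClusterShutdown() {
    }

    static void shutdownTolerating(HBaseTestingUtility util) throws Exception {
      try {
        util.shutdownMiniCluster();
      } catch (NullPointerException e) {
        // Tolerated only in tests: the second region server may persist its bucket cache after
        // the shared string pool was cleared; real clusters run one BucketCache per JVM.
      }
    }
  }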
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java
index 9cf677b77f1..c6bc8da5f6b 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java
@@ -109,10 +109,9 @@ public class TestVerifyBucketCacheFile {
BucketCache bucketCache = null;
BucketCache recoveredBucketCache = null;
try {
- bucketCache =
- new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize,
- constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"
- + name.getMethodName());
+ bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+ constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
+ testDir + "/bucket.persistence" + name.getMethodName());
assertTrue(bucketCache.waitForCacheInitialization(10000));
long usedSize = bucketCache.getAllocator().getUsedSize();
assertEquals(0, usedSize);
@@ -128,10 +127,9 @@ public class TestVerifyBucketCacheFile {
// 1.persist cache to file
bucketCache.shutdown();
// restore cache from file
- bucketCache =
- new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize,
- constructedBlockSizes, writeThreads, writerQLen, testDir
- + "/bucket.persistence" + name.getMethodName());
+ bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+ constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
+ testDir + "/bucket.persistence" + name.getMethodName());
assertTrue(bucketCache.waitForCacheInitialization(10000));
assertEquals(usedSize, bucketCache.getAllocator().getUsedSize());
// persist cache to file
@@ -142,10 +140,9 @@ public class TestVerifyBucketCacheFile {
FileSystems.getDefault().getPath(testDir.toString(), "bucket.cache");
assertTrue(Files.deleteIfExists(cacheFile));
// can't restore cache from file
- recoveredBucketCache =
- new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize,
- constructedBlockSizes, writeThreads, writerQLen, testDir
- + "/bucket.persistence" + name.getMethodName());
+ recoveredBucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+ constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
+ testDir + "/bucket.persistence" + name.getMethodName());
assertTrue(recoveredBucketCache.waitForCacheInitialization(10000));
assertEquals(0, recoveredBucketCache.getAllocator().getUsedSize());
assertEquals(0, recoveredBucketCache.backingMap.size());
@@ -160,14 +157,14 @@ public class TestVerifyBucketCacheFile {
recoveredBucketCache.shutdown();
// 3.delete backingMap persistence file
- final java.nio.file.Path mapFile =
- FileSystems.getDefault().getPath(testDir.toString(), "bucket.persistence" + name.getMethodName());
+ final java.nio.file.Path mapFile = FileSystems.getDefault().getPath(testDir.toString(),
+ "bucket.persistence" + name.getMethodName());
assertTrue(Files.deleteIfExists(mapFile));
// can't restore cache from file
bucketCache = new BucketCache("file:" + testDir + "/bucket.cache",
capacitySize,
constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
- testDir + "/bucket.persistence" + name.getMethodName(),
- DEFAULT_ERROR_TOLERATION_DURATION, conf);
+ testDir + "/bucket.persistence" + name.getMethodName(),
DEFAULT_ERROR_TOLERATION_DURATION,
+ conf);
assertTrue(bucketCache.waitForCacheInitialization(10000));
waitPersistentCacheValidation(conf, bucketCache);
assertEquals(0, bucketCache.getAllocator().getUsedSize());
@@ -190,11 +187,11 @@ public class TestVerifyBucketCacheFile {
TEST_UTIL.getTestFileSystem().mkdirs(testDir);
Configuration conf = TEST_UTIL.getConfiguration();
conf.setLong(CacheConfig.BUCKETCACHE_PERSIST_INTERVAL_KEY, 300);
- String mapFileName = testDir + "/bucket.persistence"
- + name.getMethodName() + EnvironmentEdgeManager.currentTime();
+ String mapFileName =
+ testDir + "/bucket.persistence" + name.getMethodName() +
EnvironmentEdgeManager.currentTime();
BucketCache bucketCache = null;
try {
- bucketCache = new BucketCache("file:" + testDir + "/bucket.cache" , capacitySize,
+ bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, mapFileName,
DEFAULT_ERROR_TOLERATION_DURATION, conf);
assertTrue(bucketCache.waitForCacheInitialization(10000));
@@ -249,8 +246,8 @@ public class TestVerifyBucketCacheFile {
try {
bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
- testDir + "/bucket.persistence" + name.getMethodName(),
- DEFAULT_ERROR_TOLERATION_DURATION, conf);
+ testDir + "/bucket.persistence" + name.getMethodName(),
DEFAULT_ERROR_TOLERATION_DURATION,
+ conf);
assertTrue(bucketCache.waitForCacheInitialization(10000));
long usedSize = bucketCache.getAllocator().getUsedSize();
assertEquals(0, usedSize);
@@ -275,8 +272,8 @@ public class TestVerifyBucketCacheFile {
// can't restore cache from file
bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
- testDir + "/bucket.persistence" + name.getMethodName(),
- DEFAULT_ERROR_TOLERATION_DURATION, conf);
+ testDir + "/bucket.persistence" + name.getMethodName(),
DEFAULT_ERROR_TOLERATION_DURATION,
+ conf);
assertTrue(bucketCache.waitForCacheInitialization(10000));
waitPersistentCacheValidation(conf, bucketCache);
assertEquals(0, bucketCache.getAllocator().getUsedSize());
@@ -315,8 +312,8 @@ public class TestVerifyBucketCacheFile {
try {
bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
- testDir + "/bucket.persistence" + name.getMethodName(),
- DEFAULT_ERROR_TOLERATION_DURATION, conf);
+ testDir + "/bucket.persistence" + name.getMethodName(),
DEFAULT_ERROR_TOLERATION_DURATION,
+ conf);
assertTrue(bucketCache.waitForCacheInitialization(10000));
long usedSize = bucketCache.getAllocator().getUsedSize();
assertEquals(0, usedSize);
@@ -343,8 +340,8 @@ public class TestVerifyBucketCacheFile {
// can't restore cache from file
bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
- testDir + "/bucket.persistence" + name.getMethodName(),
- DEFAULT_ERROR_TOLERATION_DURATION, conf);
+ testDir + "/bucket.persistence" + name.getMethodName(),
DEFAULT_ERROR_TOLERATION_DURATION,
+ conf);
assertTrue(bucketCache.waitForCacheInitialization(10000));
waitPersistentCacheValidation(conf, bucketCache);
assertEquals(usedSize, bucketCache.getAllocator().getUsedSize());
@@ -373,8 +370,8 @@ public class TestVerifyBucketCacheFile {
Configuration conf = HBaseConfiguration.create();
// Disables the persister thread by setting its interval to MAX_VALUE
conf.setLong(BUCKETCACHE_PERSIST_INTERVAL_KEY, Long.MAX_VALUE);
- String mapFileName = testDir + "/bucket.persistence"
- + EnvironmentEdgeManager.currentTime() + name.getMethodName();
+ String mapFileName =
+ testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime() +
name.getMethodName();
BucketCache bucketCache = null;
BucketCache newBucketCache = null;
try {
@@ -449,8 +446,8 @@ public class TestVerifyBucketCacheFile {
Configuration conf = HBaseConfiguration.create();
conf.setLong(BACKING_MAP_PERSISTENCE_CHUNK_SIZE, chunkSize);
- String mapFileName = testDir + "/bucket.persistence"
- + EnvironmentEdgeManager.currentTime() + name.getMethodName();
+ String mapFileName =
+ testDir + "/bucket.persistence" + EnvironmentEdgeManager.currentTime() +
name.getMethodName();
BucketCache bucketCache = null;
BucketCache newBucketCache = null;
try {
@@ -491,8 +488,8 @@ public class TestVerifyBucketCacheFile {
// We need to enforce these two shutdown to make sure we don't leave "orphan" persister
// threads running while the unit test JVM instance is up.
// This would lead to a NPE because of the StringPoolCleanup in bucketCache.shutdown
- // but that's fine because we don't have more than one bucket cache instance in real life
- // and here we passed the point where we stop background threads inside shutdown.
+ // but that's fine because we don't have more than one bucket cache instance in real life
+ // and here we passed the point where we stop background threads inside shutdown.
}
}
TEST_UTIL.cleanupTestDir();
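
The reflowed constructor calls above all follow the same convention for the bucket cache persistence file: derive a per-test path from the test directory, the test method name and the current time, so repeated or concurrent runs never pick up a stale persistence file. A tiny illustrative helper under that reading; the class and method names are hypothetical.

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  final class PersistencePathSketch {
    private PersistencePathSketch() {
    }

    /** Builds a persistence path unique to the calling test method and the current run. */
    static String uniquePersistencePath(String testDir, String methodName) {
      return testDir + "/bucket.persistence" + methodName + EnvironmentEdgeManager.currentTime();
    }
  }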