http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
deleted file mode 100644
index 0944ea2..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-/**
- * Create multiple backups for two tables: table1, table2, then perform one delete
- */
-@Category(LargeTests.class)
-public class TestBackupMultipleDeletes extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestBackupMultipleDeletes.class);
-
-  @Test
-  public void testBackupMultipleDeletes() throws Exception {
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-    List<TableName> tables = Lists.newArrayList(table1, table2);
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdmin client = new BackupAdminImpl(conn);
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdFull));
-    // #2 - insert some data to table table1
-    HTable t1 = (HTable) conn.getTable(table1);
-    Put p1;
-    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-      p1 = new Put(Bytes.toBytes("row-t1" + i));
-      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t1.put(p1);
-    }
-    Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
-    t1.close();
-    // #3 - incremental backup for table1
-    tables = Lists.newArrayList(table1);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc1 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdInc1));
-    // #4 - insert some data to table table2
-    HTable t2 = (HTable) conn.getTable(table2);
-    Put p2 = null;
-    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-      p2 = new Put(Bytes.toBytes("row-t2" + i));
-      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t2.put(p2);
-    }
-    // #5 - incremental backup for table1, table2
-    tables = Lists.newArrayList(table1, table2);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc2 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdInc2));
-    // #6 - insert some data to table table1
-    t1 = (HTable) conn.getTable(table1);
-    for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
-      p1 = new Put(Bytes.toBytes("row-t1" + i));
-      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t1.put(p1);
-    }
-    // #7 - incremental backup for table1
-    tables = Lists.newArrayList(table1);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc3 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdInc3));
-    // #8 - insert some data to table table2
-    t2 = (HTable) conn.getTable(table2);
-    for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
-      p2 = new Put(Bytes.toBytes("row-t2" + i));
-      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t2.put(p2);
-    }
-    // #9 - incremental backup for table1, table2
-    tables = Lists.newArrayList(table1, table2);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc4 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdInc4));
-    // #10 - full backup for table3
-    tables = Lists.newArrayList(table3);
-    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull2 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdFull2));
-    // #11 - incremental backup for table3
-    tables = Lists.newArrayList(table3);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc5 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdInc5));
-    LOG.info("Delete backupIdInc2");
-    client.deleteBackups(new String[] { backupIdInc2 });
-    LOG.info("Delete backupIdInc2 done");
-    List<BackupInfo> list = client.getHistory(100);
-    // First check the number of backup images after the delete
-    assertEquals(4, list.size());
-    // Then verify that backupIdInc2, backupIdInc3 and backupIdInc4 are gone
-    Set<String> ids = new HashSet<String>();
-    ids.add(backupIdInc2);
-    ids.add(backupIdInc3);
-    ids.add(backupIdInc4);
-    for (BackupInfo info : list) {
-      String backupId = info.getBackupId();
-      if (ids.contains(backupId)) {
-        Assert.fail("Backup id " + backupId + " should have been deleted");
-      }
-    }
-    // Verify that backupIdInc5 contains only table3
-    boolean found = false;
-    for (BackupInfo info : list) {
-      String backupId = info.getBackupId();
-      if (backupId.equals(backupIdInc5)) {
-        assertTrue(info.getTables().size() == 1);
-        assertEquals(table3, info.getTableNames().get(0));
-        found = true;
-      }
-    }
-    assertTrue(found);
-    admin.close();
-    conn.close();
-  }
-
-}
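The deleted test above was the only coverage of chained-delete semantics: removing one incremental image (backupIdInc2) is expected to also remove the later incremental images built on top of it (backupIdInc3, backupIdInc4), while leaving the independent table3 chain intact. A minimal sketch of that client flow, assuming a running cluster plus the TestBackupBase fixtures used throughout these tests (conf1, BACKUP_ROOT_DIR, the createBackupRequest helper):

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      BackupAdmin client = new BackupAdminImpl(conn);
      // The full image roots the chain; incrementals depend on it and on each other
      BackupRequest request = createBackupRequest(BackupType.FULL,
          Lists.newArrayList(table1, table2), BACKUP_ROOT_DIR);
      String fullId = client.backupTables(request);
      request = createBackupRequest(BackupType.INCREMENTAL,
          Lists.newArrayList(table1), BACKUP_ROOT_DIR);
      String incId = client.backupTables(request);
      // Deleting incId is expected to cascade to incrementals that depend on it
      client.deleteBackups(new String[] { incId });
      List<BackupInfo> remaining = client.getHistory(100);
    }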
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java
deleted file mode 100644
index 686d34b..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestBackupRepair extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestBackupRepair.class);
-
-  @Test
-  public void testFullBackupWithFailuresAndRestore() throws Exception {
-    autoRestoreOnFailure = false;
-
-    conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
-      FullTableBackupClientForTest.class.getName());
-    int maxStage = Stage.values().length - 1;
-    // Fail at stages 0 .. maxStage - 1 in a loop
-    for (int stage = 0; stage < maxStage; stage++) {
-      LOG.info("Running stage " + stage);
-      runBackupAndFailAtStageWithRestore(stage);
-    }
-  }
-
-  public void runBackupAndFailAtStageWithRestore(int stage) throws Exception {
-    conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage);
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      int before = table.getBackupHistory().size();
-      String[] args =
-          new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
-              table1.getNameAsString() + "," + table2.getNameAsString() };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertFalse(ret == 0);
-
-      // Now run repair
-      args = new String[] { "repair" };
-
-      ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertTrue(ret == 0);
-
-      List<BackupInfo> backups = table.getBackupHistory();
-      int after = table.getBackupHistory().size();
-
-      assertTrue(after == before + 1);
-      for (BackupInfo data : backups) {
-        String backupId = data.getBackupId();
-        assertFalse(checkSucceeded(backupId));
-      }
-      Set<TableName> tables = table.getIncrementalBackupTableSet(BACKUP_ROOT_DIR);
-      assertTrue(tables.size() == 0);
-    }
-  }
-
-}
\ No newline at end of file
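The repair flow exercised here is purely command-line driven: a backup rigged to fail mid-session leaves a failed session behind, and a follow-up `repair` command rolls it back. A minimal sketch of the two invocations, assuming the same TestBackupBase fixtures:

    // A backup that the injected test client forces to fail returns non-zero
    String[] create = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
        table1.getNameAsString() + "," + table2.getNameAsString() };
    int ret = ToolRunner.run(conf1, new BackupDriver(), create);  // != 0 expected
    // "repair" rolls back the failed session and returns zero on success
    ret = ToolRunner.run(conf1, new BackupDriver(), new String[] { "repair" });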
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
deleted file mode 100644
index 4e922a2..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestBackupShowHistory extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestBackupShowHistory.class);
-
-  private boolean findBackup(List<BackupInfo> history, String backupId) {
-    assertTrue(history.size() > 0);
-    boolean success = false;
-    for (BackupInfo info : history) {
-      if (info.getBackupId().equals(backupId)) {
-        success = true;
-        break;
-      }
-    }
-    return success;
-  }
-
-  /**
-   * Verify that a full backup on a single table with data is created correctly and that
-   * backup history works as expected.
-   * @throws Exception
-   */
-  @Test
-  public void testBackupHistory() throws Exception {
-    LOG.info("test backup history on a single table with data");
-
-    List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
-    assertTrue(checkSucceeded(backupId));
-    LOG.info("backup complete");
-
-    List<BackupInfo> history = getBackupAdmin().getHistory(10);
-    assertTrue(findBackup(history, backupId));
-    BackupInfo.Filter nullFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo info) {
-        return true;
-      }
-    };
-    history = BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), nullFilter);
-    assertTrue(findBackup(history, backupId));
-
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    System.setOut(new PrintStream(baos));
-
-    String[] args = new String[] { "history", "-n", "10", "-p", BACKUP_ROOT_DIR };
-    // Run history command
-    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-    assertTrue(ret == 0);
-    LOG.info("show_history");
-    String output = baos.toString();
-    LOG.info(output);
-    baos.close();
-    assertTrue(output.indexOf(backupId) > 0);
-
-    tableList = Lists.newArrayList(table2);
-    String backupId2 = fullTableBackup(tableList);
-    assertTrue(checkSucceeded(backupId2));
-    LOG.info("backup complete: " + table2);
-    BackupInfo.Filter tableNameFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo image) {
-        if (table1 == null) return true;
-        List<TableName> names = image.getTableNames();
-        return names.contains(table1);
-      }
-    };
-    BackupInfo.Filter tableSetFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo info) {
-        String backupId = info.getBackupId();
-        return backupId.startsWith("backup");
-      }
-    };
-
-    history = getBackupAdmin().getHistory(10, tableNameFilter, tableSetFilter);
-    assertTrue(history.size() > 0);
-    boolean success = true;
-    for (BackupInfo info : history) {
-      if (!info.getTableNames().contains(table1)) {
-        success = false;
-        break;
-      }
-    }
-    assertTrue(success);
-
-    history =
-        BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), tableNameFilter,
-          tableSetFilter);
-    assertTrue(history.size() > 0);
-    success = true;
-    for (BackupInfo info : history) {
-      if (!info.getTableNames().contains(table1)) {
-        success = false;
-        break;
-      }
-    }
-    assertTrue(success);
-
-    args =
-        new String[] { "history", "-n", "10", "-p", BACKUP_ROOT_DIR,
-          "-t", "table1", "-s", "backup" };
-    // Run history command
-    ret = ToolRunner.run(conf1, new BackupDriver(), args);
-    assertTrue(ret == 0);
-    LOG.info("show_history");
-  }
-
-}
\ No newline at end of file
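Backup history is readable two ways in this test: through a live cluster (getBackupAdmin().getHistory(n, filters...)) and straight from the backup destination with BackupUtils.getHistory(conf, n, path, filters...); when several filters are passed, an image has to satisfy all of them to be returned. A minimal filter sketch using the same fields:

    BackupInfo.Filter tableFilter = new BackupInfo.Filter() {
      @Override
      public boolean apply(BackupInfo info) {
        // Keep only images that cover table1
        return info.getTableNames().contains(table1);
      }
    };
    List<BackupInfo> history =
        BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), tableFilter);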
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
deleted file mode 100644
index 73d8d9f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestBackupStatusProgress extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestBackupStatusProgress.class);
-
-  /**
-   * Verify that backup status and progress are reported correctly for a full backup
-   * on a single table with data.
-   * @throws Exception
-   */
-  @Test
-  public void testBackupStatusProgress() throws Exception {
-    LOG.info("test backup status/progress on a single table with data");
-
-    List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
-    LOG.info("backup complete");
-    assertTrue(checkSucceeded(backupId));
-
-    BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
-    assertTrue(info.getState() == BackupState.COMPLETE);
-
-    LOG.debug(info.getShortDescription());
-    assertTrue(info.getProgress() > 0);
-  }
-
-  @Test
-  public void testBackupStatusProgressCommand() throws Exception {
-    LOG.info("test backup status/progress on a single table with data: command-line");
-
-    List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
-    LOG.info("backup complete");
-    assertTrue(checkSucceeded(backupId));
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    System.setOut(new PrintStream(baos));
-
-    String[] args = new String[] { "describe", backupId };
-    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-    assertTrue(ret == 0);
-    String response = baos.toString();
-    assertTrue(response.indexOf(backupId) > 0);
-    assertTrue(response.indexOf("COMPLETE") > 0);
-
-    baos = new ByteArrayOutputStream();
-    System.setOut(new PrintStream(baos));
-
-    args = new String[] { "progress", backupId };
-    ret = ToolRunner.run(conf1, new BackupDriver(), args);
-    assertTrue(ret == 0);
-    response = baos.toString();
-    assertTrue(response.indexOf(backupId) >= 0);
-    assertTrue(response.indexOf("progress") > 0);
-    assertTrue(response.indexOf("100") > 0);
-  }
-}
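Status and progress come from the same BackupInfo record whether queried through the API or through the "describe"/"progress" subcommands. A minimal sketch, assuming a completed backupId:

    BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
    if (info.getState() == BackupState.COMPLETE) {
      // The CLI "progress" output prints 100 for a finished session,
      // which is what the test greps for in the captured stdout
      LOG.info(info.getShortDescription() + ", progress=" + info.getProgress());
    }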
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
deleted file mode 100644
index 5814d87..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
+++ /dev/null
@@ -1,511 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test cases for backup system table API
- */
-@Category(MediumTests.class)
-public class TestBackupSystemTable {
-
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  protected static Configuration conf = UTIL.getConfiguration();
-  protected static MiniHBaseCluster cluster;
-  protected static Connection conn;
-  protected BackupSystemTable table;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
-    BackupManager.decorateMasterConfiguration(conf);
-    BackupManager.decorateRegionServerConfiguration(conf);
-    cluster = UTIL.startMiniCluster();
-    conn = UTIL.getConnection();
-  }
-
-  @Before
-  public void before() throws IOException {
-    table = new BackupSystemTable(conn);
-  }
-
-  @After
-  public void after() {
-    if (table != null) {
-      table.close();
-    }
-  }
-
-  @Test
-  public void testUpdateReadDeleteBackupStatus() throws IOException {
-    BackupInfo ctx = createBackupInfo();
-    table.updateBackupInfo(ctx);
-    BackupInfo readCtx = table.readBackupInfo(ctx.getBackupId());
-    assertTrue(compare(ctx, readCtx));
-    // try a fake backup id
-    readCtx = table.readBackupInfo("fake");
-    assertNull(readCtx);
-    // delete backup info
-    table.deleteBackupInfo(ctx.getBackupId());
-    readCtx = table.readBackupInfo(ctx.getBackupId());
-    assertNull(readCtx);
-    cleanBackupTable();
-  }
-
-  @Test
-  public void testWriteReadBackupStartCode() throws IOException {
-    Long code = 100L;
-    table.writeBackupStartCode(code, "root");
-    String readCode = table.readBackupStartCode("root");
-    assertEquals(code, Long.valueOf(readCode));
-    cleanBackupTable();
-  }
-
-  private void cleanBackupTable() throws IOException {
-    Admin admin = UTIL.getHBaseAdmin();
-    admin.disableTable(BackupSystemTable.getTableName(conf));
-    admin.truncateTable(BackupSystemTable.getTableName(conf), true);
-    if (admin.isTableDisabled(BackupSystemTable.getTableName(conf))) {
-      admin.enableTable(BackupSystemTable.getTableName(conf));
-    }
-  }
-
-  @Test
-  public void testBackupHistory() throws IOException {
-    int n = 10;
-    List<BackupInfo> list = createBackupInfoList(n);
-
-    // Load data
-    for (BackupInfo bc : list) {
-      // Make sure we set the right status
-      bc.setState(BackupState.COMPLETE);
-      table.updateBackupInfo(bc);
-    }
-
-    // Reverse the list for comparison: history is returned newest first
-    Collections.reverse(list);
-    List<BackupInfo> history = table.getBackupHistory();
-    assertTrue(history.size() == n);
-
-    for (int i = 0; i < n; i++) {
-      BackupInfo ctx = list.get(i);
-      BackupInfo data = history.get(i);
-      assertTrue(compare(ctx, data));
-    }
-
-    cleanBackupTable();
-  }
-
-  @Test
-  public void testBackupDelete() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      int n = 10;
-      List<BackupInfo> list = createBackupInfoList(n);
-
-      // Load data
-      for (BackupInfo bc : list) {
-        // Make sure we set the right status
-        bc.setState(BackupState.COMPLETE);
-        table.updateBackupInfo(bc);
-      }
-
-      // Verify they exist
-      for (BackupInfo bc : list) {
-        assertNotNull(table.readBackupInfo(bc.getBackupId()));
-      }
-
-      // Delete all
-      for (BackupInfo bc : list) {
-        table.deleteBackupInfo(bc.getBackupId());
-      }
-
-      // Verify they do not exist
-      for (BackupInfo bc : list) {
-        assertNull(table.readBackupInfo(bc.getBackupId()));
-      }
-
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testRegionServerLastLogRollResults() throws IOException {
-    String[] servers = new String[] { "server1", "server2", "server3" };
-    Long[] timestamps = new Long[] { 100L, 102L, 107L };
-
-    for (int i = 0; i < servers.length; i++) {
-      table.writeRegionServerLastLogRollResult(servers[i], timestamps[i], "root");
-    }
-
-    HashMap<String, Long> result = table.readRegionServerLastLogRollResult("root");
-    assertTrue(servers.length == result.size());
-    Set<String> keys = result.keySet();
-    String[] keysAsArray = new String[keys.size()];
-    keys.toArray(keysAsArray);
-    Arrays.sort(keysAsArray);
-
-    for (int i = 0; i < keysAsArray.length; i++) {
-      assertEquals(keysAsArray[i], servers[i]);
-      Long ts1 = timestamps[i];
-      Long ts2 = result.get(keysAsArray[i]);
-      assertEquals(ts1, ts2);
-    }
-
-    cleanBackupTable();
-  }
-
-  @Test
-  public void testIncrementalBackupTableSet() throws IOException {
-    TreeSet<TableName> tables1 = new TreeSet<>();
-
-    tables1.add(TableName.valueOf("t1"));
-    tables1.add(TableName.valueOf("t2"));
-    tables1.add(TableName.valueOf("t3"));
-
-    TreeSet<TableName> tables2 = new TreeSet<>();
-
-    tables2.add(TableName.valueOf("t3"));
-    tables2.add(TableName.valueOf("t4"));
-    tables2.add(TableName.valueOf("t5"));
-
-    table.addIncrementalBackupTableSet(tables1, "root");
-    TreeSet<TableName> res1 = (TreeSet<TableName>) table.getIncrementalBackupTableSet("root");
-    assertTrue(tables1.size() == res1.size());
-    Iterator<TableName> desc1 = tables1.descendingIterator();
-    Iterator<TableName> desc2 = res1.descendingIterator();
-    while (desc1.hasNext()) {
-      assertEquals(desc1.next(), desc2.next());
-    }
-
-    table.addIncrementalBackupTableSet(tables2, "root");
-    TreeSet<TableName> res2 = (TreeSet<TableName>) table.getIncrementalBackupTableSet("root");
-    assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
-
-    tables1.addAll(tables2);
-
-    desc1 = tables1.descendingIterator();
-    desc2 = res2.descendingIterator();
-
-    while (desc1.hasNext()) {
-      assertEquals(desc1.next(), desc2.next());
-    }
-    cleanBackupTable();
-  }
-
-  @Test
-  public void testRegionServerLogTimestampMap() throws IOException {
-    TreeSet<TableName> tables = new TreeSet<>();
-
-    tables.add(TableName.valueOf("t1"));
-    tables.add(TableName.valueOf("t2"));
-    tables.add(TableName.valueOf("t3"));
-
-    HashMap<String, Long> rsTimestampMap = new HashMap<>();
-
-    rsTimestampMap.put("rs1:100", 100L);
-    rsTimestampMap.put("rs2:100", 101L);
-    rsTimestampMap.put("rs3:100", 103L);
-
-    table.writeRegionServerLogTimestamp(tables, rsTimestampMap, "root");
-
-    HashMap<TableName, HashMap<String, Long>> result = table.readLogTimestampMap("root");
-
-    assertTrue(tables.size() == result.size());
-
-    for (TableName t : tables) {
-      HashMap<String, Long> rstm = result.get(t);
-      assertNotNull(rstm);
-      assertEquals(rstm.get("rs1:100"), Long.valueOf(100L));
-      assertEquals(rstm.get("rs2:100"), Long.valueOf(101L));
-      assertEquals(rstm.get("rs3:100"), Long.valueOf(103L));
-    }
-
-    Set<TableName> tables1 = new TreeSet<>();
-
-    tables1.add(TableName.valueOf("t3"));
-    tables1.add(TableName.valueOf("t4"));
-    tables1.add(TableName.valueOf("t5"));
-
-    HashMap<String, Long> rsTimestampMap1 = new HashMap<>();
-
-    rsTimestampMap1.put("rs1:100", 200L);
-    rsTimestampMap1.put("rs2:100", 201L);
-    rsTimestampMap1.put("rs3:100", 203L);
-
-    table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1, "root");
-
-    result = table.readLogTimestampMap("root");
-
-    assertTrue(5 == result.size());
-
-    for (TableName t : tables) {
-      HashMap<String, Long> rstm = result.get(t);
-      assertNotNull(rstm);
-      if (!t.equals(TableName.valueOf("t3"))) {
-        assertEquals(rstm.get("rs1:100"), Long.valueOf(100L));
-        assertEquals(rstm.get("rs2:100"), Long.valueOf(101L));
-        assertEquals(rstm.get("rs3:100"), Long.valueOf(103L));
-      } else {
-        assertEquals(rstm.get("rs1:100"), Long.valueOf(200L));
-        assertEquals(rstm.get("rs2:100"), Long.valueOf(201L));
-        assertEquals(rstm.get("rs3:100"), Long.valueOf(203L));
-      }
-    }
-
-    for (TableName t : tables1) {
-      HashMap<String, Long> rstm = result.get(t);
-      assertNotNull(rstm);
-      assertEquals(rstm.get("rs1:100"), Long.valueOf(200L));
-      assertEquals(rstm.get("rs2:100"), Long.valueOf(201L));
-      assertEquals(rstm.get("rs3:100"), Long.valueOf(203L));
-    }
-
-    cleanBackupTable();
-  }
-
-  @Test
-  public void testAddWALFiles() throws IOException {
-    List<String> files =
-        Arrays.asList("hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.1",
-          "hdfs://server/WALs/srv2,102,16666/srv2,102,16666.default.2",
-          "hdfs://server/WALs/srv3,103,17777/srv3,103,17777.default.3");
-    String newFile = "hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.5";
-
-    table.addWALFiles(files, "backup", "root");
-
-    assertTrue(table.isWALFileDeletable(files.get(0)));
-    assertTrue(table.isWALFileDeletable(files.get(1)));
-    assertTrue(table.isWALFileDeletable(files.get(2)));
-    assertFalse(table.isWALFileDeletable(newFile));
-
-    cleanBackupTable();
-  }
-
-  /**
-   * Backup set tests
-   */
-
-  @Test
-  public void testBackupSetAddNotExists() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3" };
-      String setName = "name";
-      table.addToBackupSet(setName, tables);
-      List<TableName> tnames = table.describeBackupSet(setName);
-      assertNotNull(tnames);
-      assertTrue(tnames.size() == tables.length);
-      for (int i = 0; i < tnames.size(); i++) {
-        assertTrue(tnames.get(i).getNameAsString().equals(tables[i]));
-      }
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testBackupSetAddExists() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3" };
-      String setName = "name";
-      table.addToBackupSet(setName, tables);
-      String[] addTables = new String[] { "table4", "table5", "table6" };
-      table.addToBackupSet(setName, addTables);
-
-      List<TableName> tnames = table.describeBackupSet(setName);
-      assertNotNull(tnames);
-      assertTrue(tnames.size() == tables.length + addTables.length);
-      for (int i = 0; i < tnames.size(); i++) {
-        assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1)));
-      }
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testBackupSetAddExistsIntersects() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3" };
-      String setName = "name";
-      table.addToBackupSet(setName, tables);
-      String[] addTables = new String[] { "table3", "table4", "table5", "table6" };
-      table.addToBackupSet(setName, addTables);
-
-      List<TableName> tnames = table.describeBackupSet(setName);
-      assertNotNull(tnames);
-      assertTrue(tnames.size() == tables.length + addTables.length - 1);
-      for (int i = 0; i < tnames.size(); i++) {
-        assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1)));
-      }
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testBackupSetRemoveSomeNotExists() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3", "table4" };
-      String setName = "name";
-      table.addToBackupSet(setName, tables);
-      String[] removeTables = new String[] { "table4", "table5", "table6" };
-      table.removeFromBackupSet(setName, removeTables);
-
-      List<TableName> tnames = table.describeBackupSet(setName);
-      assertNotNull(tnames);
-      assertTrue(tnames.size() == tables.length - 1);
-      for (int i = 0; i < tnames.size(); i++) {
-        assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1)));
-      }
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testBackupSetRemove() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3", "table4" };
-      String setName = "name";
-      table.addToBackupSet(setName, tables);
-      String[] removeTables = new String[] { "table4", "table3" };
-      table.removeFromBackupSet(setName, removeTables);
-
-      List<TableName> tnames = table.describeBackupSet(setName);
-      assertNotNull(tnames);
-      assertTrue(tnames.size() == tables.length - 2);
-      for (int i = 0; i < tnames.size(); i++) {
-        assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1)));
-      }
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testBackupSetDelete() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3", "table4" };
-      String setName = "name";
-      table.addToBackupSet(setName, tables);
-      table.deleteBackupSet(setName);
-
-      List<TableName> tnames = table.describeBackupSet(setName);
-      assertNull(tnames);
-      cleanBackupTable();
-    }
-  }
-
-  @Test
-  public void testBackupSetList() throws IOException {
-    try (BackupSystemTable table = new BackupSystemTable(conn)) {
-      String[] tables = new String[] { "table1", "table2", "table3", "table4" };
-      String setName1 = "name1";
-      String setName2 = "name2";
-      table.addToBackupSet(setName1, tables);
-      table.addToBackupSet(setName2, tables);
-
-      List<String> list = table.listBackupSets();
-
-      assertTrue(list.size() == 2);
-      assertTrue(list.get(0).equals(setName1));
-      assertTrue(list.get(1).equals(setName2));
-
-      cleanBackupTable();
-    }
-  }
-
-  private boolean compare(BackupInfo one, BackupInfo two) {
-    return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
-        && one.getBackupRootDir().equals(two.getBackupRootDir())
-        && one.getStartTs() == two.getStartTs() && one.getCompleteTs() == two.getCompleteTs();
-  }
-
-  private BackupInfo createBackupInfo() {
-    BackupInfo ctxt =
-        new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] {
-            TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") },
-          "/hbase/backup");
-    ctxt.setStartTs(System.currentTimeMillis());
-    ctxt.setCompleteTs(System.currentTimeMillis() + 1);
-    return ctxt;
-  }
-
-  private List<BackupInfo> createBackupInfoList(int size) {
-    List<BackupInfo> list = new ArrayList<BackupInfo>();
-    for (int i = 0; i < size; i++) {
-      list.add(createBackupInfo());
-      try {
-        Thread.sleep(10);
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-      }
-    }
-    return list;
-  }
-
-  @AfterClass
-  public static void tearDown() throws IOException {
-    if (cluster != null) cluster.shutdown();
-  }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
deleted file mode 100644
index 124d19f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestFullBackup extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestFullBackup.class);
-
-  @Test
-  public void testFullBackupMultipleCommand() throws Exception {
-    LOG.info("test full backup on multiple tables with data: command-line");
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      int before = table.getBackupHistory().size();
-      String[] args =
-          new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
-              table1.getNameAsString() + "," + table2.getNameAsString() };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertTrue(ret == 0);
-      List<BackupInfo> backups = table.getBackupHistory();
-      int after = table.getBackupHistory().size();
-      assertTrue(after == before + 1);
-      for (BackupInfo data : backups) {
-        String backupId = data.getBackupId();
-        assertTrue(checkSucceeded(backupId));
-      }
-    }
-    LOG.info("backup complete");
-  }
-
-}
\ No newline at end of file
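The test drives a full backup through BackupDriver, the Tool behind the backup command line; a zero exit code is the only success signal it checks before consulting history. The invocation, lifted from the test body:

    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
        table1.getNameAsString() + "," + table2.getNameAsString() };
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertTrue(ret == 0);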
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
deleted file mode 100644
index 4dc894b..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestFullBackupSet extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestFullBackupSet.class);
-
-  /**
-   * Verify that a full backup of an existing backup set completes correctly.
-   * @throws Exception
-   */
-  @Test
-  public void testFullBackupSetExist() throws Exception {
-    LOG.info("Test full backup, backup set exists");
-
-    // Create set
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      String name = "name";
-      table.addToBackupSet(name, new String[] { table1.getNameAsString() });
-      List<TableName> names = table.describeBackupSet(name);
-
-      assertNotNull(names);
-      assertTrue(names.size() == 1);
-      assertTrue(names.get(0).equals(table1));
-
-      String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertTrue(ret == 0);
-      List<BackupInfo> backups = table.getBackupHistory();
-      assertTrue(backups.size() == 1);
-      String backupId = backups.get(0).getBackupId();
-      assertTrue(checkSucceeded(backupId));
-
-      LOG.info("backup complete");
-
-      // Restore from set into other table
-      args =
-          new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
-              table1_restore.getNameAsString(), "-o" };
-      // Run restore
-      ret = ToolRunner.run(conf1, new RestoreDriver(), args);
-      assertTrue(ret == 0);
-      HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-      assertTrue(hba.tableExists(table1_restore));
-      // Verify number of rows in both tables
-      assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
-      TEST_UTIL.deleteTable(table1_restore);
-      LOG.info("restore into other table is complete");
-      hba.close();
-    }
-  }
-
-  @Test
-  public void testFullBackupSetDoesNotExist() throws Exception {
-    LOG.info("test full backup, backup set does not exist");
-    String name = "name1";
-    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
-    // Run backup
-    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-    assertTrue(ret != 0);
-  }
-
-}
\ No newline at end of file
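Backup sets let a stored, named table list replace an explicit "-t" argument. A minimal sketch grounded in the calls above (the set name "nightly" is illustrative):

    try (BackupSystemTable meta = new BackupSystemTable(TEST_UTIL.getConnection())) {
      meta.addToBackupSet("nightly", new String[] { table1.getNameAsString() });
    }
    // "-s <set>" stands in for "-t <tables>" on the command line
    int ret = ToolRunner.run(conf1, new BackupDriver(),
        new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", "nightly" });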
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
deleted file mode 100644
index 6b007f9..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestFullBackupSetRestoreSet extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestFullBackupSetRestoreSet.class);
-
-  @Test
-  public void testFullRestoreSetToOtherTable() throws Exception {
-    LOG.info("Test full restore set");
-
-    // Create set
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      String name = "name";
-      table.addToBackupSet(name, new String[] { table1.getNameAsString() });
-      List<TableName> names = table.describeBackupSet(name);
-
-      assertNotNull(names);
-      assertTrue(names.size() == 1);
-      assertTrue(names.get(0).equals(table1));
-
-      String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertTrue(ret == 0);
-      List<BackupInfo> backups = table.getBackupHistory();
-      assertTrue(backups.size() == 1);
-      String backupId = backups.get(0).getBackupId();
-      assertTrue(checkSucceeded(backupId));
-
-      LOG.info("backup complete");
-
-      // Restore from set into other table
-      args =
-          new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
-              table1_restore.getNameAsString(), "-o" };
-      // Run restore
-      ret = ToolRunner.run(conf1, new RestoreDriver(), args);
-      assertTrue(ret == 0);
-      HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-      assertTrue(hba.tableExists(table1_restore));
-      // Verify number of rows in both tables
-      assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
-      TEST_UTIL.deleteTable(table1_restore);
-      LOG.info("restore into other table is complete");
-      hba.close();
-    }
-  }
-
-  @Test
-  public void testFullRestoreSetToSameTable() throws Exception {
-    LOG.info("Test full restore set to same table");
-
-    // Create set
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      String name = "name1";
-      table.addToBackupSet(name, new String[] { table1.getNameAsString() });
-      List<TableName> names = table.describeBackupSet(name);
-
-      assertNotNull(names);
-      assertTrue(names.size() == 1);
-      assertTrue(names.get(0).equals(table1));
-
-      String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertTrue(ret == 0);
-      List<BackupInfo> backups = table.getBackupHistory();
-      String backupId = backups.get(0).getBackupId();
-      assertTrue(checkSucceeded(backupId));
-
-      LOG.info("backup complete");
-      int count = TEST_UTIL.countRows(table1);
-      TEST_UTIL.deleteTable(table1);
-
-      // Restore from set into same table
-      args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-o" };
-      // Run restore
-      ret = ToolRunner.run(conf1, new RestoreDriver(), args);
-      assertTrue(ret == 0);
-      HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-      assertTrue(hba.tableExists(table1));
-      // Verify number of rows in both tables
-      assertEquals(count, TEST_UTIL.countRows(table1));
-      LOG.info("restore into same table is complete");
-      hba.close();
-    }
-  }
-
-}
\ No newline at end of file
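On the restore side the same set name selects the tables inside the image: "-s" names the set, "-m" optionally remaps them to different target tables, and "-o" permits overwriting tables that already exist. A sketch of the remapping invocation, reusing the fixtures above:

    String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", "name",
        "-m", table1_restore.getNameAsString(), "-o" };
    int ret = ToolRunner.run(conf1, new RestoreDriver(), args);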
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java
deleted file mode 100644
index d18de88..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestFullBackupWithFailures extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestFullBackupWithFailures.class);
-
-  @Test
-  public void testFullBackupWithFailures() throws Exception {
-    conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
-      FullTableBackupClientForTest.class.getName());
-    int maxStage = Stage.values().length - 1;
-    // Fail stages between 0 and 4 inclusive
-    for (int stage = 0; stage <= maxStage; stage++) {
-      LOG.info("Running stage " + stage);
-      runBackupAndFailAtStage(stage);
-    }
-  }
-
-  public void runBackupAndFailAtStage(int stage) throws Exception {
-    conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage);
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      int before = table.getBackupHistory().size();
-      String[] args =
-          new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
-              table1.getNameAsString() + "," + table2.getNameAsString() };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertFalse(ret == 0);
-      List<BackupInfo> backups = table.getBackupHistory();
-      int after = table.getBackupHistory().size();
-
-      assertTrue(after == before + 1);
-      for (BackupInfo data : backups) {
-        String backupId = data.getBackupId();
-        assertFalse(checkSucceeded(backupId));
-      }
-      Set<TableName> tables = table.getIncrementalBackupTableSet(BACKUP_ROOT_DIR);
-      assertTrue(tables.size() == 0);
-    }
-  }
-
-}
\ No newline at end of file
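Failure injection in this test is entirely configuration-driven: a TableBackupClient subclass is swapped in and told at which stage to abort, so every stage of the full-backup state machine gets a rollback check. The two knobs, as used above (the stage value 2 is illustrative):

    conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
        FullTableBackupClientForTest.class.getName());
    // Abort at a chosen stage in 0 .. Stage.values().length - 1
    conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, 2);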
- */ - -package org.apache.hadoop.hbase.backup; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.List; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.util.ToolRunner; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; - -@Category(LargeTests.class) -public class TestFullRestore extends TestBackupBase { - - private static final Log LOG = LogFactory.getLog(TestFullRestore.class); - - /** - * Verify that a single table is restored to a new table - * @throws Exception - */ - @Test - public void testFullRestoreSingle() throws Exception { - - LOG.info("test full restore on a single table empty table"); - - List<TableName> tables = Lists.newArrayList(table1); - String backupId = fullTableBackup(tables); - assertTrue(checkSucceeded(backupId)); - - LOG.info("backup complete"); - - TableName[] tableset = new TableName[] { table1 }; - TableName[] tablemap = new TableName[] { table1_restore }; - BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); - assertTrue(hba.tableExists(table1_restore)); - TEST_UTIL.deleteTable(table1_restore); - hba.close(); - } - - - - @Test - public void testFullRestoreSingleCommand() throws Exception { - - LOG.info("test full restore on a single table empty table: command-line"); - - List<TableName> tables = Lists.newArrayList(table1); - String backupId = fullTableBackup(tables); - LOG.info("backup complete"); - assertTrue(checkSucceeded(backupId)); - // restore <backup_root_path> <backup_id> <tables> [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", - table1_restore.getNameAsString() }; - // Run backup - int ret = ToolRunner.run(conf1, new RestoreDriver(), args); - - assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); - assertTrue(hba.tableExists(table1_restore)); - TEST_UTIL.deleteTable(table1_restore); - hba.close(); - } - - @Test - public void testFullRestoreCheckCommand() throws Exception { - - LOG.info("test full restore on a single table: command-line, check only"); - - List<TableName> tables = Lists.newArrayList(table1); - String backupId = fullTableBackup(tables); - LOG.info("backup complete"); - assertTrue(checkSucceeded(backupId)); - // restore <backup_root_path> <backup_id> <tables> [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", - table1_restore.getNameAsString(), "-c" }; - // Run backup - int ret = ToolRunner.run(conf1, new RestoreDriver(), args); - assertTrue(ret == 0); - //Verify that table has not been restored - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); - assertFalse(hba.tableExists(table1_restore)); - } - - /** - * Verify that multiple tables are restored to new tables. 
- * @throws Exception - */ - @Test - public void testFullRestoreMultiple() throws Exception { - LOG.info("create full backup image on multiple tables"); - List<TableName> tables = Lists.newArrayList(table2, table3); - String backupId = fullTableBackup(tables); - assertTrue(checkSucceeded(backupId)); - - TableName[] restore_tableset = new TableName[] { table2, table3 }; - TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - restore_tableset, tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); - assertTrue(hba.tableExists(table2_restore)); - assertTrue(hba.tableExists(table3_restore)); - TEST_UTIL.deleteTable(table2_restore); - TEST_UTIL.deleteTable(table3_restore); - hba.close(); - } - - /** - * Verify that multiple tables are restored to new tables. - * @throws Exception - */ - @Test - public void testFullRestoreMultipleCommand() throws Exception { - LOG.info("create full backup image on multiple tables: command-line"); - List<TableName> tables = Lists.newArrayList(table2, table3); - String backupId = fullTableBackup(tables); - assertTrue(checkSucceeded(backupId)); - - TableName[] restore_tableset = new TableName[] { table2, table3 }; - TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - - // restore <backup_root_path> <backup_id> <tables> [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(restore_tableset, ","), - "-m", StringUtils.join(tablemap, ",") }; - // Run backup - int ret = ToolRunner.run(conf1, new RestoreDriver(), args); - - assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); - assertTrue(hba.tableExists(table2_restore)); - assertTrue(hba.tableExists(table3_restore)); - TEST_UTIL.deleteTable(table2_restore); - TEST_UTIL.deleteTable(table3_restore); - hba.close(); - } - - /** - * Verify that a single table is restored using overwrite - * @throws Exception - */ - @Test - public void testFullRestoreSingleOverwrite() throws Exception { - - LOG.info("test full restore on a single table empty table"); - List<TableName> tables = Lists.newArrayList(table1); - String backupId = fullTableBackup(tables); - assertTrue(checkSucceeded(backupId)); - - LOG.info("backup complete"); - - TableName[] tableset = new TableName[] { table1 }; - BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, null, true)); - } - - /** - * Verify that a single table is restored using overwrite - * @throws Exception - */ - @Test - public void testFullRestoreSingleOverwriteCommand() throws Exception { - - LOG.info("test full restore on a single table empty table: command-line"); - List<TableName> tables = Lists.newArrayList(table1); - String backupId = fullTableBackup(tables); - assertTrue(checkSucceeded(backupId)); - LOG.info("backup complete"); - TableName[] tableset = new TableName[] { table1 }; - // restore <backup_root_path> <backup_id> <tables> [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(tableset, ","), "-o" }; - // Run restore - int ret = ToolRunner.run(conf1, new RestoreDriver(), args); - assertTrue(ret == 0); - - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); - assertTrue(hba.tableExists(table1)); - hba.close(); - - } - - /** - * Verify that multiple tables are restored to new tables using overwrite. 
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreMultipleOverwrite() throws Exception {
-    LOG.info("create full backup image on multiple tables");
-
-    List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
-    assertTrue(checkSucceeded(backupId));
-
-    TableName[] restore_tableset = new TableName[] { table2, table3 };
-    BackupAdmin client = getBackupAdmin();
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
-      restore_tableset, null, true));
-  }
-
-  /**
-   * Verify that multiple tables are restored in place using the overwrite option:
-   * command-line.
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreMultipleOverwriteCommand() throws Exception {
-    LOG.info("create full backup image on multiple tables: command-line");
-
-    List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
-    assertTrue(checkSucceeded(backupId));
-
-    TableName[] restore_tableset = new TableName[] { table2, table3 };
-    // restore <backup_root_path> <backup_id> <tables> [tableMapping]
-    String[] args =
-        new String[] { BACKUP_ROOT_DIR, backupId, "-t",
-            StringUtils.join(restore_tableset, ","), "-o" };
-    // Run restore
-    int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
-    assertTrue(ret == 0);
-
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hba.tableExists(table2));
-    assertTrue(hba.tableExists(table3));
-    hba.close();
-  }
-
-  /**
-   * Verify that restore fails on a single table that does not exist.
-   * @throws Exception
-   */
-  @Test(expected = IOException.class)
-  public void testFullRestoreSingleDNE() throws Exception {
-    LOG.info("test restore fails on a single table that does not exist");
-    List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
-    assertTrue(checkSucceeded(backupId));
-    LOG.info("backup complete");
-
-    TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
-    TableName[] tablemap = new TableName[] { table1_restore };
-    BackupAdmin client = getBackupAdmin();
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
-      tableset, tablemap, false));
-  }
-
-  /**
-   * Verify that restore fails on a single table that does not exist.
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreSingleDNECommand() throws Exception {
-    LOG.info("test restore fails on a single table that does not exist: command-line");
-    List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
-    assertTrue(checkSucceeded(backupId));
-    LOG.info("backup complete");
-
-    TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
-    TableName[] tablemap = new TableName[] { table1_restore };
-    String[] args =
-        new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(tableset, ","),
-            "-m", StringUtils.join(tablemap, ",") };
-    // Run restore; a nonexistent source table must make the driver fail
-    int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
-    assertTrue(ret != 0);
-  }
-
-  /**
-   * Verify that restore fails on multiple tables that do not exist.
-   * @throws Exception
-   */
-  @Test(expected = IOException.class)
-  public void testFullRestoreMultipleDNE() throws Exception {
-    LOG.info("test restore fails on multiple tables that do not exist");
-
-    List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
-    assertTrue(checkSucceeded(backupId));
-
-    TableName[] restore_tableset =
-        new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") };
-    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    BackupAdmin client = getBackupAdmin();
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
-      restore_tableset, tablemap, false));
-  }
-
-  /**
-   * Verify that restore fails on multiple tables that do not exist.
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreMultipleDNECommand() throws Exception {
-    LOG.info("test restore fails on multiple tables that do not exist: command-line");
-
-    List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
-    assertTrue(checkSucceeded(backupId));
-
-    TableName[] restore_tableset =
-        new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") };
-    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    String[] args =
-        new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(restore_tableset, ","),
-            "-m", StringUtils.join(tablemap, ",") };
-    // Run restore; nonexistent source tables must make the driver fail
-    int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
-    assertTrue(ret != 0);
-  }
-}
\ No newline at end of file
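Both restore entry points exercised by the deleted TestFullRestore above reduce to the same request. As a reference, here is a minimal sketch of the two paths (illustrative only: conf, backupRoot, backupId, from and to are placeholders, and the argument order of createRestoreRequest — root dir, backup id, check flag, source tables, target tables, overwrite flag — is taken from the calls above):

    // Programmatic path, as used by the non-command-line tests above.
    Connection conn = ConnectionFactory.createConnection(conf);
    BackupAdmin client = new BackupAdminImpl(conn);
    // check=false performs the restore; the overwrite tests instead pass a
    // null mapping and overwrite=true to restore onto the original tables.
    client.restore(BackupUtils.createRestoreRequest(backupRoot, backupId,
        false, new TableName[] { from }, new TableName[] { to }, false));

    // Command-line path, driven through ToolRunner:
    //   restore <backup_root_path> <backup_id> -t <tables> [-m <mappings>] [-o] [-c]
    // where -o overwrites existing tables and -c only checks the image
    // without restoring anything, as testFullRestoreCheckCommand asserts.
    String[] args = { backupRoot, backupId, "-t", from.getNameAsString(),
        "-m", to.getNameAsString() };
    int ret = ToolRunner.run(conf, new RestoreDriver(), args); // 0 on success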
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
deleted file mode 100644
index 73598f3..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-@RunWith(Parameterized.class)
-public class TestIncrementalBackup extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    provider = "multiwal";
-    List<Object[]> params = new ArrayList<Object[]>();
-    params.add(new Object[] { Boolean.TRUE });
-    return params;
-  }
-
-  public TestIncrementalBackup(Boolean b) {
-  }
-
-  // implement all test cases in one test, since incremental backup/restore has dependencies
-  @Test
-  public void TestIncBackupRestore() throws Exception {
-    int ADD_ROWS = 99;
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tables = Lists.newArrayList(table1, table2);
-    final byte[] fam3Name = Bytes.toBytes("f3");
-    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
-    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
-
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    int NB_ROWS_FAM3 = 6;
-    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
-
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data into table1 and table2
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
-    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
-    t1.close();
-    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
-
-    HTable t2 = (HTable) conn.getTable(table2);
-    Put p2;
-    for (int i = 0; i < 5; i++) {
-      p2 = new Put(Bytes.toBytes("row-t2" + i));
-      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t2.put(p2);
-    }
-    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
-    t2.close();
-    LOG.debug("written 5 rows to " + table2);
-
-    // split table1
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    List<HRegion> regions = cluster.getRegions(table1);
-    byte[] name = regions.get(0).getRegionInfo().getRegionName();
-    long startSplitTime = EnvironmentEdgeManager.currentTime();
-    try {
-      admin.splitRegion(name);
-    } catch (IOException e) {
-      // Even if the split fails, the following checks may still pass.
-      // The old split code (pre-AM2) threw no exception when a region's best
-      // split key could not be found; the current API does throw one.
-      LOG.debug("region is not splittable, because " + e);
-    }
-
-    while (!admin.isTableAvailable(table1)) {
-      Thread.sleep(100);
-    }
-    long endSplitTime = EnvironmentEdgeManager.currentTime();
-    // split finished
-    LOG.debug("split finished in " + (endSplitTime - startSplitTime) + " ms");
-
-    // #3 - incremental backup for multiple tables
-    tables = Lists.newArrayList(table1, table2);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple));
-
-    // add column family f2 to table1
-    final byte[] fam2Name = Bytes.toBytes("f2");
-    table1Desc.addFamily(new HColumnDescriptor(fam2Name));
-    // drop column family f3
-    table1Desc.removeFamily(fam3Name);
-    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
-
-    int NB_ROWS_FAM2 = 7;
-    HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
-    t3.close();
-
-    // #3.1 - second incremental backup for the same tables, after the schema change
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple2));
-
-    // #4 - restore full backup for all tables, without overwrite
-    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
-    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
-    LOG.debug("Restoring full " + backupIdFull);
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
-      tablesRestoreFull, tablesMapFull, false));
-
-    // #5.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hAdmin.tableExists(table1_restore));
-    assertTrue(hAdmin.tableExists(table2_restore));
-    hAdmin.close();
-
-    // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
-    hTable.close();
-
-    // #6 - restore incremental backup for multiple tables, with overwrite
-    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
-    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
-      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
-
-    hTable = (HTable) conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getDescriptor());
-    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS);
-    LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 5);
-    hTable.close();
-
-    admin.close();
-    conn.close();
-  }
-}
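The deleted TestIncrementalBackup above walks one full-then-incremental cycle. Condensed to its skeleton (a sketch using the helpers visible in the test: createBackupRequest is a TestBackupBase utility, and conn, tables, fromTables, toTables stand in for the fixtures):

    // 1. A full backup establishes the baseline image.
    BackupAdminImpl client = new BackupAdminImpl(conn);
    String fullId = client.backupTables(
        createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR));

    // 2. After mutations (puts, a region split, adding/dropping column
    //    families), an incremental backup captures the delta since the
    //    last image.
    String incId = client.backupTables(
        createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));

    // 3. Restoring the incremental id brings the mapped tables to the state
    //    of that image, which is what the per-family row counts assert;
    //    overwrite=true because the mapped tables already exist after the
    //    earlier full restore.
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incId,
        false, fromTables, toTables, true));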
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
deleted file mode 100644
index 747c1dd..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-/**
- * 1. Create tables t1 and t2
- * 2. Load data to t1 and t2
- * 3. Full backup t1 and t2
- * 4. Delete t2
- * 5. Load data to t1
- * 6. Incremental backup t1
- */
-@Category(LargeTests.class)
-public class TestIncrementalBackupDeleteTable extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupDeleteTable.class);
-
-  // implement all test cases in one test, since incremental backup/restore has dependencies
-  @Test
-  public void testIncBackupDeleteTable() throws Exception {
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tables = Lists.newArrayList(table1, table2);
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data into table1
-    HTable t1 = (HTable) conn.getTable(table1);
-    Put p1;
-    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-      p1 = new Put(Bytes.toBytes("row-t1" + i));
-      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t1.put(p1);
-    }
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
-    t1.close();
-
-    // delete table2
-    admin.disableTable(table2);
-    admin.deleteTable(table2);
-
-    // #3 - incremental backup for table1
-    tables = Lists.newArrayList(table1);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdInc));
-
-    // #4 - restore full backup for all tables, without overwrite
-    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
-    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
-      tablesRestoreFull, tablesMapFull, false));
-
-    // #5.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hAdmin.tableExists(table1_restore));
-    assertTrue(hAdmin.tableExists(table2_restore));
-    hAdmin.close();
-
-    // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
-    hTable.close();
-
-    // #6 - restore incremental backup for table1
-    TableName[] tablesRestoreInc = new TableName[] { table1 };
-    TableName[] tablesMapInc = new TableName[] { table1_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdInc,
-      false, tablesRestoreInc, tablesMapInc, true));
-
-    hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2);
-    hTable.close();
-    admin.close();
-    conn.close();
-  }
-}
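The point of the delete-table scenario above is that the full image still contains table2 after the live table is dropped, so a full restore can resurrect it, while the later incremental image covers table1 only. An outline with the same fixtures (not runnable standalone; client and admin are set up as in the test):

    // Full backup of both tables, then drop t2 from the live cluster.
    String fullId = client.backupTables(
        createBackupRequest(BackupType.FULL, Lists.newArrayList(table1, table2), BACKUP_ROOT_DIR));
    admin.disableTable(table2);
    admin.deleteTable(table2);

    // The incremental backup now covers table1 only.
    String incId = client.backupTables(
        createBackupRequest(BackupType.INCREMENTAL, Lists.newArrayList(table1), BACKUP_ROOT_DIR));

    // A full restore recreates both tables from the full image; the
    // incremental restore then advances table1 alone (overwrite=true).
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullId, false,
        new TableName[] { table1, table2 },
        new TableName[] { table1_restore, table2_restore }, false));
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incId, false,
        new TableName[] { table1 }, new TableName[] { table1_restore }, true));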