http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
deleted file mode 100644
index 7011ed3..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupMergeWithFailures.class);
-
-  static enum FailurePhase {
-    PHASE1, PHASE2, PHASE3, PHASE4
-  }
-  public final static String FAILURE_PHASE_KEY = "failurePhase";
-
-  static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
-
-    FailurePhase failurePhase;
-
-    @Override
-    public void setConf(Configuration conf) {
-      super.setConf(conf);
-      String val = conf.get(FAILURE_PHASE_KEY);
-      if (val != null) {
-        failurePhase = FailurePhase.valueOf(val);
-      } else {
-        Assert.fail("Failure phase is not set");
-      }
-    }
-
-
-    /**
-     * This is the exact copy of parent's run() with injections
-     * of different types of failures
-     */
-    @Override
-    public void run(String[] backupIds) throws IOException {
-      String bulkOutputConfKey;
-
-      // TODO : run player on remote cluster
-      player = new MapReduceHFileSplitterJob();
-      bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
-      // Player reads all files in arbitrary directory structure and creates
-      // a Map task for each file
-      String bids = StringUtils.join(backupIds, ",");
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Merge backup images " + bids);
-      }
-
-      List<Pair<TableName, Path>> processedTableList = new ArrayList<Pair<TableName, Path>>();
-      boolean finishedTables = false;
-      Connection conn = ConnectionFactory.createConnection(getConf());
-      BackupSystemTable table = new BackupSystemTable(conn);
-      FileSystem fs = FileSystem.get(getConf());
-
-      try {
-
-        // Start backup exclusive operation
-        table.startBackupExclusiveOperation();
-        // Start merge operation
-        table.startMergeOperation(backupIds);
-
-        // Select most recent backup id
-        String mergedBackupId = findMostRecentBackupId(backupIds);
-
-        TableName[] tableNames = getTableNamesInBackupImages(backupIds);
-        String backupRoot = null;
-
-        BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
-        backupRoot = bInfo.getBackupRootDir();
-        // PHASE 1
-        checkFailure(FailurePhase.PHASE1);
-
-        for (int i = 0; i < tableNames.length; i++) {
-
-          LOG.info("Merge backup images for " + tableNames[i]);
-
-          // Find input directories for table
-
-          Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
-          String dirs = StringUtils.join(dirPaths, ",");
-          Path bulkOutputPath =
-              BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]),
-                getConf(), false);
-          // Delete content if exists
-          if (fs.exists(bulkOutputPath)) {
-            if (!fs.delete(bulkOutputPath, true)) {
-              LOG.warn("Can not delete: " + bulkOutputPath);
-            }
-          }
-          Configuration conf = getConf();
-          conf.set(bulkOutputConfKey, bulkOutputPath.toString());
-          String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
-
-          int result = 0;
-          // PHASE 2
-          checkFailure(FailurePhase.PHASE2);
-          player.setConf(getConf());
-          result = player.run(playerArgs);
-          if (succeeded(result)) {
-            // Add to processed table list
-            processedTableList.add(new Pair<TableName, Path>(tableNames[i], bulkOutputPath));
-          } else {
-            throw new IOException("Can not merge backup images for " + dirs
-                + " (check Hadoop/MR and HBase logs). Player return code =" + result);
-          }
-          LOG.debug("Merge Job finished:" + result);
-        }
-        List<TableName> tableList = toTableNameList(processedTableList);
-        // PHASE 3
-        checkFailure(FailurePhase.PHASE3);
-        table.updateProcessedTablesForMerge(tableList);
-        finishedTables = true;
-
-        // Move data
-        for (Pair<TableName, Path> tn : processedTableList) {
-          moveData(fs, backupRoot, tn.getSecond(), tn.getFirst(), mergedBackupId);
-        }
-        // PHASE 4
-        checkFailure(FailurePhase.PHASE4);
-        // Delete old data and update manifest
-        List<String> backupsToDelete = getBackupIdsToDelete(backupIds, mergedBackupId);
-        deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
-        updateBackupManifest(backupRoot, mergedBackupId, backupsToDelete);
-        // Finish merge session
-        table.finishMergeOperation();
-      } catch (RuntimeException e) {
-        throw e;
-      } catch (Exception e) {
-        LOG.error(e);
-        if (!finishedTables) {
-          // cleanup bulk directories and finish merge
-          // merge MUST be repeated (no need for repair)
-          cleanupBulkLoadDirs(fs, toPathList(processedTableList));
-          table.finishMergeOperation();
-          table.finishBackupExclusiveOperation();
-          throw new IOException("Backup merge operation failed, you should try it again", e);
-        } else {
-          // backup repair must be run
-          throw new IOException(
-              "Backup merge operation failed, run backup repair tool to restore system's integrity",
-              e);
-        }
-      } finally {
-        table.close();
-        conn.close();
-      }
-
-    }
-
-    private void checkFailure(FailurePhase phase) throws IOException {
-      if ( failurePhase != null && failurePhase == phase) {
-        throw new IOException (phase.toString());
-      }
-    }
-
-  }
-
-
-  @Test
-  public void TestIncBackupMergeRestore() throws Exception {
-
-    int ADD_ROWS = 99;
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tables = Lists.newArrayList(table1, table2);
-    // Set custom Merge Job implementation
-    conf1.setClass(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS,
-      BackupMergeJobWithFailures.class, BackupMergeJob.class);
-
-    Connection conn = ConnectionFactory.createConnection(conf1);
-
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data to table1
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
-    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
-
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS);
-    t1.close();
-    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
-
-    HTable t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
-
-    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS);
-    t2.close();
-    LOG.debug("written " + ADD_ROWS + " rows to " + table2);
-
-    // #3 - incremental backup for multiple tables
-    tables = Lists.newArrayList(table1, table2);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
-
-    assertTrue(checkSucceeded(backupIdIncMultiple));
-
-    t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
-    t1.close();
-
-    t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
-    t2.close();
-
-    // #3 - incremental backup for multiple tables
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple2));
-
-    // #4 Merge backup images with failures
-
-    for ( FailurePhase phase : FailurePhase.values()) {
-      Configuration conf = conn.getConfiguration();
-
-      conf.set(FAILURE_PHASE_KEY, phase.toString());
-
-      try (BackupAdmin bAdmin = new BackupAdminImpl(conn);)
-      {
-        String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
-        bAdmin.mergeBackups(backups);
-        Assert.fail("Expected IOException");
-      } catch (IOException e) {
-        BackupSystemTable table = new BackupSystemTable(conn);
-        if(phase.ordinal() < FailurePhase.PHASE4.ordinal()) {
-          // No need to repair:
-          // Both Merge and backup exclusive operations are finished
-          assertFalse(table.isMergeInProgress());
-          try {
-            table.finishBackupExclusiveOperation();
-            Assert.fail("IOException is expected");
-          } catch(IOException ee) {
-            // Expected
-          }
-        } else {
-          // Repair is required
-          assertTrue(table.isMergeInProgress());
-          try {
-            table.startBackupExclusiveOperation();
-            Assert.fail("IOException is expected");
-          } catch(IOException ee) {
-            // Expected - clean up before proceeding
-            table.finishMergeOperation();
-            table.finishBackupExclusiveOperation();
-          }
-        }
-        table.close();
-        LOG.debug("Expected :"+ e.getMessage());
-      }
-    }
-
-    // Now merge w/o failures
-    Configuration conf = conn.getConfiguration();
-    conf.unset(FAILURE_PHASE_KEY);
-    conf.unset(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS);
-
-    try (BackupAdmin bAdmin = new BackupAdminImpl(conn);) {
-      String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
-      bAdmin.mergeBackups(backups);
-    }
-
-    // #6 - restore incremental backup for multiple tables, with overwrite
-    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
-    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
-      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
-
-    Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
-    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
-
-    hTable.close();
-
-    hTable = conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
-    hTable.close();
-
-    admin.close();
-    conn.close();
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
deleted file mode 100644
index 769785f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFiles;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-/**
- * 1. Create table t1
- * 2. Load data to t1
- * 3 Full backup t1
- * 4 Load data to t1
- * 5 bulk load into t1
- * 6 Incremental backup t1
- */
-@Category(LargeTests.class)
-@RunWith(Parameterized.class)
-public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupDeleteTable.class);
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    secure = true;
-    List<Object[]> params = new ArrayList<Object[]>();
-    params.add(new Object[] {Boolean.TRUE});
-    return params;
-  }
-
-  public TestIncrementalBackupWithBulkLoad(Boolean b) {
-  }
-  // implement all test cases in 1 test since incremental backup/restore has dependencies
-  @Test
-  public void TestIncBackupDeleteTable() throws Exception {
-    String testName = "TestIncBackupDeleteTable";
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tables = Lists.newArrayList(table1);
-    HBaseAdmin admin = null;
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data to table table1
-    HTable t1 = (HTable) conn.getTable(table1);
-    Put p1;
-    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-      p1 = new Put(Bytes.toBytes("row-t1" + i));
-      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t1.put(p1);
-    }
-
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
-    t1.close();
-
-    int NB_ROWS2 = 20;
-    LOG.debug("bulk loading into " + testName);
-    int actual = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
-        qualName, false, null, new byte[][][] {
-      new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
-      new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
-    }, true, false, true, NB_ROWS_IN_BATCH*2, NB_ROWS2);
-
-    // #3 - incremental backup for table1
-    tables = Lists.newArrayList(table1);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple));
-
-    // #5.1 - check tables for full restore */
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
-
-    // #6 - restore incremental backup for table1
-    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
-    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple,
-      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
-
-    HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2+actual);
-    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-
-    backupIdFull = client.backupTables(request);
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair
-      = table.readBulkloadRows(tables);
-      assertTrue("map still has " + pair.getSecond().size() + " entries",
-          pair.getSecond().isEmpty());
-    }
-    assertTrue(checkSucceeded(backupIdFull));
-
-    hTable.close();
-    admin.close();
-    conn.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
deleted file mode 100644
index 84a596e..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-@RunWith(Parameterized.class)
-public class TestIncrementalBackupWithFailures extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupWithFailures.class);
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    provider = "multiwal";
-    List<Object[]> params = new ArrayList<Object[]>();
-    params.add(new Object[] { Boolean.TRUE });
-    return params;
-  }
-
-  public TestIncrementalBackupWithFailures(Boolean b) {
-  }
-
-  // implement all test cases in 1 test since incremental backup/restore has dependencies
-  @Test
-  public void testIncBackupRestore() throws Exception {
-
-    int ADD_ROWS = 99;
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tables = Lists.newArrayList(table1, table2);
-    final byte[] fam3Name = Bytes.toBytes("f3");
-    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
-    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
-
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    int NB_ROWS_FAM3 = 6;
-    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
-
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data to table
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
-    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
-
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
-    t1.close();
-    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
-
-    HTable t2 = (HTable) conn.getTable(table2);
-    Put p2;
-    for (int i = 0; i < 5; i++) {
-      p2 = new Put(Bytes.toBytes("row-t2" + i));
-      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t2.put(p2);
-    }
-
-    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
-    t2.close();
-    LOG.debug("written " + 5 + " rows to " + table2);
-
-    // #3 - incremental backup for multiple tables
-    incrementalBackupWithFailures();
-
-    admin.close();
-    conn.close();
-
-  }
-
-
-  private void incrementalBackupWithFailures() throws Exception {
-    conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
-      IncrementalTableBackupClientForTest.class.getName());
-    int maxStage = Stage.values().length -1;
-    // Fail stages between 0 and 4 inclusive
-    for (int stage = 0; stage <= maxStage; stage++) {
-      LOG.info("Running stage " + stage);
-      runBackupAndFailAtStage(stage);
-    }
-  }
-
-  private void runBackupAndFailAtStage(int stage) throws Exception {
-
-    conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage);
-    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      int before = table.getBackupHistory().size();
-      String[] args =
-          new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t",
-              table1.getNameAsString() + "," + table2.getNameAsString() };
-      // Run backup
-      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-      assertFalse(ret == 0);
-      List<BackupInfo> backups = table.getBackupHistory();
-      int after = table.getBackupHistory().size();
-
-      assertTrue(after ==  before +1);
-      for (BackupInfo data : backups) {
-        if(data.getType() == BackupType.FULL) {
-          assertTrue(data.getState() == BackupState.COMPLETE);
-        } else {
-          assertTrue(data.getState() == BackupState.FAILED);
-        }
-      }
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
deleted file mode 100644
index 36a9ee2..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestRemoteBackup extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
-
-  @Override
-  public void setUp () throws Exception {
-    useSecondCluster = true;
-    super.setUp();
-  }
-
-  /**
-   * Verify that a remote full backup is created on a single table with data correctly.
-   * @throws Exception
-   */
-  @Test
-  public void testFullBackupRemote() throws Exception {
-    LOG.info("test remote full backup on a single table");
-    final CountDownLatch latch = new CountDownLatch(1);
-    final int NB_ROWS_IN_FAM3 = 6;
-    final byte[] fam3Name = Bytes.toBytes("f3");
-    final byte[] fam2Name = Bytes.toBytes("f2");
-    final Connection conn = ConnectionFactory.createConnection(conf1);
-    Thread t = new Thread() {
-      @Override
-      public void run() {
-        try {
-          latch.await();
-        } catch (InterruptedException ie) {
-        }
-        try {
-          HTable t1 = (HTable) conn.getTable(table1);
-          Put p1;
-          for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
-            p1 = new Put(Bytes.toBytes("row-t1" + i));
-            p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
-            t1.put(p1);
-          }
-          LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
-          t1.close();
-        } catch (IOException ioe) {
-          throw new RuntimeException(ioe);
-        }
-      }
-    };
-    t.start();
-
-    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
-    // family 2 is MOB enabled
-    HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
-    hcd.setMobEnabled(true);
-    hcd.setMobThreshold(0L);
-    table1Desc.addFamily(hcd);
-    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
-
-    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
-    HTable t1 = (HTable) conn.getTable(table1);
-    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
-
-    latch.countDown();
-    String backupId =
-        backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
-    assertTrue(checkSucceeded(backupId));
-
-    LOG.info("backup complete " + backupId);
-    Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);
-
-    t.join();
-    Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);
-    t1.close();
-
-    TableName[] tablesRestoreFull = new TableName[] { table1 };
-
-    TableName[] tablesMapFull = new TableName[] { table1_restore };
-
-    BackupAdmin client = getBackupAdmin();
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false,
-      tablesRestoreFull, tablesMapFull, false));
-
-    // check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hAdmin.tableExists(table1_restore));
-
-    // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
-    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
-    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
-
-    int rows1 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
-    Assert.assertEquals(rows0, rows1);
-    hTable.close();
-
-    hAdmin.close();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
deleted file mode 100644
index 0386c27..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestRemoteRestore extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
-
-  @Override
-  public void setUp () throws Exception {
-    useSecondCluster = true;
-    super.setUp();
-  }
-
-
-  /**
-   * Verify that a remote restore on a single table is successful.
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreRemote() throws Exception {
-
-    LOG.info("test remote full backup on a single table");
-    String backupId =
-        backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
-    LOG.info("backup complete");
-    TableName[] tableset = new TableName[] { table1 };
-    TableName[] tablemap = new TableName[] { table1_restore };
-    getBackupAdmin().restore(
-      BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset,
-        tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hba.tableExists(table1_restore));
-    TEST_UTIL.deleteTable(table1_restore);
-    hba.close();
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
deleted file mode 100644
index 556521f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestRepairAfterFailedDelete extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestRepairAfterFailedDelete.class);
-
-  @Test
-  public void testRepairBackupDelete() throws Exception {
-    LOG.info("test repair backup delete on a single table with data");
-    List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
-    assertTrue(checkSucceeded(backupId));
-    LOG.info("backup complete");
-    String[] backupIds = new String[] { backupId };
-    BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection());
-    BackupInfo info = table.readBackupInfo(backupId);
-    Path path = new Path(info.getBackupRootDir(), backupId);
-    FileSystem fs = FileSystem.get(path.toUri(), conf1);
-    assertTrue(fs.exists(path));
-
-    // Snapshot backup system table before delete
-    String snapshotName = "snapshot-backup";
-    Connection conn = TEST_UTIL.getConnection();
-    Admin admin = conn.getAdmin();
-    admin.snapshot(snapshotName, BackupSystemTable.getTableName(conf1));
-
-    int deleted = getBackupAdmin().deleteBackups(backupIds);
-
-    assertTrue(!fs.exists(path));
-    assertTrue(fs.exists(new Path(info.getBackupRootDir())));
-    assertTrue(1 == deleted);
-
-    // Emulate delete failure
-    // Restore backup system table
-    admin.disableTable(BackupSystemTable.getTableName(conf1));
-    admin.restoreSnapshot(snapshotName);
-    admin.enableTable(BackupSystemTable.getTableName(conf1));
-    // Start backup session
-    table.startBackupExclusiveOperation();
-    // Start delete operation
-    table.startDeleteOperation(backupIds);
-
-    // Now run repair command to repair "failed" delete operation
-    String[] args = new String[] {"repair"};
-    // Run restore
-    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
-    assertTrue(ret == 0);
-    // Verify that history length == 0
-    assertTrue (table.getBackupHistory().size() == 0);
-    table.close();
-    admin.close();
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
deleted file mode 100644
index c61b018..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestRestoreBoundaryTests extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
-
-  /**
-   * Verify that a single empty table is restored to a new table
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreSingleEmpty() throws Exception {
-    LOG.info("test full restore on a single table empty table");
-    String backupId = fullTableBackup(toList(table1.getNameAsString()));
-    LOG.info("backup complete");
-    TableName[] tableset = new TableName[] { table1 };
-    TableName[] tablemap = new TableName[] { table1_restore };
-    getBackupAdmin().restore(
-      BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
-        false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hba.tableExists(table1_restore));
-    TEST_UTIL.deleteTable(table1_restore);
-  }
-
-  /**
-   * Verify that multiple tables are restored to new tables.
-   * @throws Exception
-   */
-  @Test
-  public void testFullRestoreMultipleEmpty() throws Exception {
-    LOG.info("create full backup image on multiple tables");
-
-    List<TableName> tables = toList(table2.getNameAsString(), table3.getNameAsString());
-    String backupId = fullTableBackup(tables);
-    TableName[] restore_tableset = new TableName[] { table2, table3 };
-    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    getBackupAdmin().restore(
-      BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset,
-        tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hba.tableExists(table2_restore));
-    assertTrue(hba.tableExists(table3_restore));
-    TEST_UTIL.deleteTable(table2_restore);
-    TEST_UTIL.deleteTable(table3_restore);
-    hba.close();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
deleted file mode 100644
index 6443421..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestSystemTableSnapshot extends TestBackupBase {
-
-  private static final Log LOG = LogFactory.getLog(TestSystemTableSnapshot.class);
-
-  /**
-   * Verify backup system table snapshot
-   * @throws Exception
-   */
- // @Test
-  public void _testBackupRestoreSystemTable() throws Exception {
-
-    LOG.info("test snapshot system table");
-
-    TableName backupSystem = BackupSystemTable.getTableName(conf1);
-
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
-    String snapshotName = "sysTable";
-    hba.snapshot(snapshotName, backupSystem);
-
-    hba.disableTable(backupSystem);
-    hba.restoreSnapshot(snapshotName);
-    hba.enableTable(backupSystem);
-    hba.close();
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
deleted file mode 100644
index 5f72f45..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.master;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.TestBackupBase;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterables;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestBackupLogCleaner extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
-
-  // implements all test cases in 1 test since incremental full backup/
-  // incremental backup has dependencies
-  @Test
-  public void testBackupLogCleaner() throws Exception {
-
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
-
-    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      // Verify that we have no backup sessions yet
-      assertFalse(systemTable.hasBackupSessions());
-
-      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
-      List<String> swalFiles = convert(walFiles);
-      BackupLogCleaner cleaner = new BackupLogCleaner();
-      cleaner.setConf(TEST_UTIL.getConfiguration());
-      cleaner.init(null);
-      cleaner.setConf(TEST_UTIL.getConfiguration());
-
-      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
-      int size = Iterables.size(deletable);
-
-      // We can delete all files because we do not have yet recorded backup sessions
-      assertTrue(size == walFiles.size());
-
-      systemTable.addWALFiles(swalFiles, "backup", "root");
-      String backupIdFull = fullTableBackup(tableSetFullList);
-      assertTrue(checkSucceeded(backupIdFull));
-      // Check one more time
-      deletable = cleaner.getDeletableFiles(walFiles);
-      // We can delete wal files because they were saved into backup system table table
-      size = Iterables.size(deletable);
-      assertTrue(size == walFiles.size());
-
-      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
-      LOG.debug("WAL list after full backup");
-      convert(newWalFiles);
-
-      // New list of wal files is greater than the previous one,
-      // because new wal per RS have been opened after full backup
-      assertTrue(walFiles.size() < newWalFiles.size());
-      Connection conn = ConnectionFactory.createConnection(conf1);
-      // #2 - insert some data to table
-      HTable t1 = (HTable) conn.getTable(table1);
-      Put p1;
-      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-        p1 = new Put(Bytes.toBytes("row-t1" + i));
-        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-        t1.put(p1);
-      }
-
-      t1.close();
-
-      HTable t2 = (HTable) conn.getTable(table2);
-      Put p2;
-      for (int i = 0; i < 5; i++) {
-        p2 = new Put(Bytes.toBytes("row-t2" + i));
-        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-        t2.put(p2);
-      }
-
-      t2.close();
-
-      // #3 - incremental backup for multiple tables
-
-      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
-      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
-        BACKUP_ROOT_DIR);
-      assertTrue(checkSucceeded(backupIdIncMultiple));
-      deletable = cleaner.getDeletableFiles(newWalFiles);
-
-      assertTrue(Iterables.size(deletable) == newWalFiles.size());
-
-      conn.close();
-    }
-  }
-
-  private List<String> convert(List<FileStatus> walFiles) {
-    List<String> result = new ArrayList<String>();
-    for (FileStatus fs : walFiles) {
-      LOG.debug("+++WAL: " + fs.getPath().toString());
-      result.add(fs.getPath().toString());
-    }
-    return result;
-  }
-
-  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
-    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
-    FileSystem fs = FileSystem.get(c);
-    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
-    List<FileStatus> logFiles = new ArrayList<FileStatus>();
-    while (it.hasNext()) {
-      LocatedFileStatus lfs = it.next();
-      if (lfs.isFile() && !AbstractFSWALProvider.isMetaFile(lfs.getPath())) {
-        logFiles.add(lfs);
-        LOG.info(lfs);
-      }
-    }
-    return logFiles;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 484ba19..0ee9ba0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -87,6 +87,7 @@
     <module>hbase-metrics-api</module>
     <module>hbase-metrics</module>
     <module>hbase-spark-it</module>
+    <module>hbase-backup</module>
   </modules>
   <!--Add apache snapshots in case we want to use unreleased versions of plugins:
       e.g. surefire 2.18-SNAPSHOT-->
