http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index cf34d14..2d6cf26 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
  * value = backupId and full WAL file name</li>
  * </ul></p>
  */
-
 @InterfaceAudience.Private
 public final class BackupSystemTable implements Closeable {
   private static final Logger LOG = LoggerFactory.getLogger(BackupSystemTable.class);
@@ -121,7 +120,6 @@ public final class BackupSystemTable implements Closeable {
     public String toString() {
       return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
     }
-
   }
 
   /**
@@ -136,7 +134,6 @@ public final class BackupSystemTable implements Closeable {
    * from activity of RegionObserver, which controls process of a bulk loading
    * {@link org.apache.hadoop.hbase.backup.BackupObserver}
    */
-
   private TableName bulkLoadTableName;
 
   /**
@@ -212,7 +209,6 @@ public final class BackupSystemTable implements Closeable {
       }
       waitForSystemTable(admin, tableName);
       waitForSystemTable(admin, bulkLoadTableName);
-
     }
   }
 
@@ -246,7 +242,6 @@ public final class BackupSystemTable implements Closeable {
       }
     }
     LOG.debug("Backup table "+tableName+" exists and available");
-
   }
 
   @Override
@@ -260,7 +255,6 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public void updateBackupInfo(BackupInfo info) throws IOException {
-
     if (LOG.isTraceEnabled()) {
       LOG.trace("update backup status in backup system table for: " + 
info.getBackupId()
           + " set status=" + info.getState());
@@ -356,9 +350,7 @@ public final class BackupSystemTable implements Closeable {
    * @param backupId backup id
    * @throws IOException exception
    */
-
   public void deleteBackupInfo(String backupId) throws IOException {
-
     if (LOG.isTraceEnabled()) {
       LOG.trace("delete backup status in backup system table for " + backupId);
     }
@@ -447,7 +439,7 @@ public final class BackupSystemTable implements Closeable {
           String fam = null;
           String path = null;
           boolean raw = false;
-          byte[] row = null;
+          byte[] row;
           String region = null;
           for (Cell cell : res.listCells()) {
             row = CellUtil.cloneRow(cell);
@@ -465,19 +457,21 @@ public final class BackupSystemTable implements Closeable {
               byte[] state = CellUtil.cloneValue(cell);
               if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
                 raw = true;
-              } else raw = false;
+              } else {
+                raw = false;
+              }
             }
           }
           if (map.get(tTable) == null) {
-            map.put(tTable, new HashMap<String, Map<String, List<Pair<String, Boolean>>>>());
+            map.put(tTable, new HashMap<>());
             tblMap = map.get(tTable);
           }
           if (tblMap.get(region) == null) {
-            tblMap.put(region, new HashMap<String, List<Pair<String, Boolean>>>());
+            tblMap.put(region, new HashMap<>());
           }
           Map<String, List<Pair<String, Boolean>>> famMap = tblMap.get(region);
           if (famMap.get(fam) == null) {
-            famMap.put(fam, new ArrayList<Pair<String, Boolean>>());
+            famMap.put(fam, new ArrayList<>());
           }
           famMap.get(fam).add(new Pair<>(path, raw));
           LOG.debug("found orig " + path + " for " + fam + " of table " + 
region);
@@ -501,7 +495,11 @@ public final class BackupSystemTable implements Closeable {
       for (int idx = 0; idx < maps.length; idx++) {
         Map<byte[], List<Path>> map = maps[idx];
         TableName tn = sTableList.get(idx);
-        if (map == null) continue;
+
+        if (map == null) {
+          continue;
+        }
+
         for (Map.Entry<byte[], List<Path>> entry : map.entrySet()) {
           byte[] fam = entry.getKey();
           List<Path> paths = entry.getValue();
@@ -524,7 +522,6 @@ public final class BackupSystemTable implements Closeable {
    * @param backupId backup id
    * @return Current status of backup session or null
    */
-
   public BackupInfo readBackupInfo(String backupId) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("read backup status from backup system table for: " + 
backupId);
@@ -585,7 +582,8 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Exclusive operations are:
    * create, delete, merge
-   * @throws IOException
+   * @throws IOException if a table operation fails or an active backup exclusive operation is
+   *                     already underway
    */
   public void startBackupExclusiveOperation() throws IOException {
     LOG.debug("Start new backup exclusive operation");
@@ -642,8 +640,8 @@ public final class BackupSystemTable implements Closeable {
 
     try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
+      Result res;
+      HashMap<String, Long> rsTimestampMap = new HashMap<>();
       while ((res = scanner.next()) != null) {
         res.advance();
         Cell cell = res.current();
@@ -690,7 +688,7 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Get all backups history
    * @return list of backup info
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistory() throws IOException {
     return getBackupHistory(false);
@@ -701,7 +699,7 @@ public final class BackupSystemTable implements Closeable {
    * @param n number of records, if n== -1 - max number
    *        is ignored
    * @return list of records
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getHistory(int n) throws IOException {
     List<BackupInfo> history = getBackupHistory();
@@ -717,15 +715,20 @@ public final class BackupSystemTable implements Closeable {
    *        is ignored
    * @param filters list of filters
    * @return backup records
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistory(int n, BackupInfo.Filter... filters) throws IOException {
-    if (filters.length == 0) return getHistory(n);
+    if (filters.length == 0) {
+      return getHistory(n);
+    }
 
     List<BackupInfo> history = getBackupHistory();
-    List<BackupInfo> result = new ArrayList<BackupInfo>();
+    List<BackupInfo> result = new ArrayList<>();
     for (BackupInfo bi : history) {
-      if (n >= 0 && result.size() == n) break;
+      if (n >= 0 && result.size() == n) {
+        break;
+      }
+
       boolean passed = true;
       for (int i = 0; i < filters.length; i++) {
         if (!filters[i].apply(bi)) {
@@ -738,7 +741,6 @@ public final class BackupSystemTable implements Closeable {
       }
     }
     return result;
-
   }
 
   /*
@@ -761,7 +763,7 @@ public final class BackupSystemTable implements Closeable {
    * Get history for backup destination
    * @param backupRoot backup destination path
    * @return List of backup info
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistory(String backupRoot) throws IOException {
     ArrayList<BackupInfo> history = getBackupHistory(false);
@@ -778,11 +780,11 @@ public final class BackupSystemTable implements Closeable {
    * Get history for a table
    * @param name table name
    * @return history for a table
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOException {
     List<BackupInfo> history = getBackupHistory();
-    List<BackupInfo> tableHistory = new ArrayList<BackupInfo>();
+    List<BackupInfo> tableHistory = new ArrayList<>();
     for (BackupInfo info : history) {
       List<TableName> tables = info.getTableNames();
       if (tables.contains(name)) {
@@ -795,8 +797,7 @@ public final class BackupSystemTable implements Closeable {
   public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
       String backupRoot) throws IOException {
     List<BackupInfo> history = getBackupHistory(backupRoot);
-    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap =
-        new HashMap<TableName, ArrayList<BackupInfo>>();
+    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
     for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
       BackupInfo info = iterator.next();
       if (!backupRoot.equals(info.getBackupRootDir())) {
@@ -807,7 +808,7 @@ public final class BackupSystemTable implements Closeable {
         if (set.contains(tableName)) {
           ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
           if (list == null) {
-            list = new ArrayList<BackupInfo>();
+            list = new ArrayList<>();
             tableHistoryMap.put(tableName, list);
           }
           list.add(info);
@@ -827,11 +828,11 @@ public final class BackupSystemTable implements Closeable {
     LOG.trace("get backup infos from backup system table");
 
     Scan scan = createScanForBackupHistory();
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
+    ArrayList<BackupInfo> list = new ArrayList<>();
 
     try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
+      Result res;
       while ((res = scanner.next()) != null) {
         res.advance();
         BackupInfo context = cellToBackupInfo(res.current());
@@ -859,7 +860,7 @@ public final class BackupSystemTable implements Closeable {
       LOG.trace("write RS log time stamps to backup system table for tables ["
           + StringUtils.join(tables, ",") + "]");
     }
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<>();
     for (TableName table : tables) {
       byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray();
       Put put = createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot);
@@ -885,13 +886,12 @@ public final class BackupSystemTable implements Closeable {
       LOG.trace("read RS log ts from backup system table for root=" + backupRoot);
     }
 
-    HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-        new HashMap<TableName, HashMap<String, Long>>();
+    HashMap<TableName, HashMap<String, Long>> tableTimestampMap = new HashMap<>();
 
     Scan scan = createScanForReadLogTimestampMap(backupRoot);
     try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
+      Result res;
       while ((res = scanner.next()) != null) {
         res.advance();
         Cell cell = res.current();
@@ -936,7 +936,7 @@ public final class BackupSystemTable implements Closeable {
 
   private HashMap<String, Long> fromTableServerTimestampProto(
       BackupProtos.TableServerTimestamp proto) {
-    HashMap<String, Long> map = new HashMap<String, Long>();
+    HashMap<String, Long> map = new HashMap<>();
     List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
     for (BackupProtos.ServerTimestamp st : list) {
       ServerName sn =
@@ -997,7 +997,6 @@ public final class BackupSystemTable implements Closeable {
    * Deletes incremental backup set for a backup destination
    * @param backupRoot backup root
    */
-
   public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("Delete incremental backup table set to backup system table. ROOT=" + backupRoot);
@@ -1084,7 +1083,6 @@ public final class BackupSystemTable implements Closeable {
         throw new RuntimeException("remove is not supported");
       }
     };
-
   }
 
   /**
@@ -1092,8 +1090,8 @@ public final class BackupSystemTable implements Closeable {
    * @param file name of a file to check
    * @return true, if deletable, false otherwise.
    * @throws IOException exception
-   * TODO: multiple backup destination support
    */
+  // TODO: multiple backup destination support
   public boolean isWALFileDeletable(String file) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("Check if WAL file has been already backed up in backup system 
table " + file);
@@ -1174,12 +1172,12 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Get backup set list
    * @return backup set list
-   * @throws IOException
+   * @throws IOException if a table or scanner operation fails
    */
   public List<String> listBackupSets() throws IOException {
     LOG.trace("Backup set list");
 
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     Table table = null;
     ResultScanner scanner = null;
     try {
@@ -1187,7 +1185,7 @@ public final class BackupSystemTable implements Closeable {
       Scan scan = createScanForBackupSetList();
       scan.setMaxVersions(1);
       scanner = table.getScanner(scan);
-      Result res = null;
+      Result res;
       while ((res = scanner.next()) != null) {
         res.advance();
         list.add(cellKeyToBackupSetName(res.current()));
@@ -1207,7 +1205,7 @@ public final class BackupSystemTable implements Closeable {
    * Get backup set description (list of tables)
    * @param name set's name
    * @return list of tables in a backup set
-   * @throws IOException
+   * @throws IOException if a table operation fails
    */
   public List<TableName> describeBackupSet(String name) throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -1218,7 +1216,11 @@ public final class BackupSystemTable implements Closeable {
       table = connection.getTable(tableName);
       Get get = createGetForBackupSet(name);
       Result res = table.get(get);
-      if (res.isEmpty()) return null;
+
+      if (res.isEmpty()) {
+        return null;
+      }
+
       res.advance();
       String[] tables = cellValueToBackupSet(res.current());
       return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item)).
@@ -1234,7 +1236,7 @@ public final class BackupSystemTable implements Closeable {
    * Add backup set (list of tables)
    * @param name set name
    * @param newTables list of tables, comma-separated
-   * @throws IOException
+   * @throws IOException if a table operation fails
    */
   public void addToBackupSet(String name, String[] newTables) throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -1260,15 +1262,15 @@ public final class BackupSystemTable implements Closeable {
    * Remove tables from backup set (list of tables)
    * @param name set name
    * @param toRemove list of tables
-   * @throws IOException
+   * @throws IOException if a table operation or deleting the backup set fails
    */
   public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ")
           + "]");
     }
-    String[] disjoint = null;
-    String[] tables = null;
+    String[] disjoint;
+    String[] tables;
     try (Table table = connection.getTable(tableName)) {
       Get get = createGetForBackupSet(name);
       Result res = table.get(get);
@@ -1309,7 +1311,7 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Delete backup set
    * @param name set's name
-   * @throws IOException
+   * @throws IOException if getting or deleting the table fails
    */
   public void deleteBackupSet(String name) throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -1326,7 +1328,6 @@ public final class BackupSystemTable implements Closeable {
    * @return table's descriptor
    */
   public static TableDescriptor getSystemTableDescriptor(Configuration conf) {
-
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf));
 
     ColumnFamilyDescriptorBuilder colBuilder =
@@ -1334,8 +1335,7 @@ public final class BackupSystemTable implements Closeable {
 
     colBuilder.setMaxVersions(1);
     Configuration config = HBaseConfiguration.create();
-    int ttl =
-        config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
+    int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
           BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
     colBuilder.setTimeToLive(ttl);
 
@@ -1369,7 +1369,6 @@ public final class BackupSystemTable implements Closeable {
    * @return table's descriptor
    */
   public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
-
     TableDescriptorBuilder builder =
         TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));
 
@@ -1377,8 +1376,7 @@ public final class BackupSystemTable implements Closeable {
         ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
     colBuilder.setMaxVersions(1);
     Configuration config = HBaseConfiguration.create();
-    int ttl =
-        config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
+    int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
           BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
     colBuilder.setTimeToLive(ttl);
     ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
@@ -1391,8 +1389,7 @@ public final class BackupSystemTable implements Closeable {
   }
 
   public static TableName getTableNameForBulkLoadedData(Configuration conf) {
-    String name =
-        conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
+    String name = conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
           BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT) + "_bulk";
     return TableName.valueOf(name);
   }
@@ -1426,7 +1423,6 @@ public final class BackupSystemTable implements Closeable {
    * Creates Delete operation for a given backup id
    * @param backupId backup's ID
    * @return delete operation
-   * @throws IOException exception
    */
   private Delete createDeleteForBackupInfo(String backupId) {
     Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId));
@@ -1461,7 +1457,6 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Creates Put operation to store start code to backup system table
    * @return put operation
-   * @throws IOException exception
    */
   private Put createPutForStartCode(String startCode, String rootPath) {
     Put put = new Put(rowkey(START_CODE_ROW, rootPath));
@@ -1635,7 +1630,8 @@ public final class BackupSystemTable implements Closeable {
         put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
         put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
         puts.add(put);
-        LOG.debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
+        LOG.debug("writing done bulk path " + file + " for " + table + " "
+                + Bytes.toString(region));
       }
     }
     return puts;
@@ -1867,7 +1863,7 @@ public final class BackupSystemTable implements Closeable {
     }
   }
 
-  static Scan createScanForOrigBulkLoadedFiles(TableName table) throws IOException {
+  static Scan createScanForOrigBulkLoadedFiles(TableName table) {
     Scan scan = new Scan();
     byte[] startRow = rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM);
     byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
@@ -1901,7 +1897,7 @@ public final class BackupSystemTable implements Closeable {
    * @param backupId the backup Id. It can be null when querying for all tables
    * @return the Scan object
    */
-  static Scan createScanForBulkLoadedFiles(String backupId) throws IOException {
+  static Scan createScanForBulkLoadedFiles(String backupId) {
     Scan scan = new Scan();
     byte[] startRow =
         backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId
@@ -1910,7 +1906,6 @@ public final class BackupSystemTable implements Closeable {
     stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
     scan.setStartRow(startRow);
     scan.setStopRow(stopRow);
-    // scan.setTimeRange(lower, Long.MAX_VALUE);
     scan.addFamily(BackupSystemTable.META_FAMILY);
     scan.setMaxVersions(1);
     return scan;
@@ -1930,18 +1925,17 @@ public final class BackupSystemTable implements Closeable {
    * @param files list of WAL file paths
    * @param backupId backup id
    * @return put list
-   * @throws IOException exception
    */
-  private List<Put>
-      createPutsForAddWALFiles(List<String> files, String backupId, String backupRoot)
-          throws IOException {
-    List<Put> puts = new ArrayList<Put>(files.size());
+  private List<Put> createPutsForAddWALFiles(List<String> files, String backupId,
+          String backupRoot) {
+    List<Put> puts = new ArrayList<>(files.size());
     for (String file : files) {
       Put put = new Put(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file)));
       put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("backupId"),
         Bytes.toBytes(backupId));
       put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("file"), 
Bytes.toBytes(file));
-      put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("root"), 
Bytes.toBytes(backupRoot));
+      put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("root"),
+              Bytes.toBytes(backupRoot));
       puts.add(put);
     }
     return puts;
@@ -1968,9 +1962,8 @@ public final class BackupSystemTable implements Closeable {
    * Creates Get operation for a given wal file name TODO: support for backup destination
    * @param file file
    * @return get operation
-   * @throws IOException exception
    */
-  private Get createGetForCheckWALFile(String file) throws IOException {
+  private Get createGetForCheckWALFile(String file) {
     Get get = new Get(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file)));
     // add backup root column
     get.addFamily(BackupSystemTable.META_FAMILY);
@@ -2034,9 +2027,8 @@ public final class BackupSystemTable implements Closeable {
    * Converts cell to backup set list.
    * @param current current cell
    * @return backup set as array of table names
-   * @throws IOException
    */
-  private String[] cellValueToBackupSet(Cell current) throws IOException {
+  private String[] cellValueToBackupSet(Cell current) {
     byte[] data = CellUtil.cloneValue(current);
     if (!ArrayUtils.isEmpty(data)) {
       return Bytes.toString(data).split(",");
@@ -2048,9 +2040,8 @@ public final class BackupSystemTable implements Closeable {
    * Converts cell key to backup set name.
    * @param current current cell
    * @return backup set name
-   * @throws IOException
    */
-  private String cellKeyToBackupSetName(Cell current) throws IOException {
+  private String cellKeyToBackupSetName(Cell current) {
     byte[] data = CellUtil.cloneRow(current);
     return Bytes.toString(data).substring(SET_KEY_PREFIX.length());
   }
@@ -2062,5 +2053,4 @@ public final class BackupSystemTable implements Closeable {
     }
     return sb.toString().getBytes();
   }
-
 }
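
The bulk of the BackupSystemTable diff above is one mechanical checkstyle cleanup applied many times over. A minimal before/after sketch of the recurring pattern, as a hypothetical example rather than lines from the patch:

    // Before: explicit type arguments and a brace-less branch.
    List<String> names = new ArrayList<String>();
    if (names.isEmpty()) return;

    // After: Java 7 diamond operator, braces on every branch.
    List<String> names = new ArrayList<>();
    if (names.isEmpty()) {
      return;
    }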

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index c88c896..c0103f5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Full table backup implementation
@@ -86,7 +86,7 @@ public class FullTableBackupClient extends TableBackupClient {
       // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
       // calculate the real files' size for the percentage in the future.
       // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
-      int res = 0;
+      int res;
       String[] args = new String[4];
       args[0] = "-snapshot";
       args[1] = backupInfo.getSnapshotName(table);
@@ -116,23 +116,24 @@ public class FullTableBackupClient extends TableBackupClient {
   }
 
   /**
-   * Backup request execution
-   * @throws IOException
+   * Backup request execution.
+   *
+   * @throws IOException if the execution of the backup fails
    */
   @Override
   public void execute() throws IOException {
     try (Admin admin = conn.getAdmin()) {
       // Begin BACKUP
       beginBackup(backupManager, backupInfo);
-      String savedStartCode = null;
-      boolean firstBackup = false;
+      String savedStartCode;
+      boolean firstBackup;
       // do snapshot for full table backup
 
       savedStartCode = backupManager.readBackupStartCode();
       firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
       if (firstBackup) {
-        // This is our first backup. Let's put some marker to system table so that we can hold the logs
-        // while we do the backup.
+        // This is our first backup. Let's put some marker to system table so that we can hold the
+        // logs while we do the backup.
         backupManager.writeBackupStartCode(0L);
       }
       // We roll log here before we do the snapshot. It is possible there is duplicate data
@@ -142,7 +143,7 @@ public class FullTableBackupClient extends TableBackupClient {
       // the snapshot.
       LOG.info("Execute roll log procedure for full backup ...");
 
-      Map<String, String> props = new HashMap<String, String>();
+      Map<String, String> props = new HashMap<>();
       props.put("backupRoot", backupInfo.getBackupRootDir());
       
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
         LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
@@ -198,13 +199,10 @@ public class FullTableBackupClient extends TableBackupClient {
         BackupType.FULL, conf);
       throw new IOException(e);
     }
-
   }
 
-
   protected void snapshotTable(Admin admin, TableName tableName, String snapshotName)
       throws IOException {
-
     int maxAttempts =
         conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
     int pause =
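
The hunk above ends inside snapshotTable(), which retries the snapshot with a bounded attempt count and a pause read from configuration. A hedged sketch of that retry shape; the loop body and the pause key/default names are illustrative assumptions, not the patch's exact code:

    int maxAttempts = conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
    int pause = conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        admin.snapshot(snapshotName, tableName);  // take the table snapshot
        return;                                   // success, stop retrying
      } catch (IOException e) {
        if (attempt == maxAttempts) {
          throw e;                                // attempts exhausted
        }
        try {
          Thread.sleep(pause);                    // back off before the next attempt
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted between snapshot attempts", ie);
        }
      }
    }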

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 5470823..a20f9b5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -63,8 +63,7 @@ public class IncrementalBackupManager extends BackupManager {
    * @return The new HashMap of RS log time stamps after the log roll for this incremental backup.
    * @throws IOException exception
    */
-  public HashMap<String, Long> getIncrBackupLogFileMap()
-      throws IOException {
+  public HashMap<String, Long> getIncrBackupLogFileMap() throws IOException {
     List<String> logList;
     HashMap<String, Long> newTimestamps;
     HashMap<String, Long> previousTimestampMins;
@@ -89,7 +88,7 @@ public class IncrementalBackupManager extends BackupManager {
     }
 
     LOG.info("Execute roll log procedure for incremental backup ...");
-    HashMap<String, String> props = new HashMap<String, String>();
+    HashMap<String, String> props = new HashMap<>();
     props.put("backupRoot", backupInfo.getBackupRootDir());
 
     try (Admin admin = conn.getAdmin()) {
@@ -109,12 +108,12 @@ public class IncrementalBackupManager extends BackupManager {
   }
 
   /**
-   * Get list of WAL files eligible for incremental backup
+   * Get list of WAL files eligible for incremental backup.
+   *
    * @return list of WAL files
-   * @throws IOException
+   * @throws IOException if getting the list of WAL files fails
    */
-  public List<String> getIncrBackupLogFileList()
-      throws IOException {
+  public List<String> getIncrBackupLogFileList() throws IOException {
     List<String> logList;
     HashMap<String, Long> newTimestamps;
     HashMap<String, Long> previousTimestampMins;
@@ -154,14 +153,17 @@ public class IncrementalBackupManager extends BackupManager {
 
   private List<String> excludeAlreadyBackedUpWALs(List<String> logList,
       List<WALItem> logFromSystemTable) {
-
     Set<String> walFileNameSet = convertToSet(logFromSystemTable);
 
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     for (int i=0; i < logList.size(); i++) {
       Path p = new Path(logList.get(i));
       String name  = p.getName();
-      if (walFileNameSet.contains(name)) continue;
+
+      if (walFileNameSet.contains(name)) {
+        continue;
+      }
+
       list.add(logList.get(i));
     }
     return list;
@@ -169,12 +171,11 @@ public class IncrementalBackupManager extends BackupManager {
 
   /**
    * Create Set of WAL file names (not full path names)
-   * @param logFromSystemTable
+   * @param logFromSystemTable the logs from the system table to convert
    * @return set of WAL file names
    */
   private Set<String> convertToSet(List<WALItem> logFromSystemTable) {
-
-    Set<String> set = new HashSet<String>();
+    Set<String> set = new HashSet<>();
     for (int i=0; i < logFromSystemTable.size(); i++) {
       WALItem item = logFromSystemTable.get(i);
       set.add(item.walFile);
@@ -188,11 +189,11 @@ public class IncrementalBackupManager extends BackupManager {
    * @param olderTimestamps timestamp map for each region server of the last backup.
    * @param newestTimestamps timestamp map for each region server that the backup should lead to.
    * @return list of log files which needs to be added to this backup
-   * @throws IOException
+   * @throws IOException if getting the WAL files from the backup system fails
    */
   private List<WALItem> getLogFilesFromBackupSystem(HashMap<String, Long> olderTimestamps,
       HashMap<String, Long> newestTimestamps, String backupRoot) throws IOException {
-    List<WALItem> logFiles = new ArrayList<WALItem>();
+    List<WALItem> logFiles = new ArrayList<>();
     Iterator<WALItem> it = getWALFilesFromBackupSystem();
     while (it.hasNext()) {
       WALItem item = it.next();
@@ -248,8 +249,8 @@ public class IncrementalBackupManager extends BackupManager {
     FileSystem fs = rootdir.getFileSystem(conf);
     NewestLogFilter pathFilter = new NewestLogFilter();
 
-    List<String> resultLogFiles = new ArrayList<String>();
-    List<String> newestLogs = new ArrayList<String>();
+    List<String> resultLogFiles = new ArrayList<>();
+    List<String> newestLogs = new ArrayList<>();
 
     /*
      * The old region servers and timestamps info we kept in backup system table may be out of sync
@@ -259,7 +260,6 @@ public class IncrementalBackupManager extends BackupManager {
      * with. We'll just use all the logs in that directory. We always write up-to-date region server
      * and timestamp info to backup system table at the end of successful backup.
      */
-
     FileStatus[] rss;
     Path p;
     String host;
@@ -381,5 +381,4 @@ public class IncrementalBackupManager extends BackupManager {
       }
     }
   }
-
 }
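
getLogFilesFromBackupSystem() above keeps a WAL when its timestamp falls after the previous backup's high-water mark for the server that wrote it. A hedged sketch of that per-server test; isEligible is an illustrative name, and BackupUtils.getCreationTime is assumed to parse the timestamp out of the WAL file name:

    // Illustrative reconstruction, not the patch's exact code.
    private static boolean isEligible(String server, String walPath,
        Map<String, Long> olderTimestamps) throws IOException {
      Long lastBackedUp = olderTimestamps.get(server);
      long walTs = BackupUtils.getCreationTime(new Path(walPath));
      // A server absent from the map is new since the last backup:
      // every WAL it has written is eligible.
      return lastBackedUp == null || walTs > lastBackedUp;
    }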

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 34d713d..c897ae2 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -72,7 +72,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     for (String file : incrBackupFileList) {
       Path p = new Path(file);
       if (fs.exists(p) || isActiveWalPath(p)) {
@@ -94,7 +94,10 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected static int getIndex(TableName tbl, List<TableName> sTableList) {
-    if (sTableList == null) return 0;
+    if (sTableList == null) {
+      return 0;
+    }
+
     for (int i = 0; i < sTableList.size(); i++) {
       if (tbl.equals(sTableList.get(i))) {
         return i;
@@ -110,12 +113,13 @@ public class IncrementalTableBackupClient extends TableBackupClient {
    * @return map of table to List of files
    */
   @SuppressWarnings("unchecked")
-  protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList) throws IOException {
+  protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList)
+          throws IOException {
     Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
-    List<String> activeFiles = new ArrayList<String>();
-    List<String> archiveFiles = new ArrayList<String>();
+    List<String> activeFiles = new ArrayList<>();
+    List<String> archiveFiles = new ArrayList<>();
     Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
-    backupManager.readBulkloadRows(sTableList);
+            backupManager.readBulkloadRows(sTableList);
     Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst();
     FileSystem tgtFs;
     try {
@@ -136,7 +140,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
         continue;
       }
       if (mapForSrc[srcIdx] == null) {
-        mapForSrc[srcIdx] = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
+        mapForSrc[srcIdx] = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       }
       Path tblDir = FSUtils.getTableDir(rootdir, srcTable);
       Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
@@ -152,7 +156,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
           Path famDir = new Path(regionDir, fam);
           List<Path> files;
           if (!mapForSrc[srcIdx].containsKey(fam.getBytes())) {
-            files = new ArrayList<Path>();
+            files = new ArrayList<>();
             mapForSrc[srcIdx].put(fam.getBytes(), files);
           } else {
             files = mapForSrc[srcIdx].get(fam.getBytes());
@@ -177,13 +181,13 @@ public class IncrementalTableBackupClient extends TableBackupClient {
               if (LOG.isTraceEnabled()) {
                 LOG.trace("found bulk hfile " + file + " in " + famDir + " for 
" + tblName);
               }
-                if (LOG.isTraceEnabled()) {
-                  LOG.trace("copying " + p + " to " + tgt);
-                }
-                activeFiles.add(p.toString());
+              if (LOG.isTraceEnabled()) {
+                LOG.trace("copying " + p + " to " + tgt);
+              }
+              activeFiles.add(p.toString());
             } else if (fs.exists(archive)){
               LOG.debug("copying archive " + archive + " to " + tgt);
-                archiveFiles.add(archive.toString());
+              archiveFiles.add(archive.toString());
             }
             files.add(tgt);
           }
@@ -198,7 +202,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
 
   private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles)
       throws IOException {
-
     try {
       // Enable special mode of BackupDistCp
       conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5);
@@ -220,7 +223,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
           // Update active and archived lists
           // When file is being moved from active to archive
           // directory, the number of active files decreases
-
           int numOfActive = activeFiles.size();
           updateFileLists(activeFiles, archiveFiles);
           if (activeFiles.size() < numOfActive) {
@@ -242,12 +244,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
       // Disable special mode of BackupDistCp
       conf.unset(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY);
     }
-
   }
 
   private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
       throws IOException {
-    List<String> newlyArchived = new ArrayList<String>();
+    List<String> newlyArchived = new ArrayList<>();
 
     for (String spath : activeFiles) {
       if (!fs.exists(new Path(spath))) {
@@ -261,12 +262,10 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     }
 
     LOG.debug(newlyArchived.size() + " files have been archived.");
-
   }
 
   @Override
   public void execute() throws IOException {
-
     try {
       // case PREPARE_INCREMENTAL:
       beginBackup(backupManager, backupInfo);
@@ -288,7 +287,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
       BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
       // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
       convertWALsToHFiles();
-      incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, backupInfo.getBackupRootDir());
+      incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
+              backupInfo.getBackupRootDir());
       // Save list of WAL files copied
       backupManager.recordWALFiles(backupInfo.getIncrBackupFileList());
     } catch (Exception e) {
@@ -329,9 +329,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected void incrementalCopyHFiles(String[] files, String backupDest) throws IOException {
-
     try {
-      LOG.debug("Incremental copy HFiles is starting. dest="+backupDest);
+      LOG.debug("Incremental copy HFiles is starting. dest=" + backupDest);
       // set overall backup phase: incremental_copy
       backupInfo.setPhase(BackupPhase.INCREMENTAL_COPY);
       // get incremental backup file list and prepare parms for DistCp
@@ -366,7 +365,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     if (!result) {
       LOG.warn("Could not delete " + path);
     }
-
   }
 
   protected void convertWALsToHFiles() throws IOException {
@@ -386,7 +384,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     }
   }
 
-
   protected boolean tableExists(TableName table, Connection conn) throws IOException {
     try (Admin admin = conn.getAdmin()) {
       return admin.tableExists(table);
@@ -394,7 +391,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected void walToHFiles(List<String> dirPaths, TableName tableName) throws IOException {
-
     Tool player = new WALPlayer();
 
     // Player reads all files in arbitrary directory structure and creates
@@ -439,5 +435,4 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     path = new Path(path, backupId);
     return path;
   }
-
 }
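
copyBulkLoadedFiles() above retries the copy when source HFiles are archived out from under it, using updateFileLists() to detect the moved files. A hedged sketch of that retry shape; the attempt bound and the copy call are illustrative assumptions, not the patch's exact code:

    int attempt = 0;
    final int maxAttempts = 2;                      // illustrative bound
    while (true) {
      try {
        copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL,
            activeFiles.toArray(new String[0]));    // copy the still-active files
        break;                                      // copy succeeded
      } catch (IOException e) {
        int numOfActive = activeFiles.size();
        updateFileLists(activeFiles, archiveFiles); // did any file move to archive?
        if (activeFiles.size() == numOfActive || ++attempt >= maxAttempts) {
          throw e;                                  // nothing moved, or out of attempts
        }
        // Some active files were archived mid-copy; retry with the updated lists.
      }
    }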

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index c6b6bad..c52d658 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -63,7 +63,7 @@ public class RestoreTablesClient {
   private String targetRootDir;
   private boolean isOverwrite;
 
-  public RestoreTablesClient(Connection conn, RestoreRequest request) throws IOException {
+  public RestoreTablesClient(Connection conn, RestoreRequest request) {
     this.targetRootDir = request.getBackupRootDir();
     this.backupId = request.getBackupId();
     this.sTableArray = request.getFromTables();
@@ -74,13 +74,11 @@ public class RestoreTablesClient {
     this.isOverwrite = request.isOverwrite();
     this.conn = conn;
     this.conf = conn.getConfiguration();
-
   }
 
   /**
-   * Validate target tables
-   * @param conn connection
-   * @param mgr table state manager
+   * Validate target tables.
+   *
    * @param tTableArray: target tables
    * @param isOverwrite overwrite existing table
    * @throws IOException exception
@@ -125,8 +123,8 @@ public class RestoreTablesClient {
   }
 
   /**
-   * Restore operation handle each backupImage in array
-   * @param svc: master services
+   * Restore operation handle each backupImage in array.
+   *
    * @param images: array BackupImage
    * @param sTable: table to be restored
    * @param tTable: table to be restored to
@@ -136,7 +134,6 @@ public class RestoreTablesClient {
 
   private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable,
       boolean truncateIfExists) throws IOException {
-
     // First image MUST be image of a FULL backup
     BackupImage image = images[0];
     String rootDir = image.getRootDir();
@@ -163,7 +160,7 @@ public class RestoreTablesClient {
       return;
     }
 
-    List<Path> dirList = new ArrayList<Path>();
+    List<Path> dirList = new ArrayList<>();
     // add full backup path
     // full backup path comes first
     for (int i = 1; i < images.length; i++) {
@@ -188,7 +185,7 @@ public class RestoreTablesClient {
   private List<Path> getFilesRecursively(String fileBackupDir)
       throws IllegalArgumentException, IOException {
     FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration());
-    List<Path> list = new ArrayList<Path>();
+    List<Path> list = new ArrayList<>();
     RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true);
     while (it.hasNext()) {
       Path p = it.next().getPath();
@@ -204,13 +201,11 @@ public class RestoreTablesClient {
    * @param backupManifestMap : tableName, Manifest
    * @param sTableArray The array of tables to be restored
    * @param tTableArray The array of mapping tables to restore to
-   * @return set of BackupImages restored
    * @throws IOException exception
    */
   private void restore(HashMap<TableName, BackupManifest> backupManifestMap,
       TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
-    TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
-    boolean truncateIfExists = isOverwrite;
+    TreeSet<BackupImage> restoreImageSet = new TreeSet<>();
     Set<String> backupIdSet = new HashSet<>();
 
     for (int i = 0; i < sTableArray.length; i++) {
@@ -219,20 +214,21 @@ public class RestoreTablesClient {
       BackupManifest manifest = backupManifestMap.get(table);
       // Get the image list of this backup for restore in time order from old
       // to new.
-      List<BackupImage> list = new ArrayList<BackupImage>();
+      List<BackupImage> list = new ArrayList<>();
       list.add(manifest.getBackupImage());
-      TreeSet<BackupImage> set = new TreeSet<BackupImage>(list);
+      TreeSet<BackupImage> set = new TreeSet<>(list);
       List<BackupImage> depList = manifest.getDependentListByTable(table);
       set.addAll(depList);
       BackupImage[] arr = new BackupImage[set.size()];
       set.toArray(arr);
-      restoreImages(arr, table, tTableArray[i], truncateIfExists);
+      restoreImages(arr, table, tTableArray[i], isOverwrite);
       restoreImageSet.addAll(list);
       if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
         LOG.info("Restore includes the following image(s):");
         for (BackupImage image : restoreImageSet) {
           LOG.info("Backup: " + image.getBackupId() + " "
-              + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
+              + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
+                  table));
           if (image.getType() == BackupType.INCREMENTAL) {
             backupIdSet.add(image.getBackupId());
             LOG.debug("adding " + image.getBackupId() + " for bulk load");
@@ -251,14 +247,10 @@ public class RestoreTablesClient {
   }
 
   static boolean withinRange(long a, long lower, long upper) {
-    if (a < lower || a > upper) {
-      return false;
-    }
-    return true;
+    return a >= lower && a <= upper;
   }
 
   public void execute() throws IOException {
-
     // case VALIDATION:
     // check the target tables
     checkTargetTables(tTableArray, isOverwrite);
@@ -272,5 +264,4 @@ public class RestoreTablesClient {
 
     restore(backupManifestMap, sTableArray, tTableArray, isOverwrite);
   }
-
 }
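
restore() above depends on an ordering invariant: BackupImage is assumed to sort by start timestamp, so the TreeSet yields the full image first and the incremental images old-to-new, which is the order restoreImages() applies them in. A hedged sketch of that invariant; the assertion is illustrative, not the patch's code:

    TreeSet<BackupImage> set = new TreeSet<>();
    set.add(manifest.getBackupImage());            // the image being restored
    set.addAll(manifest.getDependentListByTable(table));
    BackupImage[] arr = set.toArray(new BackupImage[set.size()]);
    // restoreImages() walks arr oldest-to-newest; arr[0] must be the FULL image.
    assert arr[0].getType() == BackupType.FULL : "first image must be a full backup";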

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
index ab24cca..7d960b4 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
@@ -35,13 +35,13 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
@@ -80,8 +80,7 @@ public abstract class TableBackupClient {
   }
 
   public void init(final Connection conn, final String backupId, BackupRequest request)
-      throws IOException
-  {
+      throws IOException {
     if (request.getBackupType() == BackupType.FULL) {
       backupManager = new BackupManager(conn, conn.getConfiguration());
     } else {
@@ -137,10 +136,10 @@ public abstract class TableBackupClient {
   /**
    * Delete HBase snapshot for backup.
    * @param backupInfo backup info
-   * @throws Exception exception
+   * @throws IOException exception
    */
-  protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo, Configuration conf)
-      throws IOException {
+  protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo,
+      Configuration conf) throws IOException {
     LOG.debug("Trying to delete snapshot for full backup.");
     for (String snapshotName : backupInfo.getSnapshotNames()) {
       if (snapshotName == null) {
@@ -228,11 +227,10 @@ public abstract class TableBackupClient {
    * Fail the overall backup.
    * @param backupInfo backup info
    * @param e exception
-   * @throws Exception exception
+   * @throws IOException exception
    */
   protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager,
       Exception e, String msg, BackupType type, Configuration conf) throws IOException {
-
     try {
       LOG.error(msg + getMessage(e), e);
       // If this is a cancel exception, then we've already cleaned.
@@ -277,16 +275,13 @@ public abstract class TableBackupClient {
     cleanupTargetDir(backupInfo, conf);
   }
 
-
-
   /**
    * Add manifest for the current backup. The manifest is stored within the table backup directory.
    * @param backupInfo The current backup info
    * @throws IOException exception
-   * @throws BackupException exception
    */
   protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type,
-      Configuration conf) throws IOException, BackupException {
+      Configuration conf) throws IOException {
     // set the overall backup phase : store manifest
     backupInfo.setPhase(BackupPhase.STORE_MANIFEST);
 
@@ -303,8 +298,7 @@ public abstract class TableBackupClient {
 
       if (type == BackupType.INCREMENTAL) {
         // We'll store the log timestamps for this table only in its manifest.
-        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-            new HashMap<TableName, HashMap<String, Long>>();
+        HashMap<TableName, HashMap<String, Long>> tableTimestampMap = new HashMap<>();
         tableTimestampMap.put(table, backupInfo.getIncrTimestampMap().get(table));
         manifest.setIncrTimestampMap(tableTimestampMap);
         ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupInfo);
@@ -371,7 +365,7 @@ public abstract class TableBackupClient {
   /**
    * Complete the overall backup.
    * @param backupInfo backup info
-   * @throws Exception exception
+   * @throws IOException exception
    */
   protected void completeBackup(final Connection conn, BackupInfo backupInfo,
       BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
@@ -412,8 +406,9 @@ public abstract class TableBackupClient {
   }
 
   /**
-   * Backup request execution
-   * @throws IOException
+   * Backup request execution.
+   *
+   * @throws IOException if the execution of the backup fails
    */
   public abstract void execute() throws IOException;
 
@@ -430,7 +425,7 @@ public abstract class TableBackupClient {
     }
   }
 
-  public static enum Stage {
+  public enum Stage {
     stage_0, stage_1, stage_2, stage_3, stage_4
   }
 }
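
The final hunk drops a redundant modifier: a nested enum is implicitly static, so `public static enum Stage` and `public enum Stage` declare the same thing. A minimal illustration with a hypothetical outer class:

    public class Example {
      // Nested enums are implicitly static; the modifier added no meaning.
      public enum Stage {
        stage_0, stage_1, stage_2, stage_3, stage_4
      }
    }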

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index b1f17cf..6f2c44c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -38,20 +38,19 @@ import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.Tool;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * MapReduce implementation of {@link BackupMergeJob}
  * Must be initialized with configuration of a backup destination cluster
  *
  */
-
 @InterfaceAudience.Private
 public class MapReduceBackupMergeJob implements BackupMergeJob {
   public static final Logger LOG = LoggerFactory.getLogger(MapReduceBackupMergeJob.class);
@@ -87,7 +86,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
       LOG.debug("Merge backup images " + bids);
     }
 
-    List<Pair<TableName, Path>> processedTableList = new ArrayList<Pair<TableName, Path>>();
+    List<Pair<TableName, Path>> processedTableList = new ArrayList<>();
     boolean finishedTables = false;
     Connection conn = ConnectionFactory.createConnection(getConf());
     BackupSystemTable table = new BackupSystemTable(conn);
@@ -104,17 +103,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
       String mergedBackupId = findMostRecentBackupId(backupIds);
 
       TableName[] tableNames = getTableNamesInBackupImages(backupIds);
-      String backupRoot = null;
 
       BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
-      backupRoot = bInfo.getBackupRootDir();
+      String backupRoot = bInfo.getBackupRootDir();
 
       for (int i = 0; i < tableNames.length; i++) {
-
         LOG.info("Merge backup images for " + tableNames[i]);
 
         // Find input directories for table
-
         Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
         String dirs = StringUtils.join(dirPaths, ",");
         Path bulkOutputPath =
@@ -130,16 +126,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
         conf.set(bulkOutputConfKey, bulkOutputPath.toString());
         String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
 
-        int result = 0;
-
         player.setConf(getConf());
-        result = player.run(playerArgs);
+        int result = player.run(playerArgs);
         if (!succeeded(result)) {
           throw new IOException("Can not merge backup images for " + dirs
               + " (check Hadoop/MR and HBase logs). Player return code =" + 
result);
         }
         // Add to processed table list
-        processedTableList.add(new Pair<TableName, Path>(tableNames[i], bulkOutputPath));
+        processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
         LOG.debug("Merge Job finished:" + result);
       }
       List<TableName> tableList = toTableNameList(processedTableList);
@@ -184,7 +178,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   }
 
   protected List<Path> toPathList(List<Pair<TableName, Path>> processedTableList) {
-    ArrayList<Path> list = new ArrayList<Path>();
+    ArrayList<Path> list = new ArrayList<>();
     for (Pair<TableName, Path> p : processedTableList) {
       list.add(p.getSecond());
     }
@@ -192,7 +186,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   }
 
   protected List<TableName> toTableNameList(List<Pair<TableName, Path>> 
processedTableList) {
-    ArrayList<TableName> list = new ArrayList<TableName>();
+    ArrayList<TableName> list = new ArrayList<>();
     for (Pair<TableName, Path> p : processedTableList) {
       list.add(p.getFirst());
     }
@@ -201,7 +195,6 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
 
   protected void cleanupBulkLoadDirs(FileSystem fs, List<Path> pathList) 
throws IOException {
     for (Path path : pathList) {
-
       if (!fs.delete(path, true)) {
         LOG.warn("Can't delete " + path);
       }
@@ -210,18 +203,15 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
 
   protected void updateBackupManifest(String backupRoot, String mergedBackupId,
       List<String> backupsToDelete) throws IllegalArgumentException, 
IOException {
-
     BackupManifest manifest =
         HBackupFileSystem.getManifest(conf, new Path(backupRoot), 
mergedBackupId);
     manifest.getBackupImage().removeAncestors(backupsToDelete);
     // save back
     manifest.store(conf);
-
   }
 
   protected void deleteBackupImages(List<String> backupIds, Connection conn, 
FileSystem fs,
       String backupRoot) throws IOException {
-
     // Delete from backup system table
     try (BackupSystemTable table = new BackupSystemTable(conn)) {
       for (String backupId : backupIds) {
@@ -240,7 +230,7 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
   }
 
   protected List<String> getBackupIdsToDelete(String[] backupIds, String 
mergedBackupId) {
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     for (String id : backupIds) {
       if (id.equals(mergedBackupId)) {
         continue;
@@ -250,9 +240,8 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
     return list;
   }
 
-  protected void moveData(FileSystem fs, String backupRoot, Path 
bulkOutputPath, TableName tableName,
-      String mergedBackupId) throws IllegalArgumentException, IOException {
-
+  protected void moveData(FileSystem fs, String backupRoot, Path 
bulkOutputPath,
+          TableName tableName, String mergedBackupId) throws 
IllegalArgumentException, IOException {
     Path dest =
         new Path(HBackupFileSystem.getTableBackupDataDir(backupRoot, 
mergedBackupId, tableName));
 
@@ -267,7 +256,6 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
         fs.rename(fst.getPath().getParent(), dest);
       }
     }
-
   }
 
   protected String findMostRecentBackupId(String[] backupIds) {
@@ -282,8 +270,7 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
   }
 
   protected TableName[] getTableNamesInBackupImages(String[] backupIds) throws 
IOException {
-
-    Set<TableName> allSet = new HashSet<TableName>();
+    Set<TableName> allSet = new HashSet<>();
 
     try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable table = new BackupSystemTable(conn)) {
@@ -300,8 +287,7 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
 
   protected Path[] findInputDirectories(FileSystem fs, String backupRoot, 
TableName tableName,
       String[] backupIds) throws IOException {
-
-    List<Path> dirs = new ArrayList<Path>();
+    List<Path> dirs = new ArrayList<>();
 
     for (String backupId : backupIds) {
       Path fileBackupDirPath =
@@ -317,5 +303,4 @@ public class MapReduceBackupMergeJob implements 
BackupMergeJob {
     Path[] ret = new Path[dirs.size()];
     return dirs.toArray(ret);
   }
-
 }

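Editor's note: most of the churn in this file is mechanical cleanup, chiefly
replacing explicit generic type arguments with the Java 7 diamond operator and
declaring locals at first use. A minimal JDK-only sketch of the diamond idiom
(names here are illustrative, not from the patch):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondExample {
      public static void main(String[] args) {
        // Before Java 7 the type arguments had to be repeated on both sides:
        //   List<String> ids = new ArrayList<String>();
        // With the diamond operator the compiler infers them:
        List<String> ids = new ArrayList<>();
        ids.add("backup_1");

        // Inference also covers nested generics, as in the patch's maps:
        Map<String, List<String>> byTable = new HashMap<>();
        byTable.put("t1", ids);
        System.out.println(byTable);
      }
    }
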
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
index 47bf3f9..1a3c465 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
@@ -118,7 +118,7 @@ public class MapReduceHFileSplitterJob extends Configured 
implements Tool {
       try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(tableName);
           RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-          HFileOutputFormat2.configureIncrementalLoad(job, 
table.getDescriptor(), regionLocator);
+        HFileOutputFormat2.configureIncrementalLoad(job, 
table.getDescriptor(), regionLocator);
       }
       LOG.debug("success configuring load incremental job");
 

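Editor's note: the lone change in this file is indentation inside a
multi-resource try-with-resources block. As a reminder of the construct (a
JDK-only sketch, not HBase code): resources declared left to right are closed
automatically in reverse order, even when the body throws.

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    public class TryWithResourcesExample {
      public static void main(String[] args) throws IOException {
        // Pass a file path as the first argument. Both resources are closed
        // automatically: BufferedReader first, then FileReader, whether or
        // not readLine() throws.
        try (FileReader fr = new FileReader(args[0]);
            BufferedReader br = new BufferedReader(fr)) {
          System.out.println(br.readLine());
        }
      }
    }
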
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index e4b63f4..1256289 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -29,12 +29,11 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.RestoreJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.util.Tool;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-import org.apache.hadoop.util.Tool;
-
 
 /**
  * MapReduce implementation of {@link RestoreJob}
@@ -59,7 +58,6 @@ public class MapReduceRestoreJob implements RestoreJob {
   @Override
   public void run(Path[] dirPaths, TableName[] tableNames, TableName[] 
newTableNames,
       boolean fullBackupRestore) throws IOException {
-
     String bulkOutputConfKey;
 
     player = new MapReduceHFileSplitterJob();
@@ -77,7 +75,6 @@ public class MapReduceRestoreJob implements RestoreJob {
     }
 
     for (int i = 0; i < tableNames.length; i++) {
-
       LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]);
 
       Path bulkOutputPath =
@@ -85,14 +82,13 @@ public class MapReduceRestoreJob implements RestoreJob {
             getConf());
       Configuration conf = getConf();
       conf.set(bulkOutputConfKey, bulkOutputPath.toString());
-      String[] playerArgs =
-          {
-              dirs,
-              fullBackupRestore ? newTableNames[i].getNameAsString() : 
tableNames[i]
-                  .getNameAsString() };
-
-      int result = 0;
-      int loaderResult = 0;
+      String[] playerArgs = {
+        dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : 
tableNames[i]
+              .getNameAsString()
+      };
+
+      int result;
+      int loaderResult;
       try {
 
         player.setConf(getConf());
@@ -132,5 +128,4 @@ public class MapReduceRestoreJob implements RestoreJob {
   public void setConf(Configuration conf) {
     this.conf = conf;
   }
-
 }

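Editor's note: besides reflowing the array initializer, this hunk drops the
dummy "= 0" initializers so result and loaderResult are assigned only where
they receive their real values. With no initializer, javac's
definite-assignment analysis flags any path that reads the variable before
writing it. A small sketch of that effect (illustrative names):

    public class DefiniteAssignmentExample {
      public static void main(String[] args) {
        int result; // no dummy initializer
        if (args.length > 0) {
          result = args.length;
        } else {
          result = -1;
        }
        // If either branch above forgot to assign 'result', this line would
        // fail to compile: "variable result might not have been initialized".
        System.out.println(result);
      }
    }
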
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index a8ece39..bd13d6e 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -30,14 +30,14 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implementation of a log cleaner that checks if a log is still scheduled for 
incremental backup
@@ -83,7 +83,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
       return files;
     }
 
-    List<FileStatus> list = new ArrayList<FileStatus>();
+    List<FileStatus> list = new ArrayList<>();
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       // If we do not have recorded backup sessions
       try {
@@ -116,7 +116,7 @@ public class BackupLogCleaner extends 
BaseLogCleanerDelegate {
     } catch (IOException e) {
      LOG.error("Failed to get backup system table, therefore will keep 
all files", e);
       // nothing to delete
-      return new ArrayList<FileStatus>();
+      return new ArrayList<>();
     }
   }
 
@@ -143,5 +143,4 @@ public class BackupLogCleaner extends 
BaseLogCleanerDelegate {
   public boolean isStopped() {
     return this.stopped;
   }
-
 }

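Editor's note: the error path's contract is worth noticing here. When the
backup system table cannot be read, the cleaner returns an empty list, i.e.
"delete nothing", the safe default for a WAL cleaner. A minimal sketch of that
fail-safe shape (illustrative code, not the HBase API):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class FailSafeCleanerExample {
      // Returns the subset of files safe to delete; on any failure it
      // returns an empty list so the caller keeps everything.
      static List<String> getDeletableFiles(List<String> files) {
        try {
          List<String> deletable = new ArrayList<>();
          for (String f : files) {
            if (f.endsWith(".oldlog")) {
              deletable.add(f);
            }
          }
          return deletable;
        } catch (RuntimeException e) {
          return Collections.emptyList(); // keep all files on failure
        }
      }

      public static void main(String[] args) {
        List<String> files = Arrays.asList("wal.1.oldlog", "wal.2.active");
        System.out.println(getDeletableFiles(files)); // [wal.1.oldlog]
      }
    }
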
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
index 5c41a3b..486b991 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
@@ -24,12 +24,11 @@ import java.util.List;
 import java.util.concurrent.ThreadPoolExecutor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -39,12 +38,13 @@ import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-import org.apache.zookeeper.KeeperException;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
+
 /**
  * Master procedure manager for the coordinated cluster-wide WAL roll 
operation, which is run during
  * a backup operation; see {@link MasterProcedureManager} and {@link 
RegionServerProcedureManager}
@@ -57,7 +57,8 @@ public class LogRollMasterProcedureManager extends 
MasterProcedureManager {
   public static final String ROLLLOG_PROCEDURE_NAME = "rolllog";
   public static final String BACKUP_WAKE_MILLIS_KEY = 
"hbase.backup.logroll.wake.millis";
   public static final String BACKUP_TIMEOUT_MILLIS_KEY = 
"hbase.backup.logroll.timeout.millis";
-  public static final String BACKUP_POOL_THREAD_NUMBER_KEY = 
"hbase.backup.logroll.pool.thread.number";
+  public static final String BACKUP_POOL_THREAD_NUMBER_KEY =
+          "hbase.backup.logroll.pool.thread.number";
 
   public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500;
   public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000;
@@ -78,7 +79,7 @@ public class LogRollMasterProcedureManager extends 
MasterProcedureManager {
 
   @Override
   public void initialize(MasterServices master, MetricsMaster metricsMaster)
-      throws KeeperException, IOException, UnsupportedOperationException {
+      throws IOException, UnsupportedOperationException {
     this.master = master;
     this.done = false;
 
@@ -118,7 +119,7 @@ public class LogRollMasterProcedureManager extends 
MasterProcedureManager {
     // start the process on the RS
     ForeignExceptionDispatcher monitor = new 
ForeignExceptionDispatcher(desc.getInstance());
     List<ServerName> serverNames = 
master.getServerManager().getOnlineServersList();
-    List<String> servers = new ArrayList<String>();
+    List<String> servers = new ArrayList<>();
     for (ServerName sn : serverNames) {
       servers.add(sn.toString());
     }
@@ -161,8 +162,7 @@ public class LogRollMasterProcedureManager extends 
MasterProcedureManager {
   }
 
   @Override
-  public boolean isProcedureDone(ProcedureDescription desc) throws IOException 
{
+  public boolean isProcedureDone(ProcedureDescription desc) {
     return done;
   }
-
 }

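Editor's note: two things happen here besides import grouping: the long
config-key constant is wrapped, and initialize()/isProcedureDone() drop
exception types they never throw. Java permits an overriding method to declare
fewer checked exceptions than the method it overrides, so callers compiled
against the base type are unaffected. A compact sketch:

    import java.io.IOException;

    public class NarrowedThrowsExample {
      interface ProcedureManager {
        boolean isProcedureDone() throws IOException;
      }

      static class LogRollManager implements ProcedureManager {
        // Legal: an implementation may declare fewer (or no) checked
        // exceptions than the interface method it implements.
        @Override
        public boolean isProcedureDone() {
          return true;
        }
      }

      public static void main(String[] args) throws IOException {
        ProcedureManager m = new LogRollManager();
        // Through the interface, the call still requires handling IOException.
        System.out.println(m.isProcedureDone());
      }
    }
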
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
index 4353b46..575be39 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
@@ -52,7 +52,6 @@ public class LogRollBackupSubprocedure extends Subprocedure {
   public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember 
member,
       ForeignExceptionDispatcher errorListener, long wakeFrequency, long 
timeout,
       LogRollBackupSubprocedurePool taskManager, byte[] data) {
-
     super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, 
errorListener,
         wakeFrequency, timeout);
     LOG.info("Constructing a LogRollBackupSubprocedure.");
@@ -82,7 +81,10 @@ public class LogRollBackupSubprocedure extends Subprocedure {
       List<WAL> wals = rss.getWALs();
       long highest = -1;
       for (WAL wal : wals) {
-        if (wal == null) continue;
+        if (wal == null) {
+          continue;
+        }
+
         if (((AbstractFSWAL<?>) wal).getFilenum() > highest) {
           highest = ((AbstractFSWAL<?>) wal).getFilenum();
         }
@@ -109,7 +111,8 @@ public class LogRollBackupSubprocedure extends Subprocedure 
{
         String server = host + ":" + port;
         Long sts = serverTimestampMap.get(host);
         if (sts != null && sts > highest) {
-          LOG.warn("Won't update server's last roll log result: current=" + 
sts + " new=" + highest);
+          LOG.warn("Won't update server's last roll log result: current=" + 
sts + " new="
+                  + highest);
           return null;
         }
         // write the log number to backup system table.
@@ -131,11 +134,10 @@ public class LogRollBackupSubprocedure extends 
Subprocedure {
     // wait for everything to complete.
     taskManager.waitForOutstandingTasks();
     monitor.rethrowException();
-
   }
 
   @Override
-  public void acquireBarrier() throws ForeignException {
+  public void acquireBarrier() {
      // do nothing; execution happens inside the barrier step.
   }
 
@@ -163,5 +165,4 @@ public class LogRollBackupSubprocedure extends Subprocedure 
{
   public void releaseBarrier() {
     // NO OP
   }
-
 }

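Editor's note: the substantive edits in this file add braces to a
single-statement if and rewrap two long lines; presumably the usual checkstyle
NeedBraces cleanup (an assumption, since the commit message is not shown). The
braced form of the WAL filenum loop, sketched with plain longs:

    public class BracedLoopExample {
      public static void main(String[] args) {
        long[] filenums = {3, -1, 7};
        long highest = -1;
        for (long num : filenums) {
          if (num < 0) {
            continue; // braces even for a one-line body
          }
          if (num > highest) {
            highest = num;
          }
        }
        System.out.println(highest); // prints 7
      }
    }
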
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
index 3363638..0a05157 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
@@ -32,10 +32,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
 
 /**
  * Handle running each of the individual tasks for completing a backup 
procedure on a region
@@ -52,7 +52,7 @@ public class LogRollBackupSubprocedurePool implements 
Closeable, Abortable {
   private final ExecutorCompletionService<Void> taskPool;
   private final ThreadPoolExecutor executor;
   private volatile boolean aborted;
-  private final List<Future<Void>> futures = new ArrayList<Future<Void>>();
+  private final List<Future<Void>> futures = new ArrayList<>();
   private final String name;
 
   public LogRollBackupSubprocedurePool(String name, Configuration conf) {
@@ -64,9 +64,9 @@ public class LogRollBackupSubprocedurePool implements 
Closeable, Abortable {
     this.name = name;
     executor =
         new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS,
-            new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs(" 
+ name
+            new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs(" + name
                 + ")-backup-pool"));
-    taskPool = new ExecutorCompletionService<Void>(executor);
+    taskPool = new ExecutorCompletionService<>(executor);
   }
 
   /**

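Editor's note: the diamond operator applies to the pool's nested generics as
well. For context, an ExecutorCompletionService decouples task submission from
retrieval: take() hands back futures in completion order, which is what lets
the subprocedure pool wait on outstanding tasks. A self-contained JDK sketch:

    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class CompletionServiceExample {
      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        // Diamond inference works for the completion service's type too:
        ExecutorCompletionService<Integer> taskPool =
            new ExecutorCompletionService<>(executor);

        taskPool.submit(() -> 40 + 2);
        taskPool.submit(() -> 7 * 6);

        // take() blocks until some submitted task finishes, yielding
        // results in completion order rather than submission order.
        for (int i = 0; i < 2; i++) {
          System.out.println(taskPool.take().get()); // 42, twice
        }
        executor.shutdown();
      }
    }
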
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
index 82d9dcf..6c743a8 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.procedure.ProcedureMember;
 import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
@@ -35,6 +34,7 @@ import 
org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -52,7 +52,6 @@ import org.slf4j.LoggerFactory;
  */
 @InterfaceAudience.Private
 public class LogRollRegionServerProcedureManager extends 
RegionServerProcedureManager {
-
   private static final Logger LOG =
       LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class);
 
@@ -120,7 +119,6 @@ public class LogRollRegionServerProcedureManager extends 
RegionServerProcedureMa
   * @return Subprocedure to submit to the ProcedureMember.
    */
   public Subprocedure buildSubprocedure(byte[] data) {
-
     // don't run a backup if the parent is stop(ping)
     if (rss.isStopping() || rss.isStopped()) {
       throw new IllegalStateException("Can't start backup procedure on RS: " + 
rss.getServerName()
@@ -138,14 +136,12 @@ public class LogRollRegionServerProcedureManager extends 
RegionServerProcedureMa
         new LogRollBackupSubprocedurePool(rss.getServerName().toString(), 
conf);
     return new LogRollBackupSubprocedure(rss, member, errorDispatcher, 
wakeMillis, timeoutMillis,
         taskManager, data);
-
   }
 
   /**
    * Build the actual backup procedure runner that will do all the 'hard' work
    */
   public class BackupSubprocedureBuilder implements SubprocedureFactory {
-
     @Override
     public Subprocedure buildSubprocedure(String name, byte[] data) {
       return LogRollRegionServerProcedureManager.this.buildSubprocedure(data);
@@ -178,5 +174,4 @@ public class LogRollRegionServerProcedureManager extends 
RegionServerProcedureMa
   public String getProcedureSignature() {
     return "backup-proc";
   }
-
 }

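Editor's note: one pattern worth flagging from this last file:
BackupSubprocedureBuilder is a small inner factory that delegates construction
back to the enclosing manager via the qualified
LogRollRegionServerProcedureManager.this call. A stripped-down sketch of that
delegation (illustrative names, not the HBase types):

    public class FactoryDelegationExample {
      interface TaskFactory {
        Runnable buildTask(String name);
      }

      // Inner class: delegates back to the enclosing instance, mirroring
      // BackupSubprocedureBuilder's call through Outer.this.
      class Builder implements TaskFactory {
        @Override
        public Runnable buildTask(String name) {
          return FactoryDelegationExample.this.newTask(name);
        }
      }

      Runnable newTask(String name) {
        return () -> System.out.println("running " + name);
      }

      public static void main(String[] args) {
        FactoryDelegationExample manager = new FactoryDelegationExample();
        TaskFactory factory = manager.new Builder();
        factory.buildTask("rolllog").run();
      }
    }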