HBASE-16940 Address review of "Backup/Restore (HBASE-7912, HBASE-14030, HBASE-14123) mega patch" posted on RB - patch v2 (Vladimir Rodionov)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff68ba06
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff68ba06
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff68ba06

Branch: refs/heads/HBASE-7912
Commit: ff68ba0650873d39021cb2f1927ab72d99215a87
Parents: 0c59d23
Author: tedyu <[email protected]>
Authored: Tue Dec 6 10:41:23 2016 -0800
Committer: tedyu <[email protected]>
Committed: Tue Dec 6 10:41:23 2016 -0800

----------------------------------------------------------------------
 .../hbase/IntegrationTestBackupRestore.java        |   4 +-
 .../apache/hadoop/hbase/backup/BackupAdmin.java    |   2 +-
 .../apache/hadoop/hbase/backup/BackupInfo.java     |  10 +-
 .../hadoop/hbase/backup/HBackupFileSystem.java     |  15 +-
 .../hadoop/hbase/backup/RestoreDriver.java         |   4 +-
 .../hbase/backup/impl/BackupAdminImpl.java         | 556 +++++++++++++++++++
 .../hbase/backup/impl/BackupCommands.java          |  14 +-
 .../hadoop/hbase/backup/impl/BackupManager.java    |  62 +--
 .../hbase/backup/impl/BackupManifest.java          | 247 ++++----
 .../hbase/backup/impl/BackupSystemTable.java       |   4 +-
 .../backup/impl/BackupSystemTableHelper.java       |   3 +-
 .../backup/impl/FullTableBackupClient.java         | 357 +-----------
 .../hbase/backup/impl/HBaseBackupAdmin.java        | 556 -------------------
 .../backup/impl/IncrementalBackupManager.java      |  40 +-
 .../impl/IncrementalTableBackupClient.java         |  43 +-
 .../hbase/backup/impl/RestoreTablesClient.java     |   5 -
 .../hbase/backup/impl/TableBackupClient.java       | 386 +++++++++++++
 .../mapreduce/MapReduceBackupCopyTask.java         |  17 +-
 .../master/LogRollMasterProcedureManager.java      |   7 +
 .../regionserver/LogRollBackupSubprocedure.java    |   4 +-
 .../LogRollRegionServerProcedureManager.java       |   3 +-
 .../hbase/backup/util/RestoreServerUtil.java       |   2 +-
 .../procedure/ZKProcedureCoordinatorRpcs.java      |   4 +-
 .../hbase/regionserver/HRegionServer.java          |  15 +-
 .../hadoop/hbase/backup/TestBackupBase.java        |   6 +-
 .../hbase/backup/TestBackupCommandLineTool.java    |  79 +--
 .../hbase/backup/TestBackupMultipleDeletes.java    |   4 +-
 .../hbase/backup/TestIncrementalBackup.java        |   4 +-
 .../TestIncrementalBackupDeleteTable.java          |   4 +-
 29 files changed, 1210 insertions(+), 1247 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
index da197f1..3871b03 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.backup.BackupRequest;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -162,7 +162,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
     List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
HBaseAdmin admin = null; admin = (HBaseAdmin) conn.getAdmin(); - BackupAdmin client = new HBaseBackupAdmin(util.getConnection()); + BackupAdmin client = new BackupAdminImpl(util.getConnection()); BackupRequest request = new BackupRequest(); request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java index ffafb97..dbdb981 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; /** * The administrative API for HBase Backup. Construct an instance from - * {@link HBaseBackupAdmin(Connection)} and call {@link #close()} afterwards. + * {@link BackupAdminImpl(Connection)} and call {@link #close()} afterwards. * <p>BackupAdmin can be used to create backups, restore data from backups and for * other backup-related operations. * http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index 45981f9..834245f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -176,7 +176,7 @@ public class BackupInfo implements Comparable<BackupInfo> { this.addTables(tables); if (type == BackupType.INCREMENTAL) { - setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); + setHLogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); } this.startTs = 0; @@ -220,10 +220,6 @@ public class BackupInfo implements Comparable<BackupInfo> { this.tableSetTimestampMap = tableSetTimestampMap; } - public String getHlogTargetDir() { - return hlogTargetDir; - } - public void setType(BackupType type) { this.type = type; } @@ -355,7 +351,7 @@ public class BackupInfo implements Comparable<BackupInfo> { return targetRootDir; } - public void setHlogTargetDir(String hlogTagetDir) { + public void setHLogTargetDir(String hlogTagetDir) { this.hlogTargetDir = hlogTagetDir; } @@ -488,7 +484,7 @@ public class BackupInfo implements Comparable<BackupInfo> { context.setState(BackupInfo.BackupState.valueOf(proto.getState().name())); } - context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), + context.setHLogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), proto.getBackupId())); if (proto.hasPhase()) { http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index a130a9b..08145fb 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -66,7 +66,7 @@ public class HBackupFileSystem { */ public static String getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { - return backupRootDir + Path.SEPARATOR+ backupId + Path.SEPARATOR + + return backupRootDir + Path.SEPARATOR+ backupId + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + Path.SEPARATOR ; } @@ -84,8 +84,8 @@ public class HBackupFileSystem { return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName)); } - - public static List<HRegionInfo> loadRegionInfos(TableName tableName, + + public static List<HRegionInfo> loadRegionInfos(TableName tableName, Path backupRootPath, String backupId, Configuration conf) throws IOException { Path backupTableRoot = getTableBackupPath(tableName, backupRootPath, backupId); @@ -100,11 +100,11 @@ public class HBackupFileSystem { infos.add(info); } } - + Collections.sort(infos); return infos; } - + /** * Given the backup root dir and the backup id, return the log file location for an incremental * backup. @@ -125,6 +125,7 @@ Path backupRootPath, String backupId) throws IOException { Path manifestPath = new Path(getTableBackupPath(tableName, backupRootPath, backupId), BackupManifest.MANIFEST_FILE_NAME); + FileSystem fs = backupRootPath.getFileSystem(conf); if (!fs.exists(manifestPath)) { // check log dir for incremental backup case @@ -134,8 +135,8 @@ if (!fs.exists(manifestPath)) { String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " + - backupId + " in " + backupRootPath.toString() + - ". Did " + backupId + " correspond to previously taken backup ?"; + backupId + ". File " + manifestPath + + " does not exist. Did " + backupId + " correspond to a previously taken backup?"; throw new IOException(errorMsg); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index 6b04944..1ca512e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; import org.apache.hadoop.hbase.backup.util.LogUtils; import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; @@ -110,7 +110,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon String tableMapping = cmd.hasOption(OPTION_TABLE_MAPPING) ?
cmd.getOptionValue(OPTION_TABLE_MAPPING) : null; try (final Connection conn = ConnectionFactory.createConnection(conf); - BackupAdmin client = new HBaseBackupAdmin(conn);) { + BackupAdmin client = new BackupAdminImpl(conn);) { // Check backup set if (cmd.hasOption(OPTION_SET)) { String setName = cmd.getOptionValue(OPTION_SET); http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java new file mode 100644 index 0000000..0e094d5 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -0,0 +1,556 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupAdmin; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import com.google.common.collect.Lists; + +/** + * The administrative API implementation for HBase Backup . Create an instance from + * {@link BackupAdminImpl(Connection)} and call {@link #close()} afterwards. + * <p>BackupAdmin can be used to create backups, restore data from backups and for + * other backup-related operations. 
+ * + * @see Admin + * @since 2.0 + */ [email protected] [email protected] + +public class BackupAdminImpl implements BackupAdmin { + private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class); + + private final Connection conn; + + public BackupAdminImpl(Connection conn) { + this.conn = conn; + } + + @Override + public void close() throws IOException { + if (conn != null) { + conn.close(); + } + } + + @Override + public BackupInfo getBackupInfo(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + backupInfo = table.readBackupInfo(backupId); + return backupInfo; + } + } + + @Override + public int getProgress(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (backupId == null) { + ArrayList<BackupInfo> recentSessions = table.getBackupContexts(BackupState.RUNNING); + if (recentSessions.isEmpty()) { + LOG.warn("No ongoing sessions found."); + return -1; + } + // else show status for ongoing session + // must be one maximum + return recentSessions.get(0).getProgress(); + } else { + + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + return backupInfo.getProgress(); + } else { + LOG.warn("No information found for backupID=" + backupId); + return -1; + } + } + } + } + + @Override + public int deleteBackups(String[] backupIds) throws IOException { + // TODO: requires FT, failure will leave system + // in non-consistent state + // see HBASE-15227 + + int totalDeleted = 0; + Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>(); + + try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + for (int i = 0; i < backupIds.length; i++) { + BackupInfo info = sysTable.readBackupInfo(backupIds[i]); + if (info != null) { + String rootDir = info.getTargetRootDir(); + HashSet<TableName> allTables = allTablesMap.get(rootDir); + if (allTables == null) { + allTables = new HashSet<TableName>(); + allTablesMap.put(rootDir, allTables); + } + allTables.addAll(info.getTableNames()); + totalDeleted += deleteBackup(backupIds[i], sysTable); + } + } + finalizeDelete(allTablesMap, sysTable); + } + return totalDeleted; + } + + /** + * Updates incremental backup set for every backupRoot + * @param tablesMap - Map [backupRoot: Set<TableName>] + * @param table - backup system table + * @throws IOException + */ + + private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table) + throws IOException { + for (String backupRoot : tablesMap.keySet()) { + Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot); + Map<TableName, ArrayList<BackupInfo>> tableMap = + table.getBackupHistoryForTableSet(incrTableSet, backupRoot); + for(Map.Entry<TableName, ArrayList<BackupInfo>> entry: tableMap.entrySet()) { + if(entry.getValue() == null) { + // No more backups for a table + incrTableSet.remove(entry.getKey()); + } + } + if (!incrTableSet.isEmpty()) { + table.addIncrementalBackupTableSet(incrTableSet, backupRoot); + } else { // empty + table.deleteIncrementalBackupTableSet(backupRoot); + } + } + } + + /** + * Delete single backup and all related backups + * Algorithm: + * + * Backup type: FULL or INCREMENTAL + * Is this last backup session for table T: YES or NO + * For every table T from table list 'tables': + * if(FULL, YES) deletes only physical data (PD) + * if(FULL, NO), deletes PD, scans all newer backups and 
removes T from backupInfo, until + * we either reach the most recent backup for T in the system or FULL backup which + * includes T + * if(INCREMENTAL, YES) deletes only physical data (PD) + * if(INCREMENTAL, NO) deletes physical data and for table T scans all backup images + * between last FULL backup, which is older than the backup being deleted and the next + * FULL backup (if exists) or last one for a particular table T and removes T from list + * of backup tables. + * @param backupId - backup id + * @param sysTable - backup system table + * @return total - number of deleted backup images + * @throws IOException + */ + private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException { + + BackupInfo backupInfo = sysTable.readBackupInfo(backupId); + + int totalDeleted = 0; + if (backupInfo != null) { + LOG.info("Deleting backup " + backupInfo.getBackupId() + " ..."); + BackupClientUtil.cleanupBackupData(backupInfo, conn.getConfiguration()); + // List of tables in this backup; + List<TableName> tables = backupInfo.getTableNames(); + long startTime = backupInfo.getStartTs(); + for (TableName tn : tables) { + boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime); + if (isLastBackupSession) { + continue; + } + // else + List<BackupInfo> affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable); + for (BackupInfo info : affectedBackups) { + if (info.equals(backupInfo)) { + continue; + } + removeTableFromBackupImage(info, tn, sysTable); + } + } + LOG.debug("Delete backup info "+ backupInfo.getBackupId()); + + sysTable.deleteBackupInfo(backupInfo.getBackupId()); + LOG.info("Delete backup " + backupInfo.getBackupId() + " completed."); + totalDeleted++; + } else { + LOG.warn("Delete backup failed: no information found for backupID=" + backupId); + } + return totalDeleted; + } + + private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) + throws IOException { + List<TableName> tables = info.getTableNames(); + LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" + + info.getTableListAsString()); + if (tables.contains(tn)) { + tables.remove(tn); + + if (tables.isEmpty()) { + LOG.debug("Delete backup info "+ info.getBackupId()); + + sysTable.deleteBackupInfo(info.getBackupId()); + BackupClientUtil.cleanupBackupData(info, conn.getConfiguration()); + } else { + info.setTables(tables); + sysTable.updateBackupInfo(info); + // Now, clean up directory for table + cleanupBackupDir(info, tn, conn.getConfiguration()); + } + } + } + + private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn, + BackupSystemTable table) throws IOException { + LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn); + long ts = backupInfo.getStartTs(); + List<BackupInfo> list = new ArrayList<BackupInfo>(); + List<BackupInfo> history = table.getBackupHistory(backupInfo.getTargetRootDir()); + // Scan from most recent to backupInfo + // break when backupInfo reached + for (BackupInfo info : history) { + if (info.getStartTs() == ts) { + break; + } + List<TableName> tables = info.getTableNames(); + if (tables.contains(tn)) { + BackupType bt = info.getType(); + if (bt == BackupType.FULL) { + // Clear list if we encounter FULL backup + list.clear(); + } else { + LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn + + " added " + info.getBackupId() + " tables=" + info.getTableListAsString()); + list.add(info); + } + } + } + return list; + 
} + + + + /** + * Clean up the data at target directory + * @throws IOException + */ + private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf) + throws IOException { + try { + // clean up the data at target directory + String targetDir = backupInfo.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupInfo.getBackupId()); + return; + } + + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); + + Path targetDirPath = + new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(), + backupInfo.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table + + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + throw e1; + } + } + + private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) + throws IOException { + List<BackupInfo> history = table.getBackupHistory(); + for (BackupInfo info : history) { + List<TableName> tables = info.getTableNames(); + if (!tables.contains(tn)) { + continue; + } + if (info.getStartTs() <= startTime) { + return true; + } else { + return false; + } + } + return false; + } + + @Override + public List<BackupInfo> getHistory(int n) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<BackupInfo> history = table.getBackupHistory(); + if (history.size() <= n) return history; + List<BackupInfo> list = new ArrayList<BackupInfo>(); + for (int i = 0; i < n; i++) { + list.add(history.get(i)); + } + return list; + } + } + + @Override + public List<BackupInfo> getHistory(int n, BackupInfo.Filter ... 
filters) throws IOException { + if (filters.length == 0) return getHistory(n); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<BackupInfo> history = table.getBackupHistory(); + List<BackupInfo> result = new ArrayList<BackupInfo>(); + for(BackupInfo bi: history) { + if(result.size() == n) break; + boolean passed = true; + for(int i=0; i < filters.length; i++) { + if(!filters[i].apply(bi)) { + passed = false; + break; + } + } + if(passed) { + result.add(bi); + } + } + return result; + } + } + + @Override + public List<BackupSet> listBackupSets() throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<String> list = table.listBackupSets(); + List<BackupSet> bslist = new ArrayList<BackupSet>(); + for (String s : list) { + List<TableName> tables = table.describeBackupSet(s); + if (tables != null) { + bslist.add(new BackupSet(s, tables)); + } + } + return bslist; + } + } + + @Override + public BackupSet getBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<TableName> list = table.describeBackupSet(name); + if (list == null) return null; + return new BackupSet(name, list); + } + } + + @Override + public boolean deleteBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (table.describeBackupSet(name) == null) { + return false; + } + table.deleteBackupSet(name); + return true; + } + } + + @Override + public void addToBackupSet(String name, TableName[] tables) throws IOException { + String[] tableNames = new String[tables.length]; + try (final BackupSystemTable table = new BackupSystemTable(conn); + final Admin admin = conn.getAdmin();) { + for (int i = 0; i < tables.length; i++) { + tableNames[i] = tables[i].getNameAsString(); + if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { + throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist"); + } + } + table.addToBackupSet(name, tableNames); + LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + + "' backup set"); + } + } + + @Override + public void removeFromBackupSet(String name, String[] tables) throws IOException { + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + table.removeFromBackupSet(name, tables); + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + + "' completed."); + } + } + + @Override + public void restore(RestoreRequest request) throws IOException { + if (request.isCheck()) { + HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(request.getBackupRootDir()); + String backupId = request.getBackupId(); + TableName[] sTableArray = request.getFromTables(); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, + sTableArray, conn.getConfiguration(), rootPath, backupId); + + // Check and validate the backup image and its dependencies + + if (RestoreServerUtil.validate(backupManifestMap, conn.getConfiguration())) { + LOG.info("Checking backup images: ok"); + } else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + + } + // Execute restore request + new RestoreTablesClient(conn, request).execute(); + } + + @Override + public Future<Void> 
restoreAsync(RestoreRequest request) throws IOException { + throw new UnsupportedOperationException("Asynchronous restore is not supported yet"); + } + + @Override + public String backupTables(final BackupRequest request) throws IOException { + String setName = request.getBackupSetName(); + BackupType type = request.getBackupType(); + String targetRootDir = request.getTargetRootDir(); + List<TableName> tableList = request.getTableList(); + + String backupId = + (setName == null || setName.length() == 0 ? BackupRestoreConstants.BACKUPID_PREFIX + : setName + "_") + EnvironmentEdgeManager.currentTime(); + if (type == BackupType.INCREMENTAL) { + Set<TableName> incrTableSet = null; + try (BackupSystemTable table = new BackupSystemTable(conn)) { + incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); + } + + if (incrTableSet.isEmpty()) { + System.err.println("Incremental backup table set contains no table.\n" + + "Use 'backup create full' or 'backup stop' to \n " + + "change the tables covered by incremental backup."); + throw new IOException("No table covered by incremental backup."); + } + + tableList.removeAll(incrTableSet); + if (!tableList.isEmpty()) { + String extraTables = StringUtils.join(tableList, ","); + System.err.println("Some tables (" + extraTables + ") haven't gone through full backup"); + throw new IOException("Perform full backup on " + extraTables + " first, " + + "then retry the command"); + } + System.out.println("Incremental backup for the following table set: " + incrTableSet); + tableList = Lists.newArrayList(incrTableSet); + } + if (tableList != null && !tableList.isEmpty()) { + for (TableName table : tableList) { + String targetTableBackupDir = + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + Path targetTableBackupDirPath = new Path(targetTableBackupDir); + FileSystem outputFs = + FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration()); + if (outputFs.exists(targetTableBackupDirPath)) { + throw new IOException("Target backup directory " + targetTableBackupDir + + " exists already."); + } + } + ArrayList<TableName> nonExistingTableList = null; + try (Admin admin = conn.getAdmin();) { + for (TableName tableName : tableList) { + if (!admin.tableExists(tableName)) { + if (nonExistingTableList == null) { + nonExistingTableList = new ArrayList<>(); + } + nonExistingTableList.add(tableName); + } + } + } + if (nonExistingTableList != null) { + if (type == BackupType.INCREMENTAL) { + System.err.println("Incremental backup table set contains non-existing table: " + + nonExistingTableList); + // Update incremental backup set + tableList = excludeNonExistingTables(tableList, nonExistingTableList); + } else { + // Throw exception only in full mode - we try to backup non-existing table + throw new IOException("Non-existing tables found in the table list: " + + nonExistingTableList); + } + } + } + + // update table list + request.setTableList(tableList); + + if (type == BackupType.FULL) { + new FullTableBackupClient(conn, backupId, request).execute(); + } else { + new IncrementalTableBackupClient(conn, backupId, request).execute(); + } + return backupId; + } + + + private List<TableName> excludeNonExistingTables(List<TableName> tableList, + List<TableName> nonExistingTableList) { + + for (TableName table : nonExistingTableList) { + tableList.remove(table); + } + return tableList; + } + + @Override + public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException { + throw new
UnsupportedOperationException("Asynchronous backup is not supported yet"); + } + +} http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index 436f419..a1f3e25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -229,7 +229,7 @@ public final class BackupCommands implements BackupRestoreConstants { Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS)) : -1; try (Connection conn = ConnectionFactory.createConnection(getConf()); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupAdminImpl admin = new BackupAdminImpl(conn);) { BackupRequest request = new BackupRequest(); request.setBackupType(BackupType.valueOf(args[1].toUpperCase())) .setTableList(tables != null?Lists.newArrayList(BackupClientUtil.parseTableNames(tables)): null) @@ -461,7 +461,7 @@ public final class BackupCommands implements BackupRestoreConstants { System.arraycopy(args, 1, backupIds, 0, backupIds.length); Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupAdminImpl admin = new BackupAdminImpl(conn);) { int deleted = admin.deleteBackups(args); System.out.println("Deleted " + deleted + " backups. Total requested: " + args.length); } @@ -491,7 +491,7 @@ public final class BackupCommands implements BackupRestoreConstants { } Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupAdminImpl admin = new BackupAdminImpl(conn);) { // TODO cancel backup } } @@ -666,7 +666,7 @@ public final class BackupCommands implements BackupRestoreConstants { // does not expect any args Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + BackupAdminImpl admin = new BackupAdminImpl(conn);){ List<BackupSet> list = admin.listBackupSets(); for(BackupSet bs: list){ System.out.println(bs); @@ -701,7 +701,7 @@ public final class BackupCommands implements BackupRestoreConstants { String setName = args[2]; Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + final BackupAdminImpl admin = new BackupAdminImpl(conn);){ boolean result = admin.deleteBackupSet(setName); if(result){ System.out.println("Delete set "+setName+" OK."); @@ -721,7 +721,7 @@ public final class BackupCommands implements BackupRestoreConstants { String[] tables = args[3].split(","); Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + final BackupAdminImpl admin = new BackupAdminImpl(conn);){ admin.removeFromBackupSet(setName, tables); } } @@ -739,7 +739,7 @@ public final class BackupCommands implements BackupRestoreConstants { } Configuration conf = getConf() != null? getConf():HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + final BackupAdminImpl admin = new BackupAdminImpl(conn);){ admin.addToBackupSet(setName, tableNames); } http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index af715d4..10afd65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -25,10 +25,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -52,8 +48,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - /** * Handles backup requests on server-side, creates backup context records in hbase:backup * to keep track backup. The timestamps kept in hbase:backup table will be used for future @@ -63,11 +57,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; @InterfaceStability.Evolving public class BackupManager implements Closeable { private static final Log LOG = LogFactory.getLog(BackupManager.class); - private Configuration conf = null; - private BackupInfo backupContext = null; - private ExecutorService pool = null; - private BackupSystemTable systemTable; - private final Connection conn; + protected Configuration conf = null; + protected BackupInfo backupContext = null; + protected BackupSystemTable systemTable; + protected final Connection conn; /** * Backup manager constructor. @@ -173,11 +166,7 @@ public class BackupManager implements Closeable { */ @Override public void close() { - // currently, we shutdown now for all ongoing back handlers, we may need to do something like - // record the failed list somewhere later - if (this.pool != null) { - this.pool.shutdownNow(); - } + if (systemTable != null) { try { systemTable.close(); @@ -185,13 +174,6 @@ public class BackupManager implements Closeable { LOG.error(e); } } - if (conn != null) { - try { - conn.close(); - } catch (IOException e) { - LOG.error(e); - } - } } /** @@ -270,15 +252,6 @@ public class BackupManager implements Closeable { + ". 
Can not launch new backup until no ongoing backup remains."); throw new BackupException("There is ongoing backup."); } - - // Initialize thread pools - int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1); - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setNameFormat("BackupHandler-%1$d"); - this.pool = - new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue<Runnable>(), builder.build()); - ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); } public void setBackupContext(BackupInfo backupContext) { @@ -309,11 +282,14 @@ public class BackupManager implements Closeable { ArrayList<BackupInfo> allHistoryList = getBackupHistory(true); for (BackupInfo backup : allHistoryList) { - BackupImage image = - new BackupImage(backup.getBackupId(), backup.getType(), - backup.getTargetRootDir(), - backup.getTableNames(), backup.getStartTs(), backup - .getEndTs()); + + BackupImage.Builder builder = BackupImage.newBuilder(); + + BackupImage image = builder.withBackupId(backup.getBackupId()). + withType(backup.getType()).withRootDir(backup.getTargetRootDir()). + withTableList(backup.getTableNames()).withStartTime(backup.getStartTs()). + withCompleteTime(backup.getEndTs()).build(); + // add the full backup image as an ancestor until the last incremental backup if (backup.getType().equals(BackupType.FULL)) { // check the backup image coverage, if previous image could be covered by the newer ones, @@ -328,10 +304,9 @@ public class BackupManager implements Closeable { // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing // incremental backup if (BackupManifest.canCoverImage(ancestors, image)) { - LOG.debug("Met the backup boundary of the current table set. " - + "The root full backup images for the current backup scope:"); + LOG.debug("Met the backup boundary of the current table set:"); for (BackupImage image1 : ancestors) { - LOG.debug(" BackupId: " + image1.getBackupId() + ", Backup directory: " + LOG.debug(" BackupID=" + image1.getBackupId() + ", BackupDir=" + image1.getRootDir()); } } else { @@ -345,9 +320,10 @@ public class BackupManager implements Closeable { BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); ancestors.add(lastIncrImage); - LOG.debug("Last dependent incremental backup image information:"); - LOG.debug(" Token: " + lastIncrImage.getBackupId()); - LOG.debug(" Backup directory: " + lastIncrImage.getRootDir()); + LOG.debug("Last dependent incremental backup image: " + + "{BackupID=" + lastIncrImage.getBackupId()+"," + + "BackupDir=" + lastIncrImage.getRootDir()+"}" + ); } } } http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 9ea9f9d..00f55b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; @@ -66,6 +65,49 @@ public class BackupManifest { public static class BackupImage implements Comparable<BackupImage> { + static class Builder { + BackupImage image; + + Builder() { + image = new BackupImage(); + } + + Builder withBackupId(String backupId) { + image.setBackupId(backupId); + return this; + } + + Builder withType(BackupType type) { + image.setType(type); + return this; + } + + Builder withRootDir(String rootDir) { + image.setRootDir(rootDir); + return this; + } + + Builder withTableList(List<TableName> tableList) { + image.setTableList(tableList); + return this; + } + + Builder withStartTime(long startTime) { + image.setStartTs(startTime); + return this; + } + + Builder withCompleteTime(long completeTime) { + image.setCompleteTs(completeTime); + return this; + } + + BackupImage build() { + return image; + } + + } + private String backupId; private BackupType type; private String rootDir; @@ -74,12 +116,16 @@ public class BackupManifest { private long completeTs; private ArrayList<BackupImage> ancestors; private HashMap<TableName, HashMap<String, Long>> incrTimeRanges; - + + static Builder newBuilder() { + return new Builder(); + } + public BackupImage() { super(); } - public BackupImage(String backupId, BackupType type, String rootDir, + private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList, long startTs, long completeTs) { this.backupId = backupId; this.type = type; @@ -99,9 +145,9 @@ public class BackupManifest { for(HBaseProtos.TableName tn : tableListList) { tableList.add(ProtobufUtil.toTableName(tn)); } - + List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList(); - + BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL: BackupType.INCREMENTAL; @@ -135,17 +181,17 @@ public class BackupManifest { builder.addAncestors(im.toProto()); } } - - setIncrementalTimestampMap(builder); + + setIncrementalTimestampMap(builder); return builder.build(); } - - private static HashMap<TableName, HashMap<String, Long>> + + private static HashMap<TableName, HashMap<String, Long>> loadIncrementalTimestampMap(BackupProtos.BackupImage proto) { List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList(); - - HashMap<TableName, HashMap<String, Long>> incrTimeRanges = + + HashMap<TableName, HashMap<String, Long>> incrTimeRanges = new HashMap<TableName, HashMap<String, Long>>(); if(list == null || list.size() == 0) return incrTimeRanges; for(BackupProtos.TableServerTimestamp tst: list){ @@ -164,7 +210,7 @@ public class BackupManifest { return incrTimeRanges; } - + private void setIncrementalTimestampMap(BackupProtos.BackupImage.Builder builder) { if (this.incrTimeRanges == null) { return; @@ -183,14 +229,14 @@ public class BackupManifest { ServerName sn = ServerName.parseServerName(s); snBuilder.setHostName(sn.getHostname()); snBuilder.setPort(sn.getPort()); - stBuilder.setServer(snBuilder.build()); + stBuilder.setServer(snBuilder.build()); stBuilder.setTimestamp(entry2.getValue()); tstBuilder.addServerTimestamp(stBuilder.build()); } builder.addTstMap(tstBuilder.build()); } - } - + } + public String getBackupId() { return backupId; } @@ -312,86 +358,44 @@ public class BackupManifest { } } - // hadoop hbase configuration - protected Configuration config = null; - - // backup root directory - private String rootDir = null; - // backup image directory private String tableBackupDir = null; - - // backup log directory if this is an incremental backup - private String logBackupDir = null; - - // backup token - private String backupId; - - // backup type, full or incremental - private BackupType type; - - // the table list for the backup - private ArrayList<TableName> tableList; - - // actual start timestamp of the backup process - private long startTs; - - // actual complete timestamp of the backup process - private long completeTs; - - // the region server timestamp for tables: - // <table, <rs, timestamp>> - private Map<TableName, HashMap<String, Long>> incrTimeRanges; - - // dependency of this backup, including all the dependent images to do PIT recovery - //private Map<String, BackupImage> dependency; private BackupImage backupImage; - + /** * Construct manifest for a ongoing backup. - * @param backupCtx The ongoing backup context + * @param backup The ongoing backup info */ - public BackupManifest(BackupInfo backupCtx) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); - this.loadTableList(backupCtx.getTableNames()); - this.backupImage = new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, - this.completeTs); + public BackupManifest(BackupInfo backup) { + + BackupImage.Builder builder = BackupImage.newBuilder(); + this.backupImage = builder.withBackupId(backup.getBackupId()). + withType(backup.getType()).withRootDir(backup.getTargetRootDir()). + withTableList(backup.getTableNames()).withStartTime(backup.getStartTs()). 
+ withCompleteTime(backup.getEndTs()).build(); } - - + + /** * Construct a table level manifest for a backup of the named table. - * @param backupCtx The ongoing backup context + * @param backup The ongoing backup session info */ - public BackupManifest(BackupInfo backupCtx, TableName table) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); + public BackupManifest(BackupInfo backup, TableName table) { + this.tableBackupDir = backup.getBackupStatus(table).getTargetDir(); List<TableName> tables = new ArrayList<TableName>(); tables.add(table); - this.loadTableList(tables); - this.backupImage = new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, - this.completeTs); + BackupImage.Builder builder = BackupImage.newBuilder(); + this.backupImage = builder.withBackupId(backup.getBackupId()). + withType(backup.getType()).withRootDir(backup.getTargetRootDir()). + withTableList(tables).withStartTime(backup.getStartTs()). + withCompleteTime(backup.getEndTs()).build(); } /** * Construct manifest from a backup directory. * @param conf configuration * @param backupPath backup path - * @throws IOException + * @throws IOException */ public BackupManifest(Configuration conf, Path backupPath) throws IOException { @@ -413,8 +417,6 @@ public class BackupManifest { // It could be the backup log dir where there is also a manifest file stored. // This variable's purpose is to keep the correct and original location so // that we can store/persist it. - this.tableBackupDir = backupPath.toString(); - this.config = fs.getConf(); try { FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); @@ -438,23 +440,6 @@ public class BackupManifest { throw new BackupException(e); } this.backupImage = BackupImage.fromProto(proto); - // Here the parameter backupDir is where the manifest file is. - // There should always be a manifest file under: - // backupRootDir/namespace/table/backupId/.backup.manifest - this.rootDir = backupPath.getParent().getParent().getParent().toString(); - - Path p = backupPath.getParent(); - if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { - this.rootDir = p.getParent().toString(); - } else { - this.rootDir = p.getParent().getParent().toString(); - } - this.backupId = this.backupImage.getBackupId(); - this.startTs = this.backupImage.getStartTs(); - this.completeTs = this.backupImage.getCompleteTs(); - this.type = this.backupImage.getType(); - this.tableList = (ArrayList<TableName>)this.backupImage.getTableNames(); - this.incrTimeRanges = this.backupImage.getIncrTimeRanges(); LOG.debug("Loaded manifest instance from manifest file: " + BackupClientUtil.getPath(subFile.getPath())); return; @@ -469,39 +454,15 @@ public class BackupManifest { } public BackupType getType() { - return type; - } - - public void setType(BackupType type) { - this.type = type; - } - - /** - * Loads table list. 
- * @param tableList Table list - */ - private void loadTableList(List<TableName> tableList) { - - this.tableList = this.getTableList(); - if (this.tableList.size() > 0) { - this.tableList.clear(); - } - for (int i = 0; i < tableList.size(); i++) { - this.tableList.add(tableList.get(i)); - } - - LOG.debug(tableList.size() + " tables exist in table set."); + return backupImage.getType(); } /** * Get the table set of this image. * @return The table set list */ - public ArrayList<TableName> getTableList() { - if (this.tableList == null) { - this.tableList = new ArrayList<TableName>(); - } - return this.tableList; + public List<TableName> getTableList() { + return backupImage.getTableNames(); } /** @@ -512,15 +473,16 @@ public class BackupManifest { public void store(Configuration conf) throws BackupException { byte[] data = backupImage.toProto().toByteArray(); // write the file, overwrite if already exist + String logBackupDir = BackupClientUtil.getLogBackupDir(backupImage.getRootDir(), + backupImage.getBackupId()); Path manifestFilePath = - new Path(new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir)) + new Path(new Path((tableBackupDir != null ? tableBackupDir : logBackupDir)) ,MANIFEST_FILE_NAME); - try { - FSDataOutputStream out = - manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); + try ( FSDataOutputStream out = + manifestFilePath.getFileSystem(conf).create(manifestFilePath, true);) + { out.write(data); - out.close(); - } catch (IOException e) { + } catch (IOException e) { throw new BackupException(e.getMessage()); } @@ -548,15 +510,11 @@ public class BackupManifest { * @param incrTimestampMap timestamp map */ public void setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> incrTimestampMap) { - this.incrTimeRanges = incrTimestampMap; this.backupImage.setIncrTimeRanges(incrTimestampMap); } public Map<TableName, HashMap<String, Long>> getIncrTimestampMap() { - if (this.incrTimeRanges == null) { - this.incrTimeRanges = new HashMap<TableName, HashMap<String, Long>>(); - } - return this.incrTimeRanges; + return backupImage.getIncrTimeRanges(); } /** @@ -692,18 +650,21 @@ public class BackupManifest { LOG.debug("Full image set can cover image " + image.getBackupId()); return true; } - + public BackupInfo toBackupInfo() { BackupInfo info = new BackupInfo(); - info.setType(type); - TableName[] tables = new TableName[tableList.size()]; - info.addTables(getTableList().toArray(tables)); - info.setBackupId(backupId); - info.setStartTs(startTs); - info.setTargetRootDir(rootDir); - if(type == BackupType.INCREMENTAL) { - info.setHlogTargetDir(logBackupDir); + info.setType(backupImage.getType()); + List<TableName> list = backupImage.getTableNames(); + TableName[] tables = new TableName[list.size()]; + info.addTables(list.toArray(tables)); + info.setBackupId(backupImage.getBackupId()); + info.setStartTs(backupImage.getStartTs()); + info.setTargetRootDir(backupImage.getRootDir()); + if(backupImage.getType() == BackupType.INCREMENTAL) { + + info.setHLogTargetDir(BackupClientUtil.getLogBackupDir(backupImage.getRootDir(), + backupImage.getBackupId())); } return info; } http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index e04c36e..b16f45f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -32,6 +32,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -90,7 +91,8 @@ public final class BackupSystemTable implements Closeable { @Override public String toString() { - return "/" + backupRoot + "/" + backupId + "/" + walFile; + return Path.SEPARATOR + backupRoot + + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile; } } http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java index f5911b4..557ee08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -45,7 +45,8 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private @InterfaceStability.Evolving -public final class BackupSystemTableHelper { + +final class BackupSystemTableHelper { /** * hbase:backup schema: http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index f1f09cc..e068ede 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -19,239 +19,40 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupCopyTask; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.impl.BackupException; -import 
http://git-wip-us.apache.org/repos/asf/hbase/blob/ff68ba06/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index f1f09cc..e068ede 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -19,239 +19,40 @@
 package org.apache.hadoop.hbase.backup.impl;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupCopyTask;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory;
 import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.impl.BackupException;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
 import org.apache.hadoop.hbase.backup.util.BackupServerUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 @InterfaceAudience.Private
-public class FullTableBackupClient {
+public class FullTableBackupClient extends TableBackupClient{
   private static final Log LOG = LogFactory.getLog(FullTableBackupClient.class);
 
-  private Configuration conf;
-  private Connection conn;
-  private String backupId;
-  private List<TableName> tableList;
   HashMap<String, Long> newTimestamps = null;
-
-  private BackupManager backupManager;
-  private BackupInfo backupContext;
-
-  public FullTableBackupClient() {
-    // Required by the Procedure framework to create the procedure on replay
-  }
 
   public FullTableBackupClient(final Connection conn, final String backupId,
       BackupRequest request) throws IOException {
-    backupManager = new BackupManager(conn, conn.getConfiguration());
-    this.backupId = backupId;
-    this.tableList = request.getTableList();
-    this.conn = conn;
-    this.conf = conn.getConfiguration();
-    backupContext =
-        backupManager.createBackupContext(backupId, BackupType.FULL, tableList,
-          request.getTargetRootDir(),
-          request.getWorkers(), request.getBandwidth());
-    if (tableList == null || tableList.isEmpty()) {
-      this.tableList = new ArrayList<>(backupContext.getTables());
-    }
-  }
-
-  /**
-   * Begin the overall backup.
-   * @param backupContext backup context
-   * @throws IOException exception
-   */
-  static void beginBackup(BackupManager backupManager, BackupInfo backupContext)
-      throws IOException {
-    backupManager.setBackupContext(backupContext);
-    // set the start timestamp of the overall backup
-    long startTs = EnvironmentEdgeManager.currentTime();
-    backupContext.setStartTs(startTs);
-    // set overall backup status: ongoing
-    backupContext.setState(BackupState.RUNNING);
-    LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + ".");
-
-    backupManager.updateBackupInfo(backupContext);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
-    }
-  }
-
-  private static String getMessage(Exception e) {
-    String msg = e.getMessage();
-    if (msg == null || msg.equals("")) {
-      msg = e.getClass().getName();
-    }
-    return msg;
-  }
-
-  /**
-   * Delete HBase snapshot for backup.
-   * @param backupCtx backup context
-   * @throws Exception exception
-   */
-  private static void
-      deleteSnapshot(final Connection conn, BackupInfo backupCtx, Configuration conf)
-          throws IOException {
-    LOG.debug("Trying to delete snapshot for full backup.");
-    for (String snapshotName : backupCtx.getSnapshotNames()) {
-      if (snapshotName == null) {
-        continue;
-      }
-      LOG.debug("Trying to delete snapshot: " + snapshotName);
-
-      try (Admin admin = conn.getAdmin();) {
-        admin.deleteSnapshot(snapshotName);
-      } catch (IOException ioe) {
-        LOG.debug("when deleting snapshot " + snapshotName, ioe);
-      }
-      LOG.debug("Deleting the snapshot " + snapshotName + " for backup "
-          + backupCtx.getBackupId() + " succeeded.");
-    }
+    super(conn, backupId, request);
   }
 
-  /**
-   * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
-   * snapshots.
-   * @throws IOException exception
-   */
-  private static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
-    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-    Path stagingDir =
-        new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
-            .toString()));
-    FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      if (file.getPath().getName().startsWith("exportSnapshot-")) {
-        LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
-        if (FSUtils.delete(fs, file.getPath(), true) == false) {
-          LOG.warn("Can not delete " + file.getPath());
-        }
-      }
-    }
-  }
-
-  /**
-   * Clean up the uncompleted data at target directory if the ongoing backup has already entered
-   * the copy phase.
-   */
-  static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) {
-    try {
-      // clean up the uncompleted data at target directory if the ongoing backup has already
-      // entered the copy phase
-      LOG.debug("Trying to cleanup up target dir. Current backup phase: "
-          + backupContext.getPhase());
-      if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
-          || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
-          || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
-        FileSystem outputFs =
-            FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
-
-        // now treat one backup as a transaction, clean up data that has been partially copied at
-        // table level
-        for (TableName table : backupContext.getTables()) {
-          Path targetDirPath =
-              new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
-                backupContext.getBackupId(), table));
-          if (outputFs.delete(targetDirPath, true)) {
-            LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
-                + " done.");
-          } else {
-            LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
-          }
-
-          Path tableDir = targetDirPath.getParent();
-          FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
-          if (backups == null || backups.length == 0) {
-            outputFs.delete(tableDir, true);
-            LOG.debug(tableDir.toString() + " is empty, remove it.");
-          }
-        }
-      }
-
-    } catch (IOException e1) {
-      LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
-          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
-    }
-  }
-
-  /**
-   * Fail the overall backup.
-   * @param backupContext backup context
-   * @param e exception
-   * @throws Exception exception
-   */
-  static void failBackup(Connection conn, BackupInfo backupContext, BackupManager backupManager,
-      Exception e, String msg, BackupType type, Configuration conf) throws IOException {
-    LOG.error(msg + getMessage(e), e);
-    // If this is a cancel exception, then we've already cleaned.
-
-    // set the failure timestamp of the overall backup
-    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
-
-    // set failure message
-    backupContext.setFailedMsg(e.getMessage());
-
-    // set overall backup status: failed
-    backupContext.setState(BackupState.FAILED);
-
-    // compose the backup failed data
-    String backupFailedData =
-        "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
-            + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
-            + ",failedmessage=" + backupContext.getFailedMsg();
-    LOG.error(backupFailedData);
-
-    backupManager.updateBackupInfo(backupContext);
-
-    // if full backup, then delete HBase snapshots if there already are snapshots taken
-    // and also clean up export snapshot log files if exist
-    if (type == BackupType.FULL) {
-      deleteSnapshot(conn, backupContext, conf);
-      cleanupExportSnapshotLog(conf);
-    }
-
-    // clean up the uncompleted data at target directory if the ongoing backup has already entered
-    // the copy phase
-    // For incremental backup, DistCp logs will be cleaned with the targetDir.
-    cleanupTargetDir(backupContext, conf);
-
-    LOG.info("Backup " + backupContext.getBackupId() + " failed.");
-  }
 
   /**
    * Do snapshot copy.
@@ -298,162 +99,10 @@ public class FullTableBackupClient {
   }
 
   /**
-   * Add manifest for the current backup. The manifest is stored within the table backup directory.
-   * @param backupContext The current backup context
-   * @throws IOException exception
-   * @throws BackupException exception
-   */
-  private static void addManifest(BackupInfo backupContext, BackupManager backupManager,
-      BackupType type, Configuration conf) throws IOException, BackupException {
-    // set the overall backup phase : store manifest
-    backupContext.setPhase(BackupPhase.STORE_MANIFEST);
-
-    BackupManifest manifest;
-
-    // Since we have each table's backup in its own directory structure,
-    // we'll store its manifest with the table directory.
-    for (TableName table : backupContext.getTables()) {
-      manifest = new BackupManifest(backupContext, table);
-      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext, table);
-      for (BackupImage image : ancestors) {
-        manifest.addDependentImage(image);
-      }
-
-      if (type == BackupType.INCREMENTAL) {
-        // We'll store the log timestamps for this table only in its manifest.
-        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-            new HashMap<TableName, HashMap<String, Long>>();
-        tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
-        manifest.setIncrTimestampMap(tableTimestampMap);
-        ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupContext);
-        for (BackupImage image : ancestorss) {
-          manifest.addDependentImage(image);
-        }
-      }
-      manifest.store(conf);
-    }
-
-    // For incremental backup, we store a overall manifest in
-    // <backup-root-dir>/WALs/<backup-id>
-    // This is used when created the next incremental backup
-    if (type == BackupType.INCREMENTAL) {
-      manifest = new BackupManifest(backupContext);
-      // set the table region server start and end timestamps for incremental backup
-      manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
-      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext);
-      for (BackupImage image : ancestors) {
-        manifest.addDependentImage(image);
-      }
-      manifest.store(conf);
-    }
-  }
-
-  /**
-   * Get backup request meta data dir as string.
-   * @param backupContext backup context
-   * @return meta data dir
-   */
-  private static String obtainBackupMetaDataStr(BackupInfo backupContext) {
-    StringBuffer sb = new StringBuffer();
-    sb.append("type=" + backupContext.getType() + ",tablelist=");
-    for (TableName table : backupContext.getTables()) {
-      sb.append(table + ";");
-    }
-    if (sb.lastIndexOf(";") > 0) {
-      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
-    }
-    sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
-
-    return sb.toString();
-  }
-
-  /**
-   * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying
-   * hlogs.
-   * @throws IOException exception
-   */
-  private static void cleanupDistCpLog(BackupInfo backupContext, Configuration conf)
-      throws IOException {
-    Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
-    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
-    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      if (file.getPath().getName().startsWith("_distcp_logs")) {
-        LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
-        FSUtils.delete(fs, file.getPath(), true);
-      }
-    }
-  }
-
-  /**
-   * Complete the overall backup.
-   * @param backupContext backup context
-   * @throws Exception exception
-   */
-  static void completeBackup(final Connection conn, BackupInfo backupContext,
-      BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
-    // set the complete timestamp of the overall backup
-    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
-    // set overall backup status: complete
-    backupContext.setState(BackupState.COMPLETE);
-    backupContext.setProgress(100);
-    // add and store the manifest for the backup
-    addManifest(backupContext, backupManager, type, conf);
-
-    // after major steps done and manifest persisted, do convert if needed for incremental backup
-    /* in-fly convert code here, provided by future jira */
-    LOG.debug("in-fly convert code here, provided by future jira");
-
-    // compose the backup complete data
-    String backupCompleteData =
-        obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
-            + ",completets=" + backupContext.getEndTs() + ",bytescopied="
-            + backupContext.getTotalBytesCopied();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
-    }
-    backupManager.updateBackupInfo(backupContext);
-
-    // when full backup is done:
-    // - delete HBase snapshot
-    // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
-    // snapshots
-    if (type == BackupType.FULL) {
-      deleteSnapshot(conn, backupContext, conf);
-      cleanupExportSnapshotLog(conf);
-    } else if (type == BackupType.INCREMENTAL) {
-      cleanupDistCpLog(backupContext, conf);
-    }
-
-    LOG.info("Backup " + backupContext.getBackupId() + " completed.");
-  }
-
-  /**
-   * Wrap a SnapshotDescription for a target table.
-   * @param table table
-   * @return a SnapshotDescription especially for backup.
-   */
-  static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) {
-    // Mock a SnapshotDescription from backupContext to call SnapshotManager function,
-    // Name it in the format "snapshot_<timestamp>_<table>"
-    HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
-    builder.setTable(tableName.getNameAsString());
-    builder.setName(snapshotName);
-    HBaseProtos.SnapshotDescription backupSnapshot = builder.build();
-
-    LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName()
-        + " from backupContext to request snapshot for backup.");
-
-    return backupSnapshot;
-  }
-
-  /**
    * Backup request execution
    * @throws IOException
    */
+  @Override
   public void execute() throws IOException {
 
     try (Admin admin = conn.getAdmin();) {
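Taken together, the FullTableBackupClient hunks above are a template-method refactor:
the shared backup lifecycle (beginBackup, failBackup, completeBackup, plus the
snapshot, export-snapshot-log, DistCp-log and target-directory cleanup helpers) moves
into the new TableBackupClient base class, and FullTableBackupClient keeps only its
full-backup-specific execute() path. A minimal sketch of the resulting shape; the
"Sketch" classes and their printed placeholders are illustrative, with simplified
signatures rather than copies of the HBase implementation:

  // Base class: owns the lifecycle that every backup type shares.
  abstract class TableBackupClientSketch {
    protected void beginBackup() { System.out.println("begin: mark RUNNING, set start ts"); }
    protected void completeBackup() { System.out.println("complete: store manifest, clean up"); }
    protected void failBackup(Exception e) { System.out.println("fail: record " + e.getMessage()); }

    // Each backup type supplies only its own copy strategy.
    public abstract void execute() throws Exception;
  }

  // Full backup keeps only its type-specific work, mirroring the patched class.
  class FullTableBackupClientSketch extends TableBackupClientSketch {
    @Override
    public void execute() throws Exception {
      beginBackup();
      try {
        System.out.println("full backup: snapshot each table, export the snapshots");
        completeBackup();
      } catch (Exception e) {
        failBackup(e);
        throw e;
      }
    }

    public static void main(String[] args) throws Exception {
      new FullTableBackupClientSketch().execute();
    }
  }

The same shape lets any other backup-type client reuse the lifecycle while overriding
only execute(), which is why the subclass constructor above collapses to a single
super(conn, backupId, request) call.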
