HBASE-14123 patch v40 (Vladimir)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c1eb653
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c1eb653
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c1eb653

Branch: refs/heads/14123
Commit: 7c1eb65365ac870746a3d422ab4b9491a2fdc8d7
Parents: f976dd1
Author: tedyu <yuzhih...@gmail.com>
Authored: Wed Dec 7 13:08:04 2016 -0800
Committer: tedyu <yuzhih...@gmail.com>
Committed: Wed Dec 7 13:08:04 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/protobuf/ProtobufUtil.java      |  36 --
 .../ClientSnapshotDescriptionUtils.java          |   2 +-
 .../hbase/IntegrationTestBackupRestore.java      |   4 +-
 .../src/main/protobuf/Backup.proto               |  16 +-
 hbase-server/pom.xml                             |   5 -
 .../apache/hadoop/hbase/backup/BackupAdmin.java  |  28 +-
 .../hadoop/hbase/backup/BackupCopyTask.java      |   2 +-
 .../hadoop/hbase/backup/BackupDriver.java        |   2 +-
 .../apache/hadoop/hbase/backup/BackupInfo.java   |  18 +-
 .../hadoop/hbase/backup/BackupStatus.java        |   2 +-
 .../hadoop/hbase/backup/HBackupFileSystem.java   |   5 +-
 .../hadoop/hbase/backup/RestoreDriver.java       |  10 +-
 .../hbase/backup/impl/BackupAdminImpl.java       | 556 +++++++++++++++++++
 .../hbase/backup/impl/BackupCommands.java        |  65 ++-
 .../hadoop/hbase/backup/impl/BackupManager.java  |  62 +--
 .../hbase/backup/impl/BackupManifest.java        | 218 +++----
 .../hbase/backup/impl/BackupSystemTable.java     | 103 ++--
 .../backup/impl/BackupSystemTableHelper.java     |   3 +-
 .../backup/impl/FullTableBackupClient.java       | 357 +-----
 .../hbase/backup/impl/HBaseBackupAdmin.java      | 555 ------------------
 .../backup/impl/IncrementalBackupManager.java    |  30 +-
 .../impl/IncrementalTableBackupClient.java       |  43 +-
 .../hbase/backup/impl/RestoreTablesClient.java   |   4 -
 .../hbase/backup/impl/TableBackupClient.java     | 386 +++++++++++++
 .../mapreduce/MapReduceBackupCopyTask.java       |  10 +-
 .../hbase/backup/master/BackupLogCleaner.java    |   2 +-
 .../master/LogRollMasterProcedureManager.java    |   7 +
 .../regionserver/LogRollBackupSubprocedure.java  |   4 +-
 .../LogRollRegionServerProcedureManager.java     |   3 +-
 .../hbase/backup/util/RestoreServerUtil.java     |  22 +-
 .../hbase/mapreduce/HFileInputFormat2.java       |   3 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java   |  19 +-
 .../procedure/ZKProcedureCoordinatorRpcs.java    |   3 +-
 .../hbase/regionserver/HRegionServer.java        |   7 +-
 .../hadoop/hbase/backup/TestBackupBase.java      |   6 +-
 .../hbase/backup/TestBackupCommandLineTool.java  |  79 +--
 .../hadoop/hbase/backup/TestBackupDescribe.java  |   4 +-
 .../hbase/backup/TestBackupMultipleDeletes.java  |   4 +-
 .../hbase/backup/TestIncrementalBackup.java      |   4 +-
 .../TestIncrementalBackupDeleteTable.java        |   4 +-
 .../hbase/master/MockNoopMasterServices.java     |  11 -
 .../master/TestDistributedLogSplitting.java      |   1 +
 42 files changed, 1319 insertions(+), 1386 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 2cc8fa7..bdc3e54 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -41,7 +41,6 @@ import
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; -import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Consistency; @@ -79,7 +78,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; @@ -318,28 +316,6 @@ public final class ProtobufUtil { return ServerName.valueOf(hostName, port, startCode); } - - /** - * Convert a protocol buffer ServerName to a ServerName - * - * @param proto the protocol buffer ServerName to convert - * @return the converted ServerName - */ - public static ServerName toServerNameShaded( - final org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName proto) { - if (proto == null) return null; - String hostName = proto.getHostName(); - long startCode = -1; - int port = -1; - if (proto.hasPort()) { - port = proto.getPort(); - } - if (proto.hasStartCode()) { - startCode = proto.getStartCode(); - } - return ServerName.valueOf(hostName, port, startCode); - } - /** * Convert a protobuf Durability into a client Durability */ @@ -1694,14 +1670,6 @@ public final class ProtobufUtil { tableNamePB.getQualifier().asReadOnlyByteBuffer()); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName - toProtoTableNameShaded(TableName tableName) { - return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder() - .setNamespace(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFrom(tableName.getNamespace())) - .setQualifier(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFrom(tableName.getQualifier())).build(); - } - - /** * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding * buffers when working with byte arrays @@ -1779,10 +1747,6 @@ public final class ProtobufUtil { return regionBuilder.build(); } - public static BackupProtos.BackupType toProtoBackupType(BackupType type) { - return BackupProtos.BackupType.valueOf(type.name()); - } - /** * Get a ServerName from the passed in data bytes. 
* @param data Data with a serialize server name in it; can handle the old style http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index 7f19cbd..2a58b5a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -44,7 +44,7 @@ public class ClientSnapshotDescriptionUtils { // make sure the table name is valid, this will implicitly check validity TableName tableName = TableName.valueOf(snapshot.getTable()); - if (tableName.isSystemTable() && !tableName.toString().equals("hbase:backup")) { + if (tableName.isSystemTable() && !TableName.BACKUP_TABLE_NAME.equals(tableName)) { // allow hbase:backup table snapshot, but disallow other system tables throw new IllegalArgumentException("System table snapshots are not allowed"); } http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java ---------------------------------------------------------------------- diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java index 416ac13..35a09d6 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -162,7 +162,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2); HBaseAdmin admin = null; admin = (HBaseAdmin) conn.getAdmin(); - BackupAdmin client = new HBaseBackupAdmin(util.getConnection()); + BackupAdmin client = new BackupAdminImpl(util.getConnection()); BackupRequest request = new BackupRequest(); request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-protocol-shaded/src/main/protobuf/Backup.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol-shaded/src/main/protobuf/Backup.proto b/hbase-protocol-shaded/src/main/protobuf/Backup.proto index b7196ca..7a535d9 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Backup.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Backup.proto @@ -49,16 +49,15 @@ message BackupImage { repeated TableName table_list = 4; optional uint64 start_ts = 5; optional uint64 complete_ts = 6; - repeated BackupImage ancestors = 7; + repeated BackupImage ancestors = 7; repeated 
TableServerTimestamp tst_map = 8; - -} +} message TableBackupStatus { optional TableName table = 1; optional string target_dir = 2; - optional string snapshot = 3; + optional string snapshot = 3; } message BackupInfo { @@ -71,11 +70,11 @@ message BackupInfo { repeated TableBackupStatus table_backup_status = 7; optional uint64 start_ts = 8; optional uint64 end_ts = 9; - optional uint32 progress = 10; + optional uint32 progress = 10; optional string job_id = 11; optional uint32 workers_number = 12; optional uint64 bandwidth = 13; - + enum BackupState { WAITING = 0; RUNNING = 1; @@ -91,6 +90,5 @@ message BackupInfo { SNAPSHOTCOPY = 3; INCREMENTAL_COPY = 4; STORE_MANIFEST = 5; - } -} - + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/pom.xml ---------------------------------------------------------------------- diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index e6aed8e..57910e7 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -408,11 +408,6 @@ <version>${hadoop-two.version}</version> </dependency> <dependency> - <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-distcp</artifactId> - <version>${hadoop-two.version}</version> - </dependency> - <dependency> <groupId>org.apache.hbase</groupId> <artifactId>hbase-hadoop-compat</artifactId> </dependency> http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java index 0b8de28..f024406 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; public interface BackupAdmin extends Closeable{ /** - * Backs up given list of tables fully. Synchronous operation. + * Backup given list of tables fully. Synchronous operation. 
* * @param userRequest BackupRequest instance * @return the backup Id @@ -59,14 +59,14 @@ public interface BackupAdmin extends Closeable{ /** * Restore backup - * @param request - restore request + * @param request restore request * @throws IOException exception */ public void restore(RestoreRequest request) throws IOException; /** * Restore backup - * @param request - restore request + * @param request restore request * @return Future which client can wait on * @throws IOException exception */ @@ -74,7 +74,7 @@ public interface BackupAdmin extends Closeable{ /** * Describe backup image command - * @param backupId - backup id + * @param backupId backup id * @return backup info * @throws IOException exception */ @@ -82,7 +82,7 @@ public interface BackupAdmin extends Closeable{ /** * Show backup progress command - * @param backupId - backup id (may be null) + * @param backupId backup id (may be null) * @return backup progress (0-100%), -1 if no active sessions * or session not found * @throws IOException exception @@ -91,7 +91,7 @@ public interface BackupAdmin extends Closeable{ /** * Delete backup image command - * @param backupIds - backup id + * @param backupIds backup id list * @return total number of deleted sessions * @throws IOException exception */ @@ -99,7 +99,7 @@ public interface BackupAdmin extends Closeable{ /** * Show backup history command - * @param n - last n backup sessions + * @param n last n backup sessions * @return list of backup infos * @throws IOException exception */ @@ -108,8 +108,8 @@ public interface BackupAdmin extends Closeable{ /** * Show backup history command with filters - * @param n - last n backup sessions - * @param f - list of filters + * @param n last n backup sessions + * @param f list of filters * @return list of backup infos * @throws IOException exception */ @@ -135,7 +135,7 @@ public interface BackupAdmin extends Closeable{ /** * Delete backup set command - * @param name - backup set name + * @param name backup set name * @return true, if success, false - otherwise * @throws IOException exception */ @@ -143,16 +143,16 @@ public interface BackupAdmin extends Closeable{ /** * Add tables to backup set command - * @param name - name of backup set. - * @param tables - list of tables to be added to this set. + * @param name name of backup set. + * @param tables list of tables to be added to this set. * @throws IOException exception */ public void addToBackupSet(String name, TableName[] tables) throws IOException; /** * Remove tables from backup set - * @param name - name of backup set. - * @param tables - list of tables to be removed from this set. + * @param name name of backup set. + * @param tables list of tables to be removed from this set. 
* @throws IOException exception */ public void removeFromBackupSet(String name, String[] tables) throws IOException; http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java index 26a7e44..c543062 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java @@ -41,7 +41,7 @@ public interface BackupCopyTask extends Configurable { * @throws IOException exception */ int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf, - BackupType copyType, String[] options) throws IOException; + BackupType backupType, String[] options) throws IOException; /** http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 099e418..fcfd5b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -58,7 +58,7 @@ public class BackupDriver extends AbstractHBaseTool implements BackupRestoreCons // Check if backup is enabled if (!BackupManager.isBackupEnabled(getConf())) { System.err.println("Backup is not enabled. 
To enable backup, "+ - "set \'hbase.backup.enabled'=true and restart "+ + "set " +BackupRestoreConstants.BACKUP_ENABLE_KEY+"=true and restart "+ "the cluster"); return -1; } http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index 4ea0299..0f861a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -176,7 +176,7 @@ public class BackupInfo implements Comparable<BackupInfo> { this.addTables(tables); if (type == BackupType.INCREMENTAL) { - setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); + setHLogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); } this.startTs = 0; @@ -220,10 +220,6 @@ public class BackupInfo implements Comparable<BackupInfo> { this.tableSetTimestampMap = tableSetTimestampMap; } - public String getHlogTargetDir() { - return hlogTargetDir; - } - public void setType(BackupType type) { this.type = type; } @@ -355,7 +351,7 @@ public class BackupInfo implements Comparable<BackupInfo> { return targetRootDir; } - public void setHlogTargetDir(String hlogTagetDir) { + public void setHLogTargetDir(String hlogTagetDir) { this.hlogTargetDir = hlogTagetDir; } @@ -488,7 +484,7 @@ public class BackupInfo implements Comparable<BackupInfo> { context.setState(BackupInfo.BackupState.valueOf(proto.getState().name())); } - context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), + context.setHLogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), proto.getBackupId())); if (proto.hasPhase()) { @@ -537,7 +533,7 @@ public class BackupInfo implements Comparable<BackupInfo> { date = cal.getTime(); sb.append("End time : " + date).append("\n"); } - sb.append("Progress : " + getProgress()).append("\n"); + sb.append("Progress : " + getProgress()+"%").append("\n"); return sb.toString(); } @@ -549,7 +545,11 @@ public class BackupInfo implements Comparable<BackupInfo> { } public String getTableListAsString() { - return StringUtils.join(backupStatusMap.keySet(), ","); + StringBuffer sb = new StringBuffer(); + sb.append("{"); + sb.append(StringUtils.join(backupStatusMap.keySet(), ",")); + sb.append("}"); + return sb.toString(); } @Override http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java index fd856ec..0275140 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java @@ -97,7 +97,7 @@ public class BackupStatus implements Serializable { if(snapshotName != null) { builder.setSnapshot(snapshotName); } - builder.setTable(ProtobufUtil.toProtoTableNameShaded(table)); + builder.setTable(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table)); builder.setTargetDir(targetDir); return builder.build(); } 
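
A minimal usage sketch of the client-facing BackupAdmin API documented above. It is illustrative only: it assumes the BackupAdminImpl, BackupRequest and BackupType classes from this patch, the table name and target root directory are placeholders, and error handling is omitted.

    import com.google.common.collect.Lists;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.BackupRequest;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BackupAdminUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             BackupAdmin admin = new BackupAdminImpl(conn)) {
          // Request a synchronous full backup of one table; "test-table" and the
          // target root directory below are placeholders.
          BackupRequest request = new BackupRequest();
          request.setBackupType(BackupType.FULL)
              .setTableList(Lists.newArrayList(TableName.valueOf("test-table")))
              .setTargetRootDir("hdfs:///backup_root");
          String backupId = admin.backupTables(request);
          // getProgress returns 0-100, or -1 when no matching session is found.
          System.out.println(backupId + " progress=" + admin.getProgress(backupId) + "%");
        }
      }
    }

This mirrors the BackupRequest that the bin/hbase backup create command in BackupCommands builds internally before handing it to BackupAdminImpl.
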
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index 9deb15b..49586bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -125,6 +125,7 @@ public class HBackupFileSystem { Path backupRootPath, String backupId) throws IOException { Path manifestPath = new Path(getTableBackupPath(tableName, backupRootPath, backupId), BackupManifest.MANIFEST_FILE_NAME); + FileSystem fs = backupRootPath.getFileSystem(conf); if (!fs.exists(manifestPath)) { // check log dir for incremental backup case @@ -134,8 +135,8 @@ public class HBackupFileSystem { if (!fs.exists(manifestPath)) { String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " + - backupId + " in " + backupRootPath.toString() + - ". Did " + backupId + " correspond to previously taken backup ?"; + backupId + ". File " + manifestPath + + " does not exists. Did " + backupId + " correspond to previously taken backup ?"; throw new IOException(errorMsg); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index 336060f..1ca512e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; import org.apache.hadoop.hbase.backup.util.LogUtils; import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; @@ -52,8 +52,8 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon private static final String USAGE_STRING = "Usage: bin/hbase restore <backup_path> <backup_id> <table(s)> [options]\n" + " backup_path Path to a backup destination root\n" - + " backup_id Backup image ID to restore" - + " table(s) Comma-separated list of tables to restore"; + + " backup_id Backup image ID to restore\n" + + " table(s) Comma-separated list of tables to restore\n"; private static final String USAGE_FOOTER = ""; @@ -70,7 +70,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon // Check if backup is enabled if (!BackupManager.isBackupEnabled(getConf())) { System.err.println("Backup is not enabled. 
To enable backup, "+ - "set \'hbase.backup.enabled'=true and restart "+ + "set "+ BackupRestoreConstants.BACKUP_ENABLE_KEY+"=true and restart "+ "the cluster"); return -1; } @@ -110,7 +110,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon String tableMapping = cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null; try (final Connection conn = ConnectionFactory.createConnection(conf); - BackupAdmin client = new HBaseBackupAdmin(conn);) { + BackupAdmin client = new BackupAdminImpl(conn);) { // Check backup set if (cmd.hasOption(OPTION_SET)) { String setName = cmd.getOptionValue(OPTION_SET); http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java new file mode 100644 index 0000000..b73e576 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -0,0 +1,556 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupAdmin; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import com.google.common.collect.Lists; + +/** + * The administrative API implementation for HBase Backup . Create an instance from + * {@link #BackupAdminImpl(Connection)} and call {@link #close()} afterwards. + * <p>BackupAdmin can be used to create backups, restore data from backups and for + * other backup-related operations. 
+ * + * @see Admin + * @since 2.0 + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving + +public class BackupAdminImpl implements BackupAdmin { + private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class); + + private final Connection conn; + + public BackupAdminImpl(Connection conn) { + this.conn = conn; + } + + @Override + public void close() throws IOException { + if (conn != null) { + conn.close(); + } + } + + @Override + public BackupInfo getBackupInfo(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + backupInfo = table.readBackupInfo(backupId); + return backupInfo; + } + } + + @Override + public int getProgress(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (backupId == null) { + ArrayList<BackupInfo> recentSessions = table.getBackupContexts(BackupState.RUNNING); + if (recentSessions.isEmpty()) { + LOG.warn("No ongoing sessions found."); + return -1; + } + // else show status for ongoing session + // must be one maximum + return recentSessions.get(0).getProgress(); + } else { + + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + return backupInfo.getProgress(); + } else { + LOG.warn("No information found for backupID=" + backupId); + return -1; + } + } + } + } + + @Override + public int deleteBackups(String[] backupIds) throws IOException { + // TODO: requires FT, failure will leave system + // in non-consistent state + // see HBASE-15227 + + int totalDeleted = 0; + Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>(); + + try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + for (int i = 0; i < backupIds.length; i++) { + BackupInfo info = sysTable.readBackupInfo(backupIds[i]); + if (info != null) { + String rootDir = info.getTargetRootDir(); + HashSet<TableName> allTables = allTablesMap.get(rootDir); + if (allTables == null) { + allTables = new HashSet<TableName>(); + allTablesMap.put(rootDir, allTables); + } + allTables.addAll(info.getTableNames()); + totalDeleted += deleteBackup(backupIds[i], sysTable); + } + } + finalizeDelete(allTablesMap, sysTable); + } + return totalDeleted; + } + + /** + * Updates incremental backup set for every backupRoot + * @param tablesMap - Map [backupRoot: Set<TableName>] + * @param table - backup system table + * @throws IOException + */ + + private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table) + throws IOException { + for (String backupRoot : tablesMap.keySet()) { + Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot); + Map<TableName, ArrayList<BackupInfo>> tableMap = + table.getBackupHistoryForTableSet(incrTableSet, backupRoot); + for(Map.Entry<TableName, ArrayList<BackupInfo>> entry: tableMap.entrySet()) { + if(entry.getValue() == null) { + // No more backups for a table + incrTableSet.remove(entry.getKey()); + } + } + if (!incrTableSet.isEmpty()) { + table.addIncrementalBackupTableSet(incrTableSet, backupRoot); + } else { // empty + table.deleteIncrementalBackupTableSet(backupRoot); + } + } + } + + /** + * Delete single backup and all related backups + * Algorithm: + * + * Backup type: FULL or INCREMENTAL + * Is this last backup session for table T: YES or NO + * For every table T from table list 'tables': + * if(FULL, YES) deletes only physical data (PD) + * if(FULL, NO), deletes PD, scans all 
newer backups and removes T from backupInfo, until + * we either reach the most recent backup for T in the system or FULL backup which + * includes T + * if(INCREMENTAL, YES) deletes only physical data (PD) + * if(INCREMENTAL, NO) deletes physical data and for table T scans all backup images + * between last FULL backup, which is older than the backup being deleted and the next + * FULL backup (if exists) or last one for a particular table T and removes T from list + * of backup tables. + * @param backupId - backup id + * @param sysTable - backup system table + * @return total - number of deleted backup images + * @throws IOException + */ + private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException { + + BackupInfo backupInfo = sysTable.readBackupInfo(backupId); + + int totalDeleted = 0; + if (backupInfo != null) { + LOG.info("Deleting backup " + backupInfo.getBackupId() + " ..."); + BackupClientUtil.cleanupBackupData(backupInfo, conn.getConfiguration()); + // List of tables in this backup; + List<TableName> tables = backupInfo.getTableNames(); + long startTime = backupInfo.getStartTs(); + for (TableName tn : tables) { + boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime); + if (isLastBackupSession) { + continue; + } + // else + List<BackupInfo> affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable); + for (BackupInfo info : affectedBackups) { + if (info.equals(backupInfo)) { + continue; + } + removeTableFromBackupImage(info, tn, sysTable); + } + } + LOG.debug("Delete backup info "+ backupInfo.getBackupId()); + + sysTable.deleteBackupInfo(backupInfo.getBackupId()); + LOG.info("Delete backup " + backupInfo.getBackupId() + " completed."); + totalDeleted++; + } else { + LOG.warn("Delete backup failed: no information found for backupID=" + backupId); + } + return totalDeleted; + } + + private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) + throws IOException { + List<TableName> tables = info.getTableNames(); + LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" + + info.getTableListAsString()); + if (tables.contains(tn)) { + tables.remove(tn); + + if (tables.isEmpty()) { + LOG.debug("Delete backup info "+ info.getBackupId()); + + sysTable.deleteBackupInfo(info.getBackupId()); + BackupClientUtil.cleanupBackupData(info, conn.getConfiguration()); + } else { + info.setTables(tables); + sysTable.updateBackupInfo(info); + // Now, clean up directory for table + cleanupBackupDir(info, tn, conn.getConfiguration()); + } + } + } + + private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn, + BackupSystemTable table) throws IOException { + LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn); + long ts = backupInfo.getStartTs(); + List<BackupInfo> list = new ArrayList<BackupInfo>(); + List<BackupInfo> history = table.getBackupHistory(backupInfo.getTargetRootDir()); + // Scan from most recent to backupInfo + // break when backupInfo reached + for (BackupInfo info : history) { + if (info.getStartTs() == ts) { + break; + } + List<TableName> tables = info.getTableNames(); + if (tables.contains(tn)) { + BackupType bt = info.getType(); + if (bt == BackupType.FULL) { + // Clear list if we encounter FULL backup + list.clear(); + } else { + LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn + + " added " + info.getBackupId() + " tables=" + info.getTableListAsString()); + list.add(info); + } + } + } 
+ return list; + } + + + + /** + * Clean up the data at target directory + * @throws IOException + */ + private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf) + throws IOException { + try { + // clean up the data at target directory + String targetDir = backupInfo.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupInfo.getBackupId()); + return; + } + + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); + + Path targetDirPath = + new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(), + backupInfo.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table + + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + throw e1; + } + } + + private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) + throws IOException { + List<BackupInfo> history = table.getBackupHistory(); + for (BackupInfo info : history) { + List<TableName> tables = info.getTableNames(); + if (!tables.contains(tn)) { + continue; + } + if (info.getStartTs() <= startTime) { + return true; + } else { + return false; + } + } + return false; + } + + @Override + public List<BackupInfo> getHistory(int n) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<BackupInfo> history = table.getBackupHistory(); + if (history.size() <= n) return history; + List<BackupInfo> list = new ArrayList<BackupInfo>(); + for (int i = 0; i < n; i++) { + list.add(history.get(i)); + } + return list; + } + } + + @Override + public List<BackupInfo> getHistory(int n, BackupInfo.Filter ... 
filters) throws IOException { + if (filters.length == 0) return getHistory(n); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<BackupInfo> history = table.getBackupHistory(); + List<BackupInfo> result = new ArrayList<BackupInfo>(); + for(BackupInfo bi: history) { + if(result.size() == n) break; + boolean passed = true; + for(int i=0; i < filters.length; i++) { + if(!filters[i].apply(bi)) { + passed = false; + break; + } + } + if(passed) { + result.add(bi); + } + } + return result; + } + } + + @Override + public List<BackupSet> listBackupSets() throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<String> list = table.listBackupSets(); + List<BackupSet> bslist = new ArrayList<BackupSet>(); + for (String s : list) { + List<TableName> tables = table.describeBackupSet(s); + if (tables != null) { + bslist.add(new BackupSet(s, tables)); + } + } + return bslist; + } + } + + @Override + public BackupSet getBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List<TableName> list = table.describeBackupSet(name); + if (list == null) return null; + return new BackupSet(name, list); + } + } + + @Override + public boolean deleteBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (table.describeBackupSet(name) == null) { + return false; + } + table.deleteBackupSet(name); + return true; + } + } + + @Override + public void addToBackupSet(String name, TableName[] tables) throws IOException { + String[] tableNames = new String[tables.length]; + try (final BackupSystemTable table = new BackupSystemTable(conn); + final Admin admin = conn.getAdmin();) { + for (int i = 0; i < tables.length; i++) { + tableNames[i] = tables[i].getNameAsString(); + if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { + throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist"); + } + } + table.addToBackupSet(name, tableNames); + LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + + "' backup set"); + } + } + + @Override + public void removeFromBackupSet(String name, String[] tables) throws IOException { + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + table.removeFromBackupSet(name, tables); + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + + "' completed."); + } + } + + @Override + public void restore(RestoreRequest request) throws IOException { + if (request.isCheck()) { + HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(request.getBackupRootDir()); + String backupId = request.getBackupId(); + TableName[] sTableArray = request.getFromTables(); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, + sTableArray, conn.getConfiguration(), rootPath, backupId); + + // Check and validate the backup image and its dependencies + + if (RestoreServerUtil.validate(backupManifestMap, conn.getConfiguration())) { + LOG.info("Checking backup images: ok"); + } else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + + } + // Execute restore request + new RestoreTablesClient(conn, request).execute(); + } + + @Override + public Future<Void> 
restoreAsync(RestoreRequest request) throws IOException { + throw new UnsupportedOperationException("Asynchronous restore is not supported yet"); + } + + @Override + public String backupTables(final BackupRequest request) throws IOException { + String setName = request.getBackupSetName(); + BackupType type = request.getBackupType(); + String targetRootDir = request.getTargetRootDir(); + List<TableName> tableList = request.getTableList(); + + String backupId = + (setName == null || setName.length() == 0 ? BackupRestoreConstants.BACKUPID_PREFIX + : setName + "_") + EnvironmentEdgeManager.currentTime(); + if (type == BackupType.INCREMENTAL) { + Set<TableName> incrTableSet = null; + try (BackupSystemTable table = new BackupSystemTable(conn)) { + incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); + } + + if (incrTableSet.isEmpty()) { + System.err.println("Incremental backup table set contains no table.\n" + + "Use 'backup create full' or 'backup stop' to \n " + + "change the tables covered by incremental backup."); + throw new IOException("No table covered by incremental backup."); + } + + tableList.removeAll(incrTableSet); + if (!tableList.isEmpty()) { + String extraTables = StringUtils.join(tableList, ","); + System.err.println("Some tables (" + extraTables + ") haven't gone through full backup"); + throw new IOException("Perform full backup on " + extraTables + " first, " + + "then retry the command"); + } + System.out.println("Incremental backup for the following table set: " + incrTableSet); + tableList = Lists.newArrayList(incrTableSet); + } + if (tableList != null && !tableList.isEmpty()) { + for (TableName table : tableList) { + String targetTableBackupDir = + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + Path targetTableBackupDirPath = new Path(targetTableBackupDir); + FileSystem outputFs = + FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration()); + if (outputFs.exists(targetTableBackupDirPath)) { + throw new IOException("Target backup directory " + targetTableBackupDir + + " exists already."); + } + } + ArrayList<TableName> nonExistingTableList = null; + try (Admin admin = conn.getAdmin();) { + for (TableName tableName : tableList) { + if (!admin.tableExists(tableName)) { + if (nonExistingTableList == null) { + nonExistingTableList = new ArrayList<>(); + } + nonExistingTableList.add(tableName); + } + } + } + if (nonExistingTableList != null) { + if (type == BackupType.INCREMENTAL) { + System.err.println("Incremental backup table set contains non-exising table: " + + nonExistingTableList); + // Update incremental backup set + tableList = excludeNonExistingTables(tableList, nonExistingTableList); + } else { + // Throw exception only in full mode - we try to backup non-existing table + throw new IOException("Non-existing tables found in the table list: " + + nonExistingTableList); + } + } + } + + // update table list + request.setTableList(tableList); + + if (type == BackupType.FULL) { + new FullTableBackupClient(conn, backupId, request).execute(); + } else { + new IncrementalTableBackupClient(conn, backupId, request).execute(); + } + return backupId; + } + + + private List<TableName> excludeNonExistingTables(List<TableName> tableList, + List<TableName> nonExistingTableList) { + + for (TableName table : nonExistingTableList) { + tableList.remove(table); + } + return tableList; + } + + @Override + public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException { + throw new 
UnsupportedOperationException("Asynchronous backup is not supported yet"); + } + +} http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index c4227f5..8da489d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; @@ -66,31 +67,33 @@ public final class BackupCommands implements BackupRestoreConstants { + "Run \'bin/hbase backup COMMAND -h\' to see help message for each command\n"; public static final String CREATE_CMD_USAGE = - "Usage: bin/hbase backup create <type> <backup_root> [tables] [options]\n" - + " type \"full\" to create a full backup image\n" - + " \"incremental\" to create an incremental backup image\n" - + " backup_root Full path to store the backup image\n" + "Usage: bin/hbase backup create <type> <backup_path> [tables] [options]\n" + + " type \"full\" to create a full backup image\n" + + " \"incremental\" to create an incremental backup image\n" + + " backup_path Full path to store the backup image\n" + " tables If no tables (\"\") are specified, all tables are backed up.\n" + " otherwise it is a comma separated list of tables."; - public static final String PROGRESS_CMD_USAGE = "Usage: bin/hbase backup progress <backupId>\n" - + " backupId Backup image id\n"; + public static final String PROGRESS_CMD_USAGE = "Usage: bin/hbase backup progress <backup_id>\n" + + " backup_id Backup image id (optional). 
If no id specified, the command will show\n"+ + " progress for currently running backup session."; public static final String NO_INFO_FOUND = "No info was found for backup id: "; + public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found."; - public static final String DESCRIBE_CMD_USAGE = "Usage: bin/hbase backup describe <backupId>\n" - + " backupId Backup image id\n"; + public static final String DESCRIBE_CMD_USAGE = "Usage: bin/hbase backup describe <backup_id>\n" + + " backup_id Backup image id\n"; public static final String HISTORY_CMD_USAGE = "Usage: bin/hbase backup history [options]"; - public static final String DELETE_CMD_USAGE = "Usage: bin/hbase backup delete <backupId>\n" - + " backupId Backup image id\n"; + public static final String DELETE_CMD_USAGE = "Usage: bin/hbase backup delete <backup_id>\n" + + " backup_id Backup image id\n"; - public static final String CANCEL_CMD_USAGE = "Usage: bin/hbase backup cancel <backupId>\n" - + " backupId Backup image id\n"; + public static final String CANCEL_CMD_USAGE = "Usage: bin/hbase backup cancel <backup_id>\n" + + " backup_id Backup image id\n"; public static final String SET_CMD_USAGE = "Usage: bin/hbase backup set COMMAND [name] [tables]\n" + " name Backup set name\n" @@ -227,7 +230,7 @@ public final class BackupCommands implements BackupRestoreConstants { Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS)) : -1; try (Connection conn = ConnectionFactory.createConnection(getConf()); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupAdminImpl admin = new BackupAdminImpl(conn);) { BackupRequest request = new BackupRequest(); request.setBackupType(BackupType.valueOf(args[1].toUpperCase())) .setTableList(tables != null?Lists.newArrayList(BackupClientUtil.parseTableNames(tables)): null) @@ -392,8 +395,8 @@ public final class BackupCommands implements BackupRestoreConstants { if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) { - System.err.println("No backup id was specified, " - + "will retrieve the most recent (ongoing) sessions"); + System.out.println("No backup id was specified, " + + "will retrieve the most recent (ongoing) session"); } String[] args = cmdline == null ? null : cmdline.getArgs(); if (args != null && args.length > 2) { @@ -406,10 +409,26 @@ public final class BackupCommands implements BackupRestoreConstants { Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); final BackupSystemTable sysTable = new BackupSystemTable(conn);){ - BackupInfo info = sysTable.readBackupInfo(backupId); + BackupInfo info = null; + + if (backupId != null) { + info = sysTable.readBackupInfo(backupId); + } else { + List<BackupInfo> infos = sysTable.getBackupContexts(BackupState.RUNNING); + if(infos != null && infos.size() > 0) { + info = infos.get(0); + backupId = info.getBackupId(); + System.out.println("Found ongoing session with backupId="+ backupId); + } else { + } + } int progress = info == null? 
-1: info.getProgress(); if(progress < 0){ - System.out.println(NO_INFO_FOUND + backupId); + if(backupId != null) { + System.out.println(NO_INFO_FOUND + backupId); + } else { + System.err.println(NO_ACTIVE_SESSION_FOUND); + } } else{ System.out.println(backupId+" progress=" + progress+"%"); } @@ -443,7 +462,7 @@ public final class BackupCommands implements BackupRestoreConstants { System.arraycopy(args, 1, backupIds, 0, backupIds.length); Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupAdminImpl admin = new BackupAdminImpl(conn);) { int deleted = admin.deleteBackups(args); System.out.println("Deleted " + deleted + " backups. Total requested: " + args.length); } @@ -473,7 +492,7 @@ public final class BackupCommands implements BackupRestoreConstants { } Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupAdminImpl admin = new BackupAdminImpl(conn);) { // TODO cancel backup } } @@ -648,7 +667,7 @@ public final class BackupCommands implements BackupRestoreConstants { // does not expect any args Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + BackupAdminImpl admin = new BackupAdminImpl(conn);){ List<BackupSet> list = admin.listBackupSets(); for(BackupSet bs: list){ System.out.println(bs); @@ -683,7 +702,7 @@ public final class BackupCommands implements BackupRestoreConstants { String setName = args[2]; Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + final BackupAdminImpl admin = new BackupAdminImpl(conn);){ boolean result = admin.deleteBackupSet(setName); if(result){ System.out.println("Delete set "+setName+" OK."); @@ -703,7 +722,7 @@ public final class BackupCommands implements BackupRestoreConstants { String[] tables = args[3].split(","); Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + final BackupAdminImpl admin = new BackupAdminImpl(conn);){ admin.removeFromBackupSet(setName, tables); } } @@ -721,7 +740,7 @@ public final class BackupCommands implements BackupRestoreConstants { } Configuration conf = getConf() != null? 
getConf():HBaseConfiguration.create(); try(final Connection conn = ConnectionFactory.createConnection(conf); - final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + final BackupAdminImpl admin = new BackupAdminImpl(conn);){ admin.addToBackupSet(setName, tableNames); } http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index 6fb7cfd..ece07b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -25,10 +25,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -52,8 +48,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - /** * Handles backup requests on server-side, creates backup context records in hbase:backup * to keep track backup. The timestamps kept in hbase:backup table will be used for future @@ -64,11 +58,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; public class BackupManager implements Closeable { private static final Log LOG = LogFactory.getLog(BackupManager.class); - private Configuration conf = null; - private BackupInfo backupContext = null; - private ExecutorService pool = null; - private BackupSystemTable systemTable; - private final Connection conn; + protected Configuration conf = null; + protected BackupInfo backupContext = null; + protected BackupSystemTable systemTable; + protected final Connection conn; /** * Backup manager constructor. @@ -175,11 +168,7 @@ public class BackupManager implements Closeable { */ @Override public void close() { - // currently, we shutdown now for all ongoing back handlers, we may need to do something like - // record the failed list somewhere later - if (this.pool != null) { - this.pool.shutdownNow(); - } + if (systemTable != null) { try { systemTable.close(); @@ -187,13 +176,6 @@ public class BackupManager implements Closeable { LOG.error(e); } } - if (conn != null) { - try { - conn.close(); - } catch (IOException e) { - LOG.error(e); - } - } } /** @@ -273,15 +255,6 @@ public class BackupManager implements Closeable { + ". 
Can not launch new backup until no ongoing backup remains."); throw new BackupException("There is ongoing backup."); } - - // Initialize thread pools - int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1); - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setNameFormat("BackupHandler-%1$d"); - this.pool = - new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue<Runnable>(), builder.build()); - ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); } public void setBackupContext(BackupInfo backupContext) { @@ -312,11 +285,14 @@ public class BackupManager implements Closeable { ArrayList<BackupInfo> allHistoryList = getBackupHistory(true); for (BackupInfo backup : allHistoryList) { - BackupImage image = - new BackupImage(backup.getBackupId(), backup.getType(), - backup.getTargetRootDir(), - backup.getTableNames(), backup.getStartTs(), backup - .getEndTs()); + + BackupImage.Builder builder = BackupImage.newBuilder(); + + BackupImage image = builder.withBackupId(backup.getBackupId()). + withType(backup.getType()).withRootDir(backup.getTargetRootDir()). + withTableList(backup.getTableNames()).withStartTime(backup.getStartTs()). + withCompleteTime(backup.getEndTs()).build(); + // add the full backup image as an ancestor until the last incremental backup if (backup.getType().equals(BackupType.FULL)) { // check the backup image coverage, if previous image could be covered by the newer ones, @@ -331,10 +307,9 @@ public class BackupManager implements Closeable { // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing // incremental backup if (BackupManifest.canCoverImage(ancestors, image)) { - LOG.debug("Met the backup boundary of the current table set. " - + "The root full backup images for the current backup scope:"); + LOG.debug("Met the backup boundary of the current table set:"); for (BackupImage image1 : ancestors) { - LOG.debug(" BackupId: " + image1.getBackupId() + ", Backup directory: " + LOG.debug(" BackupID=" + image1.getBackupId() + ", BackupDir=" + image1.getRootDir()); } } else { @@ -348,9 +323,10 @@ public class BackupManager implements Closeable { BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); ancestors.add(lastIncrImage); - LOG.debug("Last dependent incremental backup image information:"); - LOG.debug(" Token: " + lastIncrImage.getBackupId()); - LOG.debug(" Backup directory: " + lastIncrImage.getRootDir()); + LOG.debug("Last dependent incremental backup image: " + + "{BackupID=" + lastIncrImage.getBackupId()+"," + + "BackupDir=" + lastIncrImage.getRootDir()+"}" + ); } } } http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index c3dc539..51f3cfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; @@ -43,7 +42,7 @@ import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -66,6 +65,49 @@ public class BackupManifest { public static class BackupImage implements Comparable<BackupImage> { + static class Builder { + BackupImage image; + + Builder() { + image = new BackupImage(); + } + + Builder withBackupId(String backupId) { + image.setBackupId(backupId); + return this; + } + + Builder withType(BackupType type) { + image.setType(type); + return this; + } + + Builder withRootDir(String rootDir) { + image.setRootDir(rootDir); + return this; + } + + Builder withTableList(List<TableName> tableList) { + image.setTableList(tableList); + return this; + } + + Builder withStartTime(long startTime) { + image.setStartTs(startTime); + return this; + } + + Builder withCompleteTime(long completeTime) { + image.setCompleteTs(completeTime); + return this; + } + + BackupImage build() { + return image; + } + + } + private String backupId; private BackupType type; private String rootDir; @@ -75,11 +117,15 @@ public class BackupManifest { private ArrayList<BackupImage> ancestors; private HashMap<TableName, HashMap<String, Long>> incrTimeRanges; + static Builder newBuilder() { + return new Builder(); + } + public BackupImage() { super(); } - public BackupImage(String backupId, BackupType type, String rootDir, + private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList, long startTs, long completeTs) { this.backupId = backupId; this.type = type; @@ -127,7 +173,7 @@ public class BackupManifest { } for (TableName name: tableList) { - builder.addTableList(ProtobufUtil.toProtoTableNameShaded(name)); + builder.addTableList(ProtobufUtil.toProtoTableName(name)); } if (ancestors != null){ @@ -157,7 +203,7 @@ public class BackupManifest { } List<BackupProtos.ServerTimestamp> listSt = tst.getServerTimestampList(); for(BackupProtos.ServerTimestamp stm: listSt) { - ServerName sn = ProtobufUtil.toServerNameShaded(stm.getServer()); + ServerName sn = ProtobufUtil.toServerName(stm.getServer()); map.put(sn.getHostname() +":" + sn.getPort(), stm.getTimestamp()); } } @@ -174,7 +220,7 @@ public class BackupManifest { HashMap<String, Long> value = entry.getValue(); BackupProtos.TableServerTimestamp.Builder tstBuilder = BackupProtos.TableServerTimestamp.newBuilder(); - tstBuilder.setTable(ProtobufUtil.toProtoTableNameShaded(key)); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(key)); for (Map.Entry<String, Long> entry2 : value.entrySet()) { String s = entry2.getKey(); @@ -312,79 +358,37 @@ public class BackupManifest { } } - // hadoop hbase configuration - protected Configuration config = null; - - // backup root directory - private String rootDir = null; - // backup image directory private String tableBackupDir = null; - - // backup log directory if this is an incremental backup - private String logBackupDir = null; - - // backup token - private String backupId; - - // backup type, full or incremental - private BackupType type; - - // the table list for the backup - 
private ArrayList<TableName> tableList; - - // actual start timestamp of the backup process - private long startTs; - - // actual complete timestamp of the backup process - private long completeTs; - - // the region server timestamp for tables: - // <table, <rs, timestamp>> - private Map<TableName, HashMap<String, Long>> incrTimeRanges; - - // dependency of this backup, including all the dependent images to do PIT recovery - //private Map<String, BackupImage> dependency; private BackupImage backupImage; /** * Construct manifest for a ongoing backup. - * @param backupCtx The ongoing backup context + * @param backup The ongoing backup info */ - public BackupManifest(BackupInfo backupCtx) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); - this.loadTableList(backupCtx.getTableNames()); - this.backupImage = new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, - this.completeTs); + public BackupManifest(BackupInfo backup) { + + BackupImage.Builder builder = BackupImage.newBuilder(); + this.backupImage = builder.withBackupId(backup.getBackupId()). + withType(backup.getType()).withRootDir(backup.getTargetRootDir()). + withTableList(backup.getTableNames()).withStartTime(backup.getStartTs()). + withCompleteTime(backup.getEndTs()).build(); } /** * Construct a table level manifest for a backup of the named table. - * @param backupCtx The ongoing backup context + * @param backup The ongoing backup session info */ - public BackupManifest(BackupInfo backupCtx, TableName table) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); + public BackupManifest(BackupInfo backup, TableName table) { + this.tableBackupDir = backup.getBackupStatus(table).getTargetDir(); List<TableName> tables = new ArrayList<TableName>(); tables.add(table); - this.loadTableList(tables); - this.backupImage = new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, - this.completeTs); + BackupImage.Builder builder = BackupImage.newBuilder(); + this.backupImage = builder.withBackupId(backup.getBackupId()). + withType(backup.getType()).withRootDir(backup.getTargetRootDir()). + withTableList(tables).withStartTime(backup.getStartTs()). + withCompleteTime(backup.getEndTs()).build(); } /** @@ -413,8 +417,6 @@ public class BackupManifest { // It could be the backup log dir where there is also a manifest file stored. // This variable's purpose is to keep the correct and original location so // that we can store/persist it. - this.tableBackupDir = backupPath.toString(); - this.config = fs.getConf(); try { FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); @@ -438,23 +440,6 @@ public class BackupManifest { throw new BackupException(e); } this.backupImage = BackupImage.fromProto(proto); - // Here the parameter backupDir is where the manifest file is. 
- // There should always be a manifest file under: - // backupRootDir/namespace/table/backupId/.backup.manifest - this.rootDir = backupPath.getParent().getParent().getParent().toString(); - - Path p = backupPath.getParent(); - if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { - this.rootDir = p.getParent().toString(); - } else { - this.rootDir = p.getParent().getParent().toString(); - } - this.backupId = this.backupImage.getBackupId(); - this.startTs = this.backupImage.getStartTs(); - this.completeTs = this.backupImage.getCompleteTs(); - this.type = this.backupImage.getType(); - this.tableList = (ArrayList<TableName>)this.backupImage.getTableNames(); - this.incrTimeRanges = this.backupImage.getIncrTimeRanges(); LOG.debug("Loaded manifest instance from manifest file: " + BackupClientUtil.getPath(subFile.getPath())); return; @@ -469,39 +454,15 @@ public class BackupManifest { } public BackupType getType() { - return type; - } - - public void setType(BackupType type) { - this.type = type; - } - - /** - * Loads table list. - * @param tableList Table list - */ - private void loadTableList(List<TableName> tableList) { - - this.tableList = this.getTableList(); - if (this.tableList.size() > 0) { - this.tableList.clear(); - } - for (int i = 0; i < tableList.size(); i++) { - this.tableList.add(tableList.get(i)); - } - - LOG.debug(tableList.size() + " tables exist in table set."); + return backupImage.getType(); } /** * Get the table set of this image. * @return The table set list */ - public ArrayList<TableName> getTableList() { - if (this.tableList == null) { - this.tableList = new ArrayList<TableName>(); - } - return this.tableList; + public List<TableName> getTableList() { + return backupImage.getTableNames(); } /** @@ -512,14 +473,15 @@ public class BackupManifest { public void store(Configuration conf) throws BackupException { byte[] data = backupImage.toProto().toByteArray(); // write the file, overwrite if already exist + String logBackupDir = BackupClientUtil.getLogBackupDir(backupImage.getRootDir(), + backupImage.getBackupId()); Path manifestFilePath = - new Path(new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir)) + new Path(new Path((tableBackupDir != null ? tableBackupDir : logBackupDir)) ,MANIFEST_FILE_NAME); - try { - FSDataOutputStream out = - manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); + try ( FSDataOutputStream out = + manifestFilePath.getFileSystem(conf).create(manifestFilePath, true);) + { out.write(data); - out.close(); } catch (IOException e) { throw new BackupException(e.getMessage()); } @@ -527,7 +489,6 @@ public class BackupManifest { LOG.info("Manifest file stored to " + manifestFilePath); } - /** * Get this backup image. * @return the backup image. 
@@ -549,15 +510,11 @@ public class BackupManifest { * @param incrTimestampMap timestamp map */ public void setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> incrTimestampMap) { - this.incrTimeRanges = incrTimestampMap; this.backupImage.setIncrTimeRanges(incrTimestampMap); } public Map<TableName, HashMap<String, Long>> getIncrTimestampMap() { - if (this.incrTimeRanges == null) { - this.incrTimeRanges = new HashMap<TableName, HashMap<String, Long>>(); - } - return this.incrTimeRanges; + return backupImage.getIncrTimeRanges(); } /** @@ -697,14 +654,17 @@ public class BackupManifest { public BackupInfo toBackupInfo() { BackupInfo info = new BackupInfo(); - info.setType(type); - TableName[] tables = new TableName[tableList.size()]; - info.addTables(getTableList().toArray(tables)); - info.setBackupId(backupId); - info.setStartTs(startTs); - info.setTargetRootDir(rootDir); - if(type == BackupType.INCREMENTAL) { - info.setHlogTargetDir(logBackupDir); + info.setType(backupImage.getType()); + List<TableName> list = backupImage.getTableNames(); + TableName[] tables = new TableName[list.size()]; + info.addTables(list.toArray(tables)); + info.setBackupId(backupImage.getBackupId()); + info.setStartTs(backupImage.getStartTs()); + info.setTargetRootDir(backupImage.getRootDir()); + if(backupImage.getType() == BackupType.INCREMENTAL) { + + info.setHLogTargetDir(BackupClientUtil.getLogBackupDir(backupImage.getRootDir(), + backupImage.getBackupId())); } return info; }
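Editor's note on the BackupImage.Builder refactoring above: the patch makes the six-argument BackupImage constructor private and routes construction through a package-private Builder, and BackupManifest now delegates its state (backup id, type, root dir, table list, start/complete timestamps) to the wrapped BackupImage instead of duplicating those fields. The sketch below is illustrative only; it mirrors the builder usage shown in the new BackupManifest(BackupInfo) constructor. The accessor names on BackupInfo (getBackupId(), getType(), getTargetRootDir(), getTableNames(), getStartTs(), getEndTs()) are taken from the diff itself, while the variable names and the inline sample values are hypothetical. Note that Builder and newBuilder() are declared without a public modifier in this patch, so this usage only compiles inside the org.apache.hadoop.hbase.backup.impl package.

    // Sketch, not part of the patch: builds a BackupImage from an ongoing BackupInfo,
    // as done in BackupManifest(BackupInfo) and BackupManager.getAncestors() above.
    BackupImage.Builder builder = BackupImage.newBuilder();
    BackupImage image = builder
        .withBackupId(backup.getBackupId())          // e.g. "backup_1481144884000" (sample value)
        .withType(backup.getType())                  // BackupType.FULL or BackupType.INCREMENTAL
        .withRootDir(backup.getTargetRootDir())      // backup root directory on the target FS
        .withTableList(backup.getTableNames())       // tables covered by this image
        .withStartTime(backup.getStartTs())
        .withCompleteTime(backup.getEndTs())
        .build();

Because the builder mutates and returns a single BackupImage instance, the chained withXxx() calls can appear in any order, and fields not set simply keep their defaults; the manifest's getType(), getTableList(), and getIncrTimestampMap() now read straight through to this image.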